File: /opt/golang/1.22.0/src/go/scanner/scanner_test.go
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package scanner

import (
	"go/token"
	"os"
	"path/filepath"
	"runtime"
	"strings"
	"testing"
)

var fset = token.NewFileSet()

const /* class */ (
	special = iota
	literal
	operator
	keyword
)

func tokenclass(tok token.Token) int {
	switch {
	case tok.IsLiteral():
		return literal
	case tok.IsOperator():
		return operator
	case tok.IsKeyword():
		return keyword
	}
	return special
}

type elt struct {
	tok   token.Token
	lit   string
	class int
}

var tokens = []elt{
	// Special tokens
	{token.COMMENT, "/* a comment */", special},
	{token.COMMENT, "// a comment \n", special},
	{token.COMMENT, "/*\r*/", special},
	{token.COMMENT, "/**\r/*/", special}, // issue 11151
	{token.COMMENT, "/**\r\r/*/", special},
	{token.COMMENT, "//\r\n", special},

	// Identifiers and basic type literals
	{token.IDENT, "foobar", literal},
	{token.IDENT, "a۰۱۸", literal},
	{token.IDENT, "foo६४", literal},
	{token.IDENT, "bar9876", literal},
	{token.IDENT, "ŝ", literal},    // was bug (issue 4000)
	{token.IDENT, "ŝfoo", literal}, // was bug (issue 4000)
	{token.INT, "0", literal},
	{token.INT, "1", literal},
	{token.INT, "123456789012345678890", literal},
	{token.INT, "01234567", literal},
	{token.INT, "0xcafebabe", literal},
	{token.FLOAT, "0.", literal},
	{token.FLOAT, ".0", literal},
	{token.FLOAT, "3.14159265", literal},
	{token.FLOAT, "1e0", literal},
	{token.FLOAT, "1e+100", literal},
	{token.FLOAT, "1e-100", literal},
	{token.FLOAT, "2.71828e-1000", literal},
	{token.IMAG, "0i", literal},
	{token.IMAG, "1i", literal},
	{token.IMAG, "012345678901234567889i", literal},
	{token.IMAG, "123456789012345678890i", literal},
	{token.IMAG, "0.i", literal},
	{token.IMAG, ".0i", literal},
	{token.IMAG, "3.14159265i", literal},
	{token.IMAG, "1e0i", literal},
	{token.IMAG, "1e+100i", literal},
	{token.IMAG, "1e-100i", literal},
	{token.IMAG, "2.71828e-1000i", literal},
	{token.CHAR, "'a'", literal},
	{token.CHAR, "'\\000'", literal},
	{token.CHAR, "'\\xFF'", literal},
	{token.CHAR, "'\\uff16'", literal},
	{token.CHAR, "'\\U0000ff16'", literal},
	{token.STRING, "`foobar`", literal},
	{token.STRING, "`" + `foo
	                        bar` +
		"`",
		literal,
	},
	{token.STRING, "`\r`", literal},
	{token.STRING, "`foo\r\nbar`", literal},

	// Operators and delimiters
	{token.ADD, "+", operator},
	{token.SUB, "-", operator},
	{token.MUL, "*", operator},
	{token.QUO, "/", operator},
	{token.REM, "%", operator},

	{token.AND, "&", operator},
	{token.OR, "|", operator},
	{token.XOR, "^", operator},
	{token.SHL, "<<", operator},
	{token.SHR, ">>", operator},
	{token.AND_NOT, "&^", operator},

	{token.ADD_ASSIGN, "+=", operator},
	{token.SUB_ASSIGN, "-=", operator},
	{token.MUL_ASSIGN, "*=", operator},
	{token.QUO_ASSIGN, "/=", operator},
	{token.REM_ASSIGN, "%=", operator},

	{token.AND_ASSIGN, "&=", operator},
	{token.OR_ASSIGN, "|=", operator},
	{token.XOR_ASSIGN, "^=", operator},
	{token.SHL_ASSIGN, "<<=", operator},
	{token.SHR_ASSIGN, ">>=", operator},
	{token.AND_NOT_ASSIGN, "&^=", operator},

	{token.LAND, "&&", operator},
	{token.LOR, "||", operator},
	{token.ARROW, "<-", operator},
	{token.INC, "++", operator},
	{token.DEC, "--", operator},

	{token.EQL, "==", operator},
	{token.LSS, "<", operator},
	{token.GTR, ">", operator},
	{token.ASSIGN, "=", operator},
	{token.NOT, "!", operator},

	{token.NEQ, "!=", operator},
	{token.LEQ, "<=", operator},
	{token.GEQ, ">=", operator},
	{token.DEFINE, ":=", operator},
	{token.ELLIPSIS, "...", operator},

	{token.LPAREN, "(", operator},
	{token.LBRACK, "[", operator},
	{token.LBRACE, "{", operator},
	{token.COMMA, ",", operator},
	{token.PERIOD, ".", operator},

	{token.RPAREN, ")", operator},
	{token.RBRACK, "]", operator},
	{token.RBRACE, "}", operator},
	{token.SEMICOLON, ";", operator},
	{token.COLON, ":", operator},
	{token.TILDE, "~", operator},

	// Keywords
	{token.BREAK, "break", keyword},
	{token.CASE, "case", keyword},
	{token.CHAN, "chan", keyword},
	{token.CONST, "const", keyword},
	{token.CONTINUE, "continue", keyword},

	{token.DEFAULT, "default", keyword},
	{token.DEFER, "defer", keyword},
	{token.ELSE, "else", keyword},
	{token.FALLTHROUGH, "fallthrough", keyword},
	{token.FOR, "for", keyword},

	{token.FUNC, "func", keyword},
	{token.GO, "go", keyword},
	{token.GOTO, "goto", keyword},
	{token.IF, "if", keyword},
	{token.IMPORT, "import", keyword},

	{token.INTERFACE, "interface", keyword},
	{token.MAP, "map", keyword},
	{token.PACKAGE, "package", keyword},
	{token.RANGE, "range", keyword},
	{token.RETURN, "return", keyword},

	{token.SELECT, "select", keyword},
	{token.STRUCT, "struct", keyword},
	{token.SWITCH, "switch", keyword},
	{token.TYPE, "type", keyword},
	{token.VAR, "var", keyword},
}

const whitespace = "  \t  \n\n\n" // to separate tokens

var source = func() []byte {
	var src []byte
	for _, t := range tokens {
		src = append(src, t.lit...)
		src = append(src, whitespace...)
	}
	return src
}()

func newlineCount(s string) int {
	n := 0
	for i := 0; i < len(s); i++ {
		if s[i] == '\n' {
			n++
		}
	}
	return n
}

func checkPos(t *testing.T, lit string, p token.Pos, expected token.Position) {
	pos := fset.Position(p)
	// Check cleaned filenames so that we don't have to worry about
	// different os.PathSeparator values.
	if pos.Filename != expected.Filename && filepath.Clean(pos.Filename) != filepath.Clean(expected.Filename) {
		t.Errorf("bad filename for %q: got %s, expected %s", lit, pos.Filename, expected.Filename)
	}
	if pos.Offset != expected.Offset {
		t.Errorf("bad position for %q: got %d, expected %d", lit, pos.Offset, expected.Offset)
	}
	if pos.Line != expected.Line {
		t.Errorf("bad line for %q: got %d, expected %d", lit, pos.Line, expected.Line)
	}
	if pos.Column != expected.Column {
		t.Errorf("bad column for %q: got %d, expected %d", lit, pos.Column, expected.Column)
	}
}
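// NOTE: the sketch below is illustrative and not part of the original test
// file. It shows how a client package would drive this scanner through the
// exported go/scanner API; the tests that follow do the same with the
// package-internal names (Scanner, ScanComments, dontInsertSemis). The file
// name "example.go" and the source string are made up for illustration.
//
//	package main
//
//	import (
//		"fmt"
//		"go/scanner"
//		"go/token"
//	)
//
//	func main() {
//		src := []byte("x := 1 + 2 // sum")
//		fset := token.NewFileSet()
//		file := fset.AddFile("example.go", fset.Base(), len(src))
//		var s scanner.Scanner
//		s.Init(file, src, nil /* no error handler */, scanner.ScanComments)
//		for {
//			pos, tok, lit := s.Scan()
//			if tok == token.EOF {
//				break
//			}
//			fmt.Printf("%s\t%s\t%q\n", fset.Position(pos), tok, lit)
//		}
//	}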
// Verify that calling Scan() provides the correct results.
func TestScan(t *testing.T) {
	whitespace_linecount := newlineCount(whitespace)

	// error handler
	eh := func(_ token.Position, msg string) {
		t.Errorf("error handler called (msg = %s)", msg)
	}

	// verify scan
	var s Scanner
	s.Init(fset.AddFile("", fset.Base(), len(source)), source, eh, ScanComments|dontInsertSemis)

	// set up expected position
	epos := token.Position{
		Filename: "",
		Offset:   0,
		Line:     1,
		Column:   1,
	}

	index := 0
	for {
		pos, tok, lit := s.Scan()

		// check position
		if tok == token.EOF {
			// correction for EOF
			epos.Line = newlineCount(string(source))
			epos.Column = 2
		}
		checkPos(t, lit, pos, epos)

		// check token
		e := elt{token.EOF, "", special}
		if index < len(tokens) {
			e = tokens[index]
			index++
		}
		if tok != e.tok {
			t.Errorf("bad token for %q: got %s, expected %s", lit, tok, e.tok)
		}

		// check token class
		if tokenclass(tok) != e.class {
			t.Errorf("bad class for %q: got %d, expected %d", lit, tokenclass(tok), e.class)
		}

		// check literal
		elit := ""
		switch e.tok {
		case token.COMMENT:
			// no CRs in comments
			elit = string(stripCR([]byte(e.lit), e.lit[1] == '*'))
			//-style comment literal doesn't contain newline
			if elit[1] == '/' {
				elit = elit[0 : len(elit)-1]
			}
		case token.IDENT:
			elit = e.lit
		case token.SEMICOLON:
			elit = ";"
		default:
			if e.tok.IsLiteral() {
				// no CRs in raw string literals
				elit = e.lit
				if elit[0] == '`' {
					elit = string(stripCR([]byte(elit), false))
				}
			} else if e.tok.IsKeyword() {
				elit = e.lit
			}
		}
		if lit != elit {
			t.Errorf("bad literal for %q: got %q, expected %q", lit, lit, elit)
		}

		if tok == token.EOF {
			break
		}

		// update position
		epos.Offset += len(e.lit) + len(whitespace)
		epos.Line += newlineCount(e.lit) + whitespace_linecount
	}

	if s.ErrorCount != 0 {
		t.Errorf("found %d errors", s.ErrorCount)
	}
}

func TestStripCR(t *testing.T) {
	for _, test := range []struct{ have, want string }{
		{"//\n", "//\n"},
		{"//\r\n", "//\n"},
		{"//\r\r\r\n", "//\n"},
		{"//\r*\r/\r\n", "//*/\n"},
		{"/**/", "/**/"},
		{"/*\r/*/", "/*/*/"},
		{"/*\r*/", "/**/"},
		{"/**\r/*/", "/**\r/*/"},
		{"/*\r/\r*\r/*/", "/*/*\r/*/"},
		{"/*\r\r\r\r*/", "/**/"},
	} {
		got := string(stripCR([]byte(test.have), len(test.have) >= 2 && test.have[1] == '*'))
		if got != test.want {
			t.Errorf("stripCR(%q) = %q; want %q", test.have, got, test.want)
		}
	}
}
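// NOTE: the sketch below is illustrative and not part of the original test
// file. checkSemi (below) exercises automatic semicolon insertion: when the
// dontInsertSemis flag is absent, a newline after certain tokens
// (identifiers, literals, ++, --, ), ], }, and a few keywords such as
// return) is reported as an artificial SEMICOLON token whose literal is
// "\n" rather than ";". Roughly:
//
//	src := []byte("foo\n")
//	var s Scanner
//	s.Init(fset.AddFile("asi", fset.Base(), len(src)), src, nil, 0)
//	s.Scan()                // IDENT "foo"
//	_, tok, lit := s.Scan() // tok == token.SEMICOLON, lit == "\n"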
func checkSemi(t *testing.T, input, want string, mode Mode) {
	if mode&ScanComments == 0 {
		want = strings.ReplaceAll(want, "COMMENT ", "")
		want = strings.ReplaceAll(want, " COMMENT", "") // if at end
		want = strings.ReplaceAll(want, "COMMENT", "")  // if sole token
	}

	file := fset.AddFile("TestSemis", fset.Base(), len(input))
	var scan Scanner
	scan.Init(file, []byte(input), nil, mode)
	var tokens []string
	for {
		pos, tok, lit := scan.Scan()
		if tok == token.EOF {
			break
		}
		if tok == token.SEMICOLON && lit != ";" {
			// Artificial semicolon:
			// assert that position is EOF or that of a newline.
			off := file.Offset(pos)
			if off != len(input) && input[off] != '\n' {
				t.Errorf("scanning <<%s>>, got SEMICOLON at offset %d, want newline or EOF", input, off)
			}
		}
		lit = tok.String() // "\n" => ";"
		tokens = append(tokens, lit)
	}
	if got := strings.Join(tokens, " "); got != want {
		t.Errorf("scanning <<%s>>, got [%s], want [%s]", input, got, want)
	}
}

var semicolonTests = [...]struct{ input, want string }{
	{"", ""},
	{"\ufeff;", ";"}, // first BOM is ignored
	{";", ";"},
	{"foo\n", "IDENT ;"},
	{"123\n", "INT ;"},
	{"1.2\n", "FLOAT ;"},
	{"'x'\n", "CHAR ;"},
	{`"x"` + "\n", "STRING ;"},
	{"`x`\n", "STRING ;"},

	{"+\n", "+"},
	{"-\n", "-"},
	{"*\n", "*"},
	{"/\n", "/"},
	{"%\n", "%"},

	{"&\n", "&"},
	{"|\n", "|"},
	{"^\n", "^"},
	{"<<\n", "<<"},
	{">>\n", ">>"},
	{"&^\n", "&^"},

	{"+=\n", "+="},
	{"-=\n", "-="},
	{"*=\n", "*="},
	{"/=\n", "/="},
	{"%=\n", "%="},

	{"&=\n", "&="},
	{"|=\n", "|="},
	{"^=\n", "^="},
	{"<<=\n", "<<="},
	{">>=\n", ">>="},
	{"&^=\n", "&^="},

	{"&&\n", "&&"},
	{"||\n", "||"},
	{"<-\n", "<-"},
	{"++\n", "++ ;"},
	{"--\n", "-- ;"},

	{"==\n", "=="},
	{"<\n", "<"},
	{">\n", ">"},
	{"=\n", "="},
	{"!\n", "!"},

	{"!=\n", "!="},
	{"<=\n", "<="},
	{">=\n", ">="},
	{":=\n", ":="},
	{"...\n", "..."},

	{"(\n", "("},
	{"[\n", "["},
	{"{\n", "{"},
	{",\n", ","},
	{".\n", "."},

	{")\n", ") ;"},
	{"]\n", "] ;"},
	{"}\n", "} ;"},
	{";\n", ";"},
	{":\n", ":"},

	{"break\n", "break ;"},
	{"case\n", "case"},
	{"chan\n", "chan"},
	{"const\n", "const"},
	{"continue\n", "continue ;"},

	{"default\n", "default"},
	{"defer\n", "defer"},
	{"else\n", "else"},
	{"fallthrough\n", "fallthrough ;"},
	{"for\n", "for"},

	{"func\n", "func"},
	{"go\n", "go"},
	{"goto\n", "goto"},
	{"if\n", "if"},
	{"import\n", "import"},

	{"interface\n", "interface"},
	{"map\n", "map"},
	{"package\n", "package"},
	{"range\n", "range"},
	{"return\n", "return ;"},

	{"select\n", "select"},
	{"struct\n", "struct"},
	{"switch\n", "switch"},
	{"type\n", "type"},
	{"var\n", "var"},

	{"foo//comment\n", "IDENT COMMENT ;"},
	{"foo//comment", "IDENT COMMENT ;"},
	{"foo/*comment*/\n", "IDENT COMMENT ;"},
	{"foo/*\n*/", "IDENT COMMENT ;"},
	{"foo/*comment*/ \n", "IDENT COMMENT ;"},
	{"foo/*\n*/ ", "IDENT COMMENT ;"},

	{"foo // comment\n", "IDENT COMMENT ;"},
	{"foo // comment", "IDENT COMMENT ;"},
	{"foo /*comment*/\n", "IDENT COMMENT ;"},
	{"foo /*\n*/", "IDENT COMMENT ;"},
	{"foo /* */ /* \n */ bar/**/\n", "IDENT COMMENT COMMENT ; IDENT COMMENT ;"},
	{"foo /*0*/ /*1*/ /*2*/\n", "IDENT COMMENT COMMENT COMMENT ;"},

	{"foo /*comment*/ \n", "IDENT COMMENT ;"},
	{"foo /*0*/ /*1*/ /*2*/ \n", "IDENT COMMENT COMMENT COMMENT ;"},
	{"foo /**/ /*-------------*/ /*----\n*/bar /* \n*/baa\n", "IDENT COMMENT COMMENT COMMENT ; IDENT COMMENT ; IDENT ;"},
	{"foo /* an EOF terminates a line */", "IDENT COMMENT ;"},
	{"foo /* an EOF terminates a line */ /*", "IDENT COMMENT COMMENT ;"},
	{"foo /* an EOF terminates a line */ //", "IDENT COMMENT COMMENT ;"},

	{"package main\n\nfunc main() {\n\tif {\n\t\treturn /* */ }\n}\n", "package IDENT ; func IDENT ( ) { if { return COMMENT } ; } ;"},
	{"package main", "package IDENT ;"},
}

func TestSemicolons(t *testing.T) {
	for _, test := range semicolonTests {
		input, want := test.input, test.want
		checkSemi(t, input, want, 0)
		checkSemi(t, input, want, ScanComments)

		// if the input ended in newlines, the input must tokenize the
		// same with or without those newlines
		for i := len(input) - 1; i >= 0 && input[i] == '\n'; i-- {
			checkSemi(t, input[0:i], want, 0)
			checkSemi(t, input[0:i], want, ScanComments)
		}
	}
}
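// NOTE: this summary is illustrative and not part of the original test file.
// The segment tables below exercise line directives, which reset the file
// name, line, and optionally the column reported for subsequent tokens. The
// forms covered by these tests are:
//
//	//line filename:line
//	//line filename:line:col
//	/*line filename:line:col*/
//
// A //line comment is honored only when it starts at the beginning of a line
// (the "bad line comment" cases below are ignored for that reason) and takes
// effect on the next source line; a /*line*/ directive applies to the text
// immediately following the closing */.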
type segment struct {
	srcline      string // a line of source text
	filename     string // filename for current token; error message for invalid line directives
	line, column int    // line and column for current token; error position for invalid line directives
}

var segments = []segment{
	// exactly one token per line since the test consumes one token per segment
	{"  line1", "TestLineDirectives", 1, 3},
	{"\nline2", "TestLineDirectives", 2, 1},
	{"\nline3 //line File1.go:100", "TestLineDirectives", 3, 1}, // bad line comment, ignored
	{"\nline4", "TestLineDirectives", 4, 1},
	{"\n//line File1.go:100\n line100", "File1.go", 100, 0},
	{"\n//line  \t :42\n line1", " \t ", 42, 0},
	{"\n//line File2.go:200\n line200", "File2.go", 200, 0},
	{"\n//line foo\t:42\n line42", "foo\t", 42, 0},
	{"\n //line foo:42\n line43", "foo\t", 44, 0}, // bad line comment, ignored (use existing, prior filename)
	{"\n//line foo 42\n line44", "foo\t", 46, 0},  // bad line comment, ignored (use existing, prior filename)
	{"\n//line /bar:42\n line45", "/bar", 42, 0},
	{"\n//line ./foo:42\n line46", "foo", 42, 0},
	{"\n//line a/b/c/File1.go:100\n line100", "a/b/c/File1.go", 100, 0},
	{"\n//line c:\\bar:42\n line200", "c:\\bar", 42, 0},
	{"\n//line c:\\dir\\File1.go:100\n line201", "c:\\dir\\File1.go", 100, 0},

	// tests for new line directive syntax
	{"\n//line :100\na1", "", 100, 0}, // missing filename means empty filename
	{"\n//line bar:100\nb1", "bar", 100, 0},
	{"\n//line :100:10\nc1", "bar", 100, 10}, // missing filename means current filename
	{"\n//line foo:100:10\nd1", "foo", 100, 10},
	{"\n/*line :100*/a2", "", 100, 0}, // missing filename means empty filename
	{"\n/*line bar:100*/b2", "bar", 100, 0},
	{"\n/*line :100:10*/c2", "bar", 100, 10}, // missing filename means current filename
	{"\n/*line foo:100:10*/d2", "foo", 100, 10},
	{"\n/*line foo:100:10*/    e2", "foo", 100, 14}, // line-directive relative column
	{"\n/*line foo:100:10*/\n\nf2", "foo", 102, 1},  // absolute column since on new line
}

var dirsegments = []segment{
	// exactly one token per line since the test consumes one token per segment
	{"  line1", "TestLineDir/TestLineDirectives", 1, 3},
	{"\n//line File1.go:100\n line100", "TestLineDir/File1.go", 100, 0},
}

var dirUnixSegments = []segment{
	{"\n//line /bar:42\n line42", "/bar", 42, 0},
}

var dirWindowsSegments = []segment{
	{"\n//line c:\\bar:42\n line42", "c:\\bar", 42, 0},
}

// Verify that line directives are interpreted correctly.
func TestLineDirectives(t *testing.T) {
	testSegments(t, segments, "TestLineDirectives")
	testSegments(t, dirsegments, "TestLineDir/TestLineDirectives")
	if runtime.GOOS == "windows" {
		testSegments(t, dirWindowsSegments, "TestLineDir/TestLineDirectives")
	} else {
		testSegments(t, dirUnixSegments, "TestLineDir/TestLineDirectives")
	}
}

func testSegments(t *testing.T, segments []segment, filename string) {
	var src string
	for _, e := range segments {
		src += e.srcline
	}

	// verify scan
	var S Scanner
	file := fset.AddFile(filename, fset.Base(), len(src))
	S.Init(file, []byte(src), func(pos token.Position, msg string) { t.Error(Error{pos, msg}) }, dontInsertSemis)
	for _, s := range segments {
		p, _, lit := S.Scan()
		pos := file.Position(p)
		checkPos(t, lit, p, token.Position{
			Filename: s.filename,
			Offset:   pos.Offset,
			Line:     s.line,
			Column:   s.column,
		})
	}

	if S.ErrorCount != 0 {
		t.Errorf("got %d errors", S.ErrorCount)
	}
}

// The filename is used for the error message in these test cases.
// The first line directive is valid and used to control the expected error line.
var invalidSegments = []segment{
	{"\n//line :1:1\n//line foo:42 extra text\ndummy", "invalid line number: 42 extra text", 1, 12},
	{"\n//line :2:1\n//line foobar:\ndummy", "invalid line number: ", 2, 15},
	{"\n//line :5:1\n//line :0\ndummy", "invalid line number: 0", 5, 9},
	{"\n//line :10:1\n//line :1:0\ndummy", "invalid column number: 0", 10, 11},
	{"\n//line :1:1\n//line :foo:0\ndummy", "invalid line number: 0", 1, 13}, // foo is considered part of the filename
}

// Verify that invalid line directives get the correct error message.
func TestInvalidLineDirectives(t *testing.T) {
	// make source
	var src string
	for _, e := range invalidSegments {
		src += e.srcline
	}

	// verify scan
	var S Scanner
	var s segment // current segment
	file := fset.AddFile(filepath.Join("dir", "TestInvalidLineDirectives"), fset.Base(), len(src))
	S.Init(file, []byte(src), func(pos token.Position, msg string) {
		if msg != s.filename {
			t.Errorf("got error %q; want %q", msg, s.filename)
		}
		if pos.Line != s.line || pos.Column != s.column {
			t.Errorf("got position %d:%d; want %d:%d", pos.Line, pos.Column, s.line, s.column)
		}
	}, dontInsertSemis)

	for _, s = range invalidSegments {
		S.Scan()
	}

	if S.ErrorCount != len(invalidSegments) {
		t.Errorf("got %d errors; want %d", S.ErrorCount, len(invalidSegments))
	}
}

// Verify that initializing the same scanner more than once works correctly.
func TestInit(t *testing.T) {
	var s Scanner

	// 1st init
	src1 := "if true { }"
	f1 := fset.AddFile("src1", fset.Base(), len(src1))
	s.Init(f1, []byte(src1), nil, dontInsertSemis)
	if f1.Size() != len(src1) {
		t.Errorf("bad file size: got %d, expected %d", f1.Size(), len(src1))
	}
	s.Scan()              // if
	s.Scan()              // true
	_, tok, _ := s.Scan() // {
	if tok != token.LBRACE {
		t.Errorf("bad token: got %s, expected %s", tok, token.LBRACE)
	}

	// 2nd init
	src2 := "go true { ]"
	f2 := fset.AddFile("src2", fset.Base(), len(src2))
	s.Init(f2, []byte(src2), nil, dontInsertSemis)
	if f2.Size() != len(src2) {
		t.Errorf("bad file size: got %d, expected %d", f2.Size(), len(src2))
	}
	_, tok, _ = s.Scan() // go
	if tok != token.GO {
		t.Errorf("bad token: got %s, expected %s", tok, token.GO)
	}

	if s.ErrorCount != 0 {
		t.Errorf("found %d errors", s.ErrorCount)
	}
}

func TestStdErrorHandler(t *testing.T) {
	const src = "@\n" + // illegal character, cause an error
		"@ @\n" + // two errors on the same line
		"//line File2:20\n" +
		"@\n" + // different file, but same line
		"//line File2:1\n" +
		"@ @\n" + // same file, decreasing line number
		"//line File1:1\n" +
		"@ @ @" // original file, line 1 again

	var list ErrorList
	eh := func(pos token.Position, msg string) { list.Add(pos, msg) }

	var s Scanner
	s.Init(fset.AddFile("File1", fset.Base(), len(src)), []byte(src), eh, dontInsertSemis)
	for {
		if _, tok, _ := s.Scan(); tok == token.EOF {
			break
		}
	}

	if len(list) != s.ErrorCount {
		t.Errorf("found %d errors, expected %d", len(list), s.ErrorCount)
	}

	if len(list) != 9 {
		t.Errorf("found %d raw errors, expected 9", len(list))
		PrintError(os.Stderr, list)
	}

	list.Sort()
	if len(list) != 9 {
		t.Errorf("found %d sorted errors, expected 9", len(list))
		PrintError(os.Stderr, list)
	}

	list.RemoveMultiples()
	if len(list) != 4 {
		t.Errorf("found %d one-per-line errors, expected 4", len(list))
		PrintError(os.Stderr, list)
	}
}

type errorCollector struct {
	cnt int            // number of errors encountered
	msg string         // last error message encountered
	pos token.Position // last error position encountered
}

func checkError(t *testing.T, src string, tok token.Token, pos int, lit, err string) {
	var s Scanner
	var h errorCollector
	eh := func(pos token.Position, msg string) {
		h.cnt++
		h.msg = msg
		h.pos = pos
	}
	s.Init(fset.AddFile("", fset.Base(), len(src)), []byte(src), eh, ScanComments|dontInsertSemis)
	_, tok0, lit0 := s.Scan()
	if tok0 != tok {
		t.Errorf("%q: got %s, expected %s", src, tok0, tok)
	}
	if tok0 != token.ILLEGAL && lit0 != lit {
		t.Errorf("%q: got literal %q, expected %q", src, lit0, lit)
	}
	cnt := 0
	if err != "" {
		cnt = 1
	}
	if h.cnt != cnt {
		t.Errorf("%q: got cnt %d, expected %d", src, h.cnt, cnt)
	}
	if h.msg != err {
		t.Errorf("%q: got msg %q, expected %q", src, h.msg, err)
	}
	if h.pos.Offset != pos {
		t.Errorf("%q: got offset %d, expected %d", src, h.pos.Offset, pos)
	}
}

var errors = []struct {
	src string
	tok token.Token
	pos int
	lit string
	err string
}{
	{"\a", token.ILLEGAL, 0, "", "illegal character U+0007"},
	{`#`, token.ILLEGAL, 0, "", "illegal character U+0023 '#'"},
	{`…`, token.ILLEGAL, 0, "", "illegal character U+2026 '…'"},
	{"..", token.PERIOD, 0, "", ""}, // two periods, not invalid token (issue #28112)
	{`' '`, token.CHAR, 0, `' '`, ""},
	{`''`, token.CHAR, 0, `''`, "illegal rune literal"},
	{`'12'`, token.CHAR, 0, `'12'`, "illegal rune literal"},
	{`'123'`, token.CHAR, 0, `'123'`, "illegal rune literal"},
	{`'\0'`, token.CHAR, 3, `'\0'`, "illegal character U+0027 ''' in escape sequence"},
	{`'\07'`, token.CHAR, 4, `'\07'`, "illegal character U+0027 ''' in escape sequence"},
	{`'\8'`, token.CHAR, 2, `'\8'`, "unknown escape sequence"},
	{`'\08'`, token.CHAR, 3, `'\08'`, "illegal character U+0038 '8' in escape sequence"},
	{`'\x'`, token.CHAR, 3, `'\x'`, "illegal character U+0027 ''' in escape sequence"},
	{`'\x0'`, token.CHAR, 4, `'\x0'`, "illegal character U+0027 ''' in escape sequence"},
	{`'\x0g'`, token.CHAR, 4, `'\x0g'`, "illegal character U+0067 'g' in escape sequence"},
	{`'\u'`, token.CHAR, 3, `'\u'`, "illegal character U+0027 ''' in escape sequence"},
	{`'\u0'`, token.CHAR, 4, `'\u0'`, "illegal character U+0027 ''' in escape sequence"},
	{`'\u00'`, token.CHAR, 5, `'\u00'`, "illegal character U+0027 ''' in escape sequence"},
	{`'\u000'`, token.CHAR, 6, `'\u000'`, "illegal character U+0027 ''' in escape sequence"},
	{`'\u000`, token.CHAR, 6, `'\u000`, "escape sequence not terminated"},
	{`'\u0000'`, token.CHAR, 0, `'\u0000'`, ""},
	{`'\U'`, token.CHAR, 3, `'\U'`, "illegal character U+0027 ''' in escape sequence"},
	{`'\U0'`, token.CHAR, 4, `'\U0'`, "illegal character U+0027 ''' in escape sequence"},
	{`'\U00'`, token.CHAR, 5, `'\U00'`, "illegal character U+0027 ''' in escape sequence"},
	{`'\U000'`, token.CHAR, 6, `'\U000'`, "illegal character U+0027 ''' in escape sequence"},
	{`'\U0000'`, token.CHAR, 7, `'\U0000'`, "illegal character U+0027 ''' in escape sequence"},
	{`'\U00000'`, token.CHAR, 8, `'\U00000'`, "illegal character U+0027 ''' in escape sequence"},
	{`'\U000000'`, token.CHAR, 9, `'\U000000'`, "illegal character U+0027 ''' in escape sequence"},
	{`'\U0000000'`, token.CHAR, 10, `'\U0000000'`, "illegal character U+0027 ''' in escape sequence"},
	{`'\U0000000`, token.CHAR, 10, `'\U0000000`, "escape sequence not terminated"},
	{`'\U00000000'`, token.CHAR, 0, `'\U00000000'`, ""},
	{`'\Uffffffff'`, token.CHAR, 2, `'\Uffffffff'`, "escape sequence is invalid Unicode code point"},
	{`'`, token.CHAR, 0, `'`, "rune literal not terminated"},
	{`'\`, token.CHAR, 2, `'\`, "escape sequence not terminated"},
	{"'\n", token.CHAR, 0, "'", "rune literal not terminated"},
	{"'\n ", token.CHAR, 0, "'", "rune literal not terminated"},
	{`""`, token.STRING, 0, `""`, ""},
	{`"abc`, token.STRING, 0, `"abc`, "string literal not terminated"},
	{"\"abc\n", token.STRING, 0, `"abc`, "string literal not terminated"},
	{"\"abc\n ", token.STRING, 0, `"abc`, "string literal not terminated"},
	{"``", token.STRING, 0, "``", ""},
	{"`", token.STRING, 0, "`", "raw string literal not terminated"},
	{"/**/", token.COMMENT, 0, "/**/", ""},
	{"/*", token.COMMENT, 0, "/*", "comment not terminated"},
	{"077", token.INT, 0, "077", ""},
	{"078.", token.FLOAT, 0, "078.", ""},
	{"07801234567.", token.FLOAT, 0, "07801234567.", ""},
	{"078e0", token.FLOAT, 0, "078e0", ""},
	{"0E", token.FLOAT, 2, "0E", "exponent has no digits"}, // issue 17621
	{"078", token.INT, 2, "078", "invalid digit '8' in octal literal"},
	{"07090000008", token.INT, 3, "07090000008", "invalid digit '9' in octal literal"},
	{"0x", token.INT, 2, "0x", "hexadecimal literal has no digits"},
	{"\"abc\x00def\"", token.STRING, 4, "\"abc\x00def\"", "illegal character NUL"},
	{"\"abc\x80def\"", token.STRING, 4, "\"abc\x80def\"", "illegal UTF-8 encoding"},
	{"\ufeff\ufeff", token.ILLEGAL, 3, "\ufeff\ufeff", "illegal byte order mark"},                        // only first BOM is ignored
	{"//\ufeff", token.COMMENT, 2, "//\ufeff", "illegal byte order mark"},                                // only first BOM is ignored
	{"'\ufeff" + `'`, token.CHAR, 1, "'\ufeff" + `'`, "illegal byte order mark"},                         // only first BOM is ignored
	{`"` + "abc\ufeffdef" + `"`, token.STRING, 4, `"` + "abc\ufeffdef" + `"`, "illegal byte order mark"}, // only first BOM is ignored
	{"abc\x00def", token.IDENT, 3, "abc", "illegal character NUL"},
	{"abc\x00", token.IDENT, 3, "abc", "illegal character NUL"},
	{"“abc”", token.ILLEGAL, 0, "abc", `curly quotation mark '“' (use neutral '"')`},
}

func TestScanErrors(t *testing.T) {
	for _, e := range errors {
		checkError(t, e.src, e.tok, e.pos, e.lit, e.err)
	}
}

// Verify that no comments show up as literal values when skipping comments.
func TestIssue10213(t *testing.T) {
	const src = `
		var (
			A = 1 // foo
		)

		var (
			B = 2
			// foo
		)

		var C = 3 // foo

		var D = 4
		// foo

		func anycode() {
		// foo
		}
	`
	var s Scanner
	s.Init(fset.AddFile("", fset.Base(), len(src)), []byte(src), nil, 0)
	for {
		pos, tok, lit := s.Scan()
		class := tokenclass(tok)
		if lit != "" && class != keyword && class != literal && tok != token.SEMICOLON {
			t.Errorf("%s: tok = %s, lit = %q", fset.Position(pos), tok, lit)
		}
		if tok <= token.EOF {
			break
		}
	}
}

func TestIssue28112(t *testing.T) {
	const src = "... .. 0.. .." // make sure to have stand-alone ".." immediately before EOF to test EOF behavior
	tokens := []token.Token{token.ELLIPSIS, token.PERIOD, token.PERIOD, token.FLOAT, token.PERIOD, token.PERIOD, token.PERIOD, token.EOF}
	var s Scanner
	s.Init(fset.AddFile("", fset.Base(), len(src)), []byte(src), nil, 0)
	for _, want := range tokens {
		pos, got, lit := s.Scan()
		if got != want {
			t.Errorf("%s: got %s, want %s", fset.Position(pos), got, want)
		}
		// literals expect to have a (non-empty) literal string and we don't care about other tokens for this test
		if tokenclass(got) == literal && lit == "" {
			t.Errorf("%s: for %s got empty literal string", fset.Position(pos), got)
		}
	}
}

func BenchmarkScan(b *testing.B) {
	b.StopTimer()
	fset := token.NewFileSet()
	file := fset.AddFile("", fset.Base(), len(source))
	var s Scanner
	b.StartTimer()
	for i := 0; i < b.N; i++ {
		s.Init(file, source, nil, ScanComments)
		for {
			_, tok, _ := s.Scan()
			if tok == token.EOF {
				break
			}
		}
	}
}
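// NOTE: this note is illustrative and not part of the original test file.
// The benchmarks here can be run from this directory with something like:
//
//	go test -run='^$' -bench=.
//
// BenchmarkScanFiles (below) reads sibling packages via relative paths
// (filepath.Join("..", "..", ...)), so it assumes it is run from within a
// checked-out Go source tree, i.e. from GOROOT/src/go/scanner.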
func BenchmarkScanFiles(b *testing.B) {
	// Scan a few arbitrary large files, and one small one, to provide some
	// variety in benchmarks.
	for _, p := range []string{
		"go/types/expr.go",
		"go/parser/parser.go",
		"net/http/server.go",
		"go/scanner/errors.go",
	} {
		b.Run(p, func(b *testing.B) {
			b.StopTimer()
			filename := filepath.Join("..", "..", filepath.FromSlash(p))
			src, err := os.ReadFile(filename)
			if err != nil {
				b.Fatal(err)
			}
			fset := token.NewFileSet()
			file := fset.AddFile(filename, fset.Base(), len(src))
			b.SetBytes(int64(len(src)))
			var s Scanner
			b.StartTimer()
			for i := 0; i < b.N; i++ {
				s.Init(file, src, nil, ScanComments)
				for {
					_, tok, _ := s.Scan()
					if tok == token.EOF {
						break
					}
				}
			}
		})
	}
}

func TestNumbers(t *testing.T) {
	for _, test := range []struct {
		tok              token.Token
		src, tokens, err string
	}{
		// binaries
		{token.INT, "0b0", "0b0", ""},
		{token.INT, "0b1010", "0b1010", ""},
		{token.INT, "0B1110", "0B1110", ""},

		{token.INT, "0b", "0b", "binary literal has no digits"},
		{token.INT, "0b0190", "0b0190", "invalid digit '9' in binary literal"},
		{token.INT, "0b01a0", "0b01 a0", ""}, // only accept 0-9

		{token.FLOAT, "0b.", "0b.", "invalid radix point in binary literal"},
		{token.FLOAT, "0b.1", "0b.1", "invalid radix point in binary literal"},
		{token.FLOAT, "0b1.0", "0b1.0", "invalid radix point in binary literal"},

		{token.FLOAT, "0b1e10", "0b1e10", "'e' exponent requires decimal mantissa"},
		{token.FLOAT, "0b1P-1", "0b1P-1", "'P' exponent requires hexadecimal mantissa"},

		{token.IMAG, "0b10i", "0b10i", ""},
		{token.IMAG, "0b10.0i", "0b10.0i", "invalid radix point in binary literal"},

		// octals
		{token.INT, "0o0", "0o0", ""},
		{token.INT, "0o1234", "0o1234", ""},
		{token.INT, "0O1234", "0O1234", ""},

		{token.INT, "0o", "0o", "octal literal has no digits"},
		{token.INT, "0o8123", "0o8123", "invalid digit '8' in octal literal"},
		{token.INT, "0o1293", "0o1293", "invalid digit '9' in octal literal"},
		{token.INT, "0o12a3", "0o12 a3", ""}, // only accept 0-9

		{token.FLOAT, "0o.", "0o.", "invalid radix point in octal literal"},
		{token.FLOAT, "0o.2", "0o.2", "invalid radix point in octal literal"},
		{token.FLOAT, "0o1.2", "0o1.2", "invalid radix point in octal literal"},

		{token.FLOAT, "0o1E+2", "0o1E+2", "'E' exponent requires decimal mantissa"},
		{token.FLOAT, "0o1p10", "0o1p10", "'p' exponent requires hexadecimal mantissa"},

		{token.IMAG, "0o10i", "0o10i", ""},
		{token.IMAG, "0o10e0i", "0o10e0i", "'e' exponent requires decimal mantissa"},

		// 0-octals
		{token.INT, "0", "0", ""},
		{token.INT, "0123", "0123", ""},

		{token.INT, "08123", "08123", "invalid digit '8' in octal literal"},
		{token.INT, "01293", "01293", "invalid digit '9' in octal literal"},
		{token.INT, "0F.", "0 F .", ""}, // only accept 0-9
		{token.INT, "0123F.", "0123 F .", ""},
		{token.INT, "0123456x", "0123456 x", ""},

		// decimals
		{token.INT, "1", "1", ""},
		{token.INT, "1234", "1234", ""},

		{token.INT, "1f", "1 f", ""}, // only accept 0-9

		{token.IMAG, "0i", "0i", ""},
		{token.IMAG, "0678i", "0678i", ""},

		// decimal floats
		{token.FLOAT, "0.", "0.", ""},
		{token.FLOAT, "123.", "123.", ""},
		{token.FLOAT, "0123.", "0123.", ""},

		{token.FLOAT, ".0", ".0", ""},
		{token.FLOAT, ".123", ".123", ""},
		{token.FLOAT, ".0123", ".0123", ""},

		{token.FLOAT, "0.0", "0.0", ""},
		{token.FLOAT, "123.123", "123.123", ""},
		{token.FLOAT, "0123.0123", "0123.0123", ""},

		{token.FLOAT, "0e0", "0e0", ""},
		{token.FLOAT, "123e+0", "123e+0", ""},
		{token.FLOAT, "0123E-1", "0123E-1", ""},

		{token.FLOAT, "0.e+1", "0.e+1", ""},
		{token.FLOAT, "123.E-10", "123.E-10", ""},
		{token.FLOAT, "0123.e123", "0123.e123", ""},

		{token.FLOAT, ".0e-1", ".0e-1", ""},
		{token.FLOAT, ".123E+10", ".123E+10", ""},
		{token.FLOAT, ".0123E123", ".0123E123", ""},
{token.FLOAT, "0.0e1", "0.0e1", ""}, {token.FLOAT, "123.123E-10", "123.123E-10", ""}, {token.FLOAT, "0123.0123e+456", "0123.0123e+456", ""}, {token.FLOAT, "0e", "0e", "exponent has no digits"}, {token.FLOAT, "0E+", "0E+", "exponent has no digits"}, {token.FLOAT, "1e+f", "1e+ f", "exponent has no digits"}, {token.FLOAT, "0p0", "0p0", "'p' exponent requires hexadecimal mantissa"}, {token.FLOAT, "1.0P-1", "1.0P-1", "'P' exponent requires hexadecimal mantissa"}, {token.IMAG, "0.i", "0.i", ""}, {token.IMAG, ".123i", ".123i", ""}, {token.IMAG, "123.123i", "123.123i", ""}, {token.IMAG, "123e+0i", "123e+0i", ""}, {token.IMAG, "123.E-10i", "123.E-10i", ""}, {token.IMAG, ".123E+10i", ".123E+10i", ""}, // hexadecimals {token.INT, "0x0", "0x0", ""}, {token.INT, "0x1234", "0x1234", ""}, {token.INT, "0xcafef00d", "0xcafef00d", ""}, {token.INT, "0XCAFEF00D", "0XCAFEF00D", ""}, {token.INT, "0x", "0x", "hexadecimal literal has no digits"}, {token.INT, "0x1g", "0x1 g", ""}, {token.IMAG, "0xf00i", "0xf00i", ""}, // hexadecimal floats {token.FLOAT, "0x0p0", "0x0p0", ""}, {token.FLOAT, "0x12efp-123", "0x12efp-123", ""}, {token.FLOAT, "0xABCD.p+0", "0xABCD.p+0", ""}, {token.FLOAT, "0x.0189P-0", "0x.0189P-0", ""}, {token.FLOAT, "0x1.ffffp+1023", "0x1.ffffp+1023", ""}, {token.FLOAT, "0x.", "0x.", "hexadecimal literal has no digits"}, {token.FLOAT, "0x0.", "0x0.", "hexadecimal mantissa requires a 'p' exponent"}, {token.FLOAT, "0x.0", "0x.0", "hexadecimal mantissa requires a 'p' exponent"}, {token.FLOAT, "0x1.1", "0x1.1", "hexadecimal mantissa requires a 'p' exponent"}, {token.FLOAT, "0x1.1e0", "0x1.1e0", "hexadecimal mantissa requires a 'p' exponent"}, {token.FLOAT, "0x1.2gp1a", "0x1.2 gp1a", "hexadecimal mantissa requires a 'p' exponent"}, {token.FLOAT, "0x0p", "0x0p", "exponent has no digits"}, {token.FLOAT, "0xeP-", "0xeP-", "exponent has no digits"}, {token.FLOAT, "0x1234PAB", "0x1234P AB", "exponent has no digits"}, {token.FLOAT, "0x1.2p1a", "0x1.2p1 a", ""}, {token.IMAG, "0xf00.bap+12i", "0xf00.bap+12i", ""}, // separators {token.INT, "0b_1000_0001", "0b_1000_0001", ""}, {token.INT, "0o_600", "0o_600", ""}, {token.INT, "0_466", "0_466", ""}, {token.INT, "1_000", "1_000", ""}, {token.FLOAT, "1_000.000_1", "1_000.000_1", ""}, {token.IMAG, "10e+1_2_3i", "10e+1_2_3i", ""}, {token.INT, "0x_f00d", "0x_f00d", ""}, {token.FLOAT, "0x_f00d.0p1_2", "0x_f00d.0p1_2", ""}, {token.INT, "0b__1000", "0b__1000", "'_' must separate successive digits"}, {token.INT, "0o60___0", "0o60___0", "'_' must separate successive digits"}, {token.INT, "0466_", "0466_", "'_' must separate successive digits"}, {token.FLOAT, "1_.", "1_.", "'_' must separate successive digits"}, {token.FLOAT, "0._1", "0._1", "'_' must separate successive digits"}, {token.FLOAT, "2.7_e0", "2.7_e0", "'_' must separate successive digits"}, {token.IMAG, "10e+12_i", "10e+12_i", "'_' must separate successive digits"}, {token.INT, "0x___0", "0x___0", "'_' must separate successive digits"}, {token.FLOAT, "0x1.0_p0", "0x1.0_p0", "'_' must separate successive digits"}, } { var s Scanner var err string s.Init(fset.AddFile("", fset.Base(), len(test.src)), []byte(test.src), func(_ token.Position, msg string) { if err == "" { err = msg } }, 0) for i, want := range strings.Split(test.tokens, " ") { err = "" _, tok, lit := s.Scan() // compute lit where for tokens where lit is not defined switch tok { case token.PERIOD: lit = "." 
			case token.ADD:
				lit = "+"
			case token.SUB:
				lit = "-"
			}

			if i == 0 {
				if tok != test.tok {
					t.Errorf("%q: got token %s; want %s", test.src, tok, test.tok)
				}
				if err != test.err {
					t.Errorf("%q: got error %q; want %q", test.src, err, test.err)
				}
			}

			if lit != want {
				t.Errorf("%q: got literal %q (%s); want %s", test.src, lit, tok, want)
			}
		}

		// make sure we read all
		_, tok, _ := s.Scan()
		if tok == token.SEMICOLON {
			_, tok, _ = s.Scan()
		}
		if tok != token.EOF {
			t.Errorf("%q: got %s; want EOF", test.src, tok)
		}
	}
}
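// NOTE: this sketch is illustrative and not part of the original test file.
// The two-token expectations in TestNumbers ("0b01 a0", "0x1234P AB", ...)
// encode the scanner's recovery behavior: a decimal digit of the wrong base
// (the '9' in "0b0190") is consumed and reported as an error, while a letter
// that cannot continue the literal ends it, and the next Scan call starts a
// fresh token. Roughly:
//
//	src := []byte("0b01a0")
//	var s Scanner
//	s.Init(fset.AddFile("num", fset.Base(), len(src)), src, nil, 0)
//	_, tok, lit := s.Scan() // tok == token.INT, lit == "0b01"
//	_, tok, lit = s.Scan()  // tok == token.IDENT, lit == "a0"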