/*
 * Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.  Oracle designates this
 * particular file as subject to the "Classpath" exception as provided
 * by Oracle in the LICENSE file that accompanied this code.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

package com.sun.javafx.css.parser;

import java.io.CharArrayReader;
import java.io.Reader;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Objects;

import org.junit.Test;

import static org.junit.Assert.assertEquals;

/**
 * Unit tests for {@link CSSLexer}: tokenization of numbers with units,
 * token text, and the line/offset bookkeeping of each produced token.
 */
public class CSSLexerTest {

    public CSSLexerTest() {
    }

    /**
     * Asserts that {@code resultTokens} matches {@code expectedTokens} in
     * length, token type, and token text. Throws {@link org.junit.ComparisonFailure}
     * (an unchecked AssertionError subclass) on the first mismatch.
     */
    private void checkTokens(List<Token> resultTokens, Token... expectedTokens) {

        if (expectedTokens.length != resultTokens.size()) {
            throw new org.junit.ComparisonFailure(
                "lengths do not match",
                Arrays.toString(expectedTokens),
                resultTokens.toString()
            );
        }

        for (int n = 0; n < expectedTokens.length; n++) {

            final Token result = resultTokens.get(n);
            final Token expected = expectedTokens[n];

            if (expected.getType() != result.getType()) {
                throw new org.junit.ComparisonFailure(
                    "token " + n + " types do not match",
                    Arrays.toString(expectedTokens),
                    resultTokens.toString()
                );
            }

            // Null-safe comparison of token text (either side may be null).
            if (!Objects.equals(expected.getText(), result.getText())) {
                throw new org.junit.ComparisonFailure(
                    "token " + n + " text does not match",
                    Arrays.toString(expectedTokens),
                    resultTokens.toString()
                );
            }
        }
    }

    /**
     * Asserts, for every token, that line and offset match the corresponding
     * expected token. Factored out of the individual tests, which previously
     * repeated this loop verbatim.
     */
    private void checkTokenLinesAndOffsets(List<Token> resultTokens, Token[] expectedTokens) {
        for (int n = 0; n < resultTokens.size(); n++) {
            final Token tok = resultTokens.get(n);
            assertEquals("bad line. tok=" + tok, expectedTokens[n].getLine(), tok.getLine());
            assertEquals("bad offset. tok=" + tok, expectedTokens[n].getOffset(), tok.getOffset());
        }
    }

    /**
     * Runs the lexer over {@code string} and returns every token produced,
     * including the terminating EOF token, as an unmodifiable list.
     */
    List<Token> getTokens(String string) {

        final Reader reader = new CharArrayReader(string.toCharArray());
        final CSSLexer lexer = CSSLexer.getInstance();
        lexer.setReader(reader);

        final List<Token> tokens = new ArrayList<Token>();

        Token token = null;
        do {
            token = lexer.nextToken();
            tokens.add(token);
        } while (token.getType() != Token.EOF);

        return Collections.unmodifiableList(tokens);
    }

    /**
     * Checks that the digit patterns 123, 123.45, .45 (with optional +/- sign)
     * followed by {@code units} lex to a single token of the given {@code type}.
     */
    private void lexDigitsWithUnits(String units, int type) {

        checkTokens(getTokens("123" + units), new Token(type, "123" + units), Token.EOF_TOKEN);
        checkTokens(getTokens("123.45" + units), new Token(type, "123.45" + units), Token.EOF_TOKEN);
        checkTokens(getTokens(".45" + units), new Token(type, ".45" + units), Token.EOF_TOKEN);
        checkTokens(getTokens("-123" + units), new Token(type, "-123" + units), Token.EOF_TOKEN);
        checkTokens(getTokens("-.45" + units), new Token(type, "-.45" + units), Token.EOF_TOKEN);
        checkTokens(getTokens("+123" + units), new Token(type, "+123" + units), Token.EOF_TOKEN);
        checkTokens(getTokens("+.45" + units), new Token(type, "+.45" + units), Token.EOF_TOKEN);
    }

    @Test
    public void testLexValidDigits() {
        lexDigitsWithUnits("", CSSLexer.NUMBER);
    }

    @Test
    public void testLexValidDigitsWithCM() {
        lexDigitsWithUnits("cm", CSSLexer.CM);
        // case should be ignored
        lexDigitsWithUnits("cM", CSSLexer.CM);
    }

    @Test
    public void testLexValidDigitsWithDEG() {
        lexDigitsWithUnits("deg", CSSLexer.DEG);
        // case should be ignored
        lexDigitsWithUnits("dEg", CSSLexer.DEG);
    }

    @Test
    public void testLexValidDigitsWithEM() {
        lexDigitsWithUnits("em", CSSLexer.EMS);
        // case should be ignored
        lexDigitsWithUnits("Em", CSSLexer.EMS);
    }

    @Test
    public void testLexValidDigitsWithEX() {
        lexDigitsWithUnits("ex", CSSLexer.EXS);
        // case should be ignored
        lexDigitsWithUnits("Ex", CSSLexer.EXS);
    }

    @Test
    public void testLexValidDigitsWithGRAD() {
        lexDigitsWithUnits("grad", CSSLexer.GRAD);
        // case should be ignored
        lexDigitsWithUnits("gRad", CSSLexer.GRAD);
    }

    @Test
    public void testLexValidDigitsWithIN() {
        lexDigitsWithUnits("in", CSSLexer.IN);
        // case should be ignored
        lexDigitsWithUnits("In", CSSLexer.IN);
    }

    @Test
    public void testLexValidDigitsWithMM() {
        lexDigitsWithUnits("mm", CSSLexer.MM);
        // case should be ignored
        lexDigitsWithUnits("mM", CSSLexer.MM);
    }

    @Test
    public void testLexValidDigitsWithPC() {
        lexDigitsWithUnits("pc", CSSLexer.PC);
        // case should be ignored
        lexDigitsWithUnits("Pc", CSSLexer.PC);
    }

    @Test
    public void testLexValidDigitsWithPT() {
        lexDigitsWithUnits("pt", CSSLexer.PT);
        // case should be ignored
        lexDigitsWithUnits("PT", CSSLexer.PT);
    }

    @Test
    public void testLexValidDigitsWithPX() {
        lexDigitsWithUnits("px", CSSLexer.PX);
        // case should be ignored
        lexDigitsWithUnits("Px", CSSLexer.PX);
    }

    @Test
    public void testLexValidDigitsWithRAD() {
        lexDigitsWithUnits("rad", CSSLexer.RAD);
        // case should be ignored
        lexDigitsWithUnits("RaD", CSSLexer.RAD);
    }

    @Test
    public void testLexValidDigitsWithTURN() {
        lexDigitsWithUnits("turn", CSSLexer.TURN);
        // case should be ignored
        lexDigitsWithUnits("TurN", CSSLexer.TURN);
    }

    @Test
    public void testLexValidDigitsWithS() {
        lexDigitsWithUnits("s", CSSLexer.SECONDS);
        // case should be ignored
        lexDigitsWithUnits("S", CSSLexer.SECONDS);
    }

    @Test
    public void testLexValidDigitsWithMS() {
        lexDigitsWithUnits("ms", CSSLexer.MS);
        // case should be ignored
        lexDigitsWithUnits("mS", CSSLexer.MS);
    }

    @Test
    public void testLexValidDigitsWithPCT() {
        lexDigitsWithUnits("%", CSSLexer.PERCENTAGE);
    }

    @Test
    public void testLexValidDigitsWithBadUnits() {
        lexDigitsWithUnits("xyzzy", Token.INVALID);
    }

    @Test
    public void textLexValidDigitsValidDigits() {
        checkTokens(
            getTokens("foo: 10pt; bar: 20%;"),
            new Token(CSSLexer.IDENT, "foo"),
            new Token(CSSLexer.COLON, ":"),
            new Token(CSSLexer.WS, " "),
            new Token(CSSLexer.PT, "10pt"),
            new Token(CSSLexer.SEMI, ";"),
            new Token(CSSLexer.WS, " "),
            new Token(CSSLexer.IDENT, "bar"),
            new Token(CSSLexer.COLON, ":"),
            new Token(CSSLexer.WS, " "),
            new Token(CSSLexer.PERCENTAGE, "20%"),
            new Token(CSSLexer.SEMI, ";"),
            Token.EOF_TOKEN
        );
    }

    @Test
    public void textLexInvalidDigitsValidDigits() {
        checkTokens(
            getTokens("foo: 10pz; bar: 20%;"),
            new Token(CSSLexer.IDENT, "foo"),
            new Token(CSSLexer.COLON, ":"),
            new Token(CSSLexer.WS, " "),
            new Token(Token.INVALID, "10pz"),
            new Token(CSSLexer.SEMI, ";"),
            new Token(CSSLexer.WS, " "),
            new Token(CSSLexer.IDENT, "bar"),
            new Token(CSSLexer.COLON, ":"),
            new Token(CSSLexer.WS, " "),
            new Token(CSSLexer.PERCENTAGE, "20%"),
            new Token(CSSLexer.SEMI, ";"),
            Token.EOF_TOKEN
        );
    }

    @Test
    public void textLexValidDigitsBangImportant() {
        checkTokens(
            getTokens("foo: 10pt !important;"),
            new Token(CSSLexer.IDENT, "foo"),
            new Token(CSSLexer.COLON, ":"),
            new Token(CSSLexer.WS, " "),
            new Token(CSSLexer.PT, "10pt"),
            new Token(CSSLexer.WS, " "),
            new Token(CSSLexer.IMPORTANT_SYM, "!important"),
            new Token(CSSLexer.SEMI, ";"),
            Token.EOF_TOKEN
        );
    }

    @Test
    public void textLexInvalidDigitsBangImportant() {
        checkTokens(
            getTokens("foo: 10pz !important;"),
            new Token(CSSLexer.IDENT, "foo"),
            new Token(CSSLexer.COLON, ":"),
            new Token(CSSLexer.WS, " "),
            new Token(Token.INVALID, "10pz"),
            new Token(CSSLexer.WS, " "),
            new Token(CSSLexer.IMPORTANT_SYM, "!important"),
            new Token(CSSLexer.SEMI, ";"),
            Token.EOF_TOKEN
        );
    }

    @Test
    public void textLexValidDigitsInSequence() {
        checkTokens(
            getTokens("-1 0px 1pt .5em;"),
            new Token(CSSLexer.NUMBER, "-1"),
            new Token(CSSLexer.WS, " "),
            new Token(CSSLexer.PX, "0px"),
            new Token(CSSLexer.WS, " "),
            new Token(CSSLexer.PT, "1pt"),
            new Token(CSSLexer.WS, " "),
            new Token(CSSLexer.EMS, ".5em"),
            new Token(CSSLexer.SEMI, ";"),
            Token.EOF_TOKEN
        );
    }

    @Test
    public void textLexInvalidDigitsInSequence() {
        checkTokens(
            getTokens("-1 0px 1pz .5em;"),
            new Token(CSSLexer.NUMBER, "-1"),
            new Token(CSSLexer.WS, " "),
            new Token(CSSLexer.PX, "0px"),
            new Token(CSSLexer.WS, " "),
            new Token(Token.INVALID, "1pz"),
            new Token(CSSLexer.WS, " "),
            new Token(CSSLexer.EMS, ".5em"),
            new Token(CSSLexer.SEMI, ";"),
            Token.EOF_TOKEN
        );
    }

    @Test
    public void testTokenOffset() {

        String str = "a: b;";
        // [?][0] = line
        // [?][1] = offset
        Token[] expected = {
            new Token(CSSLexer.IDENT, "a", 1, 0),
            new Token(CSSLexer.COLON, ":", 1, 1),
            new Token(CSSLexer.WS, " ", 1, 2),
            new Token(CSSLexer.IDENT, "b", 1, 3),
            new Token(CSSLexer.SEMI, ";", 1, 4),
            Token.EOF_TOKEN
        };

        List<Token> tlist = getTokens(str);
        checkTokens(tlist, expected);
        checkTokenLinesAndOffsets(tlist, expected);
    }

    @Test
    public void testTokenLineAndOffsetWithCR() {

        String str = "a: b;\rc: d;";
        // [?][0] = line
        // [?][1] = offset
        Token[] expected = {
            new Token(CSSLexer.IDENT, "a", 1, 0),
            new Token(CSSLexer.COLON, ":", 1, 1),
            new Token(CSSLexer.WS, " ", 1, 2),
            new Token(CSSLexer.IDENT, "b", 1, 3),
            new Token(CSSLexer.SEMI, ";", 1, 4),
            new Token(CSSLexer.NL, "\\r", 1, 5),
            new Token(CSSLexer.IDENT, "c", 2, 0),
            new Token(CSSLexer.COLON, ":", 2, 1),
            new Token(CSSLexer.WS, " ", 2, 2),
            new Token(CSSLexer.IDENT, "d", 2, 3),
            new Token(CSSLexer.SEMI, ";", 2, 4),
            Token.EOF_TOKEN
        };

        List<Token> tlist = getTokens(str);
        checkTokens(tlist, expected);
        checkTokenLinesAndOffsets(tlist, expected);
    }

    @Test
    public void testTokenLineAndOffsetWithLF() {

        String str = "a: b;\nc: d;";
        // [?][0] = line
        // [?][1] = offset
        Token[] expected = {
            new Token(CSSLexer.IDENT, "a", 1, 0),
            new Token(CSSLexer.COLON, ":", 1, 1),
            new Token(CSSLexer.WS, " ", 1, 2),
            new Token(CSSLexer.IDENT, "b", 1, 3),
            new Token(CSSLexer.SEMI, ";", 1, 4),
            new Token(CSSLexer.NL, "\\n", 1, 5),
            new Token(CSSLexer.IDENT, "c", 2, 0),
            new Token(CSSLexer.COLON, ":", 2, 1),
            new Token(CSSLexer.WS, " ", 2, 2),
            new Token(CSSLexer.IDENT, "d", 2, 3),
            new Token(CSSLexer.SEMI, ";", 2, 4),
            Token.EOF_TOKEN
        };

        List<Token> tlist = getTokens(str);
        checkTokens(tlist, expected);
        checkTokenLinesAndOffsets(tlist, expected);
    }

    @Test
    public void testTokenLineAndOffsetWithCRLF() {
        //             012345 01234
        String str = "a: b;\r\nc: d;";
        // [?][0] = line
        // [?][1] = offset
        Token[] expected = {
            new Token(CSSLexer.IDENT, "a", 1, 0),
            new Token(CSSLexer.COLON, ":", 1, 1),
            new Token(CSSLexer.WS, " ", 1, 2),
            new Token(CSSLexer.IDENT, "b", 1, 3),
            new Token(CSSLexer.SEMI, ";", 1, 4),
            new Token(CSSLexer.NL, "\\r\\n", 1, 5),
            new Token(CSSLexer.IDENT, "c", 2, 0),
            new Token(CSSLexer.COLON, ":", 2, 1),
            new Token(CSSLexer.WS, " ", 2, 2),
            new Token(CSSLexer.IDENT, "d", 2, 3),
            new Token(CSSLexer.SEMI, ";", 2, 4),
            Token.EOF_TOKEN
        };

        List<Token> tlist = getTokens(str);
        checkTokens(tlist, expected);
        checkTokenLinesAndOffsets(tlist, expected);
    }

    @Test
    public void testTokenOffsetWithEmbeddedComment() {
        //             0123456789012345
        String str = "a: /*comment*/b;";
        // [?][0] = line
        // [?][1] = offset
        Token[] expected = {
            new Token(CSSLexer.IDENT, "a", 1, 0),
            new Token(CSSLexer.COLON, ":", 1, 1),
            new Token(CSSLexer.WS, " ", 1, 2),
            new Token(CSSLexer.IDENT, "b", 1, 14),
            new Token(CSSLexer.SEMI, ";", 1, 15),
            Token.EOF_TOKEN
        };

        List<Token> tlist = getTokens(str);
        checkTokens(tlist, expected);
        checkTokenLinesAndOffsets(tlist, expected);
    }

    @Test
    public void testTokenLineAndOffsetWithLeadingComment() {
        //             012345678901 01234
        String str = "/*comment*/\na: b;";
        // [?][0] = line
        // [?][1] = offset
        Token[] expected = {
            new Token(CSSLexer.NL, "\\n", 1, 11),
            new Token(CSSLexer.IDENT, "a", 2, 0),
            new Token(CSSLexer.COLON, ":", 2, 1),
            new Token(CSSLexer.WS, " ", 2, 2),
            new Token(CSSLexer.IDENT, "b", 2, 3),
            new Token(CSSLexer.SEMI, ";", 2, 4),
            Token.EOF_TOKEN
        };

        List<Token> tlist = getTokens(str);
        checkTokens(tlist, expected);
        checkTokenLinesAndOffsets(tlist, expected);
    }

    @Test
    public void testTokenOffsetWithFunction() {
        //             01234567890
        String str = "a: b(arg);";
        // [?][0] = line
        // [?][1] = offset
        Token[] expected = {
            new Token(CSSLexer.IDENT, "a", 1, 0),
            new Token(CSSLexer.COLON, ":", 1, 1),
            new Token(CSSLexer.WS, " ", 1, 2),
            new Token(CSSLexer.IDENT, "b", 1, 3),
            new Token(CSSLexer.LPAREN, "(", 1, 4),
            new Token(CSSLexer.IDENT, "arg", 1, 5),
            new Token(CSSLexer.RPAREN, ")", 1, 8),
            new Token(CSSLexer.SEMI, ";", 1, 9),
            Token.EOF_TOKEN
        };

        List<Token> tlist = getTokens(str);
        checkTokens(tlist, expected);
        checkTokenLinesAndOffsets(tlist, expected);
    }

    @Test
    public void testTokenOffsetWithHash() {
        //             01234567890
        String str = "a: #012345;";
        // [?][0] = line
        // [?][1] = offset
        Token[] expected = {
            new Token(CSSLexer.IDENT, "a", 1, 0),
            new Token(CSSLexer.COLON, ":", 1, 1),
            new Token(CSSLexer.WS, " ", 1, 2),
            new Token(CSSLexer.HASH, "#012345", 1, 3),
            new Token(CSSLexer.SEMI, ";", 1, 10),
            Token.EOF_TOKEN
        };

        List<Token> tlist = getTokens(str);
        checkTokens(tlist, expected);
        checkTokenLinesAndOffsets(tlist, expected);
    }

    @Test
    public void testTokenOffsetWithDigits() {
        //             01234567890
        String str = "a: 123.45;";
        // [?][0] = line
        // [?][1] = offset
        Token[] expected = {
            new Token(CSSLexer.IDENT, "a", 1, 0),
            new Token(CSSLexer.COLON, ":", 1, 1),
            new Token(CSSLexer.WS, " ", 1, 2),
            new Token(CSSLexer.NUMBER, "123.45", 1, 3),
            new Token(CSSLexer.SEMI, ";", 1, 9),
            Token.EOF_TOKEN
        };

        List<Token> tlist = getTokens(str);
        checkTokens(tlist, expected);
        checkTokenLinesAndOffsets(tlist, expected);
    }

    @Test
    public void testTokenOffsetWithBangImportant() {
        //             0123456789012345
        String str = "a: b !important;";
        // [?][0] = line
        // [?][1] = offset
        Token[] expected = {
            new Token(CSSLexer.IDENT, "a", 1, 0),
            new Token(CSSLexer.COLON, ":", 1, 1),
            new Token(CSSLexer.WS, " ", 1, 2),
            new Token(CSSLexer.IDENT, "b", 1, 3),
            new Token(CSSLexer.WS, " ", 1, 4),
            new Token(CSSLexer.IMPORTANT_SYM, "!important", 1, 5),
            new Token(CSSLexer.SEMI, ";", 1, 15),
            Token.EOF_TOKEN
        };

        List<Token> tlist = getTokens(str);
        checkTokens(tlist, expected);
        checkTokenLinesAndOffsets(tlist, expected);
    }

    @Test
    public void testTokenOffsetWithSkip() {
        //             0123456789012345
        String str = "a: b !imporzant;";
        // [?][0] = line
        // [?][1] = offset
        Token[] expected = {
            new Token(CSSLexer.IDENT, "a", 1, 0),
            new Token(CSSLexer.COLON, ":", 1, 1),
            new Token(CSSLexer.WS, " ", 1, 2),
            new Token(CSSLexer.IDENT, "b", 1, 3),
            new Token(CSSLexer.WS, " ", 1, 4),
            new Token(Token.SKIP, "!imporz", 1, 5),
            new Token(CSSLexer.SEMI, ";", 1, 15),
            Token.EOF_TOKEN
        };

        List<Token> tlist = getTokens(str);
        checkTokens(tlist, expected);
        checkTokenLinesAndOffsets(tlist, expected);
    }

    @Test
    public void testTokenOffsetWithInvalid() {
        //             0123456789012345
        String str = "a: 1pz;";
        // [?][0] = line
        // [?][1] = offset
        Token[] expected = {
            new Token(CSSLexer.IDENT, "a", 1, 0),
            new Token(CSSLexer.COLON, ":", 1, 1),
            new Token(CSSLexer.WS, " ", 1, 2),
            new Token(Token.INVALID, "1pz", 1, 3),
            new Token(CSSLexer.SEMI, ";", 1, 6),
            Token.EOF_TOKEN
        };

        List<Token> tlist = getTokens(str);
        checkTokens(tlist, expected);
        checkTokenLinesAndOffsets(tlist, expected);
    }

    @Test
    public void testTokenLineAndOffsetMoreFully() {
        //             1            2                3         4
        //             012345678901 0123456789012345 012345678 0
        String str = "/*comment*/\n*.foo#bar:baz {\n\ta: 1em;\n}";
        // [?][0] = line
        // [?][1] = offset
        Token[] expected = {
            new Token(CSSLexer.NL, "\\n", 1, 11),
            new Token(CSSLexer.STAR, "*", 2, 0),
            new Token(CSSLexer.DOT, ".", 2, 1),
            new Token(CSSLexer.IDENT, "foo", 2, 2),
            new Token(CSSLexer.HASH, "#bar", 2, 5),
            new Token(CSSLexer.COLON, ":", 2, 9),
            new Token(CSSLexer.IDENT, "baz", 2, 10),
            new Token(CSSLexer.WS, " ", 2, 13),
            new Token(CSSLexer.LBRACE, "{", 2, 14),
            new Token(CSSLexer.NL, "\\n", 2, 15),
            new Token(CSSLexer.WS, "\t", 3, 0),
            new Token(CSSLexer.IDENT, "a", 3, 1),
            new Token(CSSLexer.COLON, ":", 3, 2),
            new Token(CSSLexer.WS, " ", 3, 3),
            new Token(CSSLexer.EMS, "1em", 3, 4),
            new Token(CSSLexer.SEMI, ";", 3, 7),
            new Token(CSSLexer.NL, "\\n", 3, 8),
            new Token(CSSLexer.RBRACE, "}", 4, 0),
            Token.EOF_TOKEN
        };

        List<Token> tlist = getTokens(str);
        checkTokens(tlist, expected);
        checkTokenLinesAndOffsets(tlist, expected);
    }

    @Test
    public void testScanUrl() {

        //             1         2         3         4
        //             01234567890101234567890123450123456780123456789
        String str = "url(http://foo.bar.com/fonts/serif/fubar.ttf)";
        Token[] expected = new Token[]{
            new Token(CSSLexer.URL, "http://foo.bar.com/fonts/serif/fubar.ttf", 1, 0),
            Token.EOF_TOKEN
        };

        List<Token> tlist = getTokens(str);
        checkTokens(tlist, expected);
        checkTokenLinesAndOffsets(tlist, expected);
    }

    @Test
    public void testScanUrlWithWhiteSpace() {

        //             1         2         3         4
        //             01234567890101234567890123450123456780123456789
        String str = "url( http://foo.bar.com/fonts/serif/fubar.ttf\t)";
        Token[] expected = new Token[]{
            new Token(CSSLexer.URL, "http://foo.bar.com/fonts/serif/fubar.ttf", 1, 0),
            Token.EOF_TOKEN
        };

        List<Token> tlist = getTokens(str);
        checkTokens(tlist, expected);
        checkTokenLinesAndOffsets(tlist, expected);
    }

    @Test
    public void testScanQuotedUrlWithWhiteSpace() {

        //             1         2         3         4
        //             01234567890101234567890123450123456780123456789
        String str = "url( 'http://foo.bar.com/fonts/serif/fubar.ttf'\t)";
        Token[] expected = new Token[]{
            new Token(CSSLexer.URL, "http://foo.bar.com/fonts/serif/fubar.ttf", 1, 0),
            Token.EOF_TOKEN
        };

        List<Token> tlist = getTokens(str);
        checkTokens(tlist, expected);
        checkTokenLinesAndOffsets(tlist, expected);
    }

    @Test
    public void testScanQuotedUrl() {

        //             1         2         3         4
        //             01234567890101234567890123450123456780123456789
        String str = "url(\"http://foo.bar.com/fonts/serif/fubar.ttf\")";
        Token[] expected = new Token[]{
            new Token(CSSLexer.URL, "http://foo.bar.com/fonts/serif/fubar.ttf", 1, 0),
            Token.EOF_TOKEN
        };

        List<Token> tlist = getTokens(str);
        checkTokens(tlist, expected);
        checkTokenLinesAndOffsets(tlist, expected);
    }

    @Test
    public void testScanUrlWithEscapes() {

        //             1         2         3         4
        //             01234567890101234567890123450123456780123456789
        String str = "url(http://foo.bar.com/fonts/true\\ type/fubar.ttf)";
        Token[] expected = new Token[]{
            new Token(CSSLexer.URL, "http://foo.bar.com/fonts/true type/fubar.ttf", 1, 0),
            Token.EOF_TOKEN
        };

        List<Token> tlist = getTokens(str);
        checkTokens(tlist, expected);
        checkTokenLinesAndOffsets(tlist, expected);
    }

    @Test
    public void testScanQuotedUrlWithEscapes() {

        //             1         2         3         4
        //             01234567890101234567890123450123456780123456789
        String str = "url(\"http://foo.bar.com/fonts/true\\ type/fubar.ttf\")";
        Token[] expected = new Token[]{
            new Token(CSSLexer.URL, "http://foo.bar.com/fonts/true type/fubar.ttf", 1, 0),
            Token.EOF_TOKEN
        };

        List<Token> tlist = getTokens(str);
        checkTokens(tlist, expected);
        checkTokenLinesAndOffsets(tlist, expected);
    }

    @Test
    public void testScanUrlWithSyntaxError() {

        //             1         2         3         4
        //             01234567890101234567890123450123456780123456789
        String str = "url(http://foo.bar.com/fonts/true'type/fubar.ttf)";
        Token[] expected = new Token[]{
            new Token(Token.INVALID, "http://foo.bar.com/fonts/true", 1, 0),
            Token.EOF_TOKEN
        };

        List<Token> tlist = getTokens(str);
        checkTokens(tlist, expected);
        checkTokenLinesAndOffsets(tlist, expected);
    }

    @Test
    public void testScanQuotedUrlWithSyntaxError() {

        //             1         2         3         4
        //             01234567890101234567890123450123456780123456789
        String str = "url('http://foo.bar.com/fonts/true\rtype/fubar.ttf')";
        Token[] expected = new Token[]{
            new Token(Token.INVALID, "http://foo.bar.com/fonts/true", 2, 0),
            Token.EOF_TOKEN
        };

        List<Token> tlist = getTokens(str);
        checkTokens(tlist, expected);
        checkTokenLinesAndOffsets(tlist, expected);
    }

}