/*
 * Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.  Oracle designates this
 * particular file as subject to the "Classpath" exception as provided
 * by Oracle in the LICENSE file that accompanied this code.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

package javafx.css;

import com.sun.javafx.css.parser.Token;

import java.io.CharArrayReader;
import java.io.Reader;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

import static org.junit.Assert.assertEquals;

import org.junit.Test;

/**
 * Unit tests for {@link CssLexer}: verifies the token type and text produced
 * for numeric values with every supported unit suffix, and the line/offset
 * bookkeeping the lexer records for each token (including CR, LF, CRLF line
 * endings, comments, and {@code url(...)} scanning).
 */
public class CssLexerTest {

    public CssLexerTest() {
    }

    /**
     * Asserts that {@code resultTokens} matches {@code expectedTokens}
     * element-for-element in both token type and token text.
     * <p>
     * Throws {@link org.junit.ComparisonFailure} (an unchecked AssertionError
     * subclass) so a mismatch renders a side-by-side diff in the test report.
     */
    private void checkTokens(List<Token> resultTokens, Token... expectedTokens) {

        if (expectedTokens.length != resultTokens.size()) {
            throw new org.junit.ComparisonFailure(
                "lengths do not match",
                Arrays.toString(expectedTokens),
                resultTokens.toString()
            );
        }

        for (int n = 0; n < expectedTokens.length; n++) {

            final Token result = resultTokens.get(n);
            final Token expected = expectedTokens[n];

            if (expected.getType() != result.getType()) {
                throw new org.junit.ComparisonFailure(
                    "token " + n + " types do not match",
                    Arrays.toString(expectedTokens),
                    resultTokens.toString()
                );
            }

            final String expectedText = expected.getText();
            final String resultText = result.getText();

            // Null-safe text comparison: both null is a match.
            if (expectedText == null ? resultText != null : !expectedText.equals(resultText)) {
                throw new org.junit.ComparisonFailure(
                    "token " + n + " text does not match",
                    Arrays.toString(expectedTokens),
                    resultTokens.toString()
                );
            }
        }
    }

    /**
     * Asserts that each result token's recorded line and offset match the
     * corresponding expected token. Callers invoke this only after
     * {@link #checkTokens}, so the two lists are known to be the same length.
     */
    private void checkLinesAndOffsets(List<Token> resultTokens, Token... expectedTokens) {
        for (int n = 0; n < resultTokens.size(); n++) {
            Token tok = resultTokens.get(n);
            assertEquals("bad line. tok=" + tok, expectedTokens[n].getLine(), tok.getLine());
            assertEquals("bad offset. tok=" + tok, expectedTokens[n].getOffset(), tok.getOffset());
        }
    }

    /**
     * Lexes {@code string} and returns every token produced, up to and
     * including the EOF token, as an unmodifiable list.
     */
    List<Token> getTokens(String string) {

        Reader reader = new CharArrayReader(string.toCharArray());
        final CssLexer lexer = new CssLexer();
        lexer.setReader(reader);

        final List<Token> tokens = new ArrayList<>();

        Token token = null;
        do {
            token = lexer.nextToken();
            tokens.add(token);
        } while (token.getType() != Token.EOF);

        return Collections.unmodifiableList(tokens);
    }

    /**
     * Exercises every numeric literal shape (integer, decimal, leading dot,
     * signed) followed by {@code units}, asserting each lexes to a single
     * token of the given {@code type}.
     */
    private void lexDigitsWithUnits(String units, int type) {

        checkTokens(getTokens("123" + units), new Token(type, "123" + units), Token.EOF_TOKEN);
        checkTokens(getTokens("123.45" + units), new Token(type, "123.45" + units), Token.EOF_TOKEN);
        checkTokens(getTokens(".45" + units), new Token(type, ".45" + units), Token.EOF_TOKEN);
        checkTokens(getTokens("-123" + units), new Token(type, "-123" + units), Token.EOF_TOKEN);
        checkTokens(getTokens("-.45" + units), new Token(type, "-.45" + units), Token.EOF_TOKEN);
        checkTokens(getTokens("+123" + units), new Token(type, "+123" + units), Token.EOF_TOKEN);
        checkTokens(getTokens("+.45" + units), new Token(type, "+.45" + units), Token.EOF_TOKEN);
    }

    @Test
    public void testLexValidDigits() {
        lexDigitsWithUnits("", CssLexer.NUMBER);
    }

    @Test
    public void testLexValidDigitsWithCM() {
        lexDigitsWithUnits("cm", CssLexer.CM);
        // case should be ignored
        lexDigitsWithUnits("cM", CssLexer.CM);
    }

    @Test
    public void testLexValidDigitsWithDEG() {
        lexDigitsWithUnits("deg", CssLexer.DEG);
        // case should be ignored
        lexDigitsWithUnits("dEg", CssLexer.DEG);
    }

    @Test
    public void testLexValidDigitsWithEM() {
        lexDigitsWithUnits("em", CssLexer.EMS);
        // case should be ignored
        lexDigitsWithUnits("Em", CssLexer.EMS);
    }

    @Test
    public void testLexValidDigitsWithEX() {
        lexDigitsWithUnits("ex", CssLexer.EXS);
        // case should be ignored
        lexDigitsWithUnits("Ex", CssLexer.EXS);
    }

    @Test
    public void testLexValidDigitsWithGRAD() {
        lexDigitsWithUnits("grad", CssLexer.GRAD);
        // case should be ignored
        lexDigitsWithUnits("gRad", CssLexer.GRAD);
    }

    @Test
    public void testLexValidDigitsWithIN() {
        lexDigitsWithUnits("in", CssLexer.IN);
        // case should be ignored
        lexDigitsWithUnits("In", CssLexer.IN);
    }

    @Test
    public void testLexValidDigitsWithMM() {
        lexDigitsWithUnits("mm", CssLexer.MM);
        // case should be ignored
        lexDigitsWithUnits("mM", CssLexer.MM);
    }

    @Test
    public void testLexValidDigitsWithPC() {
        lexDigitsWithUnits("pc", CssLexer.PC);
        // case should be ignored
        lexDigitsWithUnits("Pc", CssLexer.PC);
    }

    @Test
    public void testLexValidDigitsWithPT() {
        lexDigitsWithUnits("pt", CssLexer.PT);
        // case should be ignored
        lexDigitsWithUnits("PT", CssLexer.PT);
    }

    @Test
    public void testLexValidDigitsWithPX() {
        lexDigitsWithUnits("px", CssLexer.PX);
        // case should be ignored
        lexDigitsWithUnits("Px", CssLexer.PX);
    }

    @Test
    public void testLexValidDigitsWithRAD() {
        lexDigitsWithUnits("rad", CssLexer.RAD);
        // case should be ignored
        lexDigitsWithUnits("RaD", CssLexer.RAD);
    }

    @Test
    public void testLexValidDigitsWithTURN() {
        lexDigitsWithUnits("turn", CssLexer.TURN);
        // case should be ignored
        lexDigitsWithUnits("TurN", CssLexer.TURN);
    }

    @Test
    public void testLexValidDigitsWithS() {
        lexDigitsWithUnits("s", CssLexer.SECONDS);
        // case should be ignored
        lexDigitsWithUnits("S", CssLexer.SECONDS);
    }

    @Test
    public void testLexValidDigitsWithMS() {
        lexDigitsWithUnits("ms", CssLexer.MS);
        // case should be ignored
        lexDigitsWithUnits("mS", CssLexer.MS);
    }

    @Test
    public void testLexValidDigitsWithPCT() {
        lexDigitsWithUnits("%", CssLexer.PERCENTAGE);
    }

    @Test
    public void testLexValidDigitsWithBadUnits() {
        lexDigitsWithUnits("xyzzy", Token.INVALID);
    }

    // NOTE(review): the "textLex" prefix on the next six methods is a typo
    // for "testLex"; names are kept unchanged since JUnit runs them via the
    // @Test annotation regardless of name.

    @Test
    public void textLexValidDigitsValidDigits() {
        checkTokens(
            getTokens("foo: 10pt; bar: 20%;"),
            new Token(CssLexer.IDENT, "foo"),
            new Token(CssLexer.COLON, ":"),
            new Token(CssLexer.WS, " "),
            new Token(CssLexer.PT, "10pt"),
            new Token(CssLexer.SEMI, ";"),
            new Token(CssLexer.WS, " "),
            new Token(CssLexer.IDENT, "bar"),
            new Token(CssLexer.COLON, ":"),
            new Token(CssLexer.WS, " "),
            new Token(CssLexer.PERCENTAGE, "20%"),
            new Token(CssLexer.SEMI, ";"),
            Token.EOF_TOKEN
        );
    }

    @Test
    public void textLexInvalidDigitsValidDigits() {
        checkTokens(
            getTokens("foo: 10pz; bar: 20%;"),
            new Token(CssLexer.IDENT, "foo"),
            new Token(CssLexer.COLON, ":"),
            new Token(CssLexer.WS, " "),
            new Token(Token.INVALID, "10pz"),
            new Token(CssLexer.SEMI, ";"),
            new Token(CssLexer.WS, " "),
            new Token(CssLexer.IDENT, "bar"),
            new Token(CssLexer.COLON, ":"),
            new Token(CssLexer.WS, " "),
            new Token(CssLexer.PERCENTAGE, "20%"),
            new Token(CssLexer.SEMI, ";"),
            Token.EOF_TOKEN
        );
    }

    @Test
    public void textLexValidDigitsBangImportant() {
        checkTokens(
            getTokens("foo: 10pt !important;"),
            new Token(CssLexer.IDENT, "foo"),
            new Token(CssLexer.COLON, ":"),
            new Token(CssLexer.WS, " "),
            new Token(CssLexer.PT, "10pt"),
            new Token(CssLexer.WS, " "),
            new Token(CssLexer.IMPORTANT_SYM, "!important"),
            new Token(CssLexer.SEMI, ";"),
            Token.EOF_TOKEN
        );
    }

    @Test
    public void textLexInvalidDigitsBangImportant() {
        checkTokens(
            getTokens("foo: 10pz !important;"),
            new Token(CssLexer.IDENT, "foo"),
            new Token(CssLexer.COLON, ":"),
            new Token(CssLexer.WS, " "),
            new Token(Token.INVALID, "10pz"),
            new Token(CssLexer.WS, " "),
            new Token(CssLexer.IMPORTANT_SYM, "!important"),
            new Token(CssLexer.SEMI, ";"),
            Token.EOF_TOKEN
        );
    }

    @Test
    public void textLexValidDigitsInSequence() {
        checkTokens(
            getTokens("-1 0px 1pt .5em;"),
            new Token(CssLexer.NUMBER, "-1"),
            new Token(CssLexer.WS, " "),
            new Token(CssLexer.PX, "0px"),
            new Token(CssLexer.WS, " "),
            new Token(CssLexer.PT, "1pt"),
            new Token(CssLexer.WS, " "),
            new Token(CssLexer.EMS, ".5em"),
            new Token(CssLexer.SEMI, ";"),
            Token.EOF_TOKEN
        );
    }

    @Test
    public void textLexInvalidDigitsInSequence() {
        checkTokens(
            getTokens("-1 0px 1pz .5em;"),
            new Token(CssLexer.NUMBER, "-1"),
            new Token(CssLexer.WS, " "),
            new Token(CssLexer.PX, "0px"),
            new Token(CssLexer.WS, " "),
            new Token(Token.INVALID, "1pz"),
            new Token(CssLexer.WS, " "),
            new Token(CssLexer.EMS, ".5em"),
            new Token(CssLexer.SEMI, ";"),
            Token.EOF_TOKEN
        );
    }

    @Test
    public void testTokenOffset() {

        String str = "a: b;";
        Token[] expected = {
            new Token(CssLexer.IDENT, "a", 1, 0),
            new Token(CssLexer.COLON, ":", 1, 1),
            new Token(CssLexer.WS, " ", 1, 2),
            new Token(CssLexer.IDENT, "b", 1, 3),
            new Token(CssLexer.SEMI, ";", 1, 4),
            Token.EOF_TOKEN
        };

        List<Token> tlist = getTokens(str);
        checkTokens(tlist, expected);
        checkLinesAndOffsets(tlist, expected);
    }

    @Test
    public void testTokenLineAndOffsetWithCR() {

        String str = "a: b;\rc: d;";
        Token[] expected = {
            new Token(CssLexer.IDENT, "a", 1, 0),
            new Token(CssLexer.COLON, ":", 1, 1),
            new Token(CssLexer.WS, " ", 1, 2),
            new Token(CssLexer.IDENT, "b", 1, 3),
            new Token(CssLexer.SEMI, ";", 1, 4),
            new Token(CssLexer.NL, "\\r", 1, 5),
            new Token(CssLexer.IDENT, "c", 2, 0),
            new Token(CssLexer.COLON, ":", 2, 1),
            new Token(CssLexer.WS, " ", 2, 2),
            new Token(CssLexer.IDENT, "d", 2, 3),
            new Token(CssLexer.SEMI, ";", 2, 4),
            Token.EOF_TOKEN
        };

        List<Token> tlist = getTokens(str);
        checkTokens(tlist, expected);
        checkLinesAndOffsets(tlist, expected);
    }

    @Test
    public void testTokenLineAndOffsetWithLF() {

        String str = "a: b;\nc: d;";
        Token[] expected = {
            new Token(CssLexer.IDENT, "a", 1, 0),
            new Token(CssLexer.COLON, ":", 1, 1),
            new Token(CssLexer.WS, " ", 1, 2),
            new Token(CssLexer.IDENT, "b", 1, 3),
            new Token(CssLexer.SEMI, ";", 1, 4),
            new Token(CssLexer.NL, "\\n", 1, 5),
            new Token(CssLexer.IDENT, "c", 2, 0),
            new Token(CssLexer.COLON, ":", 2, 1),
            new Token(CssLexer.WS, " ", 2, 2),
            new Token(CssLexer.IDENT, "d", 2, 3),
            new Token(CssLexer.SEMI, ";", 2, 4),
            Token.EOF_TOKEN
        };

        List<Token> tlist = getTokens(str);
        checkTokens(tlist, expected);
        checkLinesAndOffsets(tlist, expected);
    }

    @Test
    public void testTokenLineAndOffsetWithCRLF() {
        //            012345 01234
        String str = "a: b;\r\nc: d;";
        Token[] expected = {
            new Token(CssLexer.IDENT, "a", 1, 0),
            new Token(CssLexer.COLON, ":", 1, 1),
            new Token(CssLexer.WS, " ", 1, 2),
            new Token(CssLexer.IDENT, "b", 1, 3),
            new Token(CssLexer.SEMI, ";", 1, 4),
            new Token(CssLexer.NL, "\\r\\n", 1, 5),
            new Token(CssLexer.IDENT, "c", 2, 0),
            new Token(CssLexer.COLON, ":", 2, 1),
            new Token(CssLexer.WS, " ", 2, 2),
            new Token(CssLexer.IDENT, "d", 2, 3),
            new Token(CssLexer.SEMI, ";", 2, 4),
            Token.EOF_TOKEN
        };

        List<Token> tlist = getTokens(str);
        checkTokens(tlist, expected);
        checkLinesAndOffsets(tlist, expected);
    }

    @Test
    public void testTokenOffsetWithEmbeddedComment() {
        //            0123456789012345
        String str = "a: /*comment*/b;";
        Token[] expected = {
            new Token(CssLexer.IDENT, "a", 1, 0),
            new Token(CssLexer.COLON, ":", 1, 1),
            new Token(CssLexer.WS, " ", 1, 2),
            new Token(CssLexer.IDENT, "b", 1, 14),
            new Token(CssLexer.SEMI, ";", 1, 15),
            Token.EOF_TOKEN
        };

        List<Token> tlist = getTokens(str);
        checkTokens(tlist, expected);
        checkLinesAndOffsets(tlist, expected);
    }

    @Test
    public void testTokenLineAndOffsetWithLeadingComment() {
        //            012345678901 01234
        String str = "/*comment*/\na: b;";
        Token[] expected = {
            new Token(CssLexer.NL, "\\n", 1, 11),
            new Token(CssLexer.IDENT, "a", 2, 0),
            new Token(CssLexer.COLON, ":", 2, 1),
            new Token(CssLexer.WS, " ", 2, 2),
            new Token(CssLexer.IDENT, "b", 2, 3),
            new Token(CssLexer.SEMI, ";", 2, 4),
            Token.EOF_TOKEN
        };

        List<Token> tlist = getTokens(str);
        checkTokens(tlist, expected);
        checkLinesAndOffsets(tlist, expected);
    }

    @Test
    public void testTokenOffsetWithFunction() {
        //            01234567890
        String str = "a: b(arg);";
        Token[] expected = {
            new Token(CssLexer.IDENT, "a", 1, 0),
            new Token(CssLexer.COLON, ":", 1, 1),
            new Token(CssLexer.WS, " ", 1, 2),
            new Token(CssLexer.IDENT, "b", 1, 3),
            new Token(CssLexer.LPAREN, "(", 1, 4),
            new Token(CssLexer.IDENT, "arg", 1, 5),
            new Token(CssLexer.RPAREN, ")", 1, 8),
            new Token(CssLexer.SEMI, ";", 1, 9),
            Token.EOF_TOKEN
        };

        List<Token> tlist = getTokens(str);
        checkTokens(tlist, expected);
        checkLinesAndOffsets(tlist, expected);
    }

    @Test
    public void testTokenOffsetWithHash() {
        //            01234567890
        String str = "a: #012345;";
        Token[] expected = {
            new Token(CssLexer.IDENT, "a", 1, 0),
            new Token(CssLexer.COLON, ":", 1, 1),
            new Token(CssLexer.WS, " ", 1, 2),
            new Token(CssLexer.HASH, "#012345", 1, 3),
            new Token(CssLexer.SEMI, ";", 1, 10),
            Token.EOF_TOKEN
        };

        List<Token> tlist = getTokens(str);
        checkTokens(tlist, expected);
        checkLinesAndOffsets(tlist, expected);
    }

    @Test
    public void testTokenOffsetWithDigits() {
        //            01234567890
        String str = "a: 123.45;";
        Token[] expected = {
            new Token(CssLexer.IDENT, "a", 1, 0),
            new Token(CssLexer.COLON, ":", 1, 1),
            new Token(CssLexer.WS, " ", 1, 2),
            new Token(CssLexer.NUMBER, "123.45", 1, 3),
            new Token(CssLexer.SEMI, ";", 1, 9),
            Token.EOF_TOKEN
        };

        List<Token> tlist = getTokens(str);
        checkTokens(tlist, expected);
        checkLinesAndOffsets(tlist, expected);
    }

    @Test
    public void testTokenOffsetWithBangImportant() {
        //            0123456789012345
        String str = "a: b !important;";
        Token[] expected = {
            new Token(CssLexer.IDENT, "a", 1, 0),
            new Token(CssLexer.COLON, ":", 1, 1),
            new Token(CssLexer.WS, " ", 1, 2),
            new Token(CssLexer.IDENT, "b", 1, 3),
            new Token(CssLexer.WS, " ", 1, 4),
            new Token(CssLexer.IMPORTANT_SYM, "!important", 1, 5),
            new Token(CssLexer.SEMI, ";", 1, 15),
            Token.EOF_TOKEN
        };

        List<Token> tlist = getTokens(str);
        checkTokens(tlist, expected);
        checkLinesAndOffsets(tlist, expected);
    }

    @Test
    public void testTokenOffsetWithSkip() {
        //            0123456789012345
        String str = "a: b !imporzant;";
        Token[] expected = {
            new Token(CssLexer.IDENT, "a", 1, 0),
            new Token(CssLexer.COLON, ":", 1, 1),
            new Token(CssLexer.WS, " ", 1, 2),
            new Token(CssLexer.IDENT, "b", 1, 3),
            new Token(CssLexer.WS, " ", 1, 4),
            new Token(Token.SKIP, "!imporz", 1, 5),
            new Token(CssLexer.SEMI, ";", 1, 15),
            Token.EOF_TOKEN
        };

        List<Token> tlist = getTokens(str);
        checkTokens(tlist, expected);
        checkLinesAndOffsets(tlist, expected);
    }

    @Test
    public void testTokenOffsetWithInvalid() {
        //            0123456789012345
        String str = "a: 1pz;";
        Token[] expected = {
            new Token(CssLexer.IDENT, "a", 1, 0),
            new Token(CssLexer.COLON, ":", 1, 1),
            new Token(CssLexer.WS, " ", 1, 2),
            new Token(Token.INVALID, "1pz", 1, 3),
            new Token(CssLexer.SEMI, ";", 1, 6),
            Token.EOF_TOKEN
        };

        List<Token> tlist = getTokens(str);
        checkTokens(tlist, expected);
        checkLinesAndOffsets(tlist, expected);
    }

    @Test
    public void testTokenLineAndOffsetMoreFully() {
        //            1           2                3         4
        //            012345678901 0123456789012345 012345678 0
        String str = "/*comment*/\n*.foo#bar:baz {\n\ta: 1em;\n}";
        Token[] expected = {
            new Token(CssLexer.NL, "\\n", 1, 11),
            new Token(CssLexer.STAR, "*", 2, 0),
            new Token(CssLexer.DOT, ".", 2, 1),
            new Token(CssLexer.IDENT, "foo", 2, 2),
            new Token(CssLexer.HASH, "#bar", 2, 5),
            new Token(CssLexer.COLON, ":", 2, 9),
            new Token(CssLexer.IDENT, "baz", 2, 10),
            new Token(CssLexer.WS, " ", 2, 13),
            new Token(CssLexer.LBRACE, "{", 2, 14),
            new Token(CssLexer.NL, "\\n", 2, 15),
            new Token(CssLexer.WS, "\t", 3, 0),
            new Token(CssLexer.IDENT, "a", 3, 1),
            new Token(CssLexer.COLON, ":", 3, 2),
            new Token(CssLexer.WS, " ", 3, 3),
            new Token(CssLexer.EMS, "1em", 3, 4),
            new Token(CssLexer.SEMI, ";", 3, 7),
            new Token(CssLexer.NL, "\\n", 3, 8),
            new Token(CssLexer.RBRACE, "}", 4, 0),
            Token.EOF_TOKEN
        };

        List<Token> tlist = getTokens(str);
        checkTokens(tlist, expected);
        checkLinesAndOffsets(tlist, expected);
    }

    @Test
    public void testScanUrl() {

        String str = "url(http://foo.bar.com/fonts/serif/fubar.ttf)";
        Token[] expected = new Token[]{
            new Token(CssLexer.URL, "http://foo.bar.com/fonts/serif/fubar.ttf", 1, 0),
            Token.EOF_TOKEN
        };

        List<Token> tlist = getTokens(str);
        checkTokens(tlist, expected);
        checkLinesAndOffsets(tlist, expected);
    }

    @Test
    public void testScanUrlWithWhiteSpace() {

        String str = "url( http://foo.bar.com/fonts/serif/fubar.ttf\t)";
        Token[] expected = new Token[]{
            new Token(CssLexer.URL, "http://foo.bar.com/fonts/serif/fubar.ttf", 1, 0),
            Token.EOF_TOKEN
        };

        List<Token> tlist = getTokens(str);
        checkTokens(tlist, expected);
        checkLinesAndOffsets(tlist, expected);
    }

    @Test
    public void testScanQuotedUrlWithWhiteSpace() {

        String str = "url( 'http://foo.bar.com/fonts/serif/fubar.ttf'\t)";
        Token[] expected = new Token[]{
            new Token(CssLexer.URL, "http://foo.bar.com/fonts/serif/fubar.ttf", 1, 0),
            Token.EOF_TOKEN
        };

        List<Token> tlist = getTokens(str);
        checkTokens(tlist, expected);
        checkLinesAndOffsets(tlist, expected);
    }

    @Test
    public void testScanQuotedUrl() {

        String str = "url(\"http://foo.bar.com/fonts/serif/fubar.ttf\")";
        Token[] expected = new Token[]{
            new Token(CssLexer.URL, "http://foo.bar.com/fonts/serif/fubar.ttf", 1, 0),
            Token.EOF_TOKEN
        };

        List<Token> tlist = getTokens(str);
        checkTokens(tlist, expected);
        checkLinesAndOffsets(tlist, expected);
    }

    @Test
    public void testScanUrlWithEscapes() {

        String str = "url(http://foo.bar.com/fonts/true\\ type/fubar.ttf)";
        Token[] expected = new Token[]{
            new Token(CssLexer.URL, "http://foo.bar.com/fonts/true type/fubar.ttf", 1, 0),
            Token.EOF_TOKEN
        };

        List<Token> tlist = getTokens(str);
        checkTokens(tlist, expected);
        checkLinesAndOffsets(tlist, expected);
    }

    @Test
    public void testScanQuotedUrlWithEscapes() {

        String str = "url(\"http://foo.bar.com/fonts/true\\ type/fubar.ttf\")";
        Token[] expected = new Token[]{
            new Token(CssLexer.URL, "http://foo.bar.com/fonts/true type/fubar.ttf", 1, 0),
            Token.EOF_TOKEN
        };

        List<Token> tlist = getTokens(str);
        checkTokens(tlist, expected);
        checkLinesAndOffsets(tlist, expected);
    }

    @Test
    public void testScanUrlWithSyntaxError() {

        // Unescaped quote inside an unquoted url() truncates to INVALID.
        String str = "url(http://foo.bar.com/fonts/true'type/fubar.ttf)";
        Token[] expected = new Token[]{
            new Token(Token.INVALID, "http://foo.bar.com/fonts/true", 1, 0),
            Token.EOF_TOKEN
        };

        List<Token> tlist = getTokens(str);
        checkTokens(tlist, expected);
        checkLinesAndOffsets(tlist, expected);
    }

    @Test
    public void testScanQuotedUrlWithSyntaxError() {

        // Raw CR inside a quoted url() truncates to INVALID on the next line.
        String str = "url('http://foo.bar.com/fonts/true\rtype/fubar.ttf')";
        Token[] expected = new Token[]{
            new Token(Token.INVALID, "http://foo.bar.com/fonts/true", 2, 0),
            Token.EOF_TOKEN
        };

        List<Token> tlist = getTokens(str);
        checkTokens(tlist, expected);
        checkLinesAndOffsets(tlist, expected);
    }

}