/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/universe.inline.hpp"
#include "oops/methodData.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"

#ifndef CC_INTERP
#define __ _masm->

// Misc helpers

// Do an oop store like *(base + index + offset) = val
// index can be noreg.
static void do_oop_store(InterpreterMacroAssembler* _masm,
                         Register base,
                         Register index,
                         int offset,
                         Register val,
                         Register tmp,
                         BarrierSet::Name barrier,
                         bool precise) {
  assert(tmp != val && tmp != base && tmp != index, "register collision");
  assert(index == noreg || offset == 0, "only one offset");
  switch (barrier) {
#ifndef SERIALGC
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      {
        // Load and record the previous value.
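        // G1's SATB pre-barrier: with pre_val == noreg below, the barrier
        // loads the old value from (base + index/offset) itself and, if it
        // is non-NULL, logs it in the thread's SATB queue so concurrent
        // marking keeps its snapshot-at-the-beginning invariant.
        // preserve_o_regs is set because the O registers carry live
        // interpreter state at this point.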
        __ g1_write_barrier_pre(base, index, offset,
                                noreg /* pre_val */,
                                tmp, true /*preserve_o_regs*/);

        if (index == noreg ) {
          assert(Assembler::is_simm13(offset), "fix this code");
          __ store_heap_oop(val, base, offset);
        } else {
          __ store_heap_oop(val, base, index);
        }

        // No need for post barrier if storing NULL
        if (val != G0) {
          if (precise) {
            if (index == noreg) {
              __ add(base, offset, base);
            } else {
              __ add(base, index, base);
            }
          }
          __ g1_write_barrier_post(base, val, tmp);
        }
      }
      break;
#endif // SERIALGC
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      {
        if (index == noreg ) {
          assert(Assembler::is_simm13(offset), "fix this code");
          __ store_heap_oop(val, base, offset);
        } else {
          __ store_heap_oop(val, base, index);
        }
        // No need for post barrier if storing NULL
        if (val != G0) {
          if (precise) {
            if (index == noreg) {
              __ add(base, offset, base);
            } else {
              __ add(base, index, base);
            }
          }
          __ card_write_barrier_post(base, val, tmp);
        }
      }
      break;
    case BarrierSet::ModRef:
    case BarrierSet::Other:
      ShouldNotReachHere();
      break;
    default:
      ShouldNotReachHere();
  }
}


//----------------------------------------------------------------------------------------------------
// Platform-dependent initialization

void TemplateTable::pd_initialize() {
  // (none)
}


//----------------------------------------------------------------------------------------------------
// Condition conversion
Assembler::Condition ccNot(TemplateTable::Condition cc) {
  switch (cc) {
    case TemplateTable::equal        : return Assembler::notEqual;
    case TemplateTable::not_equal    : return Assembler::equal;
    case TemplateTable::less         : return Assembler::greaterEqual;
    case TemplateTable::less_equal   : return Assembler::greater;
    case TemplateTable::greater      : return Assembler::lessEqual;
    case TemplateTable::greater_equal: return Assembler::less;
  }
  ShouldNotReachHere();
  return Assembler::zero;
}

//----------------------------------------------------------------------------------------------------
// Miscellaneous helper routines


Address TemplateTable::at_bcp(int offset) {
  assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
  return Address(Lbcp, offset);
}


void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
                                   Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
                                   int byte_no) {
  // With sharing on, may need to test Method* flag.
  if (!RewriteBytecodes)  return;
  Label L_patch_done;

  switch (bc) {
  case Bytecodes::_fast_aputfield:
  case Bytecodes::_fast_bputfield:
  case Bytecodes::_fast_cputfield:
  case Bytecodes::_fast_dputfield:
  case Bytecodes::_fast_fputfield:
  case Bytecodes::_fast_iputfield:
  case Bytecodes::_fast_lputfield:
  case Bytecodes::_fast_sputfield:
    {
      // We skip bytecode quickening for putfield instructions when
      // the put_code written to the constant pool cache is zero.
      // This is required so that every execution of this instruction
      // calls out to InterpreterRuntime::resolve_get_put to do
      // additional, required work.
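      // The bytecode fetched from the cache entry below stays zero until
      // resolve_get_put has completed, so the equal-to-zero branch keeps
      // dispatching through the slow path until resolution is done.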
      assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
      assert(load_bc_into_bc_reg, "we use bc_reg as temp");
      __ get_cache_and_index_and_bytecode_at_bcp(bc_reg, temp_reg, temp_reg, byte_no, 1);
      __ set(bc, bc_reg);
      __ cmp_and_br_short(temp_reg, 0, Assembler::equal, Assembler::pn, L_patch_done);  // don't patch
    }
    break;
  default:
    assert(byte_no == -1, "sanity");
    if (load_bc_into_bc_reg) {
      __ set(bc, bc_reg);
    }
  }

  if (JvmtiExport::can_post_breakpoint()) {
    Label L_fast_patch;
    __ ldub(at_bcp(0), temp_reg);
    __ cmp_and_br_short(temp_reg, Bytecodes::_breakpoint, Assembler::notEqual, Assembler::pt, L_fast_patch);
    // perform the quickening, slowly, in the bowels of the breakpoint table
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), Lmethod, Lbcp, bc_reg);
    __ ba_short(L_patch_done);
    __ bind(L_fast_patch);
  }

#ifdef ASSERT
  Bytecodes::Code orig_bytecode = Bytecodes::java_code(bc);
  Label L_okay;
  __ ldub(at_bcp(0), temp_reg);
  __ cmp(temp_reg, orig_bytecode);
  __ br(Assembler::equal, false, Assembler::pt, L_okay);
  __ delayed()->cmp(temp_reg, bc_reg);
  __ br(Assembler::equal, false, Assembler::pt, L_okay);
  __ delayed()->nop();
  __ stop("patching the wrong bytecode");
  __ bind(L_okay);
#endif

  // patch bytecode
  __ stb(bc_reg, at_bcp(0));
  __ bind(L_patch_done);
}

//----------------------------------------------------------------------------------------------------
// Individual instructions

void TemplateTable::nop() {
  transition(vtos, vtos);
  // nothing to do
}

void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}

void TemplateTable::aconst_null() {
  transition(vtos, atos);
  __ clr(Otos_i);
}


void TemplateTable::iconst(int value) {
  transition(vtos, itos);
  __ set(value, Otos_i);
}


void TemplateTable::lconst(int value) {
  transition(vtos, ltos);
  assert(value >= 0, "check this code");
#ifdef _LP64
  __ set(value, Otos_l);
#else
  __ set(value, Otos_l2);
  __ clr( Otos_l1);
#endif
}


void TemplateTable::fconst(int value) {
  transition(vtos, ftos);
  static float zero = 0.0, one = 1.0, two = 2.0;
  float* p;
  switch( value ) {
   default: ShouldNotReachHere();
   case 0:  p = &zero;  break;
   case 1:  p = &one;   break;
   case 2:  p = &two;   break;
  }
  AddressLiteral a(p);
  __ sethi(a, G3_scratch);
  __ ldf(FloatRegisterImpl::S, G3_scratch, a.low10(), Ftos_f);
}


void TemplateTable::dconst(int value) {
  transition(vtos, dtos);
  static double zero = 0.0, one = 1.0;
  double* p;
  switch( value ) {
   default: ShouldNotReachHere();
   case 0:  p = &zero;  break;
   case 1:  p = &one;   break;
  }
  AddressLiteral a(p);
  __ sethi(a, G3_scratch);
  __ ldf(FloatRegisterImpl::D, G3_scratch, a.low10(), Ftos_d);
}


// %%%%% Should factor most snippet templates across platforms

void TemplateTable::bipush() {
  transition(vtos, itos);
  __ ldsb( at_bcp(1), Otos_i );
}

void TemplateTable::sipush() {
  transition(vtos, itos);
  __ get_2_byte_integer_at_bcp(1, G3_scratch, Otos_i, InterpreterMacroAssembler::Signed);
}

void TemplateTable::ldc(bool wide) {
  transition(vtos, vtos);
  Label call_ldc, notInt,
        isString, notString, notClass, exit;

  if (wide) {
    __ get_2_byte_integer_at_bcp(1, G3_scratch, O1, InterpreterMacroAssembler::Unsigned);
  } else {
    __ ldub(Lbcp, 1, O1);
  }
  __ get_cpool_and_tags(O0, O2);

  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // get type from tags
  __ add(O2, tags_offset, O2);
  __ ldub(O2, O1, O2);

  // unresolved class? If so, must resolve
  __ cmp_and_brx_short(O2, JVM_CONSTANT_UnresolvedClass, Assembler::equal, Assembler::pt, call_ldc);

  // unresolved class in error state
  __ cmp_and_brx_short(O2, JVM_CONSTANT_UnresolvedClassInError, Assembler::equal, Assembler::pn, call_ldc);

  __ cmp(O2, JVM_CONSTANT_Class);      // need to call vm to get java mirror of the class
  __ brx(Assembler::notEqual, true, Assembler::pt, notClass);
  __ delayed()->add(O0, base_offset, O0);

  __ bind(call_ldc);
  __ set(wide, O1);
  call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), O1);
  __ push(atos);
  __ ba_short(exit);

  __ bind(notClass);
 // __ add(O0, base_offset, O0);
  __ sll(O1, LogBytesPerWord, O1);
  __ cmp(O2, JVM_CONSTANT_Integer);
  __ brx(Assembler::notEqual, true, Assembler::pt, notInt);
  __ delayed()->cmp(O2, JVM_CONSTANT_String);
  __ ld(O0, O1, Otos_i);
  __ push(itos);
  __ ba_short(exit);

  __ bind(notInt);
 // __ cmp(O2, JVM_CONSTANT_String);
  __ brx(Assembler::equal, true, Assembler::pt, isString);
  __ delayed()->cmp(O2, JVM_CONSTANT_Object);
  __ brx(Assembler::notEqual, true, Assembler::pt, notString);
  __ delayed()->ldf(FloatRegisterImpl::S, O0, O1, Ftos_f);
  __ bind(isString);
  __ stop("string should be rewritten to fast_aldc");
  __ ba_short(exit);

  __ bind(notString);
 // __ ldf(FloatRegisterImpl::S, O0, O1, Ftos_f);
  __ push(ftos);

  __ bind(exit);
}

// Fast path for caching oop constants.
// %%% We should use this to handle Class and String constants also.
// %%% It will simplify the ldc/primitive path considerably.
void TemplateTable::fast_aldc(bool wide) {
  transition(vtos, atos);

  int index_size = wide ? sizeof(u2) : sizeof(u1);
  Label resolved;

  // We are resolved if the resolved reference cache entry contains a
  // non-null object (CallSite, etc.)
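  // The resolved_references array is indexed by the same cache index the
  // bytecode carries; a NULL entry simply means "not resolved yet" and
  // sends us into InterpreterRuntime::resolve_ldc below.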
  assert_different_registers(Otos_i, G3_scratch);
  __ get_cache_index_at_bcp(Otos_i, G3_scratch, 1, index_size);  // load index => G3_scratch
  __ load_resolved_reference_at_index(Otos_i, G3_scratch);
  __ tst(Otos_i);
  __ br(Assembler::notEqual, false, Assembler::pt, resolved);
  __ delayed()->set((int)bytecode(), O1);

  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);

  // first time invocation - must resolve first
  __ call_VM(Otos_i, entry, O1);
  __ bind(resolved);
  __ verify_oop(Otos_i);
}


void TemplateTable::ldc2_w() {
  transition(vtos, vtos);
  Label Long, exit;

  __ get_2_byte_integer_at_bcp(1, G3_scratch, O1, InterpreterMacroAssembler::Unsigned);
  __ get_cpool_and_tags(O0, O2);

  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();
  // get type from tags
  __ add(O2, tags_offset, O2);
  __ ldub(O2, O1, O2);

  __ sll(O1, LogBytesPerWord, O1);
  __ add(O0, O1, G3_scratch);

  __ cmp_and_brx_short(O2, JVM_CONSTANT_Double, Assembler::notEqual, Assembler::pt, Long);
  // A double can be placed at word-aligned locations in the constant pool.
  // Check out Conversions.java for an example.
  // Also ConstantPool::header_size() is 20, which makes it very difficult
  // to double-align double on the constant pool.  SG, 11/7/97
#ifdef _LP64
  __ ldf(FloatRegisterImpl::D, G3_scratch, base_offset, Ftos_d);
#else
  FloatRegister f = Ftos_d;
  __ ldf(FloatRegisterImpl::S, G3_scratch, base_offset, f);
  __ ldf(FloatRegisterImpl::S, G3_scratch, base_offset + sizeof(jdouble)/2,
         f->successor());
#endif
  __ push(dtos);
  __ ba_short(exit);

  __ bind(Long);
#ifdef _LP64
  __ ldx(G3_scratch, base_offset, Otos_l);
#else
  __ ld(G3_scratch, base_offset, Otos_l);
  __ ld(G3_scratch, base_offset + sizeof(jlong)/2, Otos_l->successor());
#endif
  __ push(ltos);

  __ bind(exit);
}


void TemplateTable::locals_index(Register reg, int offset) {
  __ ldub( at_bcp(offset), reg );
}


void TemplateTable::locals_index_wide(Register reg) {
  // offset is 2, not 1, because Lbcp points to wide prefix code
  __ get_2_byte_integer_at_bcp(2, G4_scratch, reg, InterpreterMacroAssembler::Unsigned);
}

void TemplateTable::iload() {
  transition(vtos, itos);
  // Rewrite iload,iload  pair into fast_iload2
  //         iload,caload pair into fast_icaload
  if (RewriteFrequentPairs) {
    Label rewrite, done;

    // get next byte
    __ ldub(at_bcp(Bytecodes::length_for(Bytecodes::_iload)), G3_scratch);

    // if _iload, wait to rewrite to iload2.  We only want to rewrite the
    // last two iloads in a pair.  Comparing against fast_iload means that
    // the next bytecode is neither an iload nor a caload, and therefore
    // an iload pair.
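    // Example: for "iload 1; iload 2" the second iload is rewritten to
    // fast_iload first; on a later execution the first one then sees
    // _fast_iload as its successor and becomes fast_iload2, which pushes
    // local #1 and loads local #2 (the operand at bcp + 3) in one dispatch.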
    __ cmp_and_br_short(G3_scratch, (int)Bytecodes::_iload, Assembler::equal, Assembler::pn, done);

    __ cmp(G3_scratch, (int)Bytecodes::_fast_iload);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_iload2, G4_scratch);

    __ cmp(G3_scratch, (int)Bytecodes::_caload);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_icaload, G4_scratch);

    __ set(Bytecodes::_fast_iload, G4_scratch);  // don't check again
    // rewrite
    // G4_scratch: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_iload, G4_scratch, G3_scratch, false);
    __ bind(done);
  }

  // Get the local value into tos
  locals_index(G3_scratch);
  __ access_local_int( G3_scratch, Otos_i );
}

void TemplateTable::fast_iload2() {
  transition(vtos, itos);
  locals_index(G3_scratch);
  __ access_local_int( G3_scratch, Otos_i );
  __ push_i();
  locals_index(G3_scratch, 3);  // get next bytecode's local index.
  __ access_local_int( G3_scratch, Otos_i );
}

void TemplateTable::fast_iload() {
  transition(vtos, itos);
  locals_index(G3_scratch);
  __ access_local_int( G3_scratch, Otos_i );
}

void TemplateTable::lload() {
  transition(vtos, ltos);
  locals_index(G3_scratch);
  __ access_local_long( G3_scratch, Otos_l );
}


void TemplateTable::fload() {
  transition(vtos, ftos);
  locals_index(G3_scratch);
  __ access_local_float( G3_scratch, Ftos_f );
}


void TemplateTable::dload() {
  transition(vtos, dtos);
  locals_index(G3_scratch);
  __ access_local_double( G3_scratch, Ftos_d );
}


void TemplateTable::aload() {
  transition(vtos, atos);
  locals_index(G3_scratch);
  __ access_local_ptr( G3_scratch, Otos_i);
}


void TemplateTable::wide_iload() {
  transition(vtos, itos);
  locals_index_wide(G3_scratch);
  __ access_local_int( G3_scratch, Otos_i );
}


void TemplateTable::wide_lload() {
  transition(vtos, ltos);
  locals_index_wide(G3_scratch);
  __ access_local_long( G3_scratch, Otos_l );
}


void TemplateTable::wide_fload() {
  transition(vtos, ftos);
  locals_index_wide(G3_scratch);
  __ access_local_float( G3_scratch, Ftos_f );
}


void TemplateTable::wide_dload() {
  transition(vtos, dtos);
  locals_index_wide(G3_scratch);
  __ access_local_double( G3_scratch, Ftos_d );
}


void TemplateTable::wide_aload() {
  transition(vtos, atos);
  locals_index_wide(G3_scratch);
  __ access_local_ptr( G3_scratch, Otos_i );
  __ verify_oop(Otos_i);
}


void TemplateTable::iaload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, LogBytesPerInt, G3_scratch, O3);
  __ ld(O3, arrayOopDesc::base_offset_in_bytes(T_INT), Otos_i);
}


void TemplateTable::laload() {
  transition(itos, ltos);
  // Otos_i: index
  // O2: array
  __ index_check(O2, Otos_i, LogBytesPerLong, G3_scratch, O3);
  __ ld_long(O3, arrayOopDesc::base_offset_in_bytes(T_LONG), Otos_l);
}


void TemplateTable::faload() {
  transition(itos, ftos);
  // Otos_i: index
  // O2: array
  __ index_check(O2, Otos_i, LogBytesPerInt, G3_scratch, O3);
  __ ldf(FloatRegisterImpl::S, O3, arrayOopDesc::base_offset_in_bytes(T_FLOAT), Ftos_f);
}


void TemplateTable::daload() {
  transition(itos, dtos);
  // Otos_i: index
  // O2: array
  __ index_check(O2, Otos_i, LogBytesPerLong, G3_scratch, O3);
  __ ldf(FloatRegisterImpl::D, O3, arrayOopDesc::base_offset_in_bytes(T_DOUBLE), Ftos_d);
}


void TemplateTable::aaload() {
  transition(itos, atos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, UseCompressedOops ? 2 : LogBytesPerWord, G3_scratch, O3);
  __ load_heap_oop(O3, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Otos_i);
  __ verify_oop(Otos_i);
}


void TemplateTable::baload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, 0, G3_scratch, O3);
  __ ldsb(O3, arrayOopDesc::base_offset_in_bytes(T_BYTE), Otos_i);
}


void TemplateTable::caload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, LogBytesPerShort, G3_scratch, O3);
  __ lduh(O3, arrayOopDesc::base_offset_in_bytes(T_CHAR), Otos_i);
}

void TemplateTable::fast_icaload() {
  transition(vtos, itos);
  // Otos_i: index
  // tos: array
  locals_index(G3_scratch);
  __ access_local_int( G3_scratch, Otos_i );
  __ index_check(O2, Otos_i, LogBytesPerShort, G3_scratch, O3);
  __ lduh(O3, arrayOopDesc::base_offset_in_bytes(T_CHAR), Otos_i);
}


void TemplateTable::saload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, LogBytesPerShort, G3_scratch, O3);
  __ ldsh(O3, arrayOopDesc::base_offset_in_bytes(T_SHORT), Otos_i);
}


void TemplateTable::iload(int n) {
  transition(vtos, itos);
  __ ld( Llocals, Interpreter::local_offset_in_bytes(n), Otos_i );
}


void TemplateTable::lload(int n) {
  transition(vtos, ltos);
  assert(n+1 < Argument::n_register_parameters, "would need more code");
  __ load_unaligned_long(Llocals, Interpreter::local_offset_in_bytes(n+1), Otos_l);
}


void TemplateTable::fload(int n) {
  transition(vtos, ftos);
  assert(n < Argument::n_register_parameters, "would need more code");
  __ ldf( FloatRegisterImpl::S, Llocals, Interpreter::local_offset_in_bytes(n), Ftos_f );
}


void TemplateTable::dload(int n) {
  transition(vtos, dtos);
  FloatRegister dst = Ftos_d;
  __ load_unaligned_double(Llocals, Interpreter::local_offset_in_bytes(n+1), dst);
}


void TemplateTable::aload(int n) {
  transition(vtos, atos);
  __ ld_ptr( Llocals, Interpreter::local_offset_in_bytes(n), Otos_i );
}


void TemplateTable::aload_0() {
  transition(vtos, atos);

  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield (itos)
  // _aload_0, _fast_agetfield (atos)
  // _aload_0, _fast_fgetfield (ftos)
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow) _aload_0
  // bytecode checks the next bytecode and then rewrites the current
  // bytecode into a pair bytecode; otherwise it rewrites the current
  // bytecode into _fast_aload_0 that doesn't do the pair check anymore.
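  // (Each pair bytecode, e.g. _fast_iaccess_0, performs the aload_0 and the
  // following field load in a single dispatch; the asserts below check that
  // the java_code of each replacement still maps back to _aload_0.)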
  //
  if (RewriteFrequentPairs) {
    Label rewrite, done;

    // get next byte
    __ ldub(at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)), G3_scratch);

    // do actual aload_0
    aload(0);

    // if _getfield then wait with rewrite
    __ cmp_and_br_short(G3_scratch, (int)Bytecodes::_getfield, Assembler::equal, Assembler::pn, done);

    // if _igetfield then rewrite to _fast_iaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
    __ cmp(G3_scratch, (int)Bytecodes::_fast_igetfield);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_iaccess_0, G4_scratch);

    // if _agetfield then rewrite to _fast_aaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
    __ cmp(G3_scratch, (int)Bytecodes::_fast_agetfield);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_aaccess_0, G4_scratch);

    // if _fgetfield then rewrite to _fast_faccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
    __ cmp(G3_scratch, (int)Bytecodes::_fast_fgetfield);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_faccess_0, G4_scratch);

    // else rewrite to _fast_aload0
    assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
    __ set(Bytecodes::_fast_aload_0, G4_scratch);

    // rewrite
    // G4_scratch: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_aload_0, G4_scratch, G3_scratch, false);
    __ bind(done);
  } else {
    aload(0);
  }
}


void TemplateTable::istore() {
  transition(itos, vtos);
  locals_index(G3_scratch);
  __ store_local_int( G3_scratch, Otos_i );
}


void TemplateTable::lstore() {
  transition(ltos, vtos);
  locals_index(G3_scratch);
  __ store_local_long( G3_scratch, Otos_l );
}


void TemplateTable::fstore() {
  transition(ftos, vtos);
  locals_index(G3_scratch);
  __ store_local_float( G3_scratch, Ftos_f );
}


void TemplateTable::dstore() {
  transition(dtos, vtos);
  locals_index(G3_scratch);
  __ store_local_double( G3_scratch, Ftos_d );
}


void TemplateTable::astore() {
  transition(vtos, vtos);
  __ load_ptr(0, Otos_i);
  __ inc(Lesp, Interpreter::stackElementSize);
  __ verify_oop_or_return_address(Otos_i, G3_scratch);
  locals_index(G3_scratch);
  __ store_local_ptr(G3_scratch, Otos_i);
}


void TemplateTable::wide_istore() {
  transition(vtos, vtos);
  __ pop_i();
  locals_index_wide(G3_scratch);
  __ store_local_int( G3_scratch, Otos_i );
}


void TemplateTable::wide_lstore() {
  transition(vtos, vtos);
  __ pop_l();
  locals_index_wide(G3_scratch);
  __ store_local_long( G3_scratch, Otos_l );
}


void TemplateTable::wide_fstore() {
  transition(vtos, vtos);
  __ pop_f();
  locals_index_wide(G3_scratch);
  __ store_local_float( G3_scratch, Ftos_f );
}


void TemplateTable::wide_dstore() {
  transition(vtos, vtos);
  __ pop_d();
  locals_index_wide(G3_scratch);
  __ store_local_double( G3_scratch, Ftos_d );
}


void TemplateTable::wide_astore() {
  transition(vtos, vtos);
  __ load_ptr(0, Otos_i);
  __ inc(Lesp, Interpreter::stackElementSize);
  __ verify_oop_or_return_address(Otos_i, G3_scratch);
  locals_index_wide(G3_scratch);
  __ store_local_ptr(G3_scratch, Otos_i);
}


void TemplateTable::iastore() {
  transition(itos, vtos);
  __ pop_i(O2); // index
  // Otos_i: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerInt, G3_scratch, O2);
  __ st(Otos_i, O2, arrayOopDesc::base_offset_in_bytes(T_INT));
}


void TemplateTable::lastore() {
  transition(ltos, vtos);
  __ pop_i(O2); // index
  // Otos_l: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerLong, G3_scratch, O2);
  __ st_long(Otos_l, O2, arrayOopDesc::base_offset_in_bytes(T_LONG));
}


void TemplateTable::fastore() {
  transition(ftos, vtos);
  __ pop_i(O2); // index
  // Ftos_f: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerInt, G3_scratch, O2);
  __ stf(FloatRegisterImpl::S, Ftos_f, O2, arrayOopDesc::base_offset_in_bytes(T_FLOAT));
}


void TemplateTable::dastore() {
  transition(dtos, vtos);
  __ pop_i(O2); // index
  // Ftos_d: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerLong, G3_scratch, O2);
  __ stf(FloatRegisterImpl::D, Ftos_d, O2, arrayOopDesc::base_offset_in_bytes(T_DOUBLE));
}


void TemplateTable::aastore() {
  Label store_ok, is_null, done;
  transition(vtos, vtos);
  __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), Otos_i);
  __ ld(Lesp, Interpreter::expr_offset_in_bytes(1), O2);         // get index
  __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(2), O3);     // get array
  // Otos_i: val
  // O2: index
  // O3: array
  __ verify_oop(Otos_i);
  __ index_check_without_pop(O3, O2, UseCompressedOops ? 2 : LogBytesPerWord, G3_scratch, O1);

  // do array store check - check for NULL value first
  __ br_null_short( Otos_i, Assembler::pn, is_null );

  __ load_klass(O3, O4);     // get array klass
  __ load_klass(Otos_i, O5); // get value klass

  // do fast instanceof cache test

  __ ld_ptr(O4, in_bytes(ObjArrayKlass::element_klass_offset()), O4);

  assert(Otos_i == O0, "just checking");

  // Otos_i:    value
  // O1:        addr - offset
  // O2:        index
  // O3:        array
  // O4:        array element klass
  // O5:        value klass

  // Address element(O1, 0, arrayOopDesc::base_offset_in_bytes(T_OBJECT));

  // Generate a fast subtype check.  Branch to store_ok if no
  // failure.  Throw if failure.
  __ gen_subtype_check( O5, O4, G3_scratch, G4_scratch, G1_scratch, store_ok );

  // Not a subtype; so must throw exception
  __ throw_if_not_x( Assembler::never, Interpreter::_throw_ArrayStoreException_entry, G3_scratch );

  // Store is OK.
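  // do_oop_store wraps the element store in the GC barriers; 'precise' is
  // true because the base must be adjusted to the exact element address
  // before card-marking (arrays can span cards, so marking only the card
  // of the object header is not enough).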
  __ bind(store_ok);
  do_oop_store(_masm, O1, noreg, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Otos_i, G3_scratch, _bs->kind(), true);

  __ ba(done);
  __ delayed()->inc(Lesp, 3* Interpreter::stackElementSize); // adj sp (pops array, index and value)

  __ bind(is_null);
  do_oop_store(_masm, O1, noreg, arrayOopDesc::base_offset_in_bytes(T_OBJECT), G0, G4_scratch, _bs->kind(), true);

  __ profile_null_seen(G3_scratch);
  __ inc(Lesp, 3* Interpreter::stackElementSize);            // adj sp (pops array, index and value)
  __ bind(done);
}


void TemplateTable::bastore() {
  transition(itos, vtos);
  __ pop_i(O2); // index
  // Otos_i: val
  // O3: array
  __ index_check(O3, O2, 0, G3_scratch, O2);
  __ stb(Otos_i, O2, arrayOopDesc::base_offset_in_bytes(T_BYTE));
}


void TemplateTable::castore() {
  transition(itos, vtos);
  __ pop_i(O2); // index
  // Otos_i: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerShort, G3_scratch, O2);
  __ sth(Otos_i, O2, arrayOopDesc::base_offset_in_bytes(T_CHAR));
}


void TemplateTable::sastore() {
  // %%%%% Factor across platform
  castore();
}


void TemplateTable::istore(int n) {
  transition(itos, vtos);
  __ st(Otos_i, Llocals, Interpreter::local_offset_in_bytes(n));
}


void TemplateTable::lstore(int n) {
  transition(ltos, vtos);
  assert(n+1 < Argument::n_register_parameters, "only handle register cases");
  __ store_unaligned_long(Otos_l, Llocals, Interpreter::local_offset_in_bytes(n+1));
}


void TemplateTable::fstore(int n) {
  transition(ftos, vtos);
  assert(n < Argument::n_register_parameters, "only handle register cases");
  __ stf(FloatRegisterImpl::S, Ftos_f, Llocals, Interpreter::local_offset_in_bytes(n));
}


void TemplateTable::dstore(int n) {
  transition(dtos, vtos);
  FloatRegister src = Ftos_d;
  __ store_unaligned_double(src, Llocals, Interpreter::local_offset_in_bytes(n+1));
}


void TemplateTable::astore(int n) {
  transition(vtos, vtos);
  __ load_ptr(0, Otos_i);
  __ inc(Lesp, Interpreter::stackElementSize);
  __ verify_oop_or_return_address(Otos_i, G3_scratch);
  __ store_local_ptr(n, Otos_i);
}


void TemplateTable::pop() {
  transition(vtos, vtos);
  __ inc(Lesp, Interpreter::stackElementSize);
}


void TemplateTable::pop2() {
  transition(vtos, vtos);
  __ inc(Lesp, 2 * Interpreter::stackElementSize);
}


void TemplateTable::dup() {
  transition(vtos, vtos);
  // stack: ..., a
  // load a and tag
  __ load_ptr(0, Otos_i);
  __ push_ptr(Otos_i);
  // stack: ..., a, a
}


void TemplateTable::dup_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 1, G3_scratch);  // get a
  __ load_ptr( 0, Otos_l1);     // get b
  __ store_ptr(1, Otos_l1);     // put b
  __ store_ptr(0, G3_scratch);  // put a - like swap
  __ push_ptr(Otos_l1);         // push b
  // stack: ..., b, a, b
}


void TemplateTable::dup_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  // get c and push on stack, reuse registers
  __ load_ptr( 0, G3_scratch);  // get c
  __ push_ptr(G3_scratch);      // push c with tag
  // stack: ..., a, b, c, c  (c in reg)  (Lesp - 4)
  // (stack offsets n+1 now)
  __ load_ptr( 3, Otos_l1);     // get a
  __ store_ptr(3, G3_scratch);  // put c at 3
  // stack: ..., c, b, c, c  (a in reg)
  __ load_ptr( 2, G3_scratch);  // get b
  __ store_ptr(2, Otos_l1);     // put a at 2
  // stack: ..., c, a, c, c  (b in reg)
  __ store_ptr(1, G3_scratch);  // put b at 1
  // stack: ..., c, a, b, c
}


void TemplateTable::dup2() {
  transition(vtos, vtos);
  __ load_ptr(1, G3_scratch);  // get a
  __ load_ptr(0, Otos_l1);     // get b
  __ push_ptr(G3_scratch);     // push a
  __ push_ptr(Otos_l1);        // push b
  // stack: ..., a, b, a, b
}


void TemplateTable::dup2_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ load_ptr( 1, Lscratch);    // get b
  __ load_ptr( 2, Otos_l1);     // get a
  __ store_ptr(2, Lscratch);    // put b at a
  // stack: ..., b, b, c
  __ load_ptr( 0, G3_scratch);  // get c
  __ store_ptr(1, G3_scratch);  // put c at b
  // stack: ..., b, c, c
  __ store_ptr(0, Otos_l1);     // put a at c
  // stack: ..., b, c, a
  __ push_ptr(Lscratch);        // push b
  __ push_ptr(G3_scratch);      // push c
  // stack: ..., b, c, a, b, c
}


// The spec says that these types can be a mixture of category 1 (1 word)
// types and/or category 2 types (longs and doubles)
void TemplateTable::dup2_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c, d
  __ load_ptr( 1, Lscratch);    // get c
  __ load_ptr( 3, Otos_l1);     // get a
  __ store_ptr(3, Lscratch);    // put c at 3
  __ store_ptr(1, Otos_l1);     // put a at 1
  // stack: ..., c, b, a, d
  __ load_ptr( 2, G3_scratch);  // get b
  __ load_ptr( 0, Otos_l1);     // get d
  __ store_ptr(0, G3_scratch);  // put b at 0
  __ store_ptr(2, Otos_l1);     // put d at 2
  // stack: ..., c, d, a, b
  __ push_ptr(Lscratch);        // push c
  __ push_ptr(Otos_l1);         // push d
  // stack: ..., c, d, a, b, c, d
}


void TemplateTable::swap() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 1, G3_scratch);  // get a
  __ load_ptr( 0, Otos_l1);     // get b
  __ store_ptr(0, G3_scratch);  // put b
  __ store_ptr(1, Otos_l1);     // put a
  // stack: ..., b, a
}


void TemplateTable::iop2(Operation op) {
  transition(itos, itos);
  __ pop_i(O1);
  switch (op) {
   case  add:  __  add(O1, Otos_i, Otos_i);  break;
   case  sub:  __  sub(O1, Otos_i, Otos_i);  break;
   // %%%%% Mul may not exist: better to call .mul?
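   // smul writes the full 64-bit product to Y:rd; for a 32-bit imul only
   // the low word in the destination register matters, so it is fine here.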
   case  mul:  __ smul(O1, Otos_i, Otos_i);  break;
   case _and:  __ and3(O1, Otos_i, Otos_i);  break;
   case  _or:  __  or3(O1, Otos_i, Otos_i);  break;
   case _xor:  __ xor3(O1, Otos_i, Otos_i);  break;
   case  shl:  __  sll(O1, Otos_i, Otos_i);  break;
   case  shr:  __  sra(O1, Otos_i, Otos_i);  break;
   case ushr:  __  srl(O1, Otos_i, Otos_i);  break;
   default: ShouldNotReachHere();
  }
}


void TemplateTable::lop2(Operation op) {
  transition(ltos, ltos);
  __ pop_l(O2);
  switch (op) {
#ifdef _LP64
   case  add:  __  add(O2, Otos_l, Otos_l);  break;
   case  sub:  __  sub(O2, Otos_l, Otos_l);  break;
   case _and:  __ and3(O2, Otos_l, Otos_l);  break;
   case  _or:  __  or3(O2, Otos_l, Otos_l);  break;
   case _xor:  __ xor3(O2, Otos_l, Otos_l);  break;
#else
   case  add:  __ addcc(O3, Otos_l2, Otos_l2);  __ addc(O2, Otos_l1, Otos_l1);  break;
   case  sub:  __ subcc(O3, Otos_l2, Otos_l2);  __ subc(O2, Otos_l1, Otos_l1);  break;
   case _and:  __  and3(O3, Otos_l2, Otos_l2);  __ and3(O2, Otos_l1, Otos_l1);  break;
   case  _or:  __   or3(O3, Otos_l2, Otos_l2);  __  or3(O2, Otos_l1, Otos_l1);  break;
   case _xor:  __  xor3(O3, Otos_l2, Otos_l2);  __ xor3(O2, Otos_l1, Otos_l1);  break;
#endif
   default: ShouldNotReachHere();
  }
}


void TemplateTable::idiv() {
  // %%%%% Later: ForSPARC/V7 call .sdiv library routine,
  // %%%%% Use ldsw...sdivx on pure V9 ABI. 64 bit safe.

  transition(itos, itos);
  __ pop_i(O1); // get 1st op

  // Y contains upper 32 bits of result, set it to 0 or all ones
  __ wry(G0);
  __ mov(~0, G3_scratch);

  __ tst(O1);
  Label neg;
  __ br(Assembler::negative, true, Assembler::pn, neg);
  __ delayed()->wry(G3_scratch);
  __ bind(neg);

  Label ok;
  __ tst(Otos_i);
  __ throw_if_not_icc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch );

  const int min_int = 0x80000000;
  Label regular;
  __ cmp(Otos_i, -1);
  __ br(Assembler::notEqual, false, Assembler::pt, regular);
#ifdef _LP64
  // Don't put set in delay slot
  // Set will turn into multiple instructions in 64 bit mode
  __ delayed()->nop();
  __ set(min_int, G4_scratch);
#else
  __ delayed()->set(min_int, G4_scratch);
#endif
  Label done;
  __ cmp(O1, G4_scratch);
  __ br(Assembler::equal, true, Assembler::pt, done);
  __ delayed()->mov(O1, Otos_i);   // (mov only executed if branch taken)

  __ bind(regular);
  __ sdiv(O1, Otos_i, Otos_i); // note: irem uses O1 after this instruction!
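  // sdiv divides the 64-bit value Y:O1 by Otos_i; the writes to Y above
  // seeded it with the sign extension of the dividend, making this a
  // proper 32-bit signed division.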
  __ bind(done);
}


void TemplateTable::irem() {
  transition(itos, itos);
  __ mov(Otos_i, O2);  // save divisor
  idiv();              // %%%% Hack: exploits fact that idiv leaves dividend in O1
  __ smul(Otos_i, O2, Otos_i);
  __ sub(O1, Otos_i, Otos_i);
}


void TemplateTable::lmul() {
  transition(ltos, ltos);
  __ pop_l(O2);
#ifdef _LP64
  __ mulx(Otos_l, O2, Otos_l);
#else
  __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::lmul));
#endif
}


void TemplateTable::ldiv() {
  transition(ltos, ltos);

  // check for zero
  __ pop_l(O2);
#ifdef _LP64
  __ tst(Otos_l);
  __ throw_if_not_xcc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
  __ sdivx(O2, Otos_l, Otos_l);
#else
  __ orcc(Otos_l1, Otos_l2, G0);
  __ throw_if_not_icc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
  __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::ldiv));
#endif
}


void TemplateTable::lrem() {
  transition(ltos, ltos);

  // check for zero
  __ pop_l(O2);
#ifdef _LP64
  __ tst(Otos_l);
  __ throw_if_not_xcc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
  __ sdivx(O2, Otos_l, Otos_l2);
  __ mulx (Otos_l2, Otos_l, Otos_l2);
  __ sub  (O2, Otos_l2, Otos_l);
#else
  __ orcc(Otos_l1, Otos_l2, G0);
  __ throw_if_not_icc(Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
  __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::lrem));
#endif
}


void TemplateTable::lshl() {
  transition(itos, ltos); // %%%% could optimize, fill delay slot or opt for ultra

  __ pop_l(O2);           // shift value in O2, O3
#ifdef _LP64
  __ sllx(O2, Otos_i, Otos_l);
#else
  __ lshl(O2, O3, Otos_i, Otos_l1, Otos_l2, O4);
#endif
}


void TemplateTable::lshr() {
  transition(itos, ltos); // %%%% see lshl comment

  __ pop_l(O2);           // shift value in O2, O3
#ifdef _LP64
  __ srax(O2, Otos_i, Otos_l);
#else
  __ lshr(O2, O3, Otos_i, Otos_l1, Otos_l2, O4);
#endif
}



void TemplateTable::lushr() {
  transition(itos, ltos); // %%%% see lshl comment

  __ pop_l(O2);           // shift value in O2, O3
#ifdef _LP64
  __ srlx(O2, Otos_i, Otos_l);
#else
  __ lushr(O2, O3, Otos_i, Otos_l1, Otos_l2, O4);
#endif
}


void TemplateTable::fop2(Operation op) {
  transition(ftos, ftos);
  switch (op) {
   case  add:  __  pop_f(F4); __ fadd(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f);  break;
   case  sub:  __  pop_f(F4); __ fsub(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f);  break;
   case  mul:  __  pop_f(F4); __ fmul(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f);  break;
   case  div:  __  pop_f(F4); __ fdiv(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f);  break;
   case  rem:
     assert(Ftos_f == F0, "just checking");
#ifdef _LP64
     // LP64 calling conventions use F1, F3 for passing 2 floats
     __ pop_f(F1);
     __ fmov(FloatRegisterImpl::S, Ftos_f, F3);
#else
     __ pop_i(O0);
     __ stf(FloatRegisterImpl::S, Ftos_f, __ d_tmp);
     __ ld( __ d_tmp, O1 );
#endif
     __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::frem));
     assert( Ftos_f == F0, "fix this code" );
     break;

   default: ShouldNotReachHere();
  }
}


void TemplateTable::dop2(Operation op) {
  transition(dtos, dtos);
  switch (op) {
   case  add:  __  pop_d(F4); __ fadd(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d);  break;
   case  sub:  __  pop_d(F4); __ fsub(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d);  break;
   case  mul:  __  pop_d(F4); __ fmul(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d);  break;
   case  div:  __  pop_d(F4); __ fdiv(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d);  break;
   case  rem:
#ifdef _LP64
     // Pass arguments in D0, D2
     __ fmov(FloatRegisterImpl::D, Ftos_f, F2 );
     __ pop_d( F0 );
#else
     // Pass arguments in O0O1, O2O3
     __ stf(FloatRegisterImpl::D, Ftos_f, __ d_tmp);
     __ ldd( __ d_tmp, O2 );
     __ pop_d(Ftos_f);
     __ stf(FloatRegisterImpl::D, Ftos_f, __ d_tmp);
     __ ldd( __ d_tmp, O0 );
#endif
     __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::drem));
     assert( Ftos_d == F0, "fix this code" );
     break;

   default: ShouldNotReachHere();
  }
}


void TemplateTable::ineg() {
  transition(itos, itos);
  __ neg(Otos_i);
}


void TemplateTable::lneg() {
  transition(ltos, ltos);
#ifdef _LP64
  __ sub(G0, Otos_l, Otos_l);
#else
  __ lneg(Otos_l1, Otos_l2);
#endif
}


void TemplateTable::fneg() {
  transition(ftos, ftos);
  __ fneg(FloatRegisterImpl::S, Ftos_f);
}


void TemplateTable::dneg() {
  transition(dtos, dtos);
  // v8 has fnegd if source and dest are the same
  __ fneg(FloatRegisterImpl::D, Ftos_f);
}


void TemplateTable::iinc() {
  transition(vtos, vtos);
  locals_index(G3_scratch);
  __ ldsb(Lbcp, 2, O2);  // load constant
  __ access_local_int(G3_scratch, Otos_i);
  __ add(Otos_i, O2, Otos_i);
  __ st(Otos_i, G3_scratch, 0);  // access_local_int puts E.A. in G3_scratch
}


void TemplateTable::wide_iinc() {
  transition(vtos, vtos);
  locals_index_wide(G3_scratch);
  __ get_2_byte_integer_at_bcp( 4,  O2, O3, InterpreterMacroAssembler::Signed);
  __ access_local_int(G3_scratch, Otos_i);
  __ add(Otos_i, O3, Otos_i);
  __ st(Otos_i, G3_scratch, 0);  // access_local_int puts E.A. in G3_scratch
}


void TemplateTable::convert() {
// %%%%% Factor this first part across platforms
#ifdef ASSERT
  TosState tos_in  = ilgl;
  TosState tos_out = ilgl;
  switch (bytecode()) {
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_in = itos; break;
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_l2d: tos_in = ltos; break;
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_f2d: tos_in = ftos; break;
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_d2l: // fall through
    case Bytecodes::_d2f: tos_in = dtos; break;
    default             : ShouldNotReachHere();
  }
  switch (bytecode()) {
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_out = itos; break;
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_d2l: tos_out = ltos; break;
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_d2f: tos_out = ftos; break;
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_l2d: // fall through
    case Bytecodes::_f2d: tos_out = dtos; break;
    default             : ShouldNotReachHere();
  }
  transition(tos_in, tos_out);
#endif


  // Conversion
  Label done;
  switch (bytecode()) {
   case Bytecodes::_i2l:
#ifdef _LP64
    // Sign extend the 32 bits
    __ sra ( Otos_i, 0, Otos_l );
#else
    __ addcc(Otos_i, 0, Otos_l2);
    __ br(Assembler::greaterEqual, true, Assembler::pt, done);
    __ delayed()->clr(Otos_l1);
    __ set(~0, Otos_l1);
#endif
    break;

   case Bytecodes::_i2f:
    __ st(Otos_i, __ d_tmp );
    __ ldf(FloatRegisterImpl::S,  __ d_tmp, F0);
    __ fitof(FloatRegisterImpl::S, F0, Ftos_f);
    break;

   case Bytecodes::_i2d:
    __ st(Otos_i, __ d_tmp);
    __ ldf(FloatRegisterImpl::S,  __ d_tmp, F0);
    __ fitof(FloatRegisterImpl::D, F0, Ftos_f);
    break;

   case Bytecodes::_i2b:
    __ sll(Otos_i, 24, Otos_i);
    __ sra(Otos_i, 24, Otos_i);
    break;

   case Bytecodes::_i2c:
    __ sll(Otos_i, 16, Otos_i);
    __ srl(Otos_i, 16, Otos_i);
    break;

   case Bytecodes::_i2s:
    __ sll(Otos_i, 16, Otos_i);
    __ sra(Otos_i, 16, Otos_i);
    break;

   case Bytecodes::_l2i:
#ifndef _LP64
    __ mov(Otos_l2, Otos_i);
#else
    // Sign-extend into the high 32 bits
    __ sra(Otos_l, 0, Otos_i);
#endif
    break;

   case Bytecodes::_l2f:
   case Bytecodes::_l2d:
    __ st_long(Otos_l, __ d_tmp);
    __ ldf(FloatRegisterImpl::D, __ d_tmp, Ftos_d);

    if (VM_Version::v9_instructions_work()) {
      if (bytecode() == Bytecodes::_l2f) {
        __ fxtof(FloatRegisterImpl::S, Ftos_d, Ftos_f);
      } else {
        __ fxtof(FloatRegisterImpl::D, Ftos_d, Ftos_d);
      }
    } else {
      __ call_VM_leaf(
        Lscratch,
        bytecode() == Bytecodes::_l2f
          ? CAST_FROM_FN_PTR(address, SharedRuntime::l2f)
          : CAST_FROM_FN_PTR(address, SharedRuntime::l2d)
      );
    }
    break;

   case Bytecodes::_f2i:  {
      Label isNaN;
      // result must be 0 if value is NaN; test by comparing value to itself
      __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, Ftos_f, Ftos_f);
      // According to the v8 manual, you have to have a non-fp instruction
      // between fcmp and fb.
      if (!VM_Version::v9_instructions_work()) {
        __ nop();
      }
      __ fb(Assembler::f_unordered, true, Assembler::pn, isNaN);
      __ delayed()->clr(Otos_i);     // NaN
      __ ftoi(FloatRegisterImpl::S, Ftos_f, F30);
      __ stf(FloatRegisterImpl::S, F30, __ d_tmp);
      __ ld(__ d_tmp, Otos_i);
      __ bind(isNaN);
    }
    break;

   case Bytecodes::_f2l:
    // must uncache tos
    __ push_f();
#ifdef _LP64
    __ pop_f(F1);
#else
    __ pop_i(O0);
#endif
    __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::f2l));
    break;

   case Bytecodes::_f2d:
    __ ftof( FloatRegisterImpl::S, FloatRegisterImpl::D, Ftos_f, Ftos_f);
    break;

   case Bytecodes::_d2i:
   case Bytecodes::_d2l:
    // must uncache tos
    __ push_d();
#ifdef _LP64
    // LP64 calling conventions pass first double arg in D0
    __ pop_d( Ftos_d );
#else
    __ pop_i( O0 );
    __ pop_i( O1 );
#endif
    __ call_VM_leaf(Lscratch,
        bytecode() == Bytecodes::_d2i
          ? CAST_FROM_FN_PTR(address, SharedRuntime::d2i)
          : CAST_FROM_FN_PTR(address, SharedRuntime::d2l));
    break;

   case Bytecodes::_d2f:
    if (VM_Version::v9_instructions_work()) {
      __ ftof( FloatRegisterImpl::D, FloatRegisterImpl::S, Ftos_d, Ftos_f);
    }
    else {
      // must uncache tos
      __ push_d();
      __ pop_i(O0);
      __ pop_i(O1);
      __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::d2f));
    }
    break;

   default: ShouldNotReachHere();
  }
  __ bind(done);
}


void TemplateTable::lcmp() {
  transition(ltos, itos);

#ifdef _LP64
  __ pop_l(O1); // pop off value 1, value 2 is in O0
  __ lcmp( O1, Otos_l, Otos_i );
#else
  __ pop_l(O2); // cmp O2,3 to O0,1
  __ lcmp( O2, O3, Otos_l1, Otos_l2, Otos_i );
#endif
}


void TemplateTable::float_cmp(bool is_float, int unordered_result) {

  if (is_float) __ pop_f(F2);
  else          __ pop_d(F2);

  assert(Ftos_f == F0  &&  Ftos_d == F0,  "alias checking:");

  __ float_cmp( is_float, unordered_result, F2, F0, Otos_i );
}

void TemplateTable::branch(bool is_jsr, bool is_wide) {
  // Note: on SPARC, we use InterpreterMacroAssembler::if_cmp also.
  __ verify_thread();

  const Register O2_bumped_count = O2;
  __ profile_taken_branch(G3_scratch, O2_bumped_count);

  // get (wide) offset to O1_disp
  const Register O1_disp = O1;
  if (is_wide)  __ get_4_byte_integer_at_bcp( 1,  G4_scratch, O1_disp, InterpreterMacroAssembler::set_CC);
  else          __ get_2_byte_integer_at_bcp( 1,  G4_scratch, O1_disp, InterpreterMacroAssembler::Signed, InterpreterMacroAssembler::set_CC);

  // Handle all the JSR stuff here, then exit.
  // It's much shorter and cleaner than intermingling with the
  // non-JSR normal-branch stuff occurring below.
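  // The return address is pushed as a BCI rather than a raw bcp: a code
  // pointer on the expression stack would be unwalkable for GC, whereas a
  // small integer is harmless (see the matching arithmetic in ret() and
  // wide_ret() below).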
  if( is_jsr ) {
    // compute return address as bci in Otos_i
    __ ld_ptr(Lmethod, Method::const_offset(), G3_scratch);
    __ sub(Lbcp, G3_scratch, G3_scratch);
    __ sub(G3_scratch, in_bytes(ConstMethod::codes_offset()) - (is_wide ? 5 : 3), Otos_i);

    // Bump Lbcp to target of JSR
    __ add(Lbcp, O1_disp, Lbcp);
    // Push returnAddress for "ret" on stack
    __ push_ptr(Otos_i);
    // And away we go!
    __ dispatch_next(vtos);
    return;
  }

  // Normal (non-jsr) branch handling

  // Save the current Lbcp
  const Register O0_cur_bcp = O0;
  __ mov( Lbcp, O0_cur_bcp );


  bool increment_invocation_counter_for_backward_branches = UseCompiler && UseLoopCounter;
  if ( increment_invocation_counter_for_backward_branches ) {
    Label Lforward;
    // check branch direction
    __ br( Assembler::positive, false, Assembler::pn, Lforward );
    // Bump bytecode pointer by displacement (take the branch)
    __ delayed()->add( O1_disp, Lbcp, Lbcp );     // add to bc addr

    if (TieredCompilation) {
      Label Lno_mdo, Loverflow;
      int increment = InvocationCounter::count_increment;
      int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
      if (ProfileInterpreter) {
        // If no method data exists, go to profile_continue.
        __ ld_ptr(Lmethod, Method::method_data_offset(), G4_scratch);
        __ br_null_short(G4_scratch, Assembler::pn, Lno_mdo);

        // Increment backedge counter in the MDO
        Address mdo_backedge_counter(G4_scratch, in_bytes(MethodData::backedge_counter_offset()) +
                                                 in_bytes(InvocationCounter::counter_offset()));
        __ increment_mask_and_jump(mdo_backedge_counter, increment, mask, G3_scratch, Lscratch,
                                   Assembler::notZero, &Lforward);
        __ ba_short(Loverflow);
      }

      // If there's no MDO, increment counter in Method*
      __ bind(Lno_mdo);
      Address backedge_counter(Lmethod, in_bytes(Method::backedge_counter_offset()) +
                                        in_bytes(InvocationCounter::counter_offset()));
      __ increment_mask_and_jump(backedge_counter, increment, mask, G3_scratch, Lscratch,
                                 Assembler::notZero, &Lforward);
      __ bind(Loverflow);

      // notify point for loop, pass branch bytecode
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), O0_cur_bcp);

      // Was an OSR adapter generated?
      // O0 = osr nmethod
      __ br_null_short(O0, Assembler::pn, Lforward);

      // Has the nmethod been invalidated already?
      __ ld(O0, nmethod::entry_bci_offset(), O2);
      __ cmp_and_br_short(O2, InvalidOSREntryBci, Assembler::equal, Assembler::pn, Lforward);

      // migrate the interpreter frame off of the stack

      __ mov(G2_thread, L7);
      // save nmethod
      __ mov(O0, L6);
      __ set_last_Java_frame(SP, noreg);
      __ call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), L7);
      __ reset_last_Java_frame();
      __ mov(L7, G2_thread);

      // move OSR nmethod to I1
      __ mov(L6, I1);

      // OSR buffer to I0
      __ mov(O0, I0);

      // remove the interpreter frame
      __ restore(I5_savedSP, 0, SP);

      // Jump to the osr code.
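      // The restore above shifted the register window, so the values staged
      // in I0/I1 (OSR buffer and nmethod) are now visible as the caller's
      // O0/O1; hence the entry point is loaded from O1.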
      __ ld_ptr(O1, nmethod::osr_entry_point_offset(), O2);
      __ jmp(O2, G0);
      __ delayed()->nop();

    } else {
      // Update Backedge branch separately from invocations
      const Register G4_invoke_ctr = G4;
      __ increment_backedge_counter(G4_invoke_ctr, G1_scratch);
      if (ProfileInterpreter) {
        __ test_invocation_counter_for_mdp(G4_invoke_ctr, G3_scratch, Lforward);
        if (UseOnStackReplacement) {
          __ test_backedge_count_for_osr(O2_bumped_count, O0_cur_bcp, G3_scratch);
        }
      } else {
        if (UseOnStackReplacement) {
          __ test_backedge_count_for_osr(G4_invoke_ctr, O0_cur_bcp, G3_scratch);
        }
      }
    }

    __ bind(Lforward);
  } else
    // Bump bytecode pointer by displacement (take the branch)
    __ add( O1_disp, Lbcp, Lbcp );  // add to bc addr

  // continue with bytecode @ target
  // %%%%% Like Intel, could speed things up by moving bytecode fetch to code above,
  // %%%%% and changing dispatch_next to dispatch_only
  __ dispatch_next(vtos);
}


// Note Condition in argument is TemplateTable::Condition
// arg scope is within class scope

void TemplateTable::if_0cmp(Condition cc) {
  // no pointers, integer only!
  transition(itos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  __ cmp( Otos_i, 0);
  __ if_cmp(ccNot(cc), false);
}


void TemplateTable::if_icmp(Condition cc) {
  transition(itos, vtos);
  __ pop_i(O1);
  __ cmp(O1, Otos_i);
  __ if_cmp(ccNot(cc), false);
}


void TemplateTable::if_nullcmp(Condition cc) {
  transition(atos, vtos);
  __ tst(Otos_i);
  __ if_cmp(ccNot(cc), true);
}


void TemplateTable::if_acmp(Condition cc) {
  transition(atos, vtos);
  __ pop_ptr(O1);
  __ verify_oop(O1);
  __ verify_oop(Otos_i);
  __ cmp(O1, Otos_i);
  __ if_cmp(ccNot(cc), true);
}



void TemplateTable::ret() {
  transition(vtos, vtos);
  locals_index(G3_scratch);
  __ access_local_returnAddress(G3_scratch, Otos_i);
  // Otos_i contains the bci, compute the bcp from that

#ifdef _LP64
#ifdef ASSERT
  // jsr result was labeled as an 'itos' not an 'atos' because we cannot GC
  // the result.  The return address (really a BCI) was stored with an
  // 'astore' because JVM specs claim it's a pointer-sized thing.  Hence in
  // the 64-bit build the 32-bit BCI is actually in the low bits of a 64-bit
  // loaded value.
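  // A method's bytecode stream is limited to 64K, so any BCI above 65536
  // means the upper half of the register leaked into the value.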
  { Label zzz ;
     __ set (65536, G3_scratch) ;
     __ cmp (Otos_i, G3_scratch) ;
     __ bp( Assembler::lessEqualUnsigned, false, Assembler::xcc, Assembler::pn, zzz);
     __ delayed()->nop();
     __ stop("BCI is in the wrong register half?");
     __ bind (zzz) ;
  }
#endif
#endif

  __ profile_ret(vtos, Otos_i, G4_scratch);

  __ ld_ptr(Lmethod, Method::const_offset(), G3_scratch);
  __ add(G3_scratch, Otos_i, G3_scratch);
  __ add(G3_scratch, in_bytes(ConstMethod::codes_offset()), Lbcp);
  __ dispatch_next(vtos);
}


void TemplateTable::wide_ret() {
  transition(vtos, vtos);
  locals_index_wide(G3_scratch);
  __ access_local_returnAddress(G3_scratch, Otos_i);
  // Otos_i contains the bci, compute the bcp from that

  __ profile_ret(vtos, Otos_i, G4_scratch);

  __ ld_ptr(Lmethod, Method::const_offset(), G3_scratch);
  __ add(G3_scratch, Otos_i, G3_scratch);
  __ add(G3_scratch, in_bytes(ConstMethod::codes_offset()), Lbcp);
  __ dispatch_next(vtos);
}


void TemplateTable::tableswitch() {
  transition(itos, vtos);
  Label default_case, continue_execution;

  // align bcp
  __ add(Lbcp, BytesPerInt, O1);
  __ and3(O1, -BytesPerInt, O1);
  // load lo, hi
  __ ld(O1, 1 * BytesPerInt, O2);       // Low Byte
  __ ld(O1, 2 * BytesPerInt, O3);       // High Byte
#ifdef _LP64
  // Sign extend the 32 bits
  __ sra ( Otos_i, 0, Otos_i );
#endif /* _LP64 */

  // check against lo & hi
  __ cmp( Otos_i, O2);
  __ br( Assembler::less, false, Assembler::pn, default_case);
  __ delayed()->cmp( Otos_i, O3 );
  __ br( Assembler::greater, false, Assembler::pn, default_case);
  // lookup dispatch offset
  __ delayed()->sub(Otos_i, O2, O2);
  __ profile_switch_case(O2, O3, G3_scratch, G4_scratch);
  __ sll(O2, LogBytesPerInt, O2);
  __ add(O2, 3 * BytesPerInt, O2);
  __ ba(continue_execution);
  __ delayed()->ld(O1, O2, O2);
  // handle default
  __ bind(default_case);
  __ profile_switch_default(O3);
  __ ld(O1, 0, O2); // get default offset
  // continue execution
  __ bind(continue_execution);
  __ add(Lbcp, O2, Lbcp);
  __ dispatch_next(vtos);
}


void TemplateTable::lookupswitch() {
  transition(itos, itos);
  __ stop("lookupswitch bytecode should have been rewritten");
}

void TemplateTable::fast_linearswitch() {
  transition(itos, vtos);
  Label loop_entry, loop, found, continue_execution;
  // align bcp
  __ add(Lbcp, BytesPerInt, O1);
  __ and3(O1, -BytesPerInt, O1);
  // set counter
  __ ld(O1, BytesPerInt, O2);
  __ sll(O2, LogBytesPerInt + 1, O2); // in word-pairs
  __ add(O1, 2 * BytesPerInt, O3);    // set first pair addr
  __ ba(loop_entry);
  __ delayed()->add(O3, O2, O2);      // counter now points past last pair

  // table search
  __ bind(loop);
  __ cmp(O4, Otos_i);
  __ br(Assembler::equal, true, Assembler::pn, found);
  __ delayed()->ld(O3, BytesPerInt, O4); // offset -> O4
  __ inc(O3, 2 * BytesPerInt);

  __ bind(loop_entry);
  __ cmp(O2, O3);
  __ brx(Assembler::greaterUnsigned, true, Assembler::pt, loop);
  __ delayed()->ld(O3, 0, O4);

  // default case
  __ ld(O1, 0, O4); // get default offset
  if (ProfileInterpreter) {
    __ profile_switch_default(O3);
    __ ba_short(continue_execution);
  }

  // entry found -> get offset
  __ bind(found);
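  // Recover the case index for profiling: O3 points at the matching pair,
  // so index = (O3 - table_base - 2*BytesPerInt) / (2*BytesPerInt).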
1871 if (ProfileInterpreter) { 1872 __ sub(O3, O1, O3); 1873 __ sub(O3, 2*BytesPerInt, O3); 1874 __ srl(O3, LogBytesPerInt + 1, O3); // in word-pairs 1875 __ profile_switch_case(O3, O1, O2, G3_scratch); 1876 1877 __ bind(continue_execution); 1878 } 1879 __ add(Lbcp, O4, Lbcp); 1880 __ dispatch_next(vtos); 1881 } 1882 1883 1884 void TemplateTable::fast_binaryswitch() { 1885 transition(itos, vtos); 1886 // Implementation using the following core algorithm: (copied from Intel) 1887 // 1888 // int binary_search(int key, LookupswitchPair* array, int n) { 1889 // // Binary search according to "Methodik des Programmierens" by 1890 // // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985. 1891 // int i = 0; 1892 // int j = n; 1893 // while (i+1 < j) { 1894 // // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q) 1895 // // with Q: for all i: 0 <= i < n: key < a[i] 1896 // // where a stands for the array and assuming that the (inexisting) 1897 // // element a[n] is infinitely big. 1898 // int h = (i + j) >> 1; 1899 // // i < h < j 1900 // if (key < array[h].fast_match()) { 1901 // j = h; 1902 // } else { 1903 // i = h; 1904 // } 1905 // } 1906 // // R: a[i] <= key < a[i+1] or Q 1907 // // (i.e., if key is within array, i is the correct index) 1908 // return i; 1909 // } 1910 1911 // register allocation 1912 assert(Otos_i == O0, "alias checking"); 1913 const Register Rkey = Otos_i; // already set (tosca) 1914 const Register Rarray = O1; 1915 const Register Ri = O2; 1916 const Register Rj = O3; 1917 const Register Rh = O4; 1918 const Register Rscratch = O5; 1919 1920 const int log_entry_size = 3; 1921 const int entry_size = 1 << log_entry_size; 1922 1923 Label found; 1924 // Find Array start 1925 __ add(Lbcp, 3 * BytesPerInt, Rarray); 1926 __ and3(Rarray, -BytesPerInt, Rarray); 1927 // initialize i & j (in delay slot) 1928 __ clr( Ri ); 1929 1930 // and start 1931 Label entry; 1932 __ ba(entry); 1933 __ delayed()->ld( Rarray, -BytesPerInt, Rj); 1934 // (Rj is already in the native byte-ordering.) 1935 1936 // binary search loop 1937 { Label loop; 1938 __ bind( loop ); 1939 // int h = (i + j) >> 1; 1940 __ sra( Rh, 1, Rh ); 1941 // if (key < array[h].fast_match()) { 1942 // j = h; 1943 // } else { 1944 // i = h; 1945 // } 1946 __ sll( Rh, log_entry_size, Rscratch ); 1947 __ ld( Rarray, Rscratch, Rscratch ); 1948 // (Rscratch is already in the native byte-ordering.) 1949 __ cmp( Rkey, Rscratch ); 1950 if ( VM_Version::v9_instructions_work() ) { 1951 __ movcc( Assembler::less, false, Assembler::icc, Rh, Rj ); // j = h if (key < array[h].fast_match()) 1952 __ movcc( Assembler::greaterEqual, false, Assembler::icc, Rh, Ri ); // i = h if (key >= array[h].fast_match()) 1953 } 1954 else { 1955 Label end_of_if; 1956 __ br( Assembler::less, true, Assembler::pt, end_of_if ); 1957 __ delayed()->mov( Rh, Rj ); // if (<) Rj = Rh 1958 __ mov( Rh, Ri ); // else i = h 1959 __ bind(end_of_if); // } 1960 } 1961 1962 // while (i+1 < j) 1963 __ bind( entry ); 1964 __ add( Ri, 1, Rscratch ); 1965 __ cmp(Rscratch, Rj); 1966 __ br( Assembler::less, true, Assembler::pt, loop ); 1967 __ delayed()->add( Ri, Rj, Rh ); // start h = i + j >> 1; 1968 } 1969 1970 // end of binary search, result index is i (must check again!) 1971 Label default_case; 1972 Label continue_execution; 1973 if (ProfileInterpreter) { 1974 __ mov( Ri, Rh ); // Save index in i for profiling 1975 } 1976 __ sll( Ri, log_entry_size, Ri ); 1977 __ ld( Rarray, Ri, Rscratch ); 1978 // (Rscratch is already in the native byte-ordering.) 
1979 __ cmp( Rkey, Rscratch );
1980 __ br( Assembler::notEqual, true, Assembler::pn, default_case );
1981 __ delayed()->ld( Rarray, -2 * BytesPerInt, Rj ); // load default offset -> j
1982
1983 // entry found -> j = offset
1984 __ inc( Ri, BytesPerInt );
1985 __ profile_switch_case(Rh, Rj, Rscratch, Rkey);
1986 __ ld( Rarray, Ri, Rj );
1987 // (Rj is already in the native byte-ordering.)
1988
1989 if (ProfileInterpreter) {
1990 __ ba_short(continue_execution);
1991 }
1992
1993 __ bind(default_case); // fall through (if not profiling)
1994 __ profile_switch_default(Ri);
1995
1996 __ bind(continue_execution);
1997 __ add( Lbcp, Rj, Lbcp );
1998 __ dispatch_next( vtos );
1999 }
2000
2001
2002 void TemplateTable::_return(TosState state) {
2003 transition(state, state);
2004 assert(_desc->calls_vm(), "inconsistent calls_vm information");
2005
2006 if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
2007 assert(state == vtos, "only valid state");
2008 __ mov(G0, G3_scratch);
2009 __ access_local_ptr(G3_scratch, Otos_i);
2010 __ load_klass(Otos_i, O2);
2011 __ set(JVM_ACC_HAS_FINALIZER, G3);
2012 __ ld(O2, in_bytes(Klass::access_flags_offset()), O2);
2013 __ andcc(G3, O2, G0);
2014 Label skip_register_finalizer;
2015 __ br(Assembler::zero, false, Assembler::pn, skip_register_finalizer);
2016 __ delayed()->nop();
2017
2018 // Call out to do finalizer registration
2019 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), Otos_i);
2020
2021 __ bind(skip_register_finalizer);
2022 }
2023
2024 __ remove_activation(state, /* throw_monitor_exception */ true);
2025
2026 // The caller's SP was adjusted upon method entry to accommodate
2027 // the callee's non-argument locals. Undo that adjustment.
2028 __ ret(); // return to caller
2029 __ delayed()->restore(I5_savedSP, G0, SP);
2030 }
2031
2032
2033 // ----------------------------------------------------------------------------
2034 // Volatile variables demand their effects be made known to all CPUs in
2035 // order. Store buffers on most chips allow reads & writes to reorder; the
2036 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
2037 // memory barrier (i.e., it's not sufficient that the interpreter does not
2038 // reorder volatile references, the hardware also must not reorder them).
2039 //
2040 // According to the new Java Memory Model (JMM):
2041 // (1) All volatiles are serialized with respect to each other.
2042 // ALSO reads & writes act as acquire & release, so:
2043 // (2) A read cannot let unrelated NON-volatile memory refs that happen after
2044 // the read float up to before the read. It's OK for non-volatile memory refs
2045 // that happen before the volatile read to float down below it.
2046 // (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
2047 // that happen BEFORE the write float down to after the write. It's OK for
2048 // non-volatile memory refs that happen after the volatile write to float up
2049 // before it.
2050 //
2051 // We only put in barriers around volatile refs (they are expensive), not
2052 // _between_ memory refs (that would require us to track the flavor of the
2053 // previous memory refs). Requirements (2) and (3) require some barriers
2054 // before volatile stores and after volatile loads. These nearly cover
2055 // requirement (1) but miss the volatile-store-volatile-load case. This final
2056 // case is placed after volatile-stores although it could just as well go
2057 // before volatile-loads.
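// In pseudo-code, the placement described above is approximately (a sketch;
// the actual masks are read_bits/write_bits in putfield_or_static and
// membar_bits in getfield_or_static below):
//
//   volatile load:                                   ld(field);  membar(LoadLoad|LoadStore);
//   volatile store:  membar(LoadStore|StoreStore);   st(field);  membar(StoreLoad);
//
// On TSO sparc only the StoreLoad barrier has any effect, so volatile_barrier()
// filters everything else out.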
2058 void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constraint) {
2059 // Helper function to insert an is-volatile test and memory barrier
2060 // All current sparc implementations run in TSO, needing only StoreLoad
2061 if ((order_constraint & Assembler::StoreLoad) == 0) return;
2062 __ membar( order_constraint );
2063 }
2064
2065 // ----------------------------------------------------------------------------
2066 void TemplateTable::resolve_cache_and_index(int byte_no,
2067 Register Rcache,
2068 Register index,
2069 size_t index_size) {
2070 // Depends on cpCacheOop layout!
2071 Label resolved;
2072
2073 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2074 __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, Lbyte_code, byte_no, 1, index_size);
2075 __ cmp(Lbyte_code, (int) bytecode()); // have we resolved this bytecode?
2076 __ br(Assembler::equal, false, Assembler::pt, resolved);
2077 __ delayed()->set((int)bytecode(), O1);
2078
2079 address entry;
2080 switch (bytecode()) {
2081 case Bytecodes::_getstatic : // fall through
2082 case Bytecodes::_putstatic : // fall through
2083 case Bytecodes::_getfield : // fall through
2084 case Bytecodes::_putfield : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); break;
2085 case Bytecodes::_invokevirtual : // fall through
2086 case Bytecodes::_invokespecial : // fall through
2087 case Bytecodes::_invokestatic : // fall through
2088 case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break;
2089 case Bytecodes::_invokehandle : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle); break;
2090 case Bytecodes::_invokedynamic : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break;
2091 default:
2092 fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(bytecode())));
2093 break;
2094 }
2095 // first time invocation - must resolve first
2096 __ call_VM(noreg, entry, O1);
2097 // Update registers with resolved info
2098 __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
2099 __ bind(resolved);
2100 }
2101
2102 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
2103 Register method,
2104 Register itable_index,
2105 Register flags,
2106 bool is_invokevirtual,
2107 bool is_invokevfinal,
2108 bool is_invokedynamic) {
2109 // Uses both G3_scratch and G4_scratch
2110 Register cache = G3_scratch;
2111 Register index = G4_scratch;
2112 assert_different_registers(cache, method, itable_index);
2113
2114 // determine constant pool cache field offsets
2115 assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
2116 const int method_offset = in_bytes(
2117 ConstantPoolCache::base_offset() +
2118 ((byte_no == f2_byte)
2119 ? ConstantPoolCacheEntry::f2_offset()
2120 : ConstantPoolCacheEntry::f1_offset()
2121 )
2122 );
2123 const int flags_offset = in_bytes(ConstantPoolCache::base_offset() +
2124 ConstantPoolCacheEntry::flags_offset());
2125 // access constant pool cache fields
2126 const int index_offset = in_bytes(ConstantPoolCache::base_offset() +
2127 ConstantPoolCacheEntry::f2_offset());
2128
2129 if (is_invokevfinal) {
2130 __ get_cache_and_index_at_bcp(cache, index, 1);
2131 __ ld_ptr(Address(cache, method_offset), method);
2132 } else {
2133 size_t index_size = (is_invokedynamic ?
sizeof(u4) : sizeof(u2)); 2134 resolve_cache_and_index(byte_no, cache, index, index_size); 2135 __ ld_ptr(Address(cache, method_offset), method); 2136 } 2137 2138 if (itable_index != noreg) { 2139 // pick up itable or appendix index from f2 also: 2140 __ ld_ptr(Address(cache, index_offset), itable_index); 2141 } 2142 __ ld_ptr(Address(cache, flags_offset), flags); 2143 } 2144 2145 // The Rcache register must be set before call 2146 void TemplateTable::load_field_cp_cache_entry(Register Robj, 2147 Register Rcache, 2148 Register index, 2149 Register Roffset, 2150 Register Rflags, 2151 bool is_static) { 2152 assert_different_registers(Rcache, Rflags, Roffset); 2153 2154 ByteSize cp_base_offset = ConstantPoolCache::base_offset(); 2155 2156 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags); 2157 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Roffset); 2158 if (is_static) { 2159 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f1_offset(), Robj); 2160 const int mirror_offset = in_bytes(Klass::java_mirror_offset()); 2161 __ ld_ptr( Robj, mirror_offset, Robj); 2162 } 2163 } 2164 2165 // The registers Rcache and index expected to be set before call. 2166 // Correct values of the Rcache and index registers are preserved. 2167 void TemplateTable::jvmti_post_field_access(Register Rcache, 2168 Register index, 2169 bool is_static, 2170 bool has_tos) { 2171 ByteSize cp_base_offset = ConstantPoolCache::base_offset(); 2172 2173 if (JvmtiExport::can_post_field_access()) { 2174 // Check to see if a field access watch has been set before we take 2175 // the time to call into the VM. 2176 Label Label1; 2177 assert_different_registers(Rcache, index, G1_scratch); 2178 AddressLiteral get_field_access_count_addr(JvmtiExport::get_field_access_count_addr()); 2179 __ load_contents(get_field_access_count_addr, G1_scratch); 2180 __ cmp_and_br_short(G1_scratch, 0, Assembler::equal, Assembler::pt, Label1); 2181 2182 __ add(Rcache, in_bytes(cp_base_offset), Rcache); 2183 2184 if (is_static) { 2185 __ clr(Otos_i); 2186 } else { 2187 if (has_tos) { 2188 // save object pointer before call_VM() clobbers it 2189 __ push_ptr(Otos_i); // put object on tos where GC wants it. 
2190 } else { 2191 // Load top of stack (do not pop the value off the stack); 2192 __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), Otos_i); 2193 } 2194 __ verify_oop(Otos_i); 2195 } 2196 // Otos_i: object pointer or NULL if static 2197 // Rcache: cache entry pointer 2198 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), 2199 Otos_i, Rcache); 2200 if (!is_static && has_tos) { 2201 __ pop_ptr(Otos_i); // restore object pointer 2202 __ verify_oop(Otos_i); 2203 } 2204 __ get_cache_and_index_at_bcp(Rcache, index, 1); 2205 __ bind(Label1); 2206 } 2207 } 2208 2209 void TemplateTable::getfield_or_static(int byte_no, bool is_static) { 2210 transition(vtos, vtos); 2211 2212 Register Rcache = G3_scratch; 2213 Register index = G4_scratch; 2214 Register Rclass = Rcache; 2215 Register Roffset= G4_scratch; 2216 Register Rflags = G1_scratch; 2217 ByteSize cp_base_offset = ConstantPoolCache::base_offset(); 2218 2219 resolve_cache_and_index(byte_no, Rcache, index, sizeof(u2)); 2220 jvmti_post_field_access(Rcache, index, is_static, false); 2221 load_field_cp_cache_entry(Rclass, Rcache, index, Roffset, Rflags, is_static); 2222 2223 if (!is_static) { 2224 pop_and_check_object(Rclass); 2225 } else { 2226 __ verify_oop(Rclass); 2227 } 2228 2229 Label exit; 2230 2231 Assembler::Membar_mask_bits membar_bits = 2232 Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore); 2233 2234 if (__ membar_has_effect(membar_bits)) { 2235 // Get volatile flag 2236 __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch); 2237 __ and3(Rflags, Lscratch, Lscratch); 2238 } 2239 2240 Label checkVolatile; 2241 2242 // compute field type 2243 Label notByte, notInt, notShort, notChar, notLong, notFloat, notObj; 2244 __ srl(Rflags, ConstantPoolCacheEntry::tos_state_shift, Rflags); 2245 // Make sure we don't need to mask Rflags after the above shift 2246 ConstantPoolCacheEntry::verify_tos_state_shift(); 2247 2248 // Check atos before itos for getstatic, more likely (in Queens at least) 2249 __ cmp(Rflags, atos); 2250 __ br(Assembler::notEqual, false, Assembler::pt, notObj); 2251 __ delayed() ->cmp(Rflags, itos); 2252 2253 // atos 2254 __ load_heap_oop(Rclass, Roffset, Otos_i); 2255 __ verify_oop(Otos_i); 2256 __ push(atos); 2257 if (!is_static) { 2258 patch_bytecode(Bytecodes::_fast_agetfield, G3_scratch, G4_scratch); 2259 } 2260 __ ba(checkVolatile); 2261 __ delayed()->tst(Lscratch); 2262 2263 __ bind(notObj); 2264 2265 // cmp(Rflags, itos); 2266 __ br(Assembler::notEqual, false, Assembler::pt, notInt); 2267 __ delayed() ->cmp(Rflags, ltos); 2268 2269 // itos 2270 __ ld(Rclass, Roffset, Otos_i); 2271 __ push(itos); 2272 if (!is_static) { 2273 patch_bytecode(Bytecodes::_fast_igetfield, G3_scratch, G4_scratch); 2274 } 2275 __ ba(checkVolatile); 2276 __ delayed()->tst(Lscratch); 2277 2278 __ bind(notInt); 2279 2280 // cmp(Rflags, ltos); 2281 __ br(Assembler::notEqual, false, Assembler::pt, notLong); 2282 __ delayed() ->cmp(Rflags, btos); 2283 2284 // ltos 2285 // load must be atomic 2286 __ ld_long(Rclass, Roffset, Otos_l); 2287 __ push(ltos); 2288 if (!is_static) { 2289 patch_bytecode(Bytecodes::_fast_lgetfield, G3_scratch, G4_scratch); 2290 } 2291 __ ba(checkVolatile); 2292 __ delayed()->tst(Lscratch); 2293 2294 __ bind(notLong); 2295 2296 // cmp(Rflags, btos); 2297 __ br(Assembler::notEqual, false, Assembler::pt, notByte); 2298 __ delayed() ->cmp(Rflags, ctos); 2299 2300 // btos 2301 __ ldsb(Rclass, Roffset, Otos_i); 2302 __ push(itos); 2303 if (!is_static) { 2304 
patch_bytecode(Bytecodes::_fast_bgetfield, G3_scratch, G4_scratch); 2305 } 2306 __ ba(checkVolatile); 2307 __ delayed()->tst(Lscratch); 2308 2309 __ bind(notByte); 2310 2311 // cmp(Rflags, ctos); 2312 __ br(Assembler::notEqual, false, Assembler::pt, notChar); 2313 __ delayed() ->cmp(Rflags, stos); 2314 2315 // ctos 2316 __ lduh(Rclass, Roffset, Otos_i); 2317 __ push(itos); 2318 if (!is_static) { 2319 patch_bytecode(Bytecodes::_fast_cgetfield, G3_scratch, G4_scratch); 2320 } 2321 __ ba(checkVolatile); 2322 __ delayed()->tst(Lscratch); 2323 2324 __ bind(notChar); 2325 2326 // cmp(Rflags, stos); 2327 __ br(Assembler::notEqual, false, Assembler::pt, notShort); 2328 __ delayed() ->cmp(Rflags, ftos); 2329 2330 // stos 2331 __ ldsh(Rclass, Roffset, Otos_i); 2332 __ push(itos); 2333 if (!is_static) { 2334 patch_bytecode(Bytecodes::_fast_sgetfield, G3_scratch, G4_scratch); 2335 } 2336 __ ba(checkVolatile); 2337 __ delayed()->tst(Lscratch); 2338 2339 __ bind(notShort); 2340 2341 2342 // cmp(Rflags, ftos); 2343 __ br(Assembler::notEqual, false, Assembler::pt, notFloat); 2344 __ delayed() ->tst(Lscratch); 2345 2346 // ftos 2347 __ ldf(FloatRegisterImpl::S, Rclass, Roffset, Ftos_f); 2348 __ push(ftos); 2349 if (!is_static) { 2350 patch_bytecode(Bytecodes::_fast_fgetfield, G3_scratch, G4_scratch); 2351 } 2352 __ ba(checkVolatile); 2353 __ delayed()->tst(Lscratch); 2354 2355 __ bind(notFloat); 2356 2357 2358 // dtos 2359 __ ldf(FloatRegisterImpl::D, Rclass, Roffset, Ftos_d); 2360 __ push(dtos); 2361 if (!is_static) { 2362 patch_bytecode(Bytecodes::_fast_dgetfield, G3_scratch, G4_scratch); 2363 } 2364 2365 __ bind(checkVolatile); 2366 if (__ membar_has_effect(membar_bits)) { 2367 // __ tst(Lscratch); executed in delay slot 2368 __ br(Assembler::zero, false, Assembler::pt, exit); 2369 __ delayed()->nop(); 2370 volatile_barrier(membar_bits); 2371 } 2372 2373 __ bind(exit); 2374 } 2375 2376 2377 void TemplateTable::getfield(int byte_no) { 2378 getfield_or_static(byte_no, false); 2379 } 2380 2381 void TemplateTable::getstatic(int byte_no) { 2382 getfield_or_static(byte_no, true); 2383 } 2384 2385 2386 void TemplateTable::fast_accessfield(TosState state) { 2387 transition(atos, state); 2388 Register Rcache = G3_scratch; 2389 Register index = G4_scratch; 2390 Register Roffset = G4_scratch; 2391 Register Rflags = Rcache; 2392 ByteSize cp_base_offset = ConstantPoolCache::base_offset(); 2393 2394 __ get_cache_and_index_at_bcp(Rcache, index, 1); 2395 jvmti_post_field_access(Rcache, index, /*is_static*/false, /*has_tos*/true); 2396 2397 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Roffset); 2398 2399 __ null_check(Otos_i); 2400 __ verify_oop(Otos_i); 2401 2402 Label exit; 2403 2404 Assembler::Membar_mask_bits membar_bits = 2405 Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore); 2406 if (__ membar_has_effect(membar_bits)) { 2407 // Get volatile flag 2408 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Rflags); 2409 __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch); 2410 } 2411 2412 switch (bytecode()) { 2413 case Bytecodes::_fast_bgetfield: 2414 __ ldsb(Otos_i, Roffset, Otos_i); 2415 break; 2416 case Bytecodes::_fast_cgetfield: 2417 __ lduh(Otos_i, Roffset, Otos_i); 2418 break; 2419 case Bytecodes::_fast_sgetfield: 2420 __ ldsh(Otos_i, Roffset, Otos_i); 2421 break; 2422 case Bytecodes::_fast_igetfield: 2423 __ ld(Otos_i, Roffset, Otos_i); 2424 break; 2425 case Bytecodes::_fast_lgetfield: 2426 __ ld_long(Otos_i, Roffset, Otos_l); 
2427 break; 2428 case Bytecodes::_fast_fgetfield: 2429 __ ldf(FloatRegisterImpl::S, Otos_i, Roffset, Ftos_f); 2430 break; 2431 case Bytecodes::_fast_dgetfield: 2432 __ ldf(FloatRegisterImpl::D, Otos_i, Roffset, Ftos_d); 2433 break; 2434 case Bytecodes::_fast_agetfield: 2435 __ load_heap_oop(Otos_i, Roffset, Otos_i); 2436 break; 2437 default: 2438 ShouldNotReachHere(); 2439 } 2440 2441 if (__ membar_has_effect(membar_bits)) { 2442 __ btst(Lscratch, Rflags); 2443 __ br(Assembler::zero, false, Assembler::pt, exit); 2444 __ delayed()->nop(); 2445 volatile_barrier(membar_bits); 2446 __ bind(exit); 2447 } 2448 2449 if (state == atos) { 2450 __ verify_oop(Otos_i); // does not blow flags! 2451 } 2452 } 2453 2454 void TemplateTable::jvmti_post_fast_field_mod() { 2455 if (JvmtiExport::can_post_field_modification()) { 2456 // Check to see if a field modification watch has been set before we take 2457 // the time to call into the VM. 2458 Label done; 2459 AddressLiteral get_field_modification_count_addr(JvmtiExport::get_field_modification_count_addr()); 2460 __ load_contents(get_field_modification_count_addr, G4_scratch); 2461 __ cmp_and_br_short(G4_scratch, 0, Assembler::equal, Assembler::pt, done); 2462 __ pop_ptr(G4_scratch); // copy the object pointer from tos 2463 __ verify_oop(G4_scratch); 2464 __ push_ptr(G4_scratch); // put the object pointer back on tos 2465 __ get_cache_entry_pointer_at_bcp(G1_scratch, G3_scratch, 1); 2466 // Save tos values before call_VM() clobbers them. Since we have 2467 // to do it for every data type, we use the saved values as the 2468 // jvalue object. 2469 switch (bytecode()) { // save tos values before call_VM() clobbers them 2470 case Bytecodes::_fast_aputfield: __ push_ptr(Otos_i); break; 2471 case Bytecodes::_fast_bputfield: // fall through 2472 case Bytecodes::_fast_sputfield: // fall through 2473 case Bytecodes::_fast_cputfield: // fall through 2474 case Bytecodes::_fast_iputfield: __ push_i(Otos_i); break; 2475 case Bytecodes::_fast_dputfield: __ push_d(Ftos_d); break; 2476 case Bytecodes::_fast_fputfield: __ push_f(Ftos_f); break; 2477 // get words in right order for use as jvalue object 2478 case Bytecodes::_fast_lputfield: __ push_l(Otos_l); break; 2479 } 2480 // setup pointer to jvalue object 2481 __ mov(Lesp, G3_scratch); __ inc(G3_scratch, wordSize); 2482 // G4_scratch: object pointer 2483 // G1_scratch: cache entry pointer 2484 // G3_scratch: jvalue object on the stack 2485 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), G4_scratch, G1_scratch, G3_scratch); 2486 switch (bytecode()) { // restore tos values 2487 case Bytecodes::_fast_aputfield: __ pop_ptr(Otos_i); break; 2488 case Bytecodes::_fast_bputfield: // fall through 2489 case Bytecodes::_fast_sputfield: // fall through 2490 case Bytecodes::_fast_cputfield: // fall through 2491 case Bytecodes::_fast_iputfield: __ pop_i(Otos_i); break; 2492 case Bytecodes::_fast_dputfield: __ pop_d(Ftos_d); break; 2493 case Bytecodes::_fast_fputfield: __ pop_f(Ftos_f); break; 2494 case Bytecodes::_fast_lputfield: __ pop_l(Otos_l); break; 2495 } 2496 __ bind(done); 2497 } 2498 } 2499 2500 // The registers Rcache and index expected to be set before call. 2501 // The function may destroy various registers, just not the Rcache and index registers. 
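// For the non-static case the expression stack holds the object below the
// value, so the object slot sits at Lesp + expr_offset_in_bytes(1) for
// one-word values and at Lesp + expr_offset_in_bytes(2) for long/double;
// that is exactly what the two_word test below computes. The jvalue pointer
// handed to the VM is simply Lesp + wordSize, since the value is on top.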
2502 void TemplateTable::jvmti_post_field_mod(Register Rcache, Register index, bool is_static) {
2503 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2504
2505 if (JvmtiExport::can_post_field_modification()) {
2506 // Check to see if a field modification watch has been set before we take
2507 // the time to call into the VM.
2508 Label Label1;
2509 assert_different_registers(Rcache, index, G1_scratch);
2510 AddressLiteral get_field_modification_count_addr(JvmtiExport::get_field_modification_count_addr());
2511 __ load_contents(get_field_modification_count_addr, G1_scratch);
2512 __ cmp_and_br_short(G1_scratch, 0, Assembler::zero, Assembler::pt, Label1);
2513
2514 // The Rcache and index registers have already been set. That would allow
2515 // this call to be eliminated, but then the code below would have to use
2516 // the Rcache and index registers consistently instead.
2517 __ get_cache_and_index_at_bcp(G1_scratch, G4_scratch, 1);
2518
2519 __ add(G1_scratch, in_bytes(cp_base_offset), G3_scratch);
2520 if (is_static) {
2521 // Life is simple. Null out the object pointer.
2522 __ clr(G4_scratch);
2523 } else {
2524 Register Rflags = G1_scratch;
2525 // Life is harder. The stack holds the value on top, followed by the
2526 // object. We don't know the size of the value, though; it could be
2527 // one or two words depending on its type. As a result, we must find
2528 // the type to determine where the object is.
2529
2530 Label two_word, valsizeknown;
2531 __ ld_ptr(G1_scratch, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags);
2532 __ mov(Lesp, G4_scratch);
2533 __ srl(Rflags, ConstantPoolCacheEntry::tos_state_shift, Rflags);
2534 // Make sure we don't need to mask Rflags after the above shift
2535 ConstantPoolCacheEntry::verify_tos_state_shift();
2536 __ cmp(Rflags, ltos);
2537 __ br(Assembler::equal, false, Assembler::pt, two_word);
2538 __ delayed()->cmp(Rflags, dtos);
2539 __ br(Assembler::equal, false, Assembler::pt, two_word);
2540 __ delayed()->nop();
2541 __ inc(G4_scratch, Interpreter::expr_offset_in_bytes(1));
2542 __ ba_short(valsizeknown);
2543 __ bind(two_word);
2544
2545 __ inc(G4_scratch, Interpreter::expr_offset_in_bytes(2));
2546
2547 __ bind(valsizeknown);
2548 // setup object pointer
2549 __ ld_ptr(G4_scratch, 0, G4_scratch);
2550 __ verify_oop(G4_scratch);
2551 }
2552 // setup pointer to jvalue object
2553 __ mov(Lesp, G1_scratch); __ inc(G1_scratch, wordSize);
2554 // G4_scratch: object pointer or NULL if static
2555 // G3_scratch: cache entry pointer
2556 // G1_scratch: jvalue object on the stack
2557 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification),
2558 G4_scratch, G3_scratch, G1_scratch);
2559 __ get_cache_and_index_at_bcp(Rcache, index, 1);
2560 __ bind(Label1);
2561 }
2562 }
2563
2564 void TemplateTable::pop_and_check_object(Register r) {
2565 __ pop_ptr(r);
2566 __ null_check(r); // for field access must check obj.
2567 __ verify_oop(r); 2568 } 2569 2570 void TemplateTable::putfield_or_static(int byte_no, bool is_static) { 2571 transition(vtos, vtos); 2572 Register Rcache = G3_scratch; 2573 Register index = G4_scratch; 2574 Register Rclass = Rcache; 2575 Register Roffset= G4_scratch; 2576 Register Rflags = G1_scratch; 2577 ByteSize cp_base_offset = ConstantPoolCache::base_offset(); 2578 2579 resolve_cache_and_index(byte_no, Rcache, index, sizeof(u2)); 2580 jvmti_post_field_mod(Rcache, index, is_static); 2581 load_field_cp_cache_entry(Rclass, Rcache, index, Roffset, Rflags, is_static); 2582 2583 Assembler::Membar_mask_bits read_bits = 2584 Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::StoreStore); 2585 Assembler::Membar_mask_bits write_bits = Assembler::StoreLoad; 2586 2587 Label notVolatile, checkVolatile, exit; 2588 if (__ membar_has_effect(read_bits) || __ membar_has_effect(write_bits)) { 2589 __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch); 2590 __ and3(Rflags, Lscratch, Lscratch); 2591 2592 if (__ membar_has_effect(read_bits)) { 2593 __ cmp_and_br_short(Lscratch, 0, Assembler::equal, Assembler::pt, notVolatile); 2594 volatile_barrier(read_bits); 2595 __ bind(notVolatile); 2596 } 2597 } 2598 2599 __ srl(Rflags, ConstantPoolCacheEntry::tos_state_shift, Rflags); 2600 // Make sure we don't need to mask Rflags after the above shift 2601 ConstantPoolCacheEntry::verify_tos_state_shift(); 2602 2603 // compute field type 2604 Label notInt, notShort, notChar, notObj, notByte, notLong, notFloat; 2605 2606 if (is_static) { 2607 // putstatic with object type most likely, check that first 2608 __ cmp(Rflags, atos); 2609 __ br(Assembler::notEqual, false, Assembler::pt, notObj); 2610 __ delayed()->cmp(Rflags, itos); 2611 2612 // atos 2613 { 2614 __ pop_ptr(); 2615 __ verify_oop(Otos_i); 2616 do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false); 2617 __ ba(checkVolatile); 2618 __ delayed()->tst(Lscratch); 2619 } 2620 2621 __ bind(notObj); 2622 // cmp(Rflags, itos); 2623 __ br(Assembler::notEqual, false, Assembler::pt, notInt); 2624 __ delayed()->cmp(Rflags, btos); 2625 2626 // itos 2627 { 2628 __ pop_i(); 2629 __ st(Otos_i, Rclass, Roffset); 2630 __ ba(checkVolatile); 2631 __ delayed()->tst(Lscratch); 2632 } 2633 2634 __ bind(notInt); 2635 } else { 2636 // putfield with int type most likely, check that first 2637 __ cmp(Rflags, itos); 2638 __ br(Assembler::notEqual, false, Assembler::pt, notInt); 2639 __ delayed()->cmp(Rflags, atos); 2640 2641 // itos 2642 { 2643 __ pop_i(); 2644 pop_and_check_object(Rclass); 2645 __ st(Otos_i, Rclass, Roffset); 2646 patch_bytecode(Bytecodes::_fast_iputfield, G3_scratch, G4_scratch, true, byte_no); 2647 __ ba(checkVolatile); 2648 __ delayed()->tst(Lscratch); 2649 } 2650 2651 __ bind(notInt); 2652 // cmp(Rflags, atos); 2653 __ br(Assembler::notEqual, false, Assembler::pt, notObj); 2654 __ delayed()->cmp(Rflags, btos); 2655 2656 // atos 2657 { 2658 __ pop_ptr(); 2659 pop_and_check_object(Rclass); 2660 __ verify_oop(Otos_i); 2661 do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false); 2662 patch_bytecode(Bytecodes::_fast_aputfield, G3_scratch, G4_scratch, true, byte_no); 2663 __ ba(checkVolatile); 2664 __ delayed()->tst(Lscratch); 2665 } 2666 2667 __ bind(notObj); 2668 } 2669 2670 // cmp(Rflags, btos); 2671 __ br(Assembler::notEqual, false, Assembler::pt, notByte); 2672 __ delayed()->cmp(Rflags, ltos); 2673 2674 // btos 2675 { 2676 __ pop_i(); 2677 if (!is_static) pop_and_check_object(Rclass); 
2678 __ stb(Otos_i, Rclass, Roffset); 2679 if (!is_static) { 2680 patch_bytecode(Bytecodes::_fast_bputfield, G3_scratch, G4_scratch, true, byte_no); 2681 } 2682 __ ba(checkVolatile); 2683 __ delayed()->tst(Lscratch); 2684 } 2685 2686 __ bind(notByte); 2687 // cmp(Rflags, ltos); 2688 __ br(Assembler::notEqual, false, Assembler::pt, notLong); 2689 __ delayed()->cmp(Rflags, ctos); 2690 2691 // ltos 2692 { 2693 __ pop_l(); 2694 if (!is_static) pop_and_check_object(Rclass); 2695 __ st_long(Otos_l, Rclass, Roffset); 2696 if (!is_static) { 2697 patch_bytecode(Bytecodes::_fast_lputfield, G3_scratch, G4_scratch, true, byte_no); 2698 } 2699 __ ba(checkVolatile); 2700 __ delayed()->tst(Lscratch); 2701 } 2702 2703 __ bind(notLong); 2704 // cmp(Rflags, ctos); 2705 __ br(Assembler::notEqual, false, Assembler::pt, notChar); 2706 __ delayed()->cmp(Rflags, stos); 2707 2708 // ctos (char) 2709 { 2710 __ pop_i(); 2711 if (!is_static) pop_and_check_object(Rclass); 2712 __ sth(Otos_i, Rclass, Roffset); 2713 if (!is_static) { 2714 patch_bytecode(Bytecodes::_fast_cputfield, G3_scratch, G4_scratch, true, byte_no); 2715 } 2716 __ ba(checkVolatile); 2717 __ delayed()->tst(Lscratch); 2718 } 2719 2720 __ bind(notChar); 2721 // cmp(Rflags, stos); 2722 __ br(Assembler::notEqual, false, Assembler::pt, notShort); 2723 __ delayed()->cmp(Rflags, ftos); 2724 2725 // stos (short) 2726 { 2727 __ pop_i(); 2728 if (!is_static) pop_and_check_object(Rclass); 2729 __ sth(Otos_i, Rclass, Roffset); 2730 if (!is_static) { 2731 patch_bytecode(Bytecodes::_fast_sputfield, G3_scratch, G4_scratch, true, byte_no); 2732 } 2733 __ ba(checkVolatile); 2734 __ delayed()->tst(Lscratch); 2735 } 2736 2737 __ bind(notShort); 2738 // cmp(Rflags, ftos); 2739 __ br(Assembler::notZero, false, Assembler::pt, notFloat); 2740 __ delayed()->nop(); 2741 2742 // ftos 2743 { 2744 __ pop_f(); 2745 if (!is_static) pop_and_check_object(Rclass); 2746 __ stf(FloatRegisterImpl::S, Ftos_f, Rclass, Roffset); 2747 if (!is_static) { 2748 patch_bytecode(Bytecodes::_fast_fputfield, G3_scratch, G4_scratch, true, byte_no); 2749 } 2750 __ ba(checkVolatile); 2751 __ delayed()->tst(Lscratch); 2752 } 2753 2754 __ bind(notFloat); 2755 2756 // dtos 2757 { 2758 __ pop_d(); 2759 if (!is_static) pop_and_check_object(Rclass); 2760 __ stf(FloatRegisterImpl::D, Ftos_d, Rclass, Roffset); 2761 if (!is_static) { 2762 patch_bytecode(Bytecodes::_fast_dputfield, G3_scratch, G4_scratch, true, byte_no); 2763 } 2764 } 2765 2766 __ bind(checkVolatile); 2767 __ tst(Lscratch); 2768 2769 if (__ membar_has_effect(write_bits)) { 2770 // __ tst(Lscratch); in delay slot 2771 __ br(Assembler::zero, false, Assembler::pt, exit); 2772 __ delayed()->nop(); 2773 volatile_barrier(Assembler::StoreLoad); 2774 __ bind(exit); 2775 } 2776 } 2777 2778 void TemplateTable::fast_storefield(TosState state) { 2779 transition(state, vtos); 2780 Register Rcache = G3_scratch; 2781 Register Rclass = Rcache; 2782 Register Roffset= G4_scratch; 2783 Register Rflags = G1_scratch; 2784 ByteSize cp_base_offset = ConstantPoolCache::base_offset(); 2785 2786 jvmti_post_fast_field_mod(); 2787 2788 __ get_cache_and_index_at_bcp(Rcache, G4_scratch, 1); 2789 2790 Assembler::Membar_mask_bits read_bits = 2791 Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::StoreStore); 2792 Assembler::Membar_mask_bits write_bits = Assembler::StoreLoad; 2793 2794 Label notVolatile, checkVolatile, exit; 2795 if (__ membar_has_effect(read_bits) || __ membar_has_effect(write_bits)) { 2796 __ ld_ptr(Rcache, cp_base_offset + 
ConstantPoolCacheEntry::flags_offset(), Rflags); 2797 __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch); 2798 __ and3(Rflags, Lscratch, Lscratch); 2799 if (__ membar_has_effect(read_bits)) { 2800 __ cmp_and_br_short(Lscratch, 0, Assembler::equal, Assembler::pt, notVolatile); 2801 volatile_barrier(read_bits); 2802 __ bind(notVolatile); 2803 } 2804 } 2805 2806 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Roffset); 2807 pop_and_check_object(Rclass); 2808 2809 switch (bytecode()) { 2810 case Bytecodes::_fast_bputfield: __ stb(Otos_i, Rclass, Roffset); break; 2811 case Bytecodes::_fast_cputfield: /* fall through */ 2812 case Bytecodes::_fast_sputfield: __ sth(Otos_i, Rclass, Roffset); break; 2813 case Bytecodes::_fast_iputfield: __ st(Otos_i, Rclass, Roffset); break; 2814 case Bytecodes::_fast_lputfield: __ st_long(Otos_l, Rclass, Roffset); break; 2815 case Bytecodes::_fast_fputfield: 2816 __ stf(FloatRegisterImpl::S, Ftos_f, Rclass, Roffset); 2817 break; 2818 case Bytecodes::_fast_dputfield: 2819 __ stf(FloatRegisterImpl::D, Ftos_d, Rclass, Roffset); 2820 break; 2821 case Bytecodes::_fast_aputfield: 2822 do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false); 2823 break; 2824 default: 2825 ShouldNotReachHere(); 2826 } 2827 2828 if (__ membar_has_effect(write_bits)) { 2829 __ cmp_and_br_short(Lscratch, 0, Assembler::equal, Assembler::pt, exit); 2830 volatile_barrier(Assembler::StoreLoad); 2831 __ bind(exit); 2832 } 2833 } 2834 2835 2836 void TemplateTable::putfield(int byte_no) { 2837 putfield_or_static(byte_no, false); 2838 } 2839 2840 void TemplateTable::putstatic(int byte_no) { 2841 putfield_or_static(byte_no, true); 2842 } 2843 2844 2845 void TemplateTable::fast_xaccess(TosState state) { 2846 transition(vtos, state); 2847 Register Rcache = G3_scratch; 2848 Register Roffset = G4_scratch; 2849 Register Rflags = G4_scratch; 2850 Register Rreceiver = Lscratch; 2851 2852 __ ld_ptr(Llocals, 0, Rreceiver); 2853 2854 // access constant pool cache (is resolved) 2855 __ get_cache_and_index_at_bcp(Rcache, G4_scratch, 2); 2856 __ ld_ptr(Rcache, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset(), Roffset); 2857 __ add(Lbcp, 1, Lbcp); // needed to report exception at the correct bcp 2858 2859 __ verify_oop(Rreceiver); 2860 __ null_check(Rreceiver); 2861 if (state == atos) { 2862 __ load_heap_oop(Rreceiver, Roffset, Otos_i); 2863 } else if (state == itos) { 2864 __ ld (Rreceiver, Roffset, Otos_i) ; 2865 } else if (state == ftos) { 2866 __ ldf(FloatRegisterImpl::S, Rreceiver, Roffset, Ftos_f); 2867 } else { 2868 ShouldNotReachHere(); 2869 } 2870 2871 Assembler::Membar_mask_bits membar_bits = 2872 Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore); 2873 if (__ membar_has_effect(membar_bits)) { 2874 2875 // Get is_volatile value in Rflags and check if membar is needed 2876 __ ld_ptr(Rcache, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset(), Rflags); 2877 2878 // Test volatile 2879 Label notVolatile; 2880 __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch); 2881 __ btst(Rflags, Lscratch); 2882 __ br(Assembler::zero, false, Assembler::pt, notVolatile); 2883 __ delayed()->nop(); 2884 volatile_barrier(membar_bits); 2885 __ bind(notVolatile); 2886 } 2887 2888 __ interp_verify_oop(Otos_i, state, __FILE__, __LINE__); 2889 __ sub(Lbcp, 1, Lbcp); 2890 } 2891 2892 //---------------------------------------------------------------------------------------------------- 2893 
// Calls 2894 2895 void TemplateTable::count_calls(Register method, Register temp) { 2896 // implemented elsewhere 2897 ShouldNotReachHere(); 2898 } 2899 2900 void TemplateTable::prepare_invoke(int byte_no, 2901 Register method, // linked method (or i-klass) 2902 Register ra, // return address 2903 Register index, // itable index, MethodType, etc. 2904 Register recv, // if caller wants to see it 2905 Register flags // if caller wants to test it 2906 ) { 2907 // determine flags 2908 const Bytecodes::Code code = bytecode(); 2909 const bool is_invokeinterface = code == Bytecodes::_invokeinterface; 2910 const bool is_invokedynamic = code == Bytecodes::_invokedynamic; 2911 const bool is_invokehandle = code == Bytecodes::_invokehandle; 2912 const bool is_invokevirtual = code == Bytecodes::_invokevirtual; 2913 const bool is_invokespecial = code == Bytecodes::_invokespecial; 2914 const bool load_receiver = (recv != noreg); 2915 assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), ""); 2916 assert(recv == noreg || recv == O0, ""); 2917 assert(flags == noreg || flags == O1, ""); 2918 2919 // setup registers & access constant pool cache 2920 if (recv == noreg) recv = O0; 2921 if (flags == noreg) flags = O1; 2922 const Register temp = O2; 2923 assert_different_registers(method, ra, index, recv, flags, temp); 2924 2925 load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic); 2926 2927 __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore 2928 2929 // maybe push appendix to arguments 2930 if (is_invokedynamic || is_invokehandle) { 2931 Label L_no_push; 2932 __ set((1 << ConstantPoolCacheEntry::has_appendix_shift), temp); 2933 __ btst(flags, temp); 2934 __ br(Assembler::zero, false, Assembler::pt, L_no_push); 2935 __ delayed()->nop(); 2936 // Push the appendix as a trailing parameter. 2937 // This must be done before we get the receiver, 2938 // since the parameter_size includes it. 2939 assert(ConstantPoolCacheEntry::_indy_resolved_references_appendix_offset == 0, "appendix expected at index+0"); 2940 __ load_resolved_reference_at_index(temp, index); 2941 __ verify_oop(temp); 2942 __ push_ptr(temp); // push appendix (MethodType, CallSite, etc.) 2943 __ bind(L_no_push); 2944 } 2945 2946 // load receiver if needed (after appendix is pushed so parameter size is correct) 2947 if (load_receiver) { 2948 __ and3(flags, ConstantPoolCacheEntry::parameter_size_mask, temp); // get parameter size 2949 __ load_receiver(temp, recv); // __ argument_address uses Gargs but we need Lesp 2950 __ verify_oop(recv); 2951 } 2952 2953 // compute return type 2954 __ srl(flags, ConstantPoolCacheEntry::tos_state_shift, ra); 2955 // Make sure we don't need to mask flags after the above shift 2956 ConstantPoolCacheEntry::verify_tos_state_shift(); 2957 // load return address 2958 { 2959 const address table_addr = (is_invokeinterface || is_invokedynamic) ? 
2960 (address)Interpreter::return_5_addrs_by_index_table() : 2961 (address)Interpreter::return_3_addrs_by_index_table(); 2962 AddressLiteral table(table_addr); 2963 __ set(table, temp); 2964 __ sll(ra, LogBytesPerWord, ra); 2965 __ ld_ptr(Address(temp, ra), ra); 2966 } 2967 } 2968 2969 2970 void TemplateTable::generate_vtable_call(Register Rrecv, Register Rindex, Register Rret) { 2971 Register Rtemp = G4_scratch; 2972 Register Rcall = Rindex; 2973 assert_different_registers(Rcall, G5_method, Gargs, Rret); 2974 2975 // get target Method* & entry point 2976 __ lookup_virtual_method(Rrecv, Rindex, G5_method); 2977 __ call_from_interpreter(Rcall, Gargs, Rret); 2978 } 2979 2980 void TemplateTable::invokevirtual(int byte_no) { 2981 transition(vtos, vtos); 2982 assert(byte_no == f2_byte, "use this argument"); 2983 2984 Register Rscratch = G3_scratch; 2985 Register Rtemp = G4_scratch; 2986 Register Rret = Lscratch; 2987 Register O0_recv = O0; 2988 Label notFinal; 2989 2990 load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, true, false, false); 2991 __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore 2992 2993 // Check for vfinal 2994 __ set((1 << ConstantPoolCacheEntry::is_vfinal_shift), G4_scratch); 2995 __ btst(Rret, G4_scratch); 2996 __ br(Assembler::zero, false, Assembler::pt, notFinal); 2997 __ delayed()->and3(Rret, 0xFF, G4_scratch); // gets number of parameters 2998 2999 patch_bytecode(Bytecodes::_fast_invokevfinal, Rscratch, Rtemp); 3000 3001 invokevfinal_helper(Rscratch, Rret); 3002 3003 __ bind(notFinal); 3004 3005 __ mov(G5_method, Rscratch); // better scratch register 3006 __ load_receiver(G4_scratch, O0_recv); // gets receiverOop 3007 // receiver is in O0_recv 3008 __ verify_oop(O0_recv); 3009 3010 // get return address 3011 AddressLiteral table(Interpreter::return_3_addrs_by_index_table()); 3012 __ set(table, Rtemp); 3013 __ srl(Rret, ConstantPoolCacheEntry::tos_state_shift, Rret); // get return type 3014 // Make sure we don't need to mask Rret after the above shift 3015 ConstantPoolCacheEntry::verify_tos_state_shift(); 3016 __ sll(Rret, LogBytesPerWord, Rret); 3017 __ ld_ptr(Rtemp, Rret, Rret); // get return address 3018 3019 // get receiver klass 3020 __ null_check(O0_recv, oopDesc::klass_offset_in_bytes()); 3021 __ load_klass(O0_recv, O0_recv); 3022 __ verify_klass_ptr(O0_recv); 3023 3024 __ profile_virtual_call(O0_recv, O4); 3025 3026 generate_vtable_call(O0_recv, Rscratch, Rret); 3027 } 3028 3029 void TemplateTable::fast_invokevfinal(int byte_no) { 3030 transition(vtos, vtos); 3031 assert(byte_no == f2_byte, "use this argument"); 3032 3033 load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Lscratch, true, 3034 /*is_invokevfinal*/true, false); 3035 __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore 3036 invokevfinal_helper(G3_scratch, Lscratch); 3037 } 3038 3039 void TemplateTable::invokevfinal_helper(Register Rscratch, Register Rret) { 3040 Register Rtemp = G4_scratch; 3041 3042 // Load receiver from stack slot 3043 __ ld_ptr(G5_method, in_bytes(Method::const_offset()), G4_scratch); 3044 __ lduh(G4_scratch, in_bytes(ConstMethod::size_of_parameters_offset()), G4_scratch); 3045 __ load_receiver(G4_scratch, O0); 3046 3047 // receiver NULL check 3048 __ null_check(O0); 3049 3050 __ profile_final_call(O4); 3051 3052 // get return address 3053 AddressLiteral table(Interpreter::return_3_addrs_by_index_table()); 3054 __ set(table, Rtemp); 3055 __ srl(Rret, ConstantPoolCacheEntry::tos_state_shift, Rret); // get return type 3056 // 
Make sure we don't need to mask Rret after the above shift 3057 ConstantPoolCacheEntry::verify_tos_state_shift(); 3058 __ sll(Rret, LogBytesPerWord, Rret); 3059 __ ld_ptr(Rtemp, Rret, Rret); // get return address 3060 3061 3062 // do the call 3063 __ call_from_interpreter(Rscratch, Gargs, Rret); 3064 } 3065 3066 3067 void TemplateTable::invokespecial(int byte_no) { 3068 transition(vtos, vtos); 3069 assert(byte_no == f1_byte, "use this argument"); 3070 3071 const Register Rret = Lscratch; 3072 const Register O0_recv = O0; 3073 const Register Rscratch = G3_scratch; 3074 3075 prepare_invoke(byte_no, G5_method, Rret, noreg, O0_recv); // get receiver also for null check 3076 __ null_check(O0_recv); 3077 3078 // do the call 3079 __ profile_call(O4); 3080 __ call_from_interpreter(Rscratch, Gargs, Rret); 3081 } 3082 3083 3084 void TemplateTable::invokestatic(int byte_no) { 3085 transition(vtos, vtos); 3086 assert(byte_no == f1_byte, "use this argument"); 3087 3088 const Register Rret = Lscratch; 3089 const Register Rscratch = G3_scratch; 3090 3091 prepare_invoke(byte_no, G5_method, Rret); // get f1 Method* 3092 3093 // do the call 3094 __ profile_call(O4); 3095 __ call_from_interpreter(Rscratch, Gargs, Rret); 3096 } 3097 3098 void TemplateTable::invokeinterface_object_method(Register RKlass, 3099 Register Rcall, 3100 Register Rret, 3101 Register Rflags) { 3102 Register Rscratch = G4_scratch; 3103 Register Rindex = Lscratch; 3104 3105 assert_different_registers(Rscratch, Rindex, Rret); 3106 3107 Label notFinal; 3108 3109 // Check for vfinal 3110 __ set((1 << ConstantPoolCacheEntry::is_vfinal_shift), Rscratch); 3111 __ btst(Rflags, Rscratch); 3112 __ br(Assembler::zero, false, Assembler::pt, notFinal); 3113 __ delayed()->nop(); 3114 3115 __ profile_final_call(O4); 3116 3117 // do the call - the index (f2) contains the Method* 3118 assert_different_registers(G5_method, Gargs, Rcall); 3119 __ mov(Rindex, G5_method); 3120 __ call_from_interpreter(Rcall, Gargs, Rret); 3121 __ bind(notFinal); 3122 3123 __ profile_virtual_call(RKlass, O4); 3124 generate_vtable_call(RKlass, Rindex, Rret); 3125 } 3126 3127 3128 void TemplateTable::invokeinterface(int byte_no) { 3129 transition(vtos, vtos); 3130 assert(byte_no == f1_byte, "use this argument"); 3131 3132 const Register Rinterface = G1_scratch; 3133 const Register Rret = G3_scratch; 3134 const Register Rindex = Lscratch; 3135 const Register O0_recv = O0; 3136 const Register O1_flags = O1; 3137 const Register O2_Klass = O2; 3138 const Register Rscratch = G4_scratch; 3139 assert_different_registers(Rscratch, G5_method); 3140 3141 prepare_invoke(byte_no, Rinterface, Rret, Rindex, O0_recv, O1_flags); 3142 3143 // get receiver klass 3144 __ null_check(O0_recv, oopDesc::klass_offset_in_bytes()); 3145 __ load_klass(O0_recv, O2_Klass); 3146 3147 // Special case of invokeinterface called for virtual method of 3148 // java.lang.Object. See cpCacheOop.cpp for details. 3149 // This code isn't produced by javac, but could be produced by 3150 // another compliant java compiler. 
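// For example, a compiler may legally emit invokeinterface for hashCode()
// against an interface-typed receiver even though the method really resolves
// to java/lang/Object::hashCode (javac emits invokevirtual in that case).
// The cache entry for such a call has is_forced_virtual set, which the test
// below uses to dispatch through the vtable instead of the itable.
//
// The itable search further below is, roughly, this C sketch (a sketch only;
// see itableOffsetEntry/itableMethodEntry for the real layout):
//   ioe = (itableOffsetEntry*)((address)klass + vtable_start + round_to(vtable_len) * wordSize);
//   for (;; ioe++) {
//     if (ioe->interface() == NULL) throw IncompatibleClassChangeError();  // end of itable
//     if (ioe->interface() == Rinterface) break;
//   }
//   G5_method = *(Method**)((address)klass + ioe->offset() + Rindex * sizeof(itableMethodEntry));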
3151 Label notMethod; 3152 __ set((1 << ConstantPoolCacheEntry::is_forced_virtual_shift), Rscratch); 3153 __ btst(O1_flags, Rscratch); 3154 __ br(Assembler::zero, false, Assembler::pt, notMethod); 3155 __ delayed()->nop(); 3156 3157 invokeinterface_object_method(O2_Klass, Rinterface, Rret, O1_flags); 3158 3159 __ bind(notMethod); 3160 3161 __ profile_virtual_call(O2_Klass, O4); 3162 3163 // 3164 // find entry point to call 3165 // 3166 3167 // compute start of first itableOffsetEntry (which is at end of vtable) 3168 const int base = InstanceKlass::vtable_start_offset() * wordSize; 3169 Label search; 3170 Register Rtemp = O1_flags; 3171 3172 __ ld(O2_Klass, InstanceKlass::vtable_length_offset() * wordSize, Rtemp); 3173 if (align_object_offset(1) > 1) { 3174 __ round_to(Rtemp, align_object_offset(1)); 3175 } 3176 __ sll(Rtemp, LogBytesPerWord, Rtemp); // Rscratch *= 4; 3177 if (Assembler::is_simm13(base)) { 3178 __ add(Rtemp, base, Rtemp); 3179 } else { 3180 __ set(base, Rscratch); 3181 __ add(Rscratch, Rtemp, Rtemp); 3182 } 3183 __ add(O2_Klass, Rtemp, Rscratch); 3184 3185 __ bind(search); 3186 3187 __ ld_ptr(Rscratch, itableOffsetEntry::interface_offset_in_bytes(), Rtemp); 3188 { 3189 Label ok; 3190 3191 // Check that entry is non-null. Null entries are probably a bytecode 3192 // problem. If the interface isn't implemented by the receiver class, 3193 // the VM should throw IncompatibleClassChangeError. linkResolver checks 3194 // this too but that's only if the entry isn't already resolved, so we 3195 // need to check again. 3196 __ br_notnull_short( Rtemp, Assembler::pt, ok); 3197 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeError)); 3198 __ should_not_reach_here(); 3199 __ bind(ok); 3200 } 3201 3202 __ cmp(Rinterface, Rtemp); 3203 __ brx(Assembler::notEqual, true, Assembler::pn, search); 3204 __ delayed()->add(Rscratch, itableOffsetEntry::size() * wordSize, Rscratch); 3205 3206 // entry found and Rscratch points to it 3207 __ ld(Rscratch, itableOffsetEntry::offset_offset_in_bytes(), Rscratch); 3208 3209 assert(itableMethodEntry::method_offset_in_bytes() == 0, "adjust instruction below"); 3210 __ sll(Rindex, exact_log2(itableMethodEntry::size() * wordSize), Rindex); // Rindex *= 8; 3211 __ add(Rscratch, Rindex, Rscratch); 3212 __ ld_ptr(O2_Klass, Rscratch, G5_method); 3213 3214 // Check for abstract method error. 
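// (The itable slot can be NULL when the receiver class provides no concrete
// implementation of the selected method, e.g. after incompatible class
// evolution; the JVMS requires AbstractMethodError in that case.)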
3215 { 3216 Label ok; 3217 __ br_notnull_short(G5_method, Assembler::pt, ok); 3218 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError)); 3219 __ should_not_reach_here(); 3220 __ bind(ok); 3221 } 3222 3223 Register Rcall = Rinterface; 3224 assert_different_registers(Rcall, G5_method, Gargs, Rret); 3225 3226 __ call_from_interpreter(Rcall, Gargs, Rret); 3227 } 3228 3229 void TemplateTable::invokehandle(int byte_no) { 3230 transition(vtos, vtos); 3231 assert(byte_no == f1_byte, "use this argument"); 3232 3233 if (!EnableInvokeDynamic) { 3234 // rewriter does not generate this bytecode 3235 __ should_not_reach_here(); 3236 return; 3237 } 3238 3239 const Register Rret = Lscratch; 3240 const Register G4_mtype = G4_scratch; 3241 const Register O0_recv = O0; 3242 const Register Rscratch = G3_scratch; 3243 3244 prepare_invoke(byte_no, G5_method, Rret, G4_mtype, O0_recv); 3245 __ null_check(O0_recv); 3246 3247 // G4: MethodType object (from cpool->resolved_references[f1], if necessary) 3248 // G5: MH.invokeExact_MT method (from f2) 3249 3250 // Note: G4_mtype is already pushed (if necessary) by prepare_invoke 3251 3252 // do the call 3253 __ verify_oop(G4_mtype); 3254 __ profile_final_call(O4); // FIXME: profile the LambdaForm also 3255 __ call_from_interpreter(Rscratch, Gargs, Rret); 3256 } 3257 3258 3259 void TemplateTable::invokedynamic(int byte_no) { 3260 transition(vtos, vtos); 3261 assert(byte_no == f1_byte, "use this argument"); 3262 3263 if (!EnableInvokeDynamic) { 3264 // We should not encounter this bytecode if !EnableInvokeDynamic. 3265 // The verifier will stop it. However, if we get past the verifier, 3266 // this will stop the thread in a reasonable way, without crashing the JVM. 3267 __ call_VM(noreg, CAST_FROM_FN_PTR(address, 3268 InterpreterRuntime::throw_IncompatibleClassChangeError)); 3269 // the call_VM checks for exception, so we should never return here. 
3270 __ should_not_reach_here();
3271 return;
3272 }
3273
3274 const Register Rret = Lscratch;
3275 const Register G4_callsite = G4_scratch;
3276 const Register Rscratch = G3_scratch;
3277
3278 prepare_invoke(byte_no, G5_method, Rret, G4_callsite);
3279
3280 // G4: CallSite object (from cpool->resolved_references[f1])
3281 // G5: MH.linkToCallSite method (from f2)
3282
3283 // Note: G4_callsite is already pushed by prepare_invoke
3284
3285 // %%% should make a type profile for any invokedynamic that takes a ref argument
3286 // profile this call
3287 __ profile_call(O4);
3288
3289 // do the call
3290 __ verify_oop(G4_callsite);
3291 __ call_from_interpreter(Rscratch, Gargs, Rret);
3292 }
3293
3294
3295 //----------------------------------------------------------------------------------------------------
3296 // Allocation
3297
3298 void TemplateTable::_new() {
3299 transition(vtos, atos);
3300
3301 Label slow_case;
3302 Label done;
3303 Label initialize_header;
3304 Label initialize_object; // including clearing the fields
3305
3306 Register RallocatedObject = Otos_i;
3307 Register RinstanceKlass = O1;
3308 Register Roffset = O3;
3309 Register Rscratch = O4;
3310
3311 __ get_2_byte_integer_at_bcp(1, Rscratch, Roffset, InterpreterMacroAssembler::Unsigned);
3312 __ get_cpool_and_tags(Rscratch, G3_scratch);
3313 // make sure the class we're about to instantiate has been resolved.
3314 // This is done before loading InstanceKlass to be consistent with the order
3315 // in which the constant pool is updated (see ConstantPool::klass_at_put)
3316 __ add(G3_scratch, Array<u1>::base_offset_in_bytes(), G3_scratch);
3317 __ ldub(G3_scratch, Roffset, G3_scratch);
3318 __ cmp(G3_scratch, JVM_CONSTANT_Class);
3319 __ br(Assembler::notEqual, false, Assembler::pn, slow_case);
3320 __ delayed()->sll(Roffset, LogBytesPerWord, Roffset);
3321 // get InstanceKlass
3322 //__ sll(Roffset, LogBytesPerWord, Roffset); // executed in delay slot
3323 __ add(Roffset, sizeof(ConstantPool), Roffset);
3324 __ ld_ptr(Rscratch, Roffset, RinstanceKlass);
3325
3326 // make sure klass is fully initialized:
3327 __ ldub(RinstanceKlass, in_bytes(InstanceKlass::init_state_offset()), G3_scratch);
3328 __ cmp(G3_scratch, InstanceKlass::fully_initialized);
3329 __ br(Assembler::notEqual, false, Assembler::pn, slow_case);
3330 __ delayed()->ld(RinstanceKlass, in_bytes(Klass::layout_helper_offset()), Roffset);
3331
3332 // get instance_size in InstanceKlass (already aligned)
3333 //__ ld(RinstanceKlass, in_bytes(Klass::layout_helper_offset()), Roffset);
3334
3335 // make sure klass has no finalizer, and is not abstract, an interface, or java/lang/Class
3336 __ btst(Klass::_lh_instance_slow_path_bit, Roffset);
3337 __ br(Assembler::notZero, false, Assembler::pn, slow_case);
3338 __ delayed()->nop();
3339
3340 // allocate the instance
3341 // 1) Try to allocate in the TLAB
3342 // 2) if that fails, and the TLAB is not yet full enough to discard, allocate in the shared eden
3343 // 3) if the above fails (or is not applicable), go to a slow case
3344 // (creates a new TLAB, etc.)
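// In pseudo-C the fast paths below are approximately (a sketch; the real code
// keeps everything in registers, Roffset already holds the instance size, and
// units are elided):
//
//   HeapWord* top = thread->tlab_top();
//   if (top + size <= thread->tlab_end()) {
//     thread->set_tlab_top(top + size);           // TLAB is thread-local: no CAS
//     obj = top;
//   } else if (refill_waste_limit >= tlab_free) {
//     goto slow_case;                             // waste is small: discard & refill TLAB
//   } else {
//     refill_waste_limit += increment;            // keep TLAB, allocate in shared eden:
//     do { top = *eden_top; }
//     while (!CAS(eden_top, top, top + size));    // retry if another thread won the race
//     obj = top;
//   }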
3345 3346 const bool allow_shared_alloc = 3347 Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode; 3348 3349 if(UseTLAB) { 3350 Register RoldTopValue = RallocatedObject; 3351 Register RtlabWasteLimitValue = G3_scratch; 3352 Register RnewTopValue = G1_scratch; 3353 Register RendValue = Rscratch; 3354 Register RfreeValue = RnewTopValue; 3355 3356 // check if we can allocate in the TLAB 3357 __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), RoldTopValue); // sets up RalocatedObject 3358 __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), RendValue); 3359 __ add(RoldTopValue, Roffset, RnewTopValue); 3360 3361 // if there is enough space, we do not CAS and do not clear 3362 __ cmp(RnewTopValue, RendValue); 3363 if(ZeroTLAB) { 3364 // the fields have already been cleared 3365 __ brx(Assembler::lessEqualUnsigned, true, Assembler::pt, initialize_header); 3366 } else { 3367 // initialize both the header and fields 3368 __ brx(Assembler::lessEqualUnsigned, true, Assembler::pt, initialize_object); 3369 } 3370 __ delayed()->st_ptr(RnewTopValue, G2_thread, in_bytes(JavaThread::tlab_top_offset())); 3371 3372 if (allow_shared_alloc) { 3373 // Check if tlab should be discarded (refill_waste_limit >= free) 3374 __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), RtlabWasteLimitValue); 3375 __ sub(RendValue, RoldTopValue, RfreeValue); 3376 #ifdef _LP64 3377 __ srlx(RfreeValue, LogHeapWordSize, RfreeValue); 3378 #else 3379 __ srl(RfreeValue, LogHeapWordSize, RfreeValue); 3380 #endif 3381 __ cmp_and_brx_short(RtlabWasteLimitValue, RfreeValue, Assembler::greaterEqualUnsigned, Assembler::pt, slow_case); // tlab waste is small 3382 3383 // increment waste limit to prevent getting stuck on this slow path 3384 __ add(RtlabWasteLimitValue, ThreadLocalAllocBuffer::refill_waste_limit_increment(), RtlabWasteLimitValue); 3385 __ st_ptr(RtlabWasteLimitValue, G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset())); 3386 } else { 3387 // No allocation in the shared eden. 3388 __ ba_short(slow_case); 3389 } 3390 } 3391 3392 // Allocation in the shared Eden 3393 if (allow_shared_alloc) { 3394 Register RoldTopValue = G1_scratch; 3395 Register RtopAddr = G3_scratch; 3396 Register RnewTopValue = RallocatedObject; 3397 Register RendValue = Rscratch; 3398 3399 __ set((intptr_t)Universe::heap()->top_addr(), RtopAddr); 3400 3401 Label retry; 3402 __ bind(retry); 3403 __ set((intptr_t)Universe::heap()->end_addr(), RendValue); 3404 __ ld_ptr(RendValue, 0, RendValue); 3405 __ ld_ptr(RtopAddr, 0, RoldTopValue); 3406 __ add(RoldTopValue, Roffset, RnewTopValue); 3407 3408 // RnewTopValue contains the top address after the new object 3409 // has been allocated. 3410 __ cmp_and_brx_short(RnewTopValue, RendValue, Assembler::greaterUnsigned, Assembler::pn, slow_case); 3411 3412 __ casx_under_lock(RtopAddr, RoldTopValue, RnewTopValue, 3413 VM_Version::v9_instructions_work() ? 
NULL : 3414 (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr()); 3415 3416 // if someone beat us on the allocation, try again, otherwise continue 3417 __ cmp_and_brx_short(RoldTopValue, RnewTopValue, Assembler::notEqual, Assembler::pn, retry); 3418 3419 // bump total bytes allocated by this thread 3420 // RoldTopValue and RtopAddr are dead, so can use G1 and G3 3421 __ incr_allocated_bytes(Roffset, G1_scratch, G3_scratch); 3422 } 3423 3424 if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) { 3425 // clear object fields 3426 __ bind(initialize_object); 3427 __ deccc(Roffset, sizeof(oopDesc)); 3428 __ br(Assembler::zero, false, Assembler::pt, initialize_header); 3429 __ delayed()->add(RallocatedObject, sizeof(oopDesc), G3_scratch); 3430 3431 // initialize remaining object fields 3432 if (UseBlockZeroing) { 3433 // Use BIS for zeroing 3434 __ bis_zeroing(G3_scratch, Roffset, G1_scratch, initialize_header); 3435 } else { 3436 Label loop; 3437 __ subcc(Roffset, wordSize, Roffset); 3438 __ bind(loop); 3439 //__ subcc(Roffset, wordSize, Roffset); // executed above loop or in delay slot 3440 __ st_ptr(G0, G3_scratch, Roffset); 3441 __ br(Assembler::notEqual, false, Assembler::pt, loop); 3442 __ delayed()->subcc(Roffset, wordSize, Roffset); 3443 } 3444 __ ba_short(initialize_header); 3445 } 3446 3447 // slow case 3448 __ bind(slow_case); 3449 __ get_2_byte_integer_at_bcp(1, G3_scratch, O2, InterpreterMacroAssembler::Unsigned); 3450 __ get_constant_pool(O1); 3451 3452 call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), O1, O2); 3453 3454 __ ba_short(done); 3455 3456 // Initialize the header: mark, klass 3457 __ bind(initialize_header); 3458 3459 if (UseBiasedLocking) { 3460 __ ld_ptr(RinstanceKlass, in_bytes(Klass::prototype_header_offset()), G4_scratch); 3461 } else { 3462 __ set((intptr_t)markOopDesc::prototype(), G4_scratch); 3463 } 3464 __ st_ptr(G4_scratch, RallocatedObject, oopDesc::mark_offset_in_bytes()); // mark 3465 __ store_klass_gap(G0, RallocatedObject); // klass gap if compressed 3466 __ store_klass(RinstanceKlass, RallocatedObject); // klass (last for cms) 3467 3468 { 3469 SkipIfEqual skip_if( 3470 _masm, G4_scratch, &DTraceAllocProbes, Assembler::zero); 3471 // Trigger dtrace event 3472 __ push(atos); 3473 __ call_VM_leaf(noreg, 3474 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), O0); 3475 __ pop(atos); 3476 } 3477 3478 // continue 3479 __ bind(done); 3480 } 3481 3482 3483 3484 void TemplateTable::newarray() { 3485 transition(itos, atos); 3486 __ ldub(Lbcp, 1, O1); 3487 call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), O1, Otos_i); 3488 } 3489 3490 3491 void TemplateTable::anewarray() { 3492 transition(itos, atos); 3493 __ get_constant_pool(O1); 3494 __ get_2_byte_integer_at_bcp(1, G4_scratch, O2, InterpreterMacroAssembler::Unsigned); 3495 call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), O1, O2, Otos_i); 3496 } 3497 3498 3499 void TemplateTable::arraylength() { 3500 transition(atos, itos); 3501 Label ok; 3502 __ verify_oop(Otos_i); 3503 __ tst(Otos_i); 3504 __ throw_if_not_1_x( Assembler::notZero, ok ); 3505 __ delayed()->ld(Otos_i, arrayOopDesc::length_offset_in_bytes(), Otos_i); 3506 __ throw_if_not_2( Interpreter::_throw_NullPointerException_entry, G3_scratch, ok); 3507 } 3508 3509 3510 void TemplateTable::checkcast() { 3511 transition(atos, atos); 3512 Label done, is_null, quicked, cast_ok, resolved; 3513 Register Roffset = G1_scratch; 3514 Register RobjKlass = O5; 3515 

  if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
    // clear object fields
    __ bind(initialize_object);
    __ deccc(Roffset, sizeof(oopDesc));
    __ br(Assembler::zero, false, Assembler::pt, initialize_header);
    __ delayed()->add(RallocatedObject, sizeof(oopDesc), G3_scratch);

    // initialize remaining object fields
    if (UseBlockZeroing) {
      // Use BIS for zeroing
      __ bis_zeroing(G3_scratch, Roffset, G1_scratch, initialize_header);
    } else {
      Label loop;
      __ subcc(Roffset, wordSize, Roffset);
      __ bind(loop);
      //__ subcc(Roffset, wordSize, Roffset);  // executed above loop or in delay slot
      __ st_ptr(G0, G3_scratch, Roffset);
      __ br(Assembler::notEqual, false, Assembler::pt, loop);
      __ delayed()->subcc(Roffset, wordSize, Roffset);
    }
    __ ba_short(initialize_header);
  }

  // slow case
  __ bind(slow_case);
  __ get_2_byte_integer_at_bcp(1, G3_scratch, O2, InterpreterMacroAssembler::Unsigned);
  __ get_constant_pool(O1);

  call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), O1, O2);

  __ ba_short(done);

  // Initialize the header: mark, klass
  __ bind(initialize_header);

  if (UseBiasedLocking) {
    __ ld_ptr(RinstanceKlass, in_bytes(Klass::prototype_header_offset()), G4_scratch);
  } else {
    __ set((intptr_t)markOopDesc::prototype(), G4_scratch);
  }
  __ st_ptr(G4_scratch, RallocatedObject, oopDesc::mark_offset_in_bytes());       // mark
  __ store_klass_gap(G0, RallocatedObject);         // klass gap if compressed
  __ store_klass(RinstanceKlass, RallocatedObject); // klass (last for cms)

  {
    SkipIfEqual skip_if(
        _masm, G4_scratch, &DTraceAllocProbes, Assembler::zero);
    // Trigger dtrace event
    __ push(atos);
    __ call_VM_leaf(noreg,
                    CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), O0);
    __ pop(atos);
  }

  // continue
  __ bind(done);
}


void TemplateTable::newarray() {
  transition(itos, atos);
  __ ldub(Lbcp, 1, O1);
  call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), O1, Otos_i);
}


void TemplateTable::anewarray() {
  transition(itos, atos);
  __ get_constant_pool(O1);
  __ get_2_byte_integer_at_bcp(1, G4_scratch, O2, InterpreterMacroAssembler::Unsigned);
  call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), O1, O2, Otos_i);
}


void TemplateTable::arraylength() {
  transition(atos, itos);
  Label ok;
  __ verify_oop(Otos_i);
  __ tst(Otos_i);
  __ throw_if_not_1_x( Assembler::notZero, ok );
  __ delayed()->ld(Otos_i, arrayOopDesc::length_offset_in_bytes(), Otos_i);
  __ throw_if_not_2( Interpreter::_throw_NullPointerException_entry, G3_scratch, ok);
}


void TemplateTable::checkcast() {
  transition(atos, atos);
  Label done, is_null, quicked, cast_ok, resolved;
  Register Roffset = G1_scratch;
  Register RobjKlass = O5;
  Register RspecifiedKlass = O4;

  // Check for casting a NULL
  __ br_null_short(Otos_i, Assembler::pn, is_null);

  // Get value klass in RobjKlass
  __ load_klass(Otos_i, RobjKlass); // get value klass

  // Get constant pool tag
  __ get_2_byte_integer_at_bcp(1, Lscratch, Roffset, InterpreterMacroAssembler::Unsigned);

  // See if the checkcast has been quickened
  __ get_cpool_and_tags(Lscratch, G3_scratch);
  __ add(G3_scratch, Array<u1>::base_offset_in_bytes(), G3_scratch);
  __ ldub(G3_scratch, Roffset, G3_scratch);
  __ cmp(G3_scratch, JVM_CONSTANT_Class);
  __ br(Assembler::equal, true, Assembler::pt, quicked);
  __ delayed()->sll(Roffset, LogBytesPerWord, Roffset);

  __ push_ptr(); // save receiver for result, and for GC
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
  __ get_vm_result_2(RspecifiedKlass);
  __ pop_ptr(Otos_i, G3_scratch); // restore receiver

  __ ba_short(resolved);

  // Extract target class from constant pool
  __ bind(quicked);
  __ add(Roffset, sizeof(ConstantPool), Roffset);
  __ ld_ptr(Lscratch, Roffset, RspecifiedKlass);
  __ bind(resolved);
  __ load_klass(Otos_i, RobjKlass); // get value klass

  // Generate a fast subtype check.  Branch to cast_ok if no
  // failure.  Throw exception if failure.
  __ gen_subtype_check( RobjKlass, RspecifiedKlass, G3_scratch, G4_scratch, G1_scratch, cast_ok );

  // Not a subtype; so must throw exception
  __ throw_if_not_x( Assembler::never, Interpreter::_throw_ClassCastException_entry, G3_scratch );

  __ bind(cast_ok);

  if (ProfileInterpreter) {
    __ ba_short(done);
  }
  __ bind(is_null);
  __ profile_null_seen(G3_scratch);
  __ bind(done);
}
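
// At the bytecode level, the checkcast sequence above implements
// (illustrative sketch, hypothetical names):
//
//   if (obj != NULL && !obj->klass()->is_subtype_of(specified_klass))
//     throw ClassCastException;
//   // otherwise the (possibly NULL) reference stays on the stack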


void TemplateTable::instanceof() {
  Label done, is_null, quicked, resolved;
  transition(atos, itos);
  Register Roffset = G1_scratch;
  Register RobjKlass = O5;
  Register RspecifiedKlass = O4;

  // Check for a NULL object
  __ br_null_short(Otos_i, Assembler::pt, is_null);

  // Get value klass in RobjKlass
  __ load_klass(Otos_i, RobjKlass); // get value klass

  // Get constant pool tag
  __ get_2_byte_integer_at_bcp(1, Lscratch, Roffset, InterpreterMacroAssembler::Unsigned);

  // See if the instanceof has been quickened
  __ get_cpool_and_tags(Lscratch, G3_scratch);
  __ add(G3_scratch, Array<u1>::base_offset_in_bytes(), G3_scratch);
  __ ldub(G3_scratch, Roffset, G3_scratch);
  __ cmp(G3_scratch, JVM_CONSTANT_Class);
  __ br(Assembler::equal, true, Assembler::pt, quicked);
  __ delayed()->sll(Roffset, LogBytesPerWord, Roffset);

  __ push_ptr(); // save receiver for result, and for GC
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
  __ get_vm_result_2(RspecifiedKlass);
  __ pop_ptr(Otos_i, G3_scratch); // restore receiver

  __ ba_short(resolved);

  // Extract target class from constant pool
  __ bind(quicked);
  __ add(Roffset, sizeof(ConstantPool), Roffset);
  __ get_constant_pool(Lscratch);
  __ ld_ptr(Lscratch, Roffset, RspecifiedKlass);
  __ bind(resolved);
  __ load_klass(Otos_i, RobjKlass); // get value klass

  // Generate a fast subtype check.  Branch to done if no
  // failure.  Return 0 if failure.
  __ or3(G0, 1, Otos_i);      // set result assuming quick tests succeed
  __ gen_subtype_check( RobjKlass, RspecifiedKlass, G3_scratch, G4_scratch, G1_scratch, done );
  // Not a subtype; return 0
  __ clr( Otos_i );

  if (ProfileInterpreter) {
    __ ba_short(done);
  }
  __ bind(is_null);
  __ profile_null_seen(G3_scratch);
  __ bind(done);
}
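
// instanceof, in contrast, pushes an int result instead of throwing
// (illustrative sketch, hypothetical names):
//
//   result = (obj != NULL) && obj->klass()->is_subtype_of(specified_klass);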

void TemplateTable::_breakpoint() {

  // Note: We get here even if we are single stepping.
  // jbug insists on setting breakpoints at every bytecode
  // even if we are in single step mode.

  transition(vtos, vtos);
  // get the unpatched byte code
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), Lmethod, Lbcp);
  __ mov(O0, Lbyte_code);

  // post the breakpoint event
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), Lmethod, Lbcp);

  // complete the execution of original bytecode
  __ dispatch_normal(vtos);
}


//----------------------------------------------------------------------------------------------------
// Exceptions

void TemplateTable::athrow() {
  transition(atos, vtos);

  // This works because the exception is cached in Otos_i, which is the same as O0,
  // which is what throw_exception_entry expects
  assert(Otos_i == Oexception, "see explanation above");

  __ verify_oop(Otos_i);
  __ null_check(Otos_i);
  __ throw_if_not_x(Assembler::never, Interpreter::throw_exception_entry(), G3_scratch);
}


//----------------------------------------------------------------------------------------------------
// Synchronization


// See frame_sparc.hpp for monitor block layout.
// Monitor elements are dynamically allocated by growing the stack as needed.

void TemplateTable::monitorenter() {
  transition(atos, vtos);
  __ verify_oop(Otos_i);
  // Try to acquire a lock on the object;
  // repeat until successful (i.e., until
  // monitorenter returns true).

  { Label ok;
    __ tst(Otos_i);
    __ throw_if_not_1_x( Assembler::notZero, ok);
    __ delayed()->mov(Otos_i, Lscratch); // save obj
    __ throw_if_not_2( Interpreter::_throw_NullPointerException_entry, G3_scratch, ok);
  }

  assert(O0 == Otos_i, "Be sure where the object to lock is");

  // find a free slot in the monitor block

  // initialize entry pointer
  __ clr(O1); // points to free slot or NULL

  {
    Label entry, loop, exit;
    __ add( __ top_most_monitor(), O2 ); // last one to check
    __ ba( entry );
    __ delayed()->mov( Lmonitors, O3 ); // first one to check

    __ bind( loop );

    __ verify_oop(O4); // verify each monitor's oop
    __ tst(O4);        // is this entry unused?
    if (VM_Version::v9_instructions_work()) {
      __ movcc( Assembler::zero, false, Assembler::ptr_cc, O3, O1);
    } else {
      Label L;
      __ br( Assembler::zero, true, Assembler::pn, L );
      __ delayed()->mov(O3, O1); // remember this one if it is unused
      __ bind(L);
    }

    __ cmp(O4, O0); // check if current entry is for same object
    __ brx( Assembler::equal, false, Assembler::pn, exit );
    __ delayed()->inc( O3, frame::interpreter_frame_monitor_size() * wordSize ); // check next one

    __ bind( entry );

    __ cmp( O3, O2 );
    __ brx( Assembler::lessEqualUnsigned, true, Assembler::pt, loop );
    __ delayed()->ld_ptr(O3, BasicObjectLock::obj_offset_in_bytes(), O4);

    __ bind( exit );
  }
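  // The scan above walks the monitor block from the innermost entry
  // (Lmonitors) toward the outermost (top_most_monitor()); roughly
  // (illustrative sketch, hypothetical names):
  //
  //   BasicObjectLock* free_slot = NULL;
  //   for (BasicObjectLock* cur = innermost; cur <= outermost; cur = next(cur)) {
  //     if (cur->obj() == NULL) free_slot = cur;  // remember an unused entry
  //     if (cur->obj() == lock_obj) break;        // same object: stop searching
  //   }
  //   if (free_slot == NULL) free_slot = allocate_new_monitor_entry();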

  { Label allocated;

    // found free slot?
    __ br_notnull_short(O1, Assembler::pn, allocated);

    __ add_monitor_to_stack( false, O2, O3 );
    __ mov(Lmonitors, O1);

    __ bind(allocated);
  }

  // Increment bcp to point to the next bytecode, so exception handling for async. exceptions works correctly.
  // The object has already been popped from the stack, so the expression stack looks correct.
  __ inc(Lbcp);

  __ st_ptr(O0, O1, BasicObjectLock::obj_offset_in_bytes()); // store object
  __ lock_object(O1, O0);

  // check if there's enough space on the stack for the monitors after locking
  __ generate_stack_overflow_check(0);

  // The bcp has already been incremented. Just need to dispatch to next instruction.
  __ dispatch_next(vtos);
}


void TemplateTable::monitorexit() {
  transition(atos, vtos);
  __ verify_oop(Otos_i);
  __ tst(Otos_i);
  __ throw_if_not_x( Assembler::notZero, Interpreter::_throw_NullPointerException_entry, G3_scratch );

  assert(O0 == Otos_i, "just checking");

  { Label entry, loop, found;
    __ add( __ top_most_monitor(), O2 ); // last one to check
    __ ba(entry);
    // Use Lscratch to hold the monitor entry to check, starting with the most recent monitor.
    // By using a local it survives the call to the C routine.
    __ delayed()->mov( Lmonitors, Lscratch );

    __ bind( loop );

    __ verify_oop(O4); // verify each monitor's oop
    __ cmp(O4, O0);    // check if current entry is for desired object
    __ brx( Assembler::equal, true, Assembler::pt, found );
    __ delayed()->mov(Lscratch, O1); // pass found entry as argument to monitorexit

    __ inc( Lscratch, frame::interpreter_frame_monitor_size() * wordSize ); // advance to next

    __ bind( entry );

    __ cmp( Lscratch, O2 );
    __ brx( Assembler::lessEqualUnsigned, true, Assembler::pt, loop );
    __ delayed()->ld_ptr(Lscratch, BasicObjectLock::obj_offset_in_bytes(), O4);

    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
    __ should_not_reach_here();

    __ bind(found);
  }
  __ unlock_object(O1);
}


//----------------------------------------------------------------------------------------------------
// Wide instructions

void TemplateTable::wide() {
  transition(vtos, vtos);
  __ ldub(Lbcp, 1, G3_scratch); // get next bc
  __ sll(G3_scratch, LogBytesPerWord, G3_scratch);
  AddressLiteral ep(Interpreter::_wentry_point);
  __ set(ep, G4_scratch);
  __ ld_ptr(G4_scratch, G3_scratch, G3_scratch);
  __ jmp(G3_scratch, G0);
  __ delayed()->nop();
  // Note: the Lbcp increment step is part of the individual wide bytecode implementations
}


//----------------------------------------------------------------------------------------------------
// Multi arrays

void TemplateTable::multianewarray() {
  transition(vtos, atos);
  // put ndims * wordSize into Lscratch
  __ ldub( Lbcp, 3, Lscratch);
  __ sll( Lscratch, Interpreter::logStackElementSize, Lscratch);
  // Lesp points past the last_dim, so set O1 to the first_dim address
  __ add( Lesp, Lscratch, O1);
  call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), O1);
  __ add( Lesp, Lscratch, Lesp); // pop all dimensions off the stack
}
#endif /* !CC_INTERP */