455 __ fld_d( Address(rcx, rbx, Address::times_ptr, base_offset));
456 __ push(dtos);
457 __ jmpb(Done);
458
459 __ bind(Long);
460 // ltos
461 __ movptr(rax, Address(rcx, rbx, Address::times_ptr, base_offset + 0 * wordSize));
462 NOT_LP64(__ movptr(rdx, Address(rcx, rbx, Address::times_ptr, base_offset + 1 * wordSize)));
463
464 __ push(ltos);
465
466 __ bind(Done);
467 }
468
469
470 void TemplateTable::locals_index(Register reg, int offset) {
// Load the local-variable index operand (an unsigned byte at bcp + offset)
// into 'reg', then negate it — presumably so it can be used as a scaled
// index off the locals base pointer (locals at decreasing addresses);
// confirm against the [iflda]address() helpers.
471 __ load_unsigned_byte(reg, at_bcp(offset));
472 __ negptr(reg);
473 }
474
475
476 void TemplateTable::iload() {
477 transition(vtos, itos);
478 if (RewriteFrequentPairs) {
479 Label rewrite, done;
480
481 // get next byte
482 __ load_unsigned_byte(rbx, at_bcp(Bytecodes::length_for(Bytecodes::_iload)));
483 // if _iload, wait to rewrite to iload2. We only want to rewrite the
484 // last two iloads in a pair. Comparing against fast_iload means that
485 // the next bytecode is neither an iload or a caload, and therefore
486 // an iload pair.
487 __ cmpl(rbx, Bytecodes::_iload);
488 __ jcc(Assembler::equal, done);
489
490 __ cmpl(rbx, Bytecodes::_fast_iload);
491 __ movl(rcx, Bytecodes::_fast_iload2);
492 __ jccb(Assembler::equal, rewrite);
493
494 // if _caload, rewrite to fast_icaload
495 __ cmpl(rbx, Bytecodes::_caload);
496 __ movl(rcx, Bytecodes::_fast_icaload);
497 __ jccb(Assembler::equal, rewrite);
498
730 }
731
732
733 void TemplateTable::fload(int n) {
// fload_<n>: push the float in local slot n via the x87 stack (ftos).
734 transition(vtos, ftos);
735 __ fld_s(faddress(n));
736 }
737
738
739 void TemplateTable::dload(int n) {
// dload_<n>: push the double in local slot n via the x87 stack (dtos).
740 transition(vtos, dtos);
741 __ fld_d(daddress(n));
742 }
743
744
745 void TemplateTable::aload(int n) {
// aload_<n>: load the reference in local slot n into rax (atos).
746 transition(vtos, atos);
747 __ movptr(rax, aaddress(n));
748 }
749
750
751 void TemplateTable::aload_0() {
752 transition(vtos, atos);
753 // According to bytecode histograms, the pairs:
754 //
755 // _aload_0, _fast_igetfield
756 // _aload_0, _fast_agetfield
757 // _aload_0, _fast_fgetfield
758 //
759 // occur frequently. If RewriteFrequentPairs is set, the (slow) _aload_0
760 // bytecode checks if the next bytecode is either _fast_igetfield,
761 // _fast_agetfield or _fast_fgetfield and then rewrites the
762 // current bytecode into a pair bytecode; otherwise it rewrites the current
763 // bytecode into _fast_aload_0 that doesn't do the pair check anymore.
764 //
765 // Note: If the next bytecode is _getfield, the rewrite must be delayed,
766 // otherwise we may miss an opportunity for a pair.
767 //
768 // Also rewrite frequent pairs
769 // aload_0, aload_1
770 // aload_0, iload_1
771 // These bytecodes with a small amount of code are most profitable to rewrite
772 if (RewriteFrequentPairs) {
773 Label rewrite, done;
774 // get next byte
775 __ load_unsigned_byte(rbx, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));
776
777 // do actual aload_0
778 aload(0);
779
780 // if _getfield then wait with rewrite
781 __ cmpl(rbx, Bytecodes::_getfield);
782 __ jcc(Assembler::equal, done);
783
784 // if _igetfield then rewrite to _fast_iaccess_0
785 assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
786 __ cmpl(rbx, Bytecodes::_fast_igetfield);
787 __ movl(rcx, Bytecodes::_fast_iaccess_0);
788 __ jccb(Assembler::equal, rewrite);
789
790 // if _agetfield then rewrite to _fast_aaccess_0
791 assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
792 __ cmpl(rbx, Bytecodes::_fast_agetfield);
2059 // We only put in barriers around volatile refs (they are expensive), not
2060 // _between_ memory refs (that would require us to track the flavor of the
2061 // previous memory refs). Requirements (2) and (3) require some barriers
2062 // before volatile stores and after volatile loads. These nearly cover
2063 // requirement (1) but miss the volatile-store-volatile-load case. This final
2064 // case is placed after volatile-stores although it could just as well go
2065 // before volatile-loads.
2066 void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constraint ) {
// Emit a memory barrier with the given ordering constraint. On a
// uniprocessor no cross-CPU ordering is observable, so nothing is emitted.
2067 // Helper function to insert a is-volatile test and memory barrier
2068 if( !os::is_MP() ) return; // Not needed on single CPU
2069 __ membar(order_constraint);
2070 }
2071
2072 void TemplateTable::resolve_cache_and_index(int byte_no,
2073 Register Rcache,
2074 Register index,
2075 size_t index_size) {
// Resolve the constant-pool cache entry for the current bytecode.
// Emits a fast-path check: if the cache entry's indicator byte already
// equals this bytecode, the entry is resolved and we skip the call.
// Otherwise calls the matching InterpreterRuntime resolver, then reloads
// Rcache/index. 'temp' is pinned to rbx, so callers must not pass rbx.
2076 const Register temp = rbx;
2077 assert_different_registers(Rcache, index, temp);
2078
2079 Label resolved;
2080 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2081 __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, temp, byte_no, 1, index_size);
2082 __ cmpl(temp, (int) bytecode()); // have we resolved this bytecode?
2083 __ jcc(Assembler::equal, resolved);
2084
2085 // resolve first time through
// Select the runtime entry point by bytecode family (field access,
// invoke, invokehandle, invokedynamic).
2086 address entry;
2087 switch (bytecode()) {
2088 case Bytecodes::_getstatic : // fall through
2089 case Bytecodes::_putstatic : // fall through
2090 case Bytecodes::_getfield : // fall through
2091 case Bytecodes::_putfield : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); break;
2092 case Bytecodes::_invokevirtual : // fall through
2093 case Bytecodes::_invokespecial : // fall through
2094 case Bytecodes::_invokestatic : // fall through
2095 case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break;
2096 case Bytecodes::_invokehandle : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle); break;
2097 case Bytecodes::_invokedynamic : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break;
2098 default:
2099 fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(bytecode())));
2100 break;
2101 }
// Pass the bytecode being resolved as the call argument.
2102 __ movl(temp, (int)bytecode());
2103 __ call_VM(noreg, entry, temp);
2104 // Update registers with resolved info
2105 __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
2106 __ bind(resolved);
2107 }
2108
2109
2110 // The cache and index registers must be set before call
2111 void TemplateTable::load_field_cp_cache_entry(Register obj,
2112 Register cache,
2113 Register index,
2114 Register off,
2115 Register flags,
2116 bool is_static = false) {
2117 assert_different_registers(cache, index, flags, off);
2118
2119 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2120 // Field offset
2121 __ movptr(off, Address(cache, index, Address::times_ptr,
2122 in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset())));
2195 } else {
2196 __ pop(atos); // Get the object
2197 __ verify_oop(rax);
2198 __ push(atos); // Restore stack state
2199 }
2200 // rax,: object pointer or NULL
2201 // cache: cache entry pointer
2202 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
2203 rax, cache);
2204 __ get_cache_and_index_at_bcp(cache, index, 1);
2205 __ bind(L1);
2206 }
2207 }
2208
2209 void TemplateTable::pop_and_check_object(Register r) {
// Pop the receiver reference from the expression stack into 'r',
// emitting an implicit null check (field access requires a non-null obj)
// and, in debug builds, an oop verification.
2210 __ pop_ptr(r);
2211 __ null_check(r); // for field access must check obj.
2212 __ verify_oop(r);
2213 }
2214
2215 void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
// Shared template for getfield/getstatic: resolve the field's cp-cache
// entry, post a JVMTI field-access event if enabled, then dispatch on the
// field's tos state (extracted from the cache-entry flags) to load the
// value and push it with the matching tos. Non-static accesses are
// rewritten to their _fast_Xgetfield variants where safe (long is never
// rewritten because of the possible-volatile case).
2216 transition(vtos, vtos);
2217
// Note: 'obj' aliases 'cache' (both rcx) — the cache pointer is consumed
// by load_field_cp_cache_entry before obj is materialized.
2218 const Register cache = rcx;
2219 const Register index = rdx;
2220 const Register obj = rcx;
2221 const Register off = rbx;
2222 const Register flags = rax;
2223
2224 resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
2225 jvmti_post_field_access(cache, index, is_static, false);
2226 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2227
2228 if (!is_static) pop_and_check_object(obj);
2229
// Field address; 'hi' addresses the second word of a two-word field.
2230 const Address lo(obj, off, Address::times_1, 0*wordSize);
2231 const Address hi(obj, off, Address::times_1, 1*wordSize);
2232
2233 Label Done, notByte, notInt, notShort, notChar, notLong, notFloat, notObj, notDouble;
2234
// Extract the tos state from the flags word; btos == 0 lets the first
// case be a plain zero test after masking.
2235 __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
2236 assert(btos == 0, "change code, btos != 0");
2237 // btos
2238 __ andptr(flags, ConstantPoolCacheEntry::tos_state_mask);
2239 __ jcc(Assembler::notZero, notByte);
2240
2241 __ load_signed_byte(rax, lo );
2242 __ push(btos);
2243 // Rewrite bytecode to be faster
2244 if (!is_static) {
2245 patch_bytecode(Bytecodes::_fast_bgetfield, rcx, rbx);
2246 }
2247 __ jmp(Done);
2248
2249 __ bind(notByte);
2250 // itos
2251 __ cmpl(flags, itos );
2252 __ jcc(Assembler::notEqual, notInt);
2253
2254 __ movl(rax, lo );
2255 __ push(itos);
2256 // Rewrite bytecode to be faster
2257 if (!is_static) {
2258 patch_bytecode(Bytecodes::_fast_igetfield, rcx, rbx);
2259 }
2260 __ jmp(Done);
2261
2262 __ bind(notInt);
2263 // atos
2264 __ cmpl(flags, atos );
2265 __ jcc(Assembler::notEqual, notObj);
2266
2267 __ movl(rax, lo );
2268 __ push(atos);
2269 if (!is_static) {
2270 patch_bytecode(Bytecodes::_fast_agetfield, rcx, rbx);
2271 }
2272 __ jmp(Done);
2273
2274 __ bind(notObj);
2275 // ctos
2276 __ cmpl(flags, ctos );
2277 __ jcc(Assembler::notEqual, notChar);
2278
2279 __ load_unsigned_short(rax, lo );
2280 __ push(ctos);
2281 if (!is_static) {
2282 patch_bytecode(Bytecodes::_fast_cgetfield, rcx, rbx);
2283 }
2284 __ jmp(Done);
2285
2286 __ bind(notChar);
2287 // stos
2288 __ cmpl(flags, stos );
2289 __ jcc(Assembler::notEqual, notShort);
2290
2291 __ load_signed_short(rax, lo );
2292 __ push(stos);
2293 if (!is_static) {
2294 patch_bytecode(Bytecodes::_fast_sgetfield, rcx, rbx);
2295 }
2296 __ jmp(Done);
2297
2298 __ bind(notShort);
2299 // ltos
2300 __ cmpl(flags, ltos );
2301 __ jcc(Assembler::notEqual, notLong);
2302
2303 // Generate code as if volatile. There just aren't enough registers to
2304 // save that information and this code is faster than the test.
// x87 fild/fistp gives an atomic 64-bit load on 32-bit x86; the value is
// bounced through the stack into rax:rdx (ltos calling convention).
2305 __ fild_d(lo); // Must load atomically
2306 __ subptr(rsp,2*wordSize); // Make space for store
2307 __ fistp_d(Address(rsp,0));
2308 __ pop(rax);
2309 __ pop(rdx);
2310
2311 __ push(ltos);
2312 // Don't rewrite to _fast_lgetfield for potential volatile case.
2313 __ jmp(Done);
2314
2315 __ bind(notLong);
2316 // ftos
2317 __ cmpl(flags, ftos );
2318 __ jcc(Assembler::notEqual, notFloat);
2319
2320 __ fld_s(lo);
2321 __ push(ftos);
2322 if (!is_static) {
2323 patch_bytecode(Bytecodes::_fast_fgetfield, rcx, rbx);
2324 }
2325 __ jmp(Done);
2326
2327 __ bind(notFloat);
2328 // dtos
2329 __ cmpl(flags, dtos );
2330 __ jcc(Assembler::notEqual, notDouble);
2331
2332 __ fld_d(lo);
2333 __ push(dtos);
2334 if (!is_static) {
2335 patch_bytecode(Bytecodes::_fast_dgetfield, rcx, rbx);
2336 }
2337 __ jmpb(Done);
2338
2339 __ bind(notDouble);
2340
// No other tos state is legal for a field; reaching here is a VM bug.
2341 __ stop("Bad state");
2342
2343 __ bind(Done);
2344 // Doug Lea believes this is not needed with current Sparcs (TSO) and Intel (PSO).
2345 // volatile_barrier( );
2346 }
2347
2348
2349 void TemplateTable::getfield(int byte_no) {
// getfield: instance-field load; delegates to the shared template.
2350 getfield_or_static(byte_no, false);
2351 }
2352
2353
2354 void TemplateTable::getstatic(int byte_no) {
// getstatic: static-field load; delegates to the shared template.
2355 getfield_or_static(byte_no, true);
2356 }
2357
2358 // The registers cache and index expected to be set before call.
2359 // The function may destroy various registers, just not the cache and index registers.
2360 void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) {
2361
2362 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2363
2364 if (JvmtiExport::can_post_field_modification()) {
2365 // Check to see if a field modification watch has been set before we take
2366 // the time to call into the VM.
2367 Label L1;
2368 assert_different_registers(cache, index, rax);
2369 __ mov32(rax, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2370 __ testl(rax, rax);
2371 __ jcc(Assembler::zero, L1);
2372
2403 __ bind(valsize_known);
2404 // setup object pointer
2405 __ movptr(rbx, Address(rbx, 0));
2406 }
2407 // cache entry pointer
2408 __ addptr(rax, in_bytes(cp_base_offset));
2409 __ shll(rdx, LogBytesPerWord);
2410 __ addptr(rax, rdx);
2411 // object (tos)
2412 __ mov(rcx, rsp);
2413 // rbx,: object pointer set up above (NULL if static)
2414 // rax,: cache entry pointer
2415 // rcx: jvalue object on the stack
2416 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification),
2417 rbx, rax, rcx);
2418 __ get_cache_and_index_at_bcp(cache, index, 1);
2419 __ bind(L1);
2420 }
2421 }
2422
2423
2424 void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
// Shared template for putfield/putstatic: resolve the field's cp-cache
// entry, post a JVMTI field-modification event if enabled, then dispatch
// on the field's tos state to pop the value and store it. Non-static
// stores are rewritten to their _fast_Xputfield variants (except the
// volatile long path). The is-volatile bit is extracted into rdx up
// front and re-tested after the store to emit the required
// StoreLoad|StoreStore barrier for volatile fields.
2425 transition(vtos, vtos);
2426
// Note: 'obj' aliases 'cache' (both rcx).
2427 const Register cache = rcx;
2428 const Register index = rdx;
2429 const Register obj = rcx;
2430 const Register off = rbx;
2431 const Register flags = rax;
2432
2433 resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
2434 jvmti_post_field_mod(cache, index, is_static);
2435 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2436
2437 // Doug Lea believes this is not needed with current Sparcs (TSO) and Intel (PSO).
2438 // volatile_barrier( );
2439
// rdx := 1 if the field is volatile, else 0 (checked at Done below).
2440 Label notVolatile, Done;
2441 __ movl(rdx, flags);
2442 __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
2443 __ andl(rdx, 0x1);
2444
2445 // field addresses
2446 const Address lo(obj, off, Address::times_1, 0*wordSize);
2447 const Address hi(obj, off, Address::times_1, 1*wordSize);
2448
2449 Label notByte, notInt, notShort, notChar, notLong, notFloat, notObj, notDouble;
2450
// Extract the tos state; btos == 0 lets the first case be a zero test.
2451 __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
2452 assert(btos == 0, "change code, btos != 0");
2453 __ andl(flags, ConstantPoolCacheEntry::tos_state_mask);
2454 __ jcc(Assembler::notZero, notByte);
2455
2456 // btos
2457 {
2458 __ pop(btos);
2459 if (!is_static) pop_and_check_object(obj);
2460 __ movb(lo, rax);
2461 if (!is_static) {
2462 patch_bytecode(Bytecodes::_fast_bputfield, rcx, rbx, true, byte_no);
2463 }
2464 __ jmp(Done);
2465 }
2466
2467 __ bind(notByte);
2468 __ cmpl(flags, itos);
2469 __ jcc(Assembler::notEqual, notInt);
2470
2471 // itos
2472 {
2473 __ pop(itos);
2474 if (!is_static) pop_and_check_object(obj);
2475 __ movl(lo, rax);
2476 if (!is_static) {
2477 patch_bytecode(Bytecodes::_fast_iputfield, rcx, rbx, true, byte_no);
2478 }
2479 __ jmp(Done);
2480 }
2481
2482 __ bind(notInt);
2483 __ cmpl(flags, atos);
2484 __ jcc(Assembler::notEqual, notObj);
2485
2486 // atos
2487 {
2488 __ pop(atos);
2489 if (!is_static) pop_and_check_object(obj);
// Reference stores go through the GC barrier-set-aware helper.
2490 do_oop_store(_masm, lo, rax, _bs->kind(), false);
2491 if (!is_static) {
2492 patch_bytecode(Bytecodes::_fast_aputfield, rcx, rbx, true, byte_no);
2493 }
2494 __ jmp(Done);
2495 }
2496
2497 __ bind(notObj);
2498 __ cmpl(flags, ctos);
2499 __ jcc(Assembler::notEqual, notChar);
2500
2501 // ctos
2502 {
2503 __ pop(ctos);
2504 if (!is_static) pop_and_check_object(obj);
2505 __ movw(lo, rax);
2506 if (!is_static) {
2507 patch_bytecode(Bytecodes::_fast_cputfield, rcx, rbx, true, byte_no);
2508 }
2509 __ jmp(Done);
2510 }
2511
2512 __ bind(notChar);
2513 __ cmpl(flags, stos);
2514 __ jcc(Assembler::notEqual, notShort);
2515
2516 // stos
2517 {
2518 __ pop(stos);
2519 if (!is_static) pop_and_check_object(obj);
2520 __ movw(lo, rax);
2521 if (!is_static) {
2522 patch_bytecode(Bytecodes::_fast_sputfield, rcx, rbx, true, byte_no);
2523 }
2524 __ jmp(Done);
2525 }
2526
2527 __ bind(notShort);
2528 __ cmpl(flags, ltos);
2529 __ jcc(Assembler::notEqual, notLong);
2530
2531 // ltos
2532 {
// Volatile longs must be stored atomically on 32-bit x86, so the value
// is bounced through the stack and written with one 64-bit x87 fistp.
// Both sub-paths jump straight to notVolatile: the barrier is already
// emitted here (volatile) or not needed (non-volatile).
2533 Label notVolatileLong;
2534 __ testl(rdx, rdx);
2535 __ jcc(Assembler::zero, notVolatileLong);
2536
2537 __ pop(ltos); // overwrites rdx, do this after testing volatile.
2538 if (!is_static) pop_and_check_object(obj);
2539
2540 // Replace with real volatile test
2541 __ push(rdx);
2542 __ push(rax); // Must update atomically with FIST
2543 __ fild_d(Address(rsp,0)); // So load into FPU register
2544 __ fistp_d(lo); // and put into memory atomically
2545 __ addptr(rsp, 2*wordSize);
2546 // volatile_barrier();
2547 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
2548 Assembler::StoreStore));
2549 // Don't rewrite volatile version
2550 __ jmp(notVolatile);
2551
2552 __ bind(notVolatileLong);
2553
2554 __ pop(ltos); // overwrites rdx
2555 if (!is_static) pop_and_check_object(obj);
2556 NOT_LP64(__ movptr(hi, rdx));
2557 __ movptr(lo, rax);
2558 if (!is_static) {
2559 patch_bytecode(Bytecodes::_fast_lputfield, rcx, rbx, true, byte_no);
2560 }
2561 __ jmp(notVolatile);
2562 }
2563
2564 __ bind(notLong);
2565 __ cmpl(flags, ftos);
2566 __ jcc(Assembler::notEqual, notFloat);
2567
2568 // ftos
2569 {
2570 __ pop(ftos);
2571 if (!is_static) pop_and_check_object(obj);
2572 __ fstp_s(lo);
2573 if (!is_static) {
2574 patch_bytecode(Bytecodes::_fast_fputfield, rcx, rbx, true, byte_no);
2575 }
2576 __ jmp(Done);
2577 }
2578
2579 __ bind(notFloat);
// In product builds dtos is the only remaining state, so the check is
// debug-only.
2580 #ifdef ASSERT
2581 __ cmpl(flags, dtos);
2582 __ jcc(Assembler::notEqual, notDouble);
2583 #endif
2584
2585 // dtos
2586 {
2587 __ pop(dtos);
2588 if (!is_static) pop_and_check_object(obj);
2589 __ fstp_d(lo);
2590 if (!is_static) {
2591 patch_bytecode(Bytecodes::_fast_dputfield, rcx, rbx, true, byte_no);
2592 }
2593 __ jmp(Done);
2594 }
2595
2596 #ifdef ASSERT
2597 __ bind(notDouble);
2598 __ stop("Bad state");
2599 #endif
2600
2601 __ bind(Done);
2602
2603 // Check for volatile store
2604 __ testl(rdx, rdx);
2605 __ jcc(Assembler::zero, notVolatile);
2606 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
2607 Assembler::StoreStore));
2608 __ bind(notVolatile);
2609 }
2610
2611
2612 void TemplateTable::putfield(int byte_no) {
// putfield: instance-field store; delegates to the shared template.
2613 putfield_or_static(byte_no, false);
2614 }
2615
2616
2617 void TemplateTable::putstatic(int byte_no) {
// putstatic: static-field store; delegates to the shared template.
2618 putfield_or_static(byte_no, true);
2619 }
2620
2621 void TemplateTable::jvmti_post_fast_field_mod() {
2622 if (JvmtiExport::can_post_field_modification()) {
2623 // Check to see if a field modification watch has been set before we take
2624 // the time to call into the VM.
2625 Label L2;
2626 __ mov32(rcx, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2627 __ testl(rcx,rcx);
2628 __ jcc(Assembler::zero, L2);
2629 __ pop_ptr(rbx); // copy the object pointer from tos
2630 __ verify_oop(rbx);
2631 __ push_ptr(rbx); // put the object pointer back on tos
2632
2633 // Save tos values before call_VM() clobbers them. Since we have
2634 // to do it for every data type, we use the saved values as the
2635 // jvalue object.
2847 }
2848
2849
2850
2851 //----------------------------------------------------------------------------------------------------
2852 // Calls
2853
2854 void TemplateTable::count_calls(Register method, Register temp) {
// Call counting is handled elsewhere on this platform; this entry must
// never be reached, so it traps in debug builds.
2855 // implemented elsewhere
2856 ShouldNotReachHere();
2857 }
2858
2859
2860 void TemplateTable::prepare_invoke(int byte_no,
2861 Register method, // linked method (or i-klass)
2862 Register index, // itable index, MethodType, etc.
2863 Register recv, // if caller wants to see it
2864 Register flags // if caller wants to test it
2865 ) {
2866 // determine flags
2867 const Bytecodes::Code code = bytecode();
2868 const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
2869 const bool is_invokedynamic = code == Bytecodes::_invokedynamic;
2870 const bool is_invokehandle = code == Bytecodes::_invokehandle;
2871 const bool is_invokevirtual = code == Bytecodes::_invokevirtual;
2872 const bool is_invokespecial = code == Bytecodes::_invokespecial;
2873 const bool load_receiver = (recv != noreg);
2874 const bool save_flags = (flags != noreg);
2875 assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
2876 assert(save_flags == (is_invokeinterface || is_invokevirtual), "need flags for vfinal");
2877 assert(flags == noreg || flags == rdx, "");
2878 assert(recv == noreg || recv == rcx, "");
2879
2880 // setup registers & access constant pool cache
2881 if (recv == noreg) recv = rcx;
2882 if (flags == noreg) flags = rdx;
2883 assert_different_registers(method, index, recv, flags);
2884
2885 // save 'interpreter return address'
2886 __ save_bcp();
2887
2970 __ profile_final_call(rax);
2971 __ profile_arguments_type(rax, method, rsi, true);
2972
2973 __ jump_from_interpreted(method, rax);
2974
2975 __ bind(notFinal);
2976
2977 // get receiver klass
2978 __ null_check(recv, oopDesc::klass_offset_in_bytes());
2979 __ load_klass(rax, recv);
2980
2981 // profile this call
2982 __ profile_virtual_call(rax, rdi, rdx);
2983
2984 // get target Method* & entry point
2985 __ lookup_virtual_method(rax, index, method);
2986 __ profile_arguments_type(rdx, method, rsi, true);
2987 __ jump_from_interpreted(method, rdx);
2988 }
2989
2990
2991 void TemplateTable::invokevirtual(int byte_no) {
// invokevirtual: resolve the call site via prepare_invoke (method or
// vtable index into rbx, receiver into rcx, flags into rdx), then emit
// the vfinal/vtable dispatch in invokevirtual_helper.
2992 transition(vtos, vtos);
2993 assert(byte_no == f2_byte, "use this argument");
2994 prepare_invoke(byte_no,
2995 rbx, // method or vtable index
2996 noreg, // unused itable index
2997 rcx, rdx); // recv, flags
2998
2999 // rbx: index
3000 // rcx: receiver
3001 // rdx: flags
3002
3003 invokevirtual_helper(rbx, rcx, rdx);
3004 }
3005
3006
3007 void TemplateTable::invokespecial(int byte_no) {
3008 transition(vtos, vtos);
3009 assert(byte_no == f1_byte, "use this argument");
3010 prepare_invoke(byte_no, rbx, noreg, // get f1 Method*
3011 rcx); // get receiver also for null check
|
455 __ fld_d( Address(rcx, rbx, Address::times_ptr, base_offset));
456 __ push(dtos);
457 __ jmpb(Done);
458
459 __ bind(Long);
460 // ltos
461 __ movptr(rax, Address(rcx, rbx, Address::times_ptr, base_offset + 0 * wordSize));
462 NOT_LP64(__ movptr(rdx, Address(rcx, rbx, Address::times_ptr, base_offset + 1 * wordSize)));
463
464 __ push(ltos);
465
466 __ bind(Done);
467 }
468
469
470 void TemplateTable::locals_index(Register reg, int offset) {
// Load the local-variable index operand (an unsigned byte at bcp + offset)
// into 'reg', then negate it — presumably so it can be used as a scaled
// index off the locals base pointer; confirm against the
// [iflda]address() helpers.
471 __ load_unsigned_byte(reg, at_bcp(offset));
472 __ negptr(reg);
473 }
474
475 void TemplateTable::iload() {
// iload: normal variant, bytecode rewriting permitted (default rc).
476 iload_internal();
477 }
478
479 void TemplateTable::nofast_iload() {
// _nofast_iload: identical semantics but must never rewrite the bytecode.
480 iload_internal(MAY_NOT_REWRITE);
481 }
482
483 void TemplateTable::iload_internal(RewriteControl rc) {
484 transition(vtos, itos);
485 if (RewriteFrequentPairs && rc == MAY_REWRITE) {
486 Label rewrite, done;
487
488 // get next byte
489 __ load_unsigned_byte(rbx, at_bcp(Bytecodes::length_for(Bytecodes::_iload)));
490 // if _iload, wait to rewrite to iload2. We only want to rewrite the
491 // last two iloads in a pair. Comparing against fast_iload means that
492 // the next bytecode is neither an iload or a caload, and therefore
493 // an iload pair.
494 __ cmpl(rbx, Bytecodes::_iload);
495 __ jcc(Assembler::equal, done);
496
497 __ cmpl(rbx, Bytecodes::_fast_iload);
498 __ movl(rcx, Bytecodes::_fast_iload2);
499 __ jccb(Assembler::equal, rewrite);
500
501 // if _caload, rewrite to fast_icaload
502 __ cmpl(rbx, Bytecodes::_caload);
503 __ movl(rcx, Bytecodes::_fast_icaload);
504 __ jccb(Assembler::equal, rewrite);
505
737 }
738
739
740 void TemplateTable::fload(int n) {
// fload_<n>: push the float in local slot n via the x87 stack (ftos).
741 transition(vtos, ftos);
742 __ fld_s(faddress(n));
743 }
744
745
746 void TemplateTable::dload(int n) {
// dload_<n>: push the double in local slot n via the x87 stack (dtos).
747 transition(vtos, dtos);
748 __ fld_d(daddress(n));
749 }
750
751
752 void TemplateTable::aload(int n) {
// aload_<n>: load the reference in local slot n into rax (atos).
753 transition(vtos, atos);
754 __ movptr(rax, aaddress(n));
755 }
756
757 void TemplateTable::aload_0() {
// aload_0: normal variant, bytecode rewriting permitted (default rc).
758 aload_0_internal();
759 }
760
761 void TemplateTable::nofast_aload_0() {
// _nofast_aload_0: identical semantics but must never rewrite the bytecode.
762 aload_0_internal(MAY_NOT_REWRITE);
763 }
764
765 void TemplateTable::aload_0_internal(RewriteControl rc) {
766 transition(vtos, atos);
767 // According to bytecode histograms, the pairs:
768 //
769 // _aload_0, _fast_igetfield
770 // _aload_0, _fast_agetfield
771 // _aload_0, _fast_fgetfield
772 //
773 // occur frequently. If RewriteFrequentPairs is set, the (slow) _aload_0
774 // bytecode checks if the next bytecode is either _fast_igetfield,
775 // _fast_agetfield or _fast_fgetfield and then rewrites the
776 // current bytecode into a pair bytecode; otherwise it rewrites the current
777 // bytecode into _fast_aload_0 that doesn't do the pair check anymore.
778 //
779 // Note: If the next bytecode is _getfield, the rewrite must be delayed,
780 // otherwise we may miss an opportunity for a pair.
781 //
782 // Also rewrite frequent pairs
783 // aload_0, aload_1
784 // aload_0, iload_1
785 // These bytecodes with a small amount of code are most profitable to rewrite
786 if (RewriteFrequentPairs && rc == MAY_REWRITE) {
787 Label rewrite, done;
788 // get next byte
789 __ load_unsigned_byte(rbx, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));
790
791 // do actual aload_0
792 aload(0);
793
794 // if _getfield then wait with rewrite
795 __ cmpl(rbx, Bytecodes::_getfield);
796 __ jcc(Assembler::equal, done);
797
798 // if _igetfield then rewrite to _fast_iaccess_0
799 assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
800 __ cmpl(rbx, Bytecodes::_fast_igetfield);
801 __ movl(rcx, Bytecodes::_fast_iaccess_0);
802 __ jccb(Assembler::equal, rewrite);
803
804 // if _agetfield then rewrite to _fast_aaccess_0
805 assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
806 __ cmpl(rbx, Bytecodes::_fast_agetfield);
2073 // We only put in barriers around volatile refs (they are expensive), not
2074 // _between_ memory refs (that would require us to track the flavor of the
2075 // previous memory refs). Requirements (2) and (3) require some barriers
2076 // before volatile stores and after volatile loads. These nearly cover
2077 // requirement (1) but miss the volatile-store-volatile-load case. This final
2078 // case is placed after volatile-stores although it could just as well go
2079 // before volatile-loads.
2080 void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constraint ) {
// Emit a memory barrier with the given ordering constraint. On a
// uniprocessor no cross-CPU ordering is observable, so nothing is emitted.
2081 // Helper function to insert a is-volatile test and memory barrier
2082 if( !os::is_MP() ) return; // Not needed on single CPU
2083 __ membar(order_constraint);
2084 }
2085
2086 void TemplateTable::resolve_cache_and_index(int byte_no,
2087 Register Rcache,
2088 Register index,
2089 size_t index_size) {
2090 const Register temp = rbx;
2091 assert_different_registers(Rcache, index, temp);
2092
2093 Bytecodes::Code code = bytecode();
2094 switch (code) {
2095 case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
2096 case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
2097 case Bytecodes::_nofast_invokevirtual: code = Bytecodes::_invokevirtual;
2098 }
2099
2100 Label resolved;
2101 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2102 __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, temp, byte_no, 1, index_size);
2103 __ cmpl(temp, code); // have we resolved this bytecode?
2104 __ jcc(Assembler::equal, resolved);
2105
2106 // resolve first time through
2107 address entry;
2108 switch (code) {
2109 case Bytecodes::_getstatic : // fall through
2110 case Bytecodes::_putstatic : // fall through
2111 case Bytecodes::_getfield : // fall through
2112 case Bytecodes::_putfield : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); break;
2113 case Bytecodes::_invokevirtual : // fall through
2114 case Bytecodes::_invokespecial : // fall through
2115 case Bytecodes::_invokestatic : // fall through
2116 case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break;
2117 case Bytecodes::_invokehandle : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle); break;
2118 case Bytecodes::_invokedynamic : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break;
2119 default:
2120 fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(code)));
2121 break;
2122 }
2123 __ movl(temp, code);
2124 __ call_VM(noreg, entry, temp);
2125 // Update registers with resolved info
2126 __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
2127 __ bind(resolved);
2128 }
2129
2130
2131 // The cache and index registers must be set before call
2132 void TemplateTable::load_field_cp_cache_entry(Register obj,
2133 Register cache,
2134 Register index,
2135 Register off,
2136 Register flags,
2137 bool is_static = false) {
2138 assert_different_registers(cache, index, flags, off);
2139
2140 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2141 // Field offset
2142 __ movptr(off, Address(cache, index, Address::times_ptr,
2143 in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset())));
2216 } else {
2217 __ pop(atos); // Get the object
2218 __ verify_oop(rax);
2219 __ push(atos); // Restore stack state
2220 }
2221 // rax,: object pointer or NULL
2222 // cache: cache entry pointer
2223 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
2224 rax, cache);
2225 __ get_cache_and_index_at_bcp(cache, index, 1);
2226 __ bind(L1);
2227 }
2228 }
2229
2230 void TemplateTable::pop_and_check_object(Register r) {
// Pop the receiver reference from the expression stack into 'r',
// emitting an implicit null check (field access requires a non-null obj)
// and, in debug builds, an oop verification.
2231 __ pop_ptr(r);
2232 __ null_check(r); // for field access must check obj.
2233 __ verify_oop(r);
2234 }
2235
2236 void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
// Shared template for getfield/getstatic/_nofast_getfield: resolve the
// field's cp-cache entry, post a JVMTI field-access event if enabled,
// then dispatch on the field's tos state (from the cache-entry flags) to
// load the value and push it with the matching tos. Non-static accesses
// are rewritten to their _fast_Xgetfield variants only when
// rc == MAY_REWRITE (the _nofast variant passes MAY_NOT_REWRITE); long
// is never rewritten because of the possible-volatile case.
2237 transition(vtos, vtos);
2238
// Note: 'obj' aliases 'cache' (both rcx) — the cache pointer is consumed
// by load_field_cp_cache_entry before obj is materialized.
2239 const Register cache = rcx;
2240 const Register index = rdx;
2241 const Register obj = rcx;
2242 const Register off = rbx;
2243 const Register flags = rax;
2244
2245 resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
2246 jvmti_post_field_access(cache, index, is_static, false);
2247 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2248
2249 if (!is_static) pop_and_check_object(obj);
2250
// Field address; 'hi' addresses the second word of a two-word field.
2251 const Address lo(obj, off, Address::times_1, 0*wordSize);
2252 const Address hi(obj, off, Address::times_1, 1*wordSize);
2253
2254 Label Done, notByte, notInt, notShort, notChar, notLong, notFloat, notObj, notDouble;
2255
// Extract the tos state from the flags word; btos == 0 lets the first
// case be a plain zero test after masking.
2256 __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
2257 assert(btos == 0, "change code, btos != 0");
2258 // btos
2259 __ andptr(flags, ConstantPoolCacheEntry::tos_state_mask);
2260 __ jcc(Assembler::notZero, notByte);
2261
2262 __ load_signed_byte(rax, lo );
2263 __ push(btos);
2264 // Rewrite bytecode to be faster
2265 if (!is_static && rc == MAY_REWRITE) {
2266 patch_bytecode(Bytecodes::_fast_bgetfield, rcx, rbx);
2267 }
2268 __ jmp(Done);
2269
2270 __ bind(notByte);
2271 // itos
2272 __ cmpl(flags, itos );
2273 __ jcc(Assembler::notEqual, notInt);
2274
2275 __ movl(rax, lo );
2276 __ push(itos);
2277 // Rewrite bytecode to be faster
2278 if (!is_static && rc == MAY_REWRITE) {
2279 patch_bytecode(Bytecodes::_fast_igetfield, rcx, rbx);
2280 }
2281 __ jmp(Done);
2282
2283 __ bind(notInt);
2284 // atos
2285 __ cmpl(flags, atos );
2286 __ jcc(Assembler::notEqual, notObj);
2287
2288 __ movl(rax, lo );
2289 __ push(atos);
2290 if (!is_static && rc == MAY_REWRITE) {
2291 patch_bytecode(Bytecodes::_fast_agetfield, rcx, rbx);
2292 }
2293 __ jmp(Done);
2294
2295 __ bind(notObj);
2296 // ctos
2297 __ cmpl(flags, ctos );
2298 __ jcc(Assembler::notEqual, notChar);
2299
2300 __ load_unsigned_short(rax, lo );
2301 __ push(ctos);
2302 if (!is_static && rc == MAY_REWRITE) {
2303 patch_bytecode(Bytecodes::_fast_cgetfield, rcx, rbx);
2304 }
2305 __ jmp(Done);
2306
2307 __ bind(notChar);
2308 // stos
2309 __ cmpl(flags, stos );
2310 __ jcc(Assembler::notEqual, notShort);
2311
2312 __ load_signed_short(rax, lo );
2313 __ push(stos);
2314 if (!is_static && rc == MAY_REWRITE) {
2315 patch_bytecode(Bytecodes::_fast_sgetfield, rcx, rbx);
2316 }
2317 __ jmp(Done);
2318
2319 __ bind(notShort);
2320 // ltos
2321 __ cmpl(flags, ltos );
2322 __ jcc(Assembler::notEqual, notLong);
2323
2324 // Generate code as if volatile. There just aren't enough registers to
2325 // save that information and this code is faster than the test.
// x87 fild/fistp gives an atomic 64-bit load on 32-bit x86; the value is
// bounced through the stack into rax:rdx (ltos calling convention).
2326 __ fild_d(lo); // Must load atomically
2327 __ subptr(rsp,2*wordSize); // Make space for store
2328 __ fistp_d(Address(rsp,0));
2329 __ pop(rax);
2330 __ pop(rdx);
2331
2332 __ push(ltos);
2333 // Don't rewrite to _fast_lgetfield for potential volatile case.
2334 __ jmp(Done);
2335
2336 __ bind(notLong);
2337 // ftos
2338 __ cmpl(flags, ftos );
2339 __ jcc(Assembler::notEqual, notFloat);
2340
2341 __ fld_s(lo);
2342 __ push(ftos);
2343 if (!is_static && rc == MAY_REWRITE) {
2344 patch_bytecode(Bytecodes::_fast_fgetfield, rcx, rbx);
2345 }
2346 __ jmp(Done);
2347
2348 __ bind(notFloat);
2349 // dtos
2350 __ cmpl(flags, dtos );
2351 __ jcc(Assembler::notEqual, notDouble);
2352
2353 __ fld_d(lo);
2354 __ push(dtos);
2355 if (!is_static && rc == MAY_REWRITE) {
2356 patch_bytecode(Bytecodes::_fast_dgetfield, rcx, rbx);
2357 }
2358 __ jmpb(Done);
2359
2360 __ bind(notDouble);
2361
// No other tos state is legal for a field; reaching here is a VM bug.
2362 __ stop("Bad state");
2363
2364 __ bind(Done);
2365 // Doug Lea believes this is not needed with current Sparcs (TSO) and Intel (PSO).
2366 // volatile_barrier( );
2367 }
2368
// getfield: load an instance field; bytecode rewriting to a _fast_* form
// is permitted. MAY_REWRITE is the declared default — spelled out here for
// symmetry with nofast_getfield below.
2369 void TemplateTable::getfield(int byte_no) {
2370 getfield_or_static(byte_no, false, MAY_REWRITE);
2371 }
2372
// Non-rewriting variant of getfield: same field load, but the bytecode is
// never patched to a _fast_* form (MAY_NOT_REWRITE).
2373 void TemplateTable::nofast_getfield(int byte_no) {
2374 getfield_or_static(byte_no, false, MAY_NOT_REWRITE);
2375 }
2376
// getstatic: load a static field. The rewrite control is irrelevant for the
// static path (patching is guarded by !is_static inside getfield_or_static),
// so passing the MAY_REWRITE default explicitly is behavior-identical.
2377 void TemplateTable::getstatic(int byte_no) {
2378 getfield_or_static(byte_no, true, MAY_REWRITE);
2379 }
2380
2381 // The registers cache and index expected to be set before call.
2382 // The function may destroy various registers, just not the cache and index registers.
// If JVMTI field-modification watches are enabled, call into the VM so a
// FieldModification event can be posted before the store is performed.
// Fast path: a single load-and-test of the global modification count
// skips the whole block when no watch is set.
2383 void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) {
2384
2385 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2386
2387 if (JvmtiExport::can_post_field_modification()) {
2388 // Check to see if a field modification watch has been set before we take
2389 // the time to call into the VM.
2390 Label L1;
2391 assert_different_registers(cache, index, rax);
2392 __ mov32(rax, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2393 __ testl(rax, rax);
2394 __ jcc(Assembler::zero, L1);
2395
// NOTE(review): source lines 2396-2425 are elided in this listing; they
// evidently compute the value size and leave rbx pointing at the object
// slot before valsize_known — confirm against the full file.
2426 __ bind(valsize_known);
2427 // setup object pointer
2428 __ movptr(rbx, Address(rbx, 0));
2429 }
2430 // cache entry pointer
2431 __ addptr(rax, in_bytes(cp_base_offset));
2432 __ shll(rdx, LogBytesPerWord);
2433 __ addptr(rax, rdx);
2434 // object (tos)
2435 __ mov(rcx, rsp);
2436 // rbx,: object pointer set up above (NULL if static)
2437 // rax,: cache entry pointer
2438 // rcx: jvalue object on the stack
2439 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification),
2440 rbx, rax, rcx);
// Re-fetch the cp-cache entry registers after the VM call, which may
// have clobbered them.
2441 __ get_cache_and_index_at_bcp(cache, index, 1);
2442 __ bind(L1);
2443 }
2444 }
2445
// Generate code for putfield / putstatic (and their nofast variants).
// Pops the value (and, for instance fields, the receiver) from the
// expression stack, dispatches on the field's TosState, stores the value
// at the resolved offset, and — when rc == MAY_REWRITE and the field is
// non-static — patches the bytecode to its _fast_* form. Volatile stores
// are followed by a StoreLoad|StoreStore barrier; volatile longs are
// written atomically via the FPU (FILD/FISTP).
2446 void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
2447 transition(vtos, vtos);
2448
2449 const Register cache = rcx;
2450 const Register index = rdx;
2451 const Register obj = rcx;
2452 const Register off = rbx;
2453 const Register flags = rax;
2454
2455 resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
2456 jvmti_post_field_mod(cache, index, is_static);
2457 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2458
2459 // Doug Lea believes this is not needed with current Sparcs (TSO) and Intel (PSO).
2460 // volatile_barrier( );
2461
2462 Label notVolatile, Done;
2463 __ movl(rdx, flags);
2464 __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
2465 __ andl(rdx, 0x1); // rdx = 1 iff the field is volatile; must stay live until Done
2466
2467 // field addresses
2468 const Address lo(obj, off, Address::times_1, 0*wordSize);
2469 const Address hi(obj, off, Address::times_1, 1*wordSize);
2470
2471 Label notByte, notInt, notShort, notChar, notLong, notFloat, notObj, notDouble;
2472
2473 __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
2474 assert(btos == 0, "change code, btos != 0");
2475 __ andl(flags, ConstantPoolCacheEntry::tos_state_mask); // flags = field's TosState; btos==0 falls through
2476 __ jcc(Assembler::notZero, notByte);
2477
2478 // btos
2479 {
2480 __ pop(btos);
2481 if (!is_static) pop_and_check_object(obj);
2482 __ movb(lo, rax);
2483 if (!is_static && rc == MAY_REWRITE) {
2484 patch_bytecode(Bytecodes::_fast_bputfield, rcx, rbx, true, byte_no);
2485 }
2486 __ jmp(Done);
2487 }
2488
2489 __ bind(notByte);
2490 __ cmpl(flags, itos);
2491 __ jcc(Assembler::notEqual, notInt);
2492
2493 // itos
2494 {
2495 __ pop(itos);
2496 if (!is_static) pop_and_check_object(obj);
2497 __ movl(lo, rax);
2498 if (!is_static && rc == MAY_REWRITE) {
2499 patch_bytecode(Bytecodes::_fast_iputfield, rcx, rbx, true, byte_no);
2500 }
2501 __ jmp(Done);
2502 }
2503
2504 __ bind(notInt);
2505 __ cmpl(flags, atos);
2506 __ jcc(Assembler::notEqual, notObj);
2507
2508 // atos
2509 {
2510 __ pop(atos);
2511 if (!is_static) pop_and_check_object(obj);
// Oop store goes through the barrier-set-aware helper (GC write barrier).
2512 do_oop_store(_masm, lo, rax, _bs->kind(), false);
2513 if (!is_static && rc == MAY_REWRITE) {
2514 patch_bytecode(Bytecodes::_fast_aputfield, rcx, rbx, true, byte_no);
2515 }
2516 __ jmp(Done);
2517 }
2518
2519 __ bind(notObj);
2520 __ cmpl(flags, ctos);
2521 __ jcc(Assembler::notEqual, notChar);
2522
2523 // ctos
2524 {
2525 __ pop(ctos);
2526 if (!is_static) pop_and_check_object(obj);
2527 __ movw(lo, rax);
2528 if (!is_static && rc == MAY_REWRITE) {
2529 patch_bytecode(Bytecodes::_fast_cputfield, rcx, rbx, true, byte_no);
2530 }
2531 __ jmp(Done);
2532 }
2533
2534 __ bind(notChar);
2535 __ cmpl(flags, stos);
2536 __ jcc(Assembler::notEqual, notShort);
2537
2538 // stos
2539 {
2540 __ pop(stos);
2541 if (!is_static) pop_and_check_object(obj);
2542 __ movw(lo, rax);
2543 if (!is_static && rc == MAY_REWRITE) {
2544 patch_bytecode(Bytecodes::_fast_sputfield, rcx, rbx, true, byte_no);
2545 }
2546 __ jmp(Done);
2547 }
2548
2549 __ bind(notShort);
2550 __ cmpl(flags, ltos);
2551 __ jcc(Assembler::notEqual, notLong);
2552
2553 // ltos
2554 {
2555 Label notVolatileLong;
2556 __ testl(rdx, rdx);
2557 __ jcc(Assembler::zero, notVolatileLong);
2558
2559 __ pop(ltos); // overwrites rdx, do this after testing volatile.
2560 if (!is_static) pop_and_check_object(obj);
2561
2562 // Replace with real volatile test
2563 __ push(rdx);
2564 __ push(rax); // Must update atomically with FIST
2565 __ fild_d(Address(rsp,0)); // So load into FPU register
2566 __ fistp_d(lo); // and put into memory atomically
2567 __ addptr(rsp, 2*wordSize);
2568 // volatile_barrier();
2569 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
2570 Assembler::StoreStore));
2571 // Don't rewrite volatile version
2572 __ jmp(notVolatile); // barrier already emitted; also rdx no longer holds the volatile bit
2573
2574 __ bind(notVolatileLong);
2575
2576 __ pop(ltos); // overwrites rdx
2577 if (!is_static) pop_and_check_object(obj);
2578 NOT_LP64(__ movptr(hi, rdx));
2579 __ movptr(lo, rax);
2580 if (!is_static && rc == MAY_REWRITE) {
2581 patch_bytecode(Bytecodes::_fast_lputfield, rcx, rbx, true, byte_no);
2582 }
2583 __ jmp(notVolatile); // bypass Done: rdx (volatile bit) was clobbered by pop(ltos)
2584 }
2585
2586 __ bind(notLong);
2587 __ cmpl(flags, ftos);
2588 __ jcc(Assembler::notEqual, notFloat);
2589
2590 // ftos
2591 {
2592 __ pop(ftos);
2593 if (!is_static) pop_and_check_object(obj);
2594 __ fstp_s(lo);
2595 if (!is_static && rc == MAY_REWRITE) {
2596 patch_bytecode(Bytecodes::_fast_fputfield, rcx, rbx, true, byte_no);
2597 }
2598 __ jmp(Done);
2599 }
2600
2601 __ bind(notFloat);
2602 #ifdef ASSERT
2603 __ cmpl(flags, dtos);
2604 __ jcc(Assembler::notEqual, notDouble);
2605 #endif
2606
2607 // dtos (last remaining state; checked only under ASSERT)
2608 {
2609 __ pop(dtos);
2610 if (!is_static) pop_and_check_object(obj);
2611 __ fstp_d(lo);
2612 if (!is_static && rc == MAY_REWRITE) {
2613 patch_bytecode(Bytecodes::_fast_dputfield, rcx, rbx, true, byte_no);
2614 }
2615 __ jmp(Done);
2616 }
2617
2618 #ifdef ASSERT
2619 __ bind(notDouble);
2620 __ stop("Bad state");
2621 #endif
2622
2623 __ bind(Done);
2624
2625 // Check for volatile store
2626 __ testl(rdx, rdx);
2627 __ jcc(Assembler::zero, notVolatile);
2628 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
2629 Assembler::StoreStore));
2630 __ bind(notVolatile);
2631 }
2632
2633
// putfield: store to an instance field; bytecode rewriting to a _fast_*
// form is permitted. MAY_REWRITE is the declared default — spelled out
// here for symmetry with nofast_putfield below.
2634 void TemplateTable::putfield(int byte_no) {
2635 putfield_or_static(byte_no, false, MAY_REWRITE);
2636 }
2637
// Non-rewriting variant of putfield: same store, but the bytecode is
// never patched to a _fast_* form (MAY_NOT_REWRITE).
2638 void TemplateTable::nofast_putfield(int byte_no) {
2639 putfield_or_static(byte_no, false, MAY_NOT_REWRITE);
2640 }
2641
// putstatic: store to a static field. The rewrite control is irrelevant on
// the static path (patching is guarded by !is_static inside
// putfield_or_static), so passing the MAY_REWRITE default explicitly is
// behavior-identical.
2642 void TemplateTable::putstatic(int byte_no) {
2643 putfield_or_static(byte_no, true, MAY_REWRITE);
2644 }
2645
// Post a JVMTI field-modification event for the fast putfield paths when a
// watch is set; a single load-and-test of the global modification count
// skips everything in the common (no-watch) case.
2646 void TemplateTable::jvmti_post_fast_field_mod() {
2647 if (JvmtiExport::can_post_field_modification()) {
2648 // Check to see if a field modification watch has been set before we take
2649 // the time to call into the VM.
2650 Label L2;
2651 __ mov32(rcx, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2652 __ testl(rcx,rcx);
2653 __ jcc(Assembler::zero, L2);
2654 __ pop_ptr(rbx); // copy the object pointer from tos
2655 __ verify_oop(rbx);
2656 __ push_ptr(rbx); // put the object pointer back on tos
2657
2658 // Save tos values before call_VM() clobbers them. Since we have
2659 // to do it for every data type, we use the saved values as the
2660 // jvalue object.
// NOTE(review): source lines 2661-2871 are elided in this listing (the
// per-type save/call/restore sequence and the L2 bind) — consult the full
// file before editing this function.
2872 }
2873
2874
2875
2876 //----------------------------------------------------------------------------------------------------
2877 // Calls
2878
// Invocation counting is not generated here on this platform; this entry
// point must never be reached at code-generation time.
2879 void TemplateTable::count_calls(Register method, Register temp) {
2880 // implemented elsewhere
2881 ShouldNotReachHere();
2882 }
2883
2884
2885 void TemplateTable::prepare_invoke(int byte_no,
2886 Register method, // linked method (or i-klass)
2887 Register index, // itable index, MethodType, etc.
2888 Register recv, // if caller wants to see it
2889 Register flags // if caller wants to test it
2890 ) {
2891 // determine flags
2892 const Bytecodes::Code code = bytecode() == Bytecodes::_nofast_invokevirtual ? Bytecodes::_invokevirtual : bytecode();
2893 const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
2894 const bool is_invokedynamic = code == Bytecodes::_invokedynamic;
2895 const bool is_invokehandle = code == Bytecodes::_invokehandle;
2896 const bool is_invokevirtual = code == Bytecodes::_invokevirtual;
2897 const bool is_invokespecial = code == Bytecodes::_invokespecial;
2898 const bool load_receiver = (recv != noreg);
2899 const bool save_flags = (flags != noreg);
2900 assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
2901 assert(save_flags == (is_invokeinterface || is_invokevirtual), "need flags for vfinal");
2902 assert(flags == noreg || flags == rdx, "");
2903 assert(recv == noreg || recv == rcx, "");
2904
2905 // setup registers & access constant pool cache
2906 if (recv == noreg) recv = rcx;
2907 if (flags == noreg) flags = rdx;
2908 assert_different_registers(method, index, recv, flags);
2909
2910 // save 'interpreter return address'
2911 __ save_bcp();
2912
2995 __ profile_final_call(rax);
2996 __ profile_arguments_type(rax, method, rsi, true);
2997
2998 __ jump_from_interpreted(method, rax);
2999
3000 __ bind(notFinal);
3001
3002 // get receiver klass
3003 __ null_check(recv, oopDesc::klass_offset_in_bytes());
3004 __ load_klass(rax, recv);
3005
3006 // profile this call
3007 __ profile_virtual_call(rax, rdi, rdx);
3008
3009 // get target Method* & entry point
3010 __ lookup_virtual_method(rax, index, method);
3011 __ profile_arguments_type(rdx, method, rsi, true);
3012 __ jump_from_interpreted(method, rdx);
3013 }
3014
// invokevirtual: standard (rewriting-allowed) entry. MAY_REWRITE is the
// declared default — spelled out for symmetry with nofast_invokevirtual.
3015 void TemplateTable::invokevirtual(int byte_no){
3016 invokevirtual_internal(byte_no, MAY_REWRITE);
3017 }
3018
// Non-rewriting variant of invokevirtual (MAY_NOT_REWRITE).
3019 void TemplateTable::nofast_invokevirtual(int byte_no){
3020 invokevirtual_internal(byte_no, MAY_NOT_REWRITE);
3021 }
3022
// Shared body of invokevirtual / nofast_invokevirtual: resolve the call
// site via prepare_invoke, then dispatch through invokevirtual_helper.
// NOTE(review): rc is not consulted in this body — prepare_invoke appears
// to distinguish the nofast case via bytecode() (it maps
// _nofast_invokevirtual back to _invokevirtual); confirm rc is intended
// to be unused here.
3023 void TemplateTable::invokevirtual_internal(int byte_no, RewriteControl rc) {
3024 transition(vtos, vtos);
3025 assert(byte_no == f2_byte, "use this argument");
3026 prepare_invoke(byte_no,
3027 rbx, // method or vtable index
3028 noreg, // unused itable index
3029 rcx, rdx); // recv, flags
3030
3031 // rbx: index
3032 // rcx: receiver
3033 // rdx: flags
3034
3035 invokevirtual_helper(rbx, rcx, rdx);
3036 }
3037
3038
3039 void TemplateTable::invokespecial(int byte_no) {
3040 transition(vtos, vtos);
3041 assert(byte_no == f1_byte, "use this argument");
3042 prepare_invoke(byte_no, rbx, noreg, // get f1 Method*
3043 rcx); // get receiver also for null check
|