hotspot/src/cpu/sparc/vm/interp_masm_sparc.cpp

Print this page
rev 611 : Merge
   1 #ifdef USE_PRAGMA_IDENT_SRC
   2 #pragma ident "@(#)interp_masm_sparc.cpp        1.199 07/08/29 13:42:17 JVM"
   3 #endif
   4 /*
   5  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
   6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   7  *
   8  * This code is free software; you can redistribute it and/or modify it
   9  * under the terms of the GNU General Public License version 2 only, as
  10  * published by the Free Software Foundation.
  11  *
  12  * This code is distributed in the hope that it will be useful, but WITHOUT
  13  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  14  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  15  * version 2 for more details (a copy is included in the LICENSE file that
  16  * accompanied this code).
  17  *
  18  * You should have received a copy of the GNU General Public License version
  19  * 2 along with this work; if not, write to the Free Software Foundation,
  20  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  21  *
  22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
  23  * CA 95054 USA or visit www.sun.com if you need additional information or
  24  * have any questions.
  25  *  


 845   add(LcpoolCache, tmp, cache);
 846 }
 847 
 848 
 849 void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset) {
     // Leaves in 'cache' a pointer to the ConstantPoolCacheEntry selected by
     // the 2-byte unsigned index embedded in the bytecode stream at
     // bcp + bcp_offset.  'tmp' is clobbered as scratch.
 850   assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
 851   assert_different_registers(cache, tmp);
 852   assert_not_delayed();
 853   get_2_byte_integer_at_bcp(bcp_offset, cache, tmp, Unsigned);  // raw field index -> tmp ('cache' presumably scratch for the helper -- confirm signature)
 854               // convert from field index to ConstantPoolCacheEntry index
 855               // and from word index to byte offset
 856   sll(tmp, exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord), tmp);
 857               // skip past the header
 858   add(tmp, in_bytes(constantPoolCacheOopDesc::base_offset()), tmp);
 859               // construct pointer to cache entry
 860   add(LcpoolCache, tmp, cache);
 861 }
 862 
 863 
 864 // Generate a subtype check: branch to ok_is_subtype if sub_klass is
 865 // a subtype of super_klass.  Blows registers Rsub_klass, tmp1, tmp2.
     // Falls through (to not_subtype) on failure.  Heavy use of SPARC delay
     // slots: every delayed()->... instruction is issued with the preceding
     // branch and executes whether or not that branch is taken.
 866 void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
 867                                                   Register Rsuper_klass,
 868                                                   Register Rtmp1,
 869                                                   Register Rtmp2,
 870                                                   Register Rtmp3,
 871                                                   Label &ok_is_subtype ) {
 872   Label not_subtype, loop;
 873 
 874   // Profile the not-null value's klass.
 875   profile_typecheck(Rsub_klass, Rtmp1);
 876 
 877   // Load the super-klass's check offset into Rtmp1
 878   ld( Rsuper_klass, sizeof(oopDesc) + Klass::super_check_offset_offset_in_bytes(), Rtmp1 );
 879   // Load from the sub-klass's super-class display list, or a 1-word cache of
 880   // the secondary superclass list, or a failing value with a sentinel offset
 881   // if the super-klass is an interface or exceptionally deep in the Java
 882   // hierarchy and we have to scan the secondary superclass list the hard way.
 883   ld_ptr( Rsub_klass, Rtmp1, Rtmp2 );
 884   // See if we get an immediate positive hit
 885   cmp( Rtmp2, Rsuper_klass );
 886   brx( Assembler::equal, false, Assembler::pt, ok_is_subtype );
 887   // In the delay slot, check for immediate negative hit
 888   delayed()->cmp( Rtmp1, sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes() );
 889   br( Assembler::notEqual, false, Assembler::pt, not_subtype );
 890   // In the delay slot, check for self
 891   delayed()->cmp( Rsub_klass, Rsuper_klass );
 892   brx( Assembler::equal, false, Assembler::pt, ok_is_subtype );
 893 
 894   // Now do a linear scan of the secondary super-klass chain.
 895   delayed()->ld_ptr( Rsub_klass, sizeof(oopDesc) + Klass::secondary_supers_offset_in_bytes(), Rtmp2 );
 896 



 897   // Rtmp2 holds the objArrayOop of secondary supers.
 898   ld( Rtmp2, arrayOopDesc::length_offset_in_bytes(), Rtmp1 );// Load the array length
 899   // Check for empty secondary super list
 900   tst(Rtmp1);  // sets icc: zero length means empty list, exit loop immediately
 901 
 902   // Top of search loop
 903   bind( loop );
     // icc here reflects Rtmp1 (remaining count): set by the tst() above on
     // entry, then by the delayed deccc() on each back-branch below.
 904   br( Assembler::equal, false, Assembler::pn, not_subtype );
 905   delayed()->nop();
 906   // load next super to check
 907   ld_ptr( Rtmp2, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Rtmp3 );
 908 



 909   // Bump array pointer forward one oop
 910   add( Rtmp2, wordSize, Rtmp2 );




 911   // Look for Rsuper_klass on Rsub_klass's secondary super-class-overflow list
 912   cmp( Rtmp3, Rsuper_klass );
 913   // A miss means we are NOT a subtype and need to keep looping
 914   brx( Assembler::notEqual, false, Assembler::pt, loop );
 915   delayed()->deccc( Rtmp1 );    // dec trip counter in delay slot
 916   // Falling out the bottom means we found a hit; we ARE a subtype

 917   br( Assembler::always, false, Assembler::pt, ok_is_subtype );
 918   // Update the cache
 919   delayed()->st_ptr( Rsuper_klass, Rsub_klass, sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes() );

 920 
 921   bind(not_subtype);
 922   profile_typecheck_failed(Rtmp1);
 923 }
 924 
 925 // Separate these two to allow for delay slot in middle
 926 // These are used to do a test and full jump to exception-throwing code.
 927 
 928 // %%%%% Could possibly reoptimize this by testing to see if could use
 929 // a single conditional branch (i.e. if span is small enough).
 930 // If you go that route, then get rid of the split and give up
 931 // on the delay-slot hack. 
 932 
 933 void InterpreterMacroAssembler::throw_if_not_1_icc( Condition ok_condition, 
 934                                                     Label&    ok ) {
     // First half of a split test-and-throw: emits only the annulled (a=1)
     // conditional branch on the integer condition codes.  The caller supplies
     // the delay-slot instruction and the throwing half (not in this chunk).
 935   assert_not_delayed();
 936   br(ok_condition, true, pt, ok);
 937   // DELAY SLOT
 938 }
 939 


2440 
2441   // Perform a more elaborate out-of-line call
2442   // Not an address; verify it:
2443   bind(test);
2444   verify_oop(reg);
2445   bind(skip);
2446 }
2447 
2448 
2449 void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) {
     // Only float/double tos states carry a result in the FPU; skip otherwise.
2450   if (state == ftos || state == dtos) MacroAssembler::verify_FPU(stack_depth);
2451 }
2452 #endif /* CC_INTERP */
2453 
2454 // Inline assembly for:
2455 //
2456 // if (thread is in interp_only_mode) {
2457 //   InterpreterRuntime::post_method_entry();
2458 // }
2459 // if (DTraceMethodProbes) {
2460 //   SharedRuntime::dtrace_method_entry(method, receiver);



2461 // }
2462 
2463 void InterpreterMacroAssembler::notify_method_entry() {
2464 
2465   // C++ interpreter only uses this for native methods.
2466 
2467   // Whenever JVMTI puts a thread in interp_only_mode, method
2468   // entry/exit events are sent for that thread to track stack
2469   // depth.  If it is possible to enter interp_only_mode we add
2470   // the code to check if the event should be sent.
2471   if (JvmtiExport::can_post_interpreter_events()) {
2472     Label L;
2473     Register temp_reg = O5;
2474 
2475     const Address interp_only       (G2_thread, 0, in_bytes(JavaThread::interp_only_mode_offset()));
2476 
2477     ld(interp_only, temp_reg);
2478     tst(temp_reg);
2479     br(zero, false, pt, L);  // interp_only_mode == 0: skip the runtime call
2480     delayed()->nop();
2481     call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_entry));
2482     bind(L);
2483   }
2484 
2485   {
     // DTrace method-entry probe, guarded at runtime on the
     // DTraceMethodProbes flag via SkipIfEqual (RAII-style label scope).
2486     Register temp_reg = O5;
2487     SkipIfEqual skip_if(this, temp_reg, &DTraceMethodProbes, zero);
2488     call_VM_leaf(noreg, 
2489       CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), 
2490       G2_thread, Lmethod);
2491   }







2492 }
2493 
2494 
2495 // Inline assembly for:
2496 //
2497 // if (thread is in interp_only_mode) {
2498 //   // save result
2499 //   InterpreterRuntime::post_method_exit();
2500 //   // restore result
2501 // }
2502 // if (DTraceMethodProbes) {
2503 //   SharedRuntime::dtrace_method_exit(thread, method);
2504 // }
2505 // 
2506 // Native methods have their result stored in d_tmp and l_tmp
2507 // Java methods have their result stored in the expression stack
2508 
2509 void InterpreterMacroAssembler::notify_method_exit(bool is_native_method,
2510                                                    TosState state,
2511                                                    NotifyMethodExitMode mode) {


   1 #ifdef USE_PRAGMA_IDENT_SRC
   2 #pragma ident "@(#)interp_masm_sparc.cpp        1.199 07/08/29 13:42:17 JVM"
   3 #endif
   4 /*
   5  * Copyright 1997-2008 Sun Microsystems, Inc.  All Rights Reserved.
   6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   7  *
   8  * This code is free software; you can redistribute it and/or modify it
   9  * under the terms of the GNU General Public License version 2 only, as
  10  * published by the Free Software Foundation.
  11  *
  12  * This code is distributed in the hope that it will be useful, but WITHOUT
  13  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  14  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  15  * version 2 for more details (a copy is included in the LICENSE file that
  16  * accompanied this code).
  17  *
  18  * You should have received a copy of the GNU General Public License version
  19  * 2 along with this work; if not, write to the Free Software Foundation,
  20  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  21  *
  22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
  23  * CA 95054 USA or visit www.sun.com if you need additional information or
  24  * have any questions.
  25  *  


 845   add(LcpoolCache, tmp, cache);
 846 }
 847 
 848 
 849 void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset) {
     // Computes the address of the ConstantPoolCacheEntry named by the 2-byte
     // unsigned index at bcp + bcp_offset and leaves it in 'cache'.
     // 'tmp' is clobbered.
 850   assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
 851   assert_different_registers(cache, tmp);
 852   assert_not_delayed();
 853   get_2_byte_integer_at_bcp(bcp_offset, cache, tmp, Unsigned);  // index -> tmp ('cache' presumably scratch for the helper -- confirm signature)
 854               // convert from field index to ConstantPoolCacheEntry index
 855               // and from word index to byte offset
 856   sll(tmp, exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord), tmp);
 857               // skip past the header
 858   add(tmp, in_bytes(constantPoolCacheOopDesc::base_offset()), tmp);
 859               // construct pointer to cache entry
 860   add(LcpoolCache, tmp, cache);
 861 }
 862 
 863 
 864 // Generate a subtype check: branch to ok_is_subtype if sub_klass is
 865 // a subtype of super_klass.  Blows registers Rsuper_klass, Rsub_klass, tmp1, tmp2.
     // Falls through (to not_subtype) on failure.  Heavy use of SPARC delay
     // slots: every delayed()->... instruction is issued with the preceding
     // branch and executes whether or not that branch is taken.
 866 void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
 867                                                   Register Rsuper_klass,
 868                                                   Register Rtmp1,
 869                                                   Register Rtmp2,
 870                                                   Register Rtmp3,
 871                                                   Label &ok_is_subtype ) {
 872   Label not_subtype, loop;
 873 
 874   // Profile the not-null value's klass.
 875   profile_typecheck(Rsub_klass, Rtmp1);
 876 
 877   // Load the super-klass's check offset into Rtmp1
 878   ld( Rsuper_klass, sizeof(oopDesc) + Klass::super_check_offset_offset_in_bytes(), Rtmp1 );
 879   // Load from the sub-klass's super-class display list, or a 1-word cache of
 880   // the secondary superclass list, or a failing value with a sentinel offset
 881   // if the super-klass is an interface or exceptionally deep in the Java
 882   // hierarchy and we have to scan the secondary superclass list the hard way.
 883   ld_ptr( Rsub_klass, Rtmp1, Rtmp2 );
 884   // See if we get an immediate positive hit
 885   cmp( Rtmp2, Rsuper_klass );
 886   brx( Assembler::equal, false, Assembler::pt, ok_is_subtype );
 887   // In the delay slot, check for immediate negative hit
 888   delayed()->cmp( Rtmp1, sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes() );
 889   br( Assembler::notEqual, false, Assembler::pt, not_subtype );
 890   // In the delay slot, check for self
 891   delayed()->cmp( Rsub_klass, Rsuper_klass );
 892   brx( Assembler::equal, false, Assembler::pt, ok_is_subtype );
 893 
 894   // Now do a linear scan of the secondary super-klass chain.
 895   delayed()->ld_ptr( Rsub_klass, sizeof(oopDesc) + Klass::secondary_supers_offset_in_bytes(), Rtmp2 );
 896 
 897   // compress superclass
     // Narrow Rsuper_klass once up front so the loop can compare it directly
     // against the 32-bit (lduw) array elements.  Note: only the hit path
     // decodes it again below, so on the not_subtype path Rsuper_klass stays
     // encoded -- which is why the header comment says it is blown.
 898   if (UseCompressedOops) encode_heap_oop(Rsuper_klass);
 899 
 900   // Rtmp2 holds the objArrayOop of secondary supers.
 901   ld( Rtmp2, arrayOopDesc::length_offset_in_bytes(), Rtmp1 );// Load the array length
 902   // Check for empty secondary super list
 903   tst(Rtmp1);  // sets icc: zero length means empty list, exit loop immediately
 904 
 905   // Top of search loop
 906   bind( loop );
     // icc here reflects Rtmp1 (remaining count): the tst() above on entry,
     // then the delayed deccc() on each back-branch below.
 907   br( Assembler::equal, false, Assembler::pn, not_subtype );
 908   delayed()->nop();


 909 
 910   // load next super to check
 911   if (UseCompressedOops) {
 912     lduw( Rtmp2, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Rtmp3);
 913     // Bump array pointer forward one oop
 914     add( Rtmp2, 4, Rtmp2 );
 915   } else {
 916     ld_ptr( Rtmp2, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Rtmp3);
 917     // Bump array pointer forward one oop
 918     add( Rtmp2, wordSize, Rtmp2);
 919   }
 920   // Look for Rsuper_klass on Rsub_klass's secondary super-class-overflow list
 921   cmp( Rtmp3, Rsuper_klass );
 922   // A miss means we are NOT a subtype and need to keep looping
 923   brx( Assembler::notEqual, false, Assembler::pt, loop );
 924   delayed()->deccc( Rtmp1 );    // dec trip counter in delay slot
 925   // Falling out the bottom means we found a hit; we ARE a subtype
 926   if (UseCompressedOops) decode_heap_oop(Rsuper_klass);  // restore full oop before caching it below
 927   br( Assembler::always, false, Assembler::pt, ok_is_subtype );
 928   // Update the cache
 929   delayed()->st_ptr( Rsuper_klass, Rsub_klass,
 930                      sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes() );
 931 
 932   bind(not_subtype);
 933   profile_typecheck_failed(Rtmp1);
 934 }
 935 
 936 // Separate these two to allow for delay slot in middle
 937 // These are used to do a test and full jump to exception-throwing code.
 938 
 939 // %%%%% Could possibly reoptimize this by testing to see if could use
 940 // a single conditional branch (i.e. if span is small enough).
 941 // If you go that route, then get rid of the split and give up
 942 // on the delay-slot hack. 
 943 
 944 void InterpreterMacroAssembler::throw_if_not_1_icc( Condition ok_condition, 
 945                                                     Label&    ok ) {
     // First half of a split test-and-throw: emits only the annulled (a=1)
     // conditional branch on the integer condition codes.  The caller supplies
     // the delay-slot instruction and the throwing half (not in this chunk).
 946   assert_not_delayed();
 947   br(ok_condition, true, pt, ok);
 948   // DELAY SLOT
 949 }
 950 


2451 
2452   // Perform a more elaborate out-of-line call
2453   // Not an address; verify it:
2454   bind(test);
2455   verify_oop(reg);
2456   bind(skip);
2457 }
2458 
2459 
2460 void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) {
     // Only float/double tos states carry a result in the FPU; skip otherwise.
2461   if (state == ftos || state == dtos) MacroAssembler::verify_FPU(stack_depth);
2462 }
2463 #endif /* CC_INTERP */
2464 
2465 // Inline assembly for:
2466 //
2467 // if (thread is in interp_only_mode) {
2468 //   InterpreterRuntime::post_method_entry();
2469 // }
2470 // if (DTraceMethodProbes) {
2471 //   SharedRuntime::dtrace_method_entry(method, receiver);
2472 // }
2473 // if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
2474 //   SharedRuntime::rc_trace_method_entry(method, receiver);
2475 // }
2476 
2477 void InterpreterMacroAssembler::notify_method_entry() {
2478 
2479   // C++ interpreter only uses this for native methods.
2480 
2481   // Whenever JVMTI puts a thread in interp_only_mode, method
2482   // entry/exit events are sent for that thread to track stack
2483   // depth.  If it is possible to enter interp_only_mode we add
2484   // the code to check if the event should be sent.
2485   if (JvmtiExport::can_post_interpreter_events()) {
2486     Label L;
2487     Register temp_reg = O5;
2488 
2489     const Address interp_only       (G2_thread, 0, in_bytes(JavaThread::interp_only_mode_offset()));
2490 
2491     ld(interp_only, temp_reg);
2492     tst(temp_reg);
2493     br(zero, false, pt, L);  // interp_only_mode == 0: skip the runtime call
2494     delayed()->nop();
2495     call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_entry));
2496     bind(L);
2497   }
2498 
2499   {
     // DTrace method-entry probe, guarded at runtime on the
     // DTraceMethodProbes flag via SkipIfEqual (RAII-style label scope).
2500     Register temp_reg = O5;
2501     SkipIfEqual skip_if(this, temp_reg, &DTraceMethodProbes, zero);
2502     call_VM_leaf(noreg, 
2503       CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), 
2504       G2_thread, Lmethod);
2505   }
2506 
2507   // RedefineClasses() tracing support for obsolete method entry
     // RC_TRACE_IN_RANGE is evaluated at code-generation time (a C++ if, not
     // emitted assembly), so the call is only generated when tracing is on.
2508   if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
2509     call_VM_leaf(noreg,
2510       CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
2511       G2_thread, Lmethod);
2512   }
2513 }
2514 
2515 
2516 // Inline assembly for:
2517 //
2518 // if (thread is in interp_only_mode) {
2519 //   // save result
2520 //   InterpreterRuntime::post_method_exit();
2521 //   // restore result
2522 // }
2523 // if (DTraceMethodProbes) {
2524 //   SharedRuntime::dtrace_method_exit(thread, method);
2525 // }
2526 // 
2527 // Native methods have their result stored in d_tmp and l_tmp
2528 // Java methods have their result stored in the expression stack
2529 
2530 void InterpreterMacroAssembler::notify_method_exit(bool is_native_method,
2531                                                    TosState state,
2532                                                    NotifyMethodExitMode mode) {