
src/hotspot/cpu/sparc/sharedRuntime_sparc.cpp

rev 47591 : Add Thread Local handshakes and thread local polling


2342   case T_OBJECT:                // Really a handle
2343   case T_ARRAY:
2344   case T_INT:
2345                   __ mov(O0, I0);
2346                   break;
2347   case T_BOOLEAN: __ subcc(G0, O0, G0); __ addc(G0, 0, I0); break; // !0 => true; 0 => false
2348   case T_BYTE   : __ sll(O0, 24, O0); __ sra(O0, 24, I0);   break;
2349   case T_CHAR   : __ sll(O0, 16, O0); __ srl(O0, 16, I0);   break; // cannot use and3, 0xFFFF too big as immediate value!
2350   case T_SHORT  : __ sll(O0, 16, O0); __ sra(O0, 16, I0);   break;
2351     break;                      // Cannot de-handlize until after reclaiming jvm_lock
2352   default:
2353     ShouldNotReachHere();
2354   }
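
Editor's note: for readers less familiar with the SPARC idioms in this switch, here is a minimal C++ sketch of the normalization it performs on the native return value. The helper name is invented for illustration and is not part of the patch. The T_BOOLEAN idiom works because subcc(G0, O0, G0) computes 0 - O0 and sets the carry (borrow) flag exactly when O0 != 0, and addc(G0, 0, I0) then materializes that flag as 0 or 1.

    // Illustrative sketch only, not patch code.
    static jint normalize_native_result(BasicType t, jint v) {
      switch (t) {
        case T_BOOLEAN: return v != 0 ? 1 : 0;     // subcc/addc: canonicalize to 0/1
        case T_BYTE:    return (jint)(jbyte)v;     // sll 24 / sra 24: sign-extend
        case T_CHAR:    return (jint)(jchar)v;     // sll 16 / srl 16: zero-extend
        case T_SHORT:   return (jint)(jshort)v;    // sll 16 / sra 16: sign-extend
        default:        return v;                  // T_OBJECT/T_ARRAY/T_INT: pass through
      }
    }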
2355 
2356   Label after_transition;
2357   // must we block?
2358 
2359   // Block, if necessary, before resuming in _thread_in_Java state.
2360   // In order for GC to work, don't clear the last_Java_sp until after blocking.
2361   { Label no_block;
2362     AddressLiteral sync_state(SafepointSynchronize::address_of_state());
2363 
2364     // Switch thread to "native transition" state before reading the synchronization state.
2365     // This additional state is necessary because reading and testing the synchronization
2366     // state is not atomic w.r.t. GC, as this scenario demonstrates:
2367     //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
2368     //     VM thread changes sync state to synchronizing and suspends threads for GC.
2369     //     Thread A is resumed to finish this native method, but doesn't block here since it
2370     //     didn't see any synchronization in progress, and escapes.
2371     __ set(_thread_in_native_trans, G3_scratch);
2372     __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
2373     if(os::is_MP()) {
2374       if (UseMembar) {
2375         // Force this write out before the read below
2376         __ membar(Assembler::StoreLoad);
2377       } else {
2378         // Write serialization page so VM thread can do a pseudo remote membar.
2379         // We use the current thread pointer to calculate a thread specific
2380         // offset to write to within the page. This minimizes bus traffic
2381         // due to cache line collision.
2382         __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
2383       }
2384     }
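
Editor's note: the store/fence/load protocol being emitted here can be modeled as below. This is an illustrative sketch, not patch code; the variable names and the placeholder constants stand in for the thread's _thread_state field, the global safepoint state, and _not_synchronized. The serialization-page branch achieves the same ordering without a local membar by letting the VM thread force outstanding stores to complete via the shared serialization page.

    #include <atomic>

    static std::atomic<int> t_state{0};   // stand-in for the thread state field
    static std::atomic<int> sp_state{0};  // stand-in for the global safepoint state

    static bool transition_and_check() {
      t_state.store(1, std::memory_order_relaxed);   // 1 ~ _thread_in_native_trans (placeholder value)
      // The StoreLoad membar (or serialization-page write) emitted above plays
      // the role of this full fence: the state store must be visible before
      // the safepoint-state load below is performed.
      std::atomic_thread_fence(std::memory_order_seq_cst);
      return sp_state.load(std::memory_order_relaxed) != 0;  // 0 ~ _not_synchronized
    }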
2385     __ load_contents(sync_state, G3_scratch);
2386     __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
2387 
2388     Label L;
2389     Address suspend_state(G2_thread, JavaThread::suspend_flags_offset());
2390     __ br(Assembler::notEqual, false, Assembler::pn, L);
2391     __ delayed()->ld(suspend_state, G3_scratch);
2392     __ cmp_and_br_short(G3_scratch, 0, Assembler::equal, Assembler::pt, no_block);
2393     __ bind(L);
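
Editor's note: the two branches above implement the following predicate; the helper and its parameters are illustrative, not HotSpot API. Note the delay-slot trick: the load of suspend_flags is issued in the delay slot of the first branch, so the value is available on both paths.

    // Hedged paraphrase of the branch structure above (sketch only).
    static bool must_block_in_native_trans(int sync_state, int suspend_flags) {
      // sync_state stands in for the global SafepointSynchronize state, with
      // 0 standing in for _not_synchronized; suspend_flags is the value loaded
      // from the JavaThread in the delay slot.
      return sync_state != 0 || suspend_flags != 0;
    }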
2394 
2395     // Block.  Save any potential method result value before the operation and
2396     // use a leaf call to leave the last_Java_frame setup undisturbed. Doing this
2397     // lets us share the oopMap we used when we went native rather than create
2398     // a distinct one for this pc
2399     //
2400     save_native_result(masm, ret_type, stack_slots);
2401     if (!is_critical_native) {
2402       __ call_VM_leaf(L7_thread_cache,
2403                       CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
2404                       G2_thread);
2405     } else {
2406       __ call_VM_leaf(L7_thread_cache,
2407                       CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition),
2408                       G2_thread);
2409     }
2410 


3101   // allocate space for the code
3102   ResourceMark rm;
3103   // setup code generation tools
3104   // Measured 8/7/03 at 896 in 32bit debug build (no VerifyThread)
3105   // Measured 8/7/03 at 1080 in 32bit debug build (VerifyThread)
3106   CodeBuffer buffer("handler_blob", 1600, 512);
3107   MacroAssembler* masm                = new MacroAssembler(&buffer);
3108   int             frame_size_words;
3109   OopMapSet *oop_maps = new OopMapSet();
3110   OopMap* map = NULL;
3111 
3112   int start = __ offset();
3113 
3114   bool cause_return = (poll_type == POLL_AT_RETURN);
3115   // If this causes a return before the processing, then do a "restore"
3116   if (cause_return) {
3117     __ restore();
3118   } else {
3119     // Make it look like we were called via the poll
3120     // so that frame constructor always sees a valid return address
3121     __ ld_ptr(G2_thread, in_bytes(JavaThread::saved_exception_pc_offset()), O7);
3122     __ sub(O7, frame::pc_return_offset, O7);
3123   }
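
Editor's note: a hedged sketch of what the else-branch computes. The pc itself was recorded by the platform signal handler when the poll faulted; the helper below is illustrative, not patch code. The point is that the blob pretends it was called from the poll site, so the frame logic below sees a valid return address in O7.

    // Sketch only: what ends up in O7 above.
    static address pretended_return_address(address saved_exception_pc) {
      return saved_exception_pc - frame::pc_return_offset;
    }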
3124 
3125   map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
3126 
3127   // setup last_Java_sp (blows G4)
3128   __ set_last_Java_frame(SP, noreg);
3129 









3130   // call into the runtime to handle illegal instructions exception
3131   // Do not use call_VM_leaf, because we need to make a GC map at this call site.
3132   __ mov(G2_thread, O0);
3133   __ save_thread(L7_thread_cache);
3134   __ call(call_ptr);
3135   __ delayed()->nop();
3136 
3137   // Set an oopmap for the call site.
3138   // We need this not only for callee-saved registers, but also for volatile
3139   // registers that the compiler might be keeping live across a safepoint.
3140 
3141   oop_maps->add_gc_map( __ offset() - start, map);
3142 
3143   __ restore_thread(L7_thread_cache);
3144   // clear last_Java_sp
3145   __ reset_last_Java_frame();
3146 
3147   // Check for exceptions
3148   Label pending;
3149 
3150   __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O1);
3151   __ br_notnull_short(O1, Assembler::pn, pending);






3152 
3153   RegisterSaver::restore_live_registers(masm);
3154 
3155   // We are back to the original state on entry and ready to go.
3156 
3157   __ retl();
3158   __ delayed()->nop();
3159 
3160   // Pending exception after the safepoint
3161 
3162   __ bind(pending);
3163 
3164   RegisterSaver::restore_live_registers(masm);
3165 
3166   // We are back to the original state on entry.
3167 
3168   // Tail-call forward_exception_entry, with the issuing PC in O7,
3169   // so it looks like the original nmethod called forward_exception_entry.
3170   __ set((intptr_t)StubRoutines::forward_exception_entry(), O0);
3171   __ JMP(O0, 0);




2342   case T_OBJECT:                // Really a handle
2343   case T_ARRAY:
2344   case T_INT:
2345                   __ mov(O0, I0);
2346                   break;
2347   case T_BOOLEAN: __ subcc(G0, O0, G0); __ addc(G0, 0, I0); break; // !0 => true; 0 => false
2348   case T_BYTE   : __ sll(O0, 24, O0); __ sra(O0, 24, I0);   break;
2349   case T_CHAR   : __ sll(O0, 16, O0); __ srl(O0, 16, I0);   break; // cannot use and3, 0xFFFF too big as immediate value!
2350   case T_SHORT  : __ sll(O0, 16, O0); __ sra(O0, 16, I0);   break;
2351     break;                      // Cannot de-handlize until after reclaiming jvm_lock
2352   default:
2353     ShouldNotReachHere();
2354   }
2355 
2356   Label after_transition;
2357   // must we block?
2358 
2359   // Block, if necessary, before resuming in _thread_in_Java state.
2360   // In order for GC to work, don't clear the last_Java_sp until after blocking.
2361   { Label no_block;

2362 
2363     // Switch thread to "native transition" state before reading the synchronization state.
2364     // This additional state is necessary because reading and testing the synchronization
2365     // state is not atomic w.r.t. GC, as this scenario demonstrates:
2366     //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
2367     //     VM thread changes sync state to synchronizing and suspends threads for GC.
2368     //     Thread A is resumed to finish this native method, but doesn't block here since it
2369     //     didn't see any synchronization in progress, and escapes.
2370     __ set(_thread_in_native_trans, G3_scratch);
2371     __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
2372     if(os::is_MP()) {
2373       if (UseMembar) {
2374         // Force this write out before the read below
2375         __ membar(Assembler::StoreLoad);
2376       } else {
2377         // Write serialization page so VM thread can do a pseudo remote membar.
2378         // We use the current thread pointer to calculate a thread specific
2379         // offset to write to within the page. This minimizes bus traffic
2380         // due to cache line collision.
2381         __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
2382       }
2383     }


2384 
2385     Label L;
2386     Address suspend_state(G2_thread, JavaThread::suspend_flags_offset());
2387     __ safepoint_poll(L, false, G2_thread, G3_scratch);
2388     __ delayed()->ld(suspend_state, G3_scratch);
2389     __ cmp_and_br_short(G3_scratch, 0, Assembler::equal, Assembler::pt, no_block);
2390     __ bind(L);
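
Editor's note: in the new version, safepoint_poll() replaces the explicit load and compare of the global SafepointSynchronize state. A hedged sketch of what a thread-local poll amounts to is below; the helper name and the bit layout are illustrative and not lifted from the patch. The key change is that the decision to take the slow path (label L) is now per thread, which is what allows the VM to stop or handshake a single thread. The delay-slot load of suspend_flags is unchanged.

    // Sketch only: the poll tests a per-thread word rather than global state.
    static bool local_poll_armed(intptr_t per_thread_poll_word) {
      const intptr_t poll_bit = 1;   // illustrative bit position
      return (per_thread_poll_word & poll_bit) != 0;
    }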
2391 
2392     // Block.  Save any potential method result value before the operation and
2393     // use a leaf call to leave the last_Java_frame setup undisturbed. Doing this
2394     // lets us share the oopMap we used when we went native rather than create
2395     // a distinct one for this pc
2396     //
2397     save_native_result(masm, ret_type, stack_slots);
2398     if (!is_critical_native) {
2399       __ call_VM_leaf(L7_thread_cache,
2400                       CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
2401                       G2_thread);
2402     } else {
2403       __ call_VM_leaf(L7_thread_cache,
2404                       CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition),
2405                       G2_thread);
2406     }
2407 


3098   // allocate space for the code
3099   ResourceMark rm;
3100   // setup code generation tools
3101   // Measured 8/7/03 at 896 in 32bit debug build (no VerifyThread)
3102   // Measured 8/7/03 at 1080 in 32bit debug build (VerifyThread)
3103   CodeBuffer buffer("handler_blob", 1600, 512);
3104   MacroAssembler* masm                = new MacroAssembler(&buffer);
3105   int             frame_size_words;
3106   OopMapSet *oop_maps = new OopMapSet();
3107   OopMap* map = NULL;
3108 
3109   int start = __ offset();
3110 
3111   bool cause_return = (poll_type == POLL_AT_RETURN);
3112   // If this causes a return before the processing, then do a "restore"
3113   if (cause_return) {
3114     __ restore();
3115   } else {
3116     // Make it look like we were called via the poll
3117     // so that frame constructor always sees a valid return address
3118     __ ld_ptr(Address(G2_thread, JavaThread::saved_exception_pc_offset()), O7);
3119     __ sub(O7, frame::pc_return_offset, O7);
3120   }
3121 
3122   map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
3123 
3124   // setup last_Java_sp (blows G4)
3125   __ set_last_Java_frame(SP, noreg);
3126 
3127   Register saved_O7 = O7->after_save();
3128   if (!cause_return && SafepointMechanism::uses_thread_local_poll()) {
3129     // Keep a copy of the return pc in L0 to detect if it gets modified
3130     __ mov(saved_O7, L0);
3131     // Adjust and keep a copy of our npc saved by the signal handler
3132     __ ld_ptr(Address(G2_thread, JavaThread::saved_exception_npc_offset()), L1);
3133     __ sub(L1, frame::pc_return_offset, L1);
3134   }
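
Editor's note: a hedged sketch of where these two saved values come from. The real code lives in the platform signal handler, not in this file, and the struct and field names below are paraphrased stand-ins for the JavaThread fields. A SPARC trap delivers both a pc and an npc because of delay slots, and the handler records both before dispatching to this blob; the blob keeps the incoming return pc in L0 and the adjusted npc in L1 so it can later tell whether the runtime call rewrote the return pc (see the movcc further down).

    // Illustrative model of the signal-handler bookkeeping (not patch code).
    struct SavedPollTrapState {
      address exception_pc;    // pc of the faulting poll load (read into O7 above)
      address exception_npc;   // SPARC "next pc": where execution would have resumed
    };

    static void record_poll_trap(SavedPollTrapState* s, address pc, address npc) {
      s->exception_pc  = pc;
      s->exception_npc = npc;
    }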
3135 
3136   // call into the runtime to handle illegal instructions exception
3137   // Do not use call_VM_leaf, because we need to make a GC map at this call site.
3138   __ mov(G2_thread, O0);
3139   __ save_thread(L7_thread_cache);
3140   __ call(call_ptr);
3141   __ delayed()->nop();
3142 
3143   // Set an oopmap for the call site.
3144   // We need this not only for callee-saved registers, but also for volatile
3145   // registers that the compiler might be keeping live across a safepoint.
3146 
3147   oop_maps->add_gc_map( __ offset() - start, map);
3148 
3149   __ restore_thread(L7_thread_cache);
3150   // clear last_Java_sp
3151   __ reset_last_Java_frame();
3152 
3153   // Check for exceptions
3154   Label pending;
3155 
3156   __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O1);
3157   __ br_notnull_short(O1, Assembler::pn, pending);
3158 
3159   if (!cause_return && SafepointMechanism::uses_thread_local_poll()) {
3160     // If nobody modified our return pc then we must return to the npc we saved in L1
3161     __ cmp(saved_O7, L0);
3162     __ movcc(Assembler::equal, false, Assembler::ptr_cc, L1, saved_O7);
3163   }
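
Editor's note: a hedged C++ paraphrase of the cmp/movcc pair above (illustrative helper, not patch code): if the runtime call left the stashed return pc untouched, resume at the npc saved from the signal handler; otherwise honour the new return pc that was installed, for example by deoptimization.

    // Sketch only: which pc the blob returns to.
    static address resume_pc(address ret_pc_after_call,   // saved_O7 as it is now
                             address stashed_ret_pc,      // copy kept in L0
                             address saved_npc) {         // adjusted npc kept in L1
      return (ret_pc_after_call == stashed_ret_pc) ? saved_npc : ret_pc_after_call;
    }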
3164 
3165   RegisterSaver::restore_live_registers(masm);
3166 
3167   // We are back to the original state on entry and ready to go.
3168 
3169   __ retl();
3170   __ delayed()->nop();
3171 
3172   // Pending exception after the safepoint
3173 
3174   __ bind(pending);
3175 
3176   RegisterSaver::restore_live_registers(masm);
3177 
3178   // We are back to the original state on entry.
3179 
3180   // Tail-call forward_exception_entry, with the issuing PC in O7,
3181   // so it looks like the original nmethod called forward_exception_entry.
3182   __ set((intptr_t)StubRoutines::forward_exception_entry(), O0);
3183   __ JMP(O0, 0);

