2346 case T_OBJECT: // Really a handle
2347 case T_ARRAY:
2348 case T_INT:
2349 __ mov(O0, I0);
2350 break;
2351 case T_BOOLEAN: __ subcc(G0, O0, G0); __ addc(G0, 0, I0); break; // !0 => true; 0 => false
2352 case T_BYTE : __ sll(O0, 24, O0); __ sra(O0, 24, I0); break;
2353 case T_CHAR : __ sll(O0, 16, O0); __ srl(O0, 16, I0); break; // cannot use and3, 0xFFFF too big as immediate value!
2354 case T_SHORT : __ sll(O0, 16, O0); __ sra(O0, 16, I0); break;
2355 // Cannot de-handlize until after reclaiming jvm_lock
2356 default:
2357 ShouldNotReachHere();
2358 }
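     // Illustrative sketch (C++ shorthand, not generated code): from the raw
     // 32-bit value x returned in O0, the cases above compute
     //   T_BOOLEAN: (x != 0) ? 1 : 0            // subcc sets carry iff x != 0
     //   T_BYTE:    (int32_t)(x << 24) >> 24    // sign-extend the low 8 bits
     //   T_CHAR:    (uint32_t)(x << 16) >> 16   // zero-extend the low 16 bits
     //   T_SHORT:   (int32_t)(x << 16) >> 16    // sign-extend the low 16 bits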
2359
2360 Label after_transition;
2361 // must we block?
2362
2363 // Block, if necessary, before resuming in _thread_in_Java state.
2364 // In order for GC to work, don't clear the last_Java_sp until after blocking.
2365 { Label no_block;
2366 AddressLiteral sync_state(SafepointSynchronize::address_of_state());
2367
2368 // Switch thread to "native transition" state before reading the synchronization state.
2369 // This additional state is necessary because reading and testing the synchronization
2370 // state is not atomic w.r.t. GC, as this scenario demonstrates:
2371 // Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
2372 // VM thread changes sync state to synchronizing and suspends threads for GC.
2373 // Thread A is resumed to finish this native method, but doesn't block here since it
2374 // didn't see any synchronization in progress, and escapes.
2375 __ set(_thread_in_native_trans, G3_scratch);
2376 __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
2377 if (os::is_MP()) {
2378 if (UseMembar) {
2379 // Force this write out before the read below
2380 __ membar(Assembler::StoreLoad);
2381 } else {
2382 // Write serialization page so VM thread can do a pseudo remote membar.
2383 // We use the current thread pointer to calculate a thread specific
2384 // offset to write to within the page. This minimizes bus traffic
2385 // due to cache line collision.
2386 __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
2387 }
2388 }
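     // For context, a hedged sketch of the other half of this protocol: when
     // UseMembar is off, the VM thread issues the "pseudo remote membar" by
     // serializing the page each thread writes to above, roughly
     //   os::serialize_thread_states(); // mprotect round-trip on the page
     // which forces the thread-state store above to become visible before the
     // VM thread examines thread states.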
2389 __ load_contents(sync_state, G3_scratch);
2390 __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
2391
2392 Label L;
2393 Address suspend_state(G2_thread, JavaThread::suspend_flags_offset());
2394 __ br(Assembler::notEqual, false, Assembler::pn, L);
2395 __ delayed()->ld(suspend_state, G3_scratch);
2396 __ cmp_and_br_short(G3_scratch, 0, Assembler::equal, Assembler::pt, no_block);
2397 __ bind(L);
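     // Net effect: fall through into the blocking path unless the safepoint
     // state is _not_synchronized and this thread's suspend_flags are clear.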
2398
2399 // Block. Save any potential method result value before the operation and
2400 // use a leaf call to leave the last_Java_frame setup undisturbed. Doing this
2401 // lets us share the oopMap we used when we went native rather than create
2402 // a distinct one for this pc.
2403 //
2404 save_native_result(masm, ret_type, stack_slots);
2405 if (!is_critical_native) {
2406 __ call_VM_leaf(L7_thread_cache,
2407 CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
2408 G2_thread);
2409 } else {
2410 __ call_VM_leaf(L7_thread_cache,
2411 CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition),
2412 G2_thread);
2413 }
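     // Hedged outline of the helpers called above: with last_Java_frame still
     // set, check_special_condition_for_native_trans() blocks if a safepoint is
     // in progress and handles any pending suspend request, so the oopMap
     // recorded at the native call still describes this frame while we are
     // stopped; the _and_transition variant (critical natives) additionally
     // completes the transition to _thread_in_Java before returning.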
2414
3105 // allocate space for the code
3106 ResourceMark rm;
3107 // set up code generation tools
3108 // Measured 8/7/03 at 896 in 32bit debug build (no VerifyThread)
3109 // Measured 8/7/03 at 1080 in 32bit debug build (VerifyThread)
3110 CodeBuffer buffer("handler_blob", 1600, 512);
3111 MacroAssembler* masm = new MacroAssembler(&buffer);
3112 int frame_size_words;
3113 OopMapSet *oop_maps = new OopMapSet();
3114 OopMap* map = NULL;
3115
3116 int start = __ offset();
3117
3118 bool cause_return = (poll_type == POLL_AT_RETURN);
3119 // If we are here for a poll at a return, do a "restore" first
3120 if (cause_return) {
3121 __ restore();
3122 } else {
3123 // Make it look like we were called via the poll
3124 // so that the frame constructor always sees a valid return address
3125 __ ld_ptr(G2_thread, in_bytes(JavaThread::saved_exception_pc_offset()), O7);
3126 __ sub(O7, frame::pc_return_offset, O7);
3127 }
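     // (A SPARC return branches to %o7 + frame::pc_return_offset, so the saved
     // exception pc is biased down here; when that offset is added back later,
     // the pc at which the poll trapped is recovered.)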
3128
3129 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
3130
3131 // set up last_Java_sp (blows G4)
3132 __ set_last_Java_frame(SP, noreg);
3133
3134 // call into the runtime to handle the illegal instruction exception
3135 // Do not use call_VM_leaf, because we need to make a GC map at this call site.
3136 __ mov(G2_thread, O0);
3137 __ save_thread(L7_thread_cache);
3138 __ call(call_ptr);
3139 __ delayed()->nop();
3140
3141 // Set an oopmap for the call site.
3142 // We need this not only for callee-saved registers, but also for volatile
3143 // registers that the compiler might be keeping live across a safepoint.
3144
3145 oop_maps->add_gc_map( __ offset() - start, map);
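     // The gc map is keyed by the return-address offset within the blob: the
     // current offset (just past the call and its delay slot, i.e. where
     // execution resumes after the callee returns) minus start.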
3146
3147 __ restore_thread(L7_thread_cache);
3148 // clear last_Java_sp
3149 __ reset_last_Java_frame();
3150
3151 // Check for exceptions
3152 Label pending;
3153
3154 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O1);
3155 __ br_notnull_short(O1, Assembler::pn, pending);
3156
3157 RegisterSaver::restore_live_registers(masm);
3158
3159 // We are back to the original state on entry and ready to go.
3160
3161 __ retl();
3162 __ delayed()->nop();
3163
3164 // Pending exception after the safepoint
3165
3166 __ bind(pending);
3167
3168 RegisterSaver::restore_live_registers(masm);
3169
3170 // We are back to the original state on entry.
3171
3172 // Tail-call forward_exception_entry, with the issuing PC in O7,
3173 // so it looks like the original nmethod called forward_exception_entry.
3174 __ set((intptr_t)StubRoutines::forward_exception_entry(), O0);
3175 __ JMP(O0, 0);
2346 case T_OBJECT: // Really a handle
2347 case T_ARRAY:
2348 case T_INT:
2349 __ mov(O0, I0);
2350 break;
2351 case T_BOOLEAN: __ subcc(G0, O0, G0); __ addc(G0, 0, I0); break; // !0 => true; 0 => false
2352 case T_BYTE : __ sll(O0, 24, O0); __ sra(O0, 24, I0); break;
2353 case T_CHAR : __ sll(O0, 16, O0); __ srl(O0, 16, I0); break; // cannot use and3, 0xFFFF too big as immediate value!
2354 case T_SHORT : __ sll(O0, 16, O0); __ sra(O0, 16, I0); break;
2355 // Cannot de-handlize until after reclaiming jvm_lock
2356 default:
2357 ShouldNotReachHere();
2358 }
2359
2360 Label after_transition;
2361 // must we block?
2362
2363 // Block, if necessary, before resuming in _thread_in_Java state.
2364 // In order for GC to work, don't clear the last_Java_sp until after blocking.
2365 { Label no_block;
2366
2367 // Switch thread to "native transition" state before reading the synchronization state.
2368 // This additional state is necessary because reading and testing the synchronization
2369 // state is not atomic w.r.t. GC, as this scenario demonstrates:
2370 // Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
2371 // VM thread changes sync state to synchronizing and suspends threads for GC.
2372 // Thread A is resumed to finish this native method, but doesn't block here since it
2373 // didn't see any synchronization in progress, and escapes.
2374 __ set(_thread_in_native_trans, G3_scratch);
2375 __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
2376 if (os::is_MP()) {
2377 if (UseMembar) {
2378 // Force this write out before the read below
2379 __ membar(Assembler::StoreLoad);
2380 } else {
2381 // Write serialization page so VM thread can do a pseudo remote membar.
2382 // We use the current thread pointer to calculate a thread specific
2383 // offset to write to within the page. This minimizes bus traffic
2384 // due to cache line collision.
2385 __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
2386 }
2387 }
2388
2389 Label L;
2390 Address suspend_state(G2_thread, JavaThread::suspend_flags_offset());
2391 __ safepoint_poll(L, false, G2_thread, G3_scratch);
2392 __ delayed()->ld(suspend_state, G3_scratch);
2393 __ cmp_and_br_short(G3_scratch, 0, Assembler::equal, Assembler::pt, no_block);
2394 __ bind(L);
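     // Hedged sketch of what safepoint_poll above expands to (names follow the
     // thread-local handshake changes; details may differ): either the classic
     // global check
     //   if (SafepointSynchronize::_state != _not_synchronized) goto L;
     // or, when SafepointMechanism::uses_thread_local_poll(), a load/test of
     // this thread's armed poll word, which lets a handshake stop a single
     // thread without a global safepoint.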
2395
2396 // Block. Save any potential method result value before the operation and
2397 // use a leaf call to leave the last_Java_frame setup undisturbed. Doing this
2398 // lets us share the oopMap we used when we went native rather than create
2399 // a distinct one for this pc.
2400 //
2401 save_native_result(masm, ret_type, stack_slots);
2402 if (!is_critical_native) {
2403 __ call_VM_leaf(L7_thread_cache,
2404 CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
2405 G2_thread);
2406 } else {
2407 __ call_VM_leaf(L7_thread_cache,
2408 CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition),
2409 G2_thread);
2410 }
2411
3102 // allocate space for the code
3103 ResourceMark rm;
3104 // set up code generation tools
3105 // Measured 8/7/03 at 896 in 32bit debug build (no VerifyThread)
3106 // Measured 8/7/03 at 1080 in 32bit debug build (VerifyThread)
3107 CodeBuffer buffer("handler_blob", 1600, 512);
3108 MacroAssembler* masm = new MacroAssembler(&buffer);
3109 int frame_size_words;
3110 OopMapSet *oop_maps = new OopMapSet();
3111 OopMap* map = NULL;
3112
3113 int start = __ offset();
3114
3115 bool cause_return = (poll_type == POLL_AT_RETURN);
3116 // If we are here for a poll at a return, do a "restore" first
3117 if (cause_return) {
3118 __ restore();
3119 } else {
3120 // Make it look like we were called via the poll
3121 // so that the frame constructor always sees a valid return address
3122 __ ld_ptr(Address(G2_thread, JavaThread::saved_exception_pc_offset()), O7);
3123 __ sub(O7, frame::pc_return_offset, O7);
3124 }
3125
3126 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
3127
3128 // set up last_Java_sp (blows G4)
3129 __ set_last_Java_frame(SP, noreg);
3130
3131 Register saved_O7 = O7->after_save();
3132 if (!cause_return && SafepointMechanism::uses_thread_local_poll()) {
3133 // Keep a copy of the return pc in L0 to detect if it gets modified
3134 __ mov(saved_O7, L0);
3135 // Adjust and keep a copy of our npc saved by the signal handler
3136 __ ld_ptr(Address(G2_thread, JavaThread::saved_exception_npc_offset()), L1);
3137 __ sub(L1, frame::pc_return_offset, L1);
3138 }
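     // Hedged rationale: with a thread-local poll the trap is raised by the
     // poll instruction itself, so the signal handler records both pc and npc.
     // We keep an adjusted copy of the npc so that, unless the runtime installs
     // a new return pc (detected via the L0 comparison below), we resume after
     // the poll rather than re-executing it.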
3139
3140 // call into the runtime to handle the illegal instruction exception
3141 // Do not use call_VM_leaf, because we need to make a GC map at this call site.
3142 __ mov(G2_thread, O0);
3143 __ save_thread(L7_thread_cache);
3144 __ call(call_ptr);
3145 __ delayed()->nop();
3146
3147 // Set an oopmap for the call site.
3148 // We need this not only for callee-saved registers, but also for volatile
3149 // registers that the compiler might be keeping live across a safepoint.
3150
3151 oop_maps->add_gc_map( __ offset() - start, map);
3152
3153 __ restore_thread(L7_thread_cache);
3154 // clear last_Java_sp
3155 __ reset_last_Java_frame();
3156
3157 // Check for exceptions
3158 Label pending;
3159
3160 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O1);
3161 __ br_notnull_short(O1, Assembler::pn, pending);
3162
3163 if (!cause_return && SafepointMechanism::uses_thread_local_poll()) {
3164 // If nobody modified our return pc then we must return to the npc we saved in L1
3165 __ cmp(saved_O7, L0);
3166 __ movcc(Assembler::equal, false, Assembler::ptr_cc, L1, saved_O7);
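     // i.e. if (saved_O7 == L0) saved_O7 = L1; a conditional move on the
     // pointer condition codes set by the cmp above.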
3167 }
3168
3169 RegisterSaver::restore_live_registers(masm);
3170
3171 // We are back to the original state on entry and ready to go.
3172
3173 __ retl();
3174 __ delayed()->nop();
3175
3176 // Pending exception after the safepoint
3177
3178 __ bind(pending);
3179
3180 RegisterSaver::restore_live_registers(masm);
3181
3182 // We are back to the original state on entry.
3183
3184 // Tail-call forward_exception_entry, with the issuing PC in O7,
3185 // so it looks like the original nmethod called forward_exception_entry.
3186 __ set((intptr_t)StubRoutines::forward_exception_entry(), O0);
3187 __ JMP(O0, 0);