
src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp

rev 48251 : 8193257: PPC64, s390 implementation for Thread-local handshakes
Reviewed-by:


 197   RegisterSaver_LiveIntReg(   R26 ),
 198   RegisterSaver_LiveIntReg(   R27 ),
 199   RegisterSaver_LiveIntReg(   R28 ),
 200   RegisterSaver_LiveIntReg(   R29 ),
 201   RegisterSaver_LiveIntReg(   R30 ),
 202   RegisterSaver_LiveIntReg(   R31 ), // must be the last register (see save/restore functions below)
 203 };
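
The save/restore loop further down walks a table of LiveRegType entries and reads reg_num and reg_type from each one. A minimal standalone sketch of that table-driven pattern follows; the enum values, field types, and the entry list are illustrative assumptions, not the actual HotSpot declarations.

    // Standalone sketch of the table-driven register save pattern (not HotSpot
    // code; enum values and the entry list are illustrative only).
    #include <cstdio>

    enum RegType { int_reg, float_reg, special_reg };

    struct LiveRegType {       // mirrors the fields the save loop reads:
      int     reg_num;         //   RegisterSaver_LiveRegs[i].reg_num
      RegType reg_type;        //   RegisterSaver_LiveRegs[i].reg_type
    };

    static const LiveRegType live_regs[] = {
      { 30, int_reg }, { 31, int_reg }, { 0, float_reg },
    };

    int main() {
      const int reg_size = 8;                  // one 64-bit register slot
      int offset = 0;                          // stands in for register_save_offset
      for (const LiveRegType& r : live_regs) {
        printf("save %s%d at frame offset %d\n",
               r.reg_type == int_reg ? "R" : "F", r.reg_num, offset);
        offset += reg_size;
      }
      return 0;
    }
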
 204 
 205 OopMap* RegisterSaver::push_frame_reg_args_and_save_live_registers(MacroAssembler* masm,
 206                          int* out_frame_size_in_bytes,
 207                          bool generate_oop_map,
 208                          int return_pc_adjustment,
 209                          ReturnPCLocation return_pc_location) {
 210   // Push an abi_reg_args-frame and store all registers which may be live.
 211   // If requested, create an OopMap: Record volatile registers as
 212   // callee-save values in an OopMap so their save locations will be
 213   // propagated to the RegisterMap of the caller frame during
 214   // StackFrameStream construction (needed for deoptimization; see
 215   // compiledVFrame::create_stack_value).
 216   // If return_pc_adjustment != 0 adjust the return pc by return_pc_adjustment.

 217 
 218   int i;
 219   int offset;
 220 
 221   // calculate frame size
 222   const int regstosave_num       = sizeof(RegisterSaver_LiveRegs) /
 223                                    sizeof(RegisterSaver::LiveRegType);
 224   const int register_save_size   = regstosave_num * reg_size;
 225   const int frame_size_in_bytes  = align_up(register_save_size, frame::alignment_in_bytes)
 226                                    + frame::abi_reg_args_size;
 227   *out_frame_size_in_bytes       = frame_size_in_bytes;
 228   const int frame_size_in_slots  = frame_size_in_bytes / sizeof(jint);
 229   const int register_save_offset = frame_size_in_bytes - register_save_size;
 230 
 231   // OopMap frame size is in c2 stack slots (sizeof(jint)) not bytes or words.
 232   OopMap* map = generate_oop_map ? new OopMap(frame_size_in_slots, 0) : NULL;
 233 
 234   BLOCK_COMMENT("push_frame_reg_args_and_save_live_registers {");
 235 
 236   // Save R31 in the last slot of the not yet pushed frame so that we
 237   // can use it as a scratch register.
 238   __ std(R31, -reg_size, R1_SP);

 239   assert(-reg_size == register_save_offset - frame_size_in_bytes + ((regstosave_num-1)*reg_size),
 240          "consistency check");
 241 
 242   // save the flags
 243   // Do the save_LR_CR by hand and adjust the return pc if requested.
 244   __ mfcr(R31);
 245   __ std(R31, _abi(cr), R1_SP);
 246   switch (return_pc_location) {
 247     case return_pc_is_lr: __ mflr(R31); break;
 248     case return_pc_is_pre_saved: assert(return_pc_adjustment == 0, "unsupported"); break;
 249     case return_pc_is_thread_saved_exception_pc: __ ld(R31, thread_(saved_exception_pc)); break;
 250     default: ShouldNotReachHere();
 251   }
 252   if (return_pc_location != return_pc_is_pre_saved) {
 253     if (return_pc_adjustment != 0) {
 254       __ addi(R31, R31, return_pc_adjustment);
 255     }
 256     __ std(R31, _abi(lr), R1_SP);
 257   }
 258 
 259   // push a new frame
 260   __ push_frame(frame_size_in_bytes, R31);
 261 
 262   // save all registers (ints and floats)
 263   offset = register_save_offset;
 264   for (int i = 0; i < regstosave_num; i++) {
 265     int reg_num  = RegisterSaver_LiveRegs[i].reg_num;
 266     int reg_type = RegisterSaver_LiveRegs[i].reg_type;
 267 
 268     switch (reg_type) {
 269       case RegisterSaver::int_reg: {
 270         if (reg_num != 31) { // We spilled R31 right at the beginning.
 271           __ std(as_Register(reg_num), offset, R1_SP);
 272         }
 273         break;
 274       }
 275       case RegisterSaver::float_reg: {
 276         __ stfd(as_FloatRegister(reg_num), offset, R1_SP);
 277         break;
 278       }
 279       case RegisterSaver::special_reg: {
 280         if (reg_num == SR_CTR_SpecialRegisterEnumValue) {
 281           __ mfctr(R31);
 282           __ std(R31, offset, R1_SP);
 283         } else {
 284           Unimplemented();
 285         }
 286         break;
 287       }
 288       default:
 289         ShouldNotReachHere();
 290     }
 291 
 292     if (generate_oop_map) {
 293       map->set_callee_saved(VMRegImpl::stack2reg(offset>>2),
 294                             RegisterSaver_LiveRegs[i].vmreg);
 295       map->set_callee_saved(VMRegImpl::stack2reg((offset + half_reg_size)>>2),
 296                             RegisterSaver_LiveRegs[i].vmreg->next());
 297     }
 298     offset += reg_size;
 299   }
 300 
 301   BLOCK_COMMENT("} push_frame_reg_args_and_save_live_registers");
 302 
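
The two set_callee_saved calls above record each 64-bit save slot as a pair of 32-bit OopMap stack slots, which is why the byte offset is shifted right by two. A small standalone sketch of that index arithmetic; the offset value is illustrative, not taken from the real frame layout.

    // Sketch: mapping a byte offset in the save area to OopMap stack slots
    // (not HotSpot code; the example offset is arbitrary).
    #include <cstdio>

    int main() {
      const int reg_size      = 8;              // one 64-bit register
      const int half_reg_size = reg_size / 2;   // 4 bytes
      const int slot_size     = 4;              // OopMap slots are sizeof(jint)

      int offset = 112;                         // illustrative byte offset of one saved register
      int first_slot  = offset / slot_size;                   // offset >> 2 in the listing
      int second_slot = (offset + half_reg_size) / slot_size; // the second 32-bit half

      printf("byte offset %d -> OopMap slots %d and %d\n", offset, first_slot, second_slot);
      return 0;
    }
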


2347   {
2348     Label no_block, sync;
2349 
2350     if (os::is_MP()) {
2351       if (UseMembar) {
2352         // Force this write out before the read below.
2353         __ fence();
2354       } else {
2355         // Write serialization page so VM thread can do a pseudo remote membar.
2356         // We use the current thread pointer to calculate a thread specific
2357         // offset to write to within the page. This minimizes bus traffic
2358         // due to cache line collision.
2359         __ serialize_memory(R16_thread, r_temp_4, r_temp_5);
2360       }
2361     }
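
The UseMembar branch above relies on a full fence to order the earlier thread-state store before the safepoint-state load that follows; the serialization page is the cheaper alternative to that StoreLoad barrier. A conceptual sketch of the ordering requirement, with illustrative variable names (not HotSpot code):

    // Conceptual sketch: a full fence between publishing the thread state and
    // reading the safepoint state (illustrative names, not HotSpot code).
    #include <atomic>

    std::atomic<int> thread_state{0};     // stands in for the earlier stw of thread_state
    std::atomic<int> safepoint_state{0};  // stands in for the lwz of sync_state below

    int transition_and_check() {
      thread_state.store(1, std::memory_order_relaxed);
      std::atomic_thread_fence(std::memory_order_seq_cst);  // __ fence(): store before load
      return safepoint_state.load(std::memory_order_relaxed);
    }

    int main() { return transition_and_check(); }
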
2362 
2363     Register sync_state_addr = r_temp_4;
2364     Register sync_state      = r_temp_5;
2365     Register suspend_flags   = r_temp_6;
2366 
2367     __ load_const(sync_state_addr, SafepointSynchronize::address_of_state(), /*temp*/ sync_state);
2368 
2369     // TODO: PPC port assert(4 == SafepointSynchronize::sz_state(), "unexpected field size");
2370     __ lwz(sync_state, 0, sync_state_addr);
2371 

2372     // TODO: PPC port assert(4 == Thread::sz_suspend_flags(), "unexpected field size");
2373     __ lwz(suspend_flags, thread_(suspend_flags));
2374 
2375     __ acquire();
2376 
2377     Label do_safepoint;
2378     // No synchronization in progress nor yet synchronized.
2379     __ cmpwi(CCR0, sync_state, SafepointSynchronize::_not_synchronized);
2380     // Not suspended.
2381     __ cmpwi(CCR1, suspend_flags, 0);
2382 
2383     __ bne(CCR0, sync);
2384     __ beq(CCR1, no_block);
2385 
2386     // Block. Save any potential method result value before the operation and
2387     // use a leaf call to leave the last_Java_frame setup undisturbed. Doing this
2388     // lets us share the oopMap we used when we went native rather than create
2389     // a distinct one for this pc.
2390     __ bind(sync);

2391 
2392     address entry_point = is_critical_native
2393       ? CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition)
2394       : CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans);
2395     save_native_result(masm, ret_type, workspace_slot_offset);
2396     __ call_VM_leaf(entry_point, R16_thread);
2397     restore_native_result(masm, ret_type, workspace_slot_offset);
2398 
2399     if (is_critical_native) {
2400       __ b(after_transition); // No thread state transition here.
2401     }
2402     __ bind(no_block);
2403   }
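
The two compares and branches above (CCR0 against the global safepoint state, CCR1 against the suspend flags) implement one combined decision. A sketch of that decision in plain C++; the enum names mirror SafepointSynchronize's states and the helper must_block is hypothetical.

    // Sketch of the blocking decision encoded by the cmpwi/bne/beq sequence
    // above (illustrative values; must_block is a hypothetical helper).
    enum SyncState { not_synchronized = 0, synchronizing, fully_synchronized };

    bool must_block(int sync_state, int suspend_flags) {
      // bne(CCR0, sync): safepoint synchronization in progress or reached.
      // beq(CCR1, no_block): no suspend flag set, skip the slow path.
      return sync_state != not_synchronized || suspend_flags != 0;
    }

    int main() { return must_block(not_synchronized, 0) ? 1 : 0; }
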
2404 
2405   // Publish thread state.
2406   // --------------------------------------------------------------------------
2407 
2408   // Thread state is thread_in_native_trans. Any safepoint blocking has
2409   // already happened so we can now change state to _thread_in_Java.
2410 
2411   // Transition from _thread_in_native_trans to _thread_in_Java.
2412   __ li(R0, _thread_in_Java);
2413   __ release();
2414   // TODO: PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size");
2415   __ stw(R0, thread_(thread_state));
2416   __ bind(after_transition);
2417 
2418   // Reguard any pages if necessary.
2419   // --------------------------------------------------------------------------
2420 
2421   Label no_reguard;
2422   __ lwz(r_temp_1, thread_(stack_guard_state));
2423   __ cmpwi(CCR0, r_temp_1, JavaThread::stack_guard_yellow_reserved_disabled);
2424   __ bne(CCR0, no_reguard);
2425 
2426   save_native_result(masm, ret_type, workspace_slot_offset);
2427   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
2428   restore_native_result(masm, ret_type, workspace_slot_offset);
2429 
2430   __ bind(no_reguard);
2431 
2432 
2433   // Unlock


3076   OopMap* map;
3077 
3078   // Allocate space for the code. Setup code generation tools.
3079   CodeBuffer buffer("handler_blob", 2048, 1024);
3080   MacroAssembler* masm = new MacroAssembler(&buffer);
3081 
3082   address start = __ pc();
3083   int frame_size_in_bytes = 0;
3084 
3085   RegisterSaver::ReturnPCLocation return_pc_location;
3086   bool cause_return = (poll_type == POLL_AT_RETURN);
3087   if (cause_return) {
3088     // Nothing to do here. The frame has already been popped in MachEpilogNode.
3089     // Register LR already contains the return pc.
3090     return_pc_location = RegisterSaver::return_pc_is_lr;
3091   } else {
3092     // Use thread()->saved_exception_pc() as return pc.
3093     return_pc_location = RegisterSaver::return_pc_is_thread_saved_exception_pc;
3094   }
3095 
3096   // Save registers, fpu state, and flags.
3097   map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm,
3098                                                                    &frame_size_in_bytes,
3099                                                                    /*generate_oop_map=*/ true,
3100                                                                    /*return_pc_adjustment=*/0,
3101                                                                    return_pc_location);
3102 
3103   // The following is basically a call_VM. However, we need the precise
3104   // address of the call in order to generate an oopmap. Hence, we do all the
 3105   // work ourselves.
3106   __ set_last_Java_frame(/*sp=*/R1_SP, /*pc=*/noreg);
3107 
3108   // The return address must always be correct so that the frame constructor
3109   // never sees an invalid pc.
3110 
3111   // Do the call
3112   __ call_VM_leaf(call_ptr, R16_thread);
3113   address calls_return_pc = __ last_calls_return_pc();
3114 
3115   // Set an oopmap for the call site. This oopmap will map all
3116   // oop-registers and debug-info registers as callee-saved. This


3125 
3126   BLOCK_COMMENT("  Check pending exception.");
3127   const Register pending_exception = R0;
3128   __ ld(pending_exception, thread_(pending_exception));
3129   __ cmpdi(CCR0, pending_exception, 0);
3130   __ beq(CCR0, noException);
3131 
3132   // Exception pending
3133   RegisterSaver::restore_live_registers_and_pop_frame(masm,
3134                                                       frame_size_in_bytes,
3135                                                       /*restore_ctr=*/true);
3136 
3137   BLOCK_COMMENT("  Jump to forward_exception_entry.");
3138   // Jump to forward_exception_entry, with the issuing PC in LR
3139   // so it looks like the original nmethod called forward_exception_entry.
3140   __ b64_patchable(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
3141 
3142   // No exception case.
3143   __ BIND(noException);
3144 
3145 
3146   // Normal exit, restore registers and exit.
3147   RegisterSaver::restore_live_registers_and_pop_frame(masm,
3148                                                       frame_size_in_bytes,
3149                                                       /*restore_ctr=*/true);
3150 
3151   __ blr();
3152 
3153   // Make sure all code is generated
3154   masm->flush();
3155 
3156   // Fill-out other meta info
3157   // CodeBlob frame size is in words.
3158   return SafepointBlob::create(&buffer, oop_maps, frame_size_in_bytes / wordSize);
3159 }
3160 
3161 // generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
3162 //
3163 // Generate a stub that calls into the vm to find out the proper destination
3164 // of a java call. All the argument registers are live at this point




 197   RegisterSaver_LiveIntReg(   R26 ),
 198   RegisterSaver_LiveIntReg(   R27 ),
 199   RegisterSaver_LiveIntReg(   R28 ),
 200   RegisterSaver_LiveIntReg(   R29 ),
 201   RegisterSaver_LiveIntReg(   R30 ),
 202   RegisterSaver_LiveIntReg(   R31 ), // must be the last register (see save/restore functions below)
 203 };
 204 
 205 OopMap* RegisterSaver::push_frame_reg_args_and_save_live_registers(MacroAssembler* masm,
 206                          int* out_frame_size_in_bytes,
 207                          bool generate_oop_map,
 208                          int return_pc_adjustment,
 209                          ReturnPCLocation return_pc_location) {
 210   // Push an abi_reg_args-frame and store all registers which may be live.
 211   // If requested, create an OopMap: Record volatile registers as
 212   // callee-save values in an OopMap so their save locations will be
 213   // propagated to the RegisterMap of the caller frame during
 214   // StackFrameStream construction (needed for deoptimization; see
 215   // compiledVFrame::create_stack_value).
 216   // If return_pc_adjustment != 0 adjust the return pc by return_pc_adjustment.
 217   // Updated return pc is returned in R31 (if not return_pc_is_pre_saved).
 218 
 219   int i;
 220   int offset;
 221 
 222   // calculate frame size
 223   const int regstosave_num       = sizeof(RegisterSaver_LiveRegs) /
 224                                    sizeof(RegisterSaver::LiveRegType);
 225   const int register_save_size   = regstosave_num * reg_size;
 226   const int frame_size_in_bytes  = align_up(register_save_size, frame::alignment_in_bytes)
 227                                    + frame::abi_reg_args_size;
 228   *out_frame_size_in_bytes       = frame_size_in_bytes;
 229   const int frame_size_in_slots  = frame_size_in_bytes / sizeof(jint);
 230   const int register_save_offset = frame_size_in_bytes - register_save_size;
 231 
 232   // OopMap frame size is in c2 stack slots (sizeof(jint)) not bytes or words.
 233   OopMap* map = generate_oop_map ? new OopMap(frame_size_in_slots, 0) : NULL;
 234 
 235   BLOCK_COMMENT("push_frame_reg_args_and_save_live_registers {");
 236 
 237   // Save R31 and R30 in the last two slots of the not yet pushed frame
 238   // so that we can use them as scratch registers.
 239   __ std(R31, -  reg_size, R1_SP);
 240   __ std(R30, -2*reg_size, R1_SP);
 241   assert(-reg_size == register_save_offset - frame_size_in_bytes + ((regstosave_num-1)*reg_size),
 242          "consistency check");
 243 
 244   // save the flags
 245   // Do the save_LR_CR by hand and adjust the return pc if requested.
 246   __ mfcr(R30);
 247   __ std(R30, _abi(cr), R1_SP);
 248   switch (return_pc_location) {
 249     case return_pc_is_lr: __ mflr(R31); break;
 250     case return_pc_is_pre_saved: assert(return_pc_adjustment == 0, "unsupported"); break;
 251     case return_pc_is_thread_saved_exception_pc: __ ld(R31, thread_(saved_exception_pc)); break;
 252     default: ShouldNotReachHere();
 253   }
 254   if (return_pc_location != return_pc_is_pre_saved) {
 255     if (return_pc_adjustment != 0) {
 256       __ addi(R31, R31, return_pc_adjustment);
 257     }
 258     __ std(R31, _abi(lr), R1_SP);
 259   }
 260 
 261   // push a new frame
 262   __ push_frame(frame_size_in_bytes, R30);
 263 
 264   // save all registers (ints and floats)
 265   offset = register_save_offset;
 266   for (int i = 0; i < regstosave_num; i++) {
 267     int reg_num  = RegisterSaver_LiveRegs[i].reg_num;
 268     int reg_type = RegisterSaver_LiveRegs[i].reg_type;
 269 
 270     switch (reg_type) {
 271       case RegisterSaver::int_reg: {
 272         if (reg_num < 30) { // We spilled R30-31 right at the beginning.
 273           __ std(as_Register(reg_num), offset, R1_SP);
 274         }
 275         break;
 276       }
 277       case RegisterSaver::float_reg: {
 278         __ stfd(as_FloatRegister(reg_num), offset, R1_SP);
 279         break;
 280       }
 281       case RegisterSaver::special_reg: {
 282         if (reg_num == SR_CTR_SpecialRegisterEnumValue) {
 283           __ mfctr(R30);
 284           __ std(R30, offset, R1_SP);
 285         } else {
 286           Unimplemented();
 287         }
 288         break;
 289       }
 290       default:
 291         ShouldNotReachHere();
 292     }
 293 
 294     if (generate_oop_map) {
 295       map->set_callee_saved(VMRegImpl::stack2reg(offset>>2),
 296                             RegisterSaver_LiveRegs[i].vmreg);
 297       map->set_callee_saved(VMRegImpl::stack2reg((offset + half_reg_size)>>2),
 298                             RegisterSaver_LiveRegs[i].vmreg->next());
 299     }
 300     offset += reg_size;
 301   }
 302 
 303   BLOCK_COMMENT("} push_frame_reg_args_and_save_live_registers");
 304 
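
The frame size computed above is the aligned register save area plus the abi_reg_args header, and register_save_offset marks where the save area starts inside that frame. A standalone sketch of the same arithmetic; the concrete constants below are assumptions, only the formula mirrors the listing.

    // Sketch of the frame layout arithmetic (not HotSpot code; the values for
    // regstosave_num, alignment_in_bytes and abi_reg_args_size are assumed).
    #include <cstdio>

    static int align_up(int value, int alignment) {   // alignment must be a power of two
      return (value + alignment - 1) & ~(alignment - 1);
    }

    int main() {
      const int reg_size           = 8;
      const int regstosave_num     = 65;    // assumed entry count of RegisterSaver_LiveRegs
      const int alignment_in_bytes = 16;    // assumed frame::alignment_in_bytes
      const int abi_reg_args_size  = 112;   // assumed frame::abi_reg_args_size

      const int register_save_size   = regstosave_num * reg_size;
      const int frame_size_in_bytes  = align_up(register_save_size, alignment_in_bytes)
                                       + abi_reg_args_size;
      const int register_save_offset = frame_size_in_bytes - register_save_size;

      printf("save area %d bytes, frame %d bytes, saves start at offset %d\n",
             register_save_size, frame_size_in_bytes, register_save_offset);
      return 0;
    }
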


2349   {
2350     Label no_block, sync;
2351 
2352     if (os::is_MP()) {
2353       if (UseMembar) {
2354         // Force this write out before the read below.
2355         __ fence();
2356       } else {
2357         // Write serialization page so VM thread can do a pseudo remote membar.
2358         // We use the current thread pointer to calculate a thread specific
2359         // offset to write to within the page. This minimizes bus traffic
2360         // due to cache line collision.
2361         __ serialize_memory(R16_thread, r_temp_4, r_temp_5);
2362       }
2363     }
2364 
2365     Register sync_state_addr = r_temp_4;
2366     Register sync_state      = r_temp_5;
2367     Register suspend_flags   = r_temp_6;
2368 
2369     // No synchronization in progress nor yet synchronized
2370     // (cmp-br-isync on one path, release (same as acquire on PPC64) on the other path).
2371     __ safepoint_poll(sync, sync_state);

2372 
2373     // Not suspended.
2374     // TODO: PPC port assert(4 == Thread::sz_suspend_flags(), "unexpected field size");
2375     __ lwz(suspend_flags, thread_(suspend_flags));
2376     __ cmpwi(CCR1, suspend_flags, 0);


2377     __ beq(CCR1, no_block);
2378 
2379     // Block. Save any potential method result value before the operation and
2380     // use a leaf call to leave the last_Java_frame setup undisturbed. Doing this
2381     // lets us share the oopMap we used when we went native rather than create
2382     // a distinct one for this pc.
2383     __ bind(sync);
2384     __ isync();
2385 
2386     address entry_point = is_critical_native
2387       ? CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition)
2388       : CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans);
2389     save_native_result(masm, ret_type, workspace_slot_offset);
2390     __ call_VM_leaf(entry_point, R16_thread);
2391     restore_native_result(masm, ret_type, workspace_slot_offset);
2392 
2393     if (is_critical_native) {
2394       __ b(after_transition); // No thread state transition here.
2395     }
2396     __ bind(no_block);
2397   }
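
safepoint_poll above replaces the load of the global SafepointSynchronize state with a per-thread check, which is what enables thread-local handshakes. A conceptual sketch of the test it stands for; the field name and layout are assumptions, not the actual MacroAssembler::safepoint_poll implementation.

    // Conceptual sketch of a per-thread safepoint poll (not HotSpot code;
    // the poll_armed field is an assumed stand-in for the thread's poll state).
    #include <atomic>

    struct ThreadSketch {
      // Under thread-local handshakes each thread carries its own poll state,
      // armed by the VM when this particular thread must stop.
      std::atomic<bool> poll_armed{false};
    };

    bool needs_safepoint(const ThreadSketch& t) {
      // The emitted code performs the moral equivalent of this load-and-test
      // and branches to the 'sync' slow path when the poll is armed.
      return t.poll_armed.load(std::memory_order_relaxed);
    }

    int main() {
      ThreadSketch t;
      return needs_safepoint(t) ? 1 : 0;
    }
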
2398 
2399   // Publish thread state.
2400   // --------------------------------------------------------------------------
2401 
2402   // Thread state is thread_in_native_trans. Any safepoint blocking has
2403   // already happened so we can now change state to _thread_in_Java.
2404 
2405   // Transition from _thread_in_native_trans to _thread_in_Java.
2406   __ li(R0, _thread_in_Java);
2407   __ lwsync(); // Acquire safepoint and suspend state, release thread state.
2408   // TODO: PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size");
2409   __ stw(R0, thread_(thread_state));
2410   __ bind(after_transition);
2411 
2412   // Reguard any pages if necessary.
2413   // --------------------------------------------------------------------------
2414 
2415   Label no_reguard;
2416   __ lwz(r_temp_1, thread_(stack_guard_state));
2417   __ cmpwi(CCR0, r_temp_1, JavaThread::stack_guard_yellow_reserved_disabled);
2418   __ bne(CCR0, no_reguard);
2419 
2420   save_native_result(masm, ret_type, workspace_slot_offset);
2421   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
2422   restore_native_result(masm, ret_type, workspace_slot_offset);
2423 
2424   __ bind(no_reguard);
2425 
2426 
2427   // Unlock


3070   OopMap* map;
3071 
3072   // Allocate space for the code. Setup code generation tools.
3073   CodeBuffer buffer("handler_blob", 2048, 1024);
3074   MacroAssembler* masm = new MacroAssembler(&buffer);
3075 
3076   address start = __ pc();
3077   int frame_size_in_bytes = 0;
3078 
3079   RegisterSaver::ReturnPCLocation return_pc_location;
3080   bool cause_return = (poll_type == POLL_AT_RETURN);
3081   if (cause_return) {
3082     // Nothing to do here. The frame has already been popped in MachEpilogNode.
3083     // Register LR already contains the return pc.
3084     return_pc_location = RegisterSaver::return_pc_is_lr;
3085   } else {
3086     // Use thread()->saved_exception_pc() as return pc.
3087     return_pc_location = RegisterSaver::return_pc_is_thread_saved_exception_pc;
3088   }
3089 
3090   // Save registers, fpu state, and flags. Set R31 = return pc.
3091   map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm,
3092                                                                    &frame_size_in_bytes,
3093                                                                    /*generate_oop_map=*/ true,
3094                                                                    /*return_pc_adjustment=*/0,
3095                                                                    return_pc_location);
3096 
3097   // The following is basically a call_VM. However, we need the precise
3098   // address of the call in order to generate an oopmap. Hence, we do all the
 3099   // work ourselves.
3100   __ set_last_Java_frame(/*sp=*/R1_SP, /*pc=*/noreg);
3101 
3102   // The return address must always be correct so that the frame constructor
3103   // never sees an invalid pc.
3104 
3105   // Do the call
3106   __ call_VM_leaf(call_ptr, R16_thread);
3107   address calls_return_pc = __ last_calls_return_pc();
3108 
3109   // Set an oopmap for the call site. This oopmap will map all
3110   // oop-registers and debug-info registers as callee-saved. This


3119 
3120   BLOCK_COMMENT("  Check pending exception.");
3121   const Register pending_exception = R0;
3122   __ ld(pending_exception, thread_(pending_exception));
3123   __ cmpdi(CCR0, pending_exception, 0);
3124   __ beq(CCR0, noException);
3125 
3126   // Exception pending
3127   RegisterSaver::restore_live_registers_and_pop_frame(masm,
3128                                                       frame_size_in_bytes,
3129                                                       /*restore_ctr=*/true);
3130 
3131   BLOCK_COMMENT("  Jump to forward_exception_entry.");
3132   // Jump to forward_exception_entry, with the issuing PC in LR
3133   // so it looks like the original nmethod called forward_exception_entry.
3134   __ b64_patchable(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
3135 
3136   // No exception case.
3137   __ BIND(noException);
3138 
3139   if (SafepointMechanism::uses_thread_local_poll() && !cause_return) {
3140     Label no_adjust;
 3141     // If our stashed return pc was modified by the runtime, we avoid touching it.
3142     __ ld(R0, frame_size_in_bytes + _abi(lr), R1_SP);
3143     __ cmpd(CCR0, R0, R31);
3144     __ bne(CCR0, no_adjust);
3145 
3146     // Adjust return pc forward to step over the safepoint poll instruction
3147     __ addi(R31, R31, 4);
3148     __ std(R31, frame_size_in_bytes + _abi(lr), R1_SP);
3149 
3150     __ bind(no_adjust);
3151   }
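
With a thread-local poll and cause_return == false, the stashed return pc is the saved exception pc of the poll instruction itself, so the normal-exit path above advances it by one 4-byte PPC instruction unless the runtime already replaced the stashed value. A sketch of that fixup rule (not HotSpot code; the helper is hypothetical):

    // Sketch of the return-pc fixup above: every PPC instruction is 4 bytes,
    // so stepping over the poll means advancing the pc by 4 unless the runtime
    // already modified the stashed return pc (hypothetical helper, not HotSpot code).
    #include <cstdint>
    #include <cstdio>

    uint64_t adjusted_return_pc(uint64_t stashed_lr, uint64_t saved_exception_pc) {
      if (stashed_lr != saved_exception_pc) {
        return stashed_lr;               // runtime modified it: leave it untouched
      }
      return saved_exception_pc + 4;     // step over the safepoint poll instruction
    }

    int main() {
      printf("0x%llx\n", (unsigned long long)adjusted_return_pc(0x1000, 0x1000));
      return 0;
    }
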
3152 
3153   // Normal exit, restore registers and exit.
3154   RegisterSaver::restore_live_registers_and_pop_frame(masm,
3155                                                       frame_size_in_bytes,
3156                                                       /*restore_ctr=*/true);
3157 
3158   __ blr();
3159 
3160   // Make sure all code is generated
3161   masm->flush();
3162 
3163   // Fill-out other meta info
3164   // CodeBlob frame size is in words.
3165   return SafepointBlob::create(&buffer, oop_maps, frame_size_in_bytes / wordSize);
3166 }
3167 
3168 // generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
3169 //
3170 // Generate a stub that calls into the vm to find out the proper destination
3171 // of a java call. All the argument registers are live at this point

