src/share/vm/c1/c1_LIRGenerator.cpp

Old version:

2267 
2268   // increment invocation counters if needed
2269   increment_invocation_counter(new CodeEmitInfo(0, scope()->start()->state(), NULL));
2270 
2271   // all blocks with a successor must end with an unconditional jump
2272   // to the successor even if they are consecutive
2273   __ jump(x->default_sux());
2274 }
2275 
2276 
2277 void LIRGenerator::do_OsrEntry(OsrEntry* x) {
2278   // construct our frame and model the production of the incoming pointer
2279   // to the OSR buffer.
2280   __ osr_entry(LIR_Assembler::osrBufferPointer());
2281   LIR_Opr result = rlock_result(x);
2282   __ move(LIR_Assembler::osrBufferPointer(), result);
2283 }
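
The OSR buffer referred to above is the block in which the interpreter hands over its frame state (locals and monitors) when execution migrates into compiled code mid-method; do_OsrEntry only materializes the incoming pointer to it. A minimal sketch of the handoff, with a purely hypothetical layout (the real buffer is packed by the runtime, not described by any such struct):

    #include <cstdint>

    // Hypothetical OSR buffer layout; illustrative only.
    struct OsrBuffer {
      intptr_t locals[8];    // interpreter locals, packed by the runtime
      intptr_t monitors[2];  // lock records, if the method holds any
    };

    // do_OsrEntry's result is just this incoming pointer; OSR-specific
    // prologue code later unpacks the locals into the compiled frame.
    intptr_t* osr_entry_result(OsrBuffer* buf) {
      return reinterpret_cast<intptr_t*>(buf);
    }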
2284 
2285 
2286 void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) {
2287   int i = x->has_receiver() ? 1 : 0;
2288   for (; i < args->length(); i++) {
2289     LIRItem* param = args->at(i);
2290     LIR_Opr loc = arg_list->at(i);
2291     if (loc->is_register()) {
2292       param->load_item_force(loc);
2293     } else {
2294       LIR_Address* addr = loc->as_address_ptr();
2295       param->load_for_store(addr->type());
2296       if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
2297         __ unaligned_move(param->result(), addr);
2298       } else {
2299         __ move(param->result(), addr);
2300       }
2301     }
2302   }
2303 
2304   if (x->has_receiver()) {
2305     LIRItem* receiver = args->at(0);
2306     LIR_Opr loc = arg_list->at(0);
2307     if (loc->is_register()) {
2308       receiver->load_item_force(loc);
2309     } else {
2310       assert(loc->is_address(), "just checking");
2311       receiver->load_for_store(T_OBJECT);
2312       __ move(receiver->result(), loc);
2313     }
2314   }
2315 }
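
Two details above deserve a note. Arguments whose convention-assigned location is a register are pinned there with load_item_force; the rest are stored through a stack address. T_LONG and T_DOUBLE go through unaligned_move because an outgoing stack slot is typically only guaranteed word alignment on 32-bit targets, so a 64-bit store must tolerate a 4-byte-aligned address. A toy illustration of that last point (function name hypothetical):

    #include <cstdint>
    #include <cstring>

    // Store a 64-bit value into an outgoing argument slot that may be only
    // 4-byte aligned. memcpy is alignment-agnostic, which is the guarantee
    // unaligned_move provides; a direct *(int64_t*) store would assume
    // natural 8-byte alignment.
    void store_outgoing_long(uint8_t* slot, int64_t value) {
      std::memcpy(slot, &value, sizeof value);
    }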
2316 
2317 
2318 // Visits all arguments, returns appropriate items without loading them
2319 LIRItemList* LIRGenerator::invoke_visit_arguments(Invoke* x) {
2320   LIRItemList* argument_items = new LIRItemList();
2321   if (x->has_receiver()) {
2322     LIRItem* receiver = new LIRItem(x->receiver(), this);
2323     argument_items->append(receiver);
2324   }
2325   int idx = x->has_receiver() ? 1 : 0;
2326   for (int i = 0; i < x->number_of_arguments(); i++) {
2327     LIRItem* param = new LIRItem(x->argument_at(i), this);
2328     argument_items->append(param);
2329     idx += (param->type()->is_double_word() ? 2 : 1);
2330   }
2331   return argument_items;
2332 }
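
One subtlety in the loop above: idx advances by two for long and double arguments because they occupy two Java stack slots. A toy version of that bookkeeping (types hypothetical, standing in for C1's ValueType):

    // Long and double arguments take two slots; everything else takes one.
    enum class Kind { Int, Object, Long, Double };

    int advance_slot(int idx, Kind k) {
      bool double_word = (k == Kind::Long || k == Kind::Double);
      return idx + (double_word ? 2 : 1);
    }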
2333 
2334 
2335 // The invoke with receiver has the following phases:
2336 //   a) traverse and load/lock receiver;
2337 //   b) traverse all arguments -> item-array (invoke_visit_arguments)
2338 //   c) push receiver on stack
2339 //   d) load each of the items and push on stack
2340 //   e) unlock receiver
2341 //   f) move receiver into receiver-register %o0
2342 //   g) lock result registers and emit call operation
2343 //
2344 // Before issuing a call, we must spill-save all values on stack
2345 // that are in caller-save registers. "spill-save" moves those registers
2346 // either into a free callee-save register or spills them if no free
2347 // callee-save register is available.
2348 //
2349 // The problem is where to invoke spill-save.
2350 // - if invoked between e) and f), we may lock callee-save
2351 //   registers in "spill-save", destroying the receiver register
2352 //   before f) is executed
2353 // - if we rearrange f) to be earlier, by loading %o0, it
2354 //   may destroy a value on the stack that is currently in %o0
2355 //   and is waiting to be spilled
2356 // - if we keep the receiver locked while doing spill-save,
2357 //   we cannot spill it as it is spill-locked
2358 //
2359 void LIRGenerator::do_Invoke(Invoke* x) {
2360   CallingConvention* cc = frame_map()->java_calling_convention(x->signature(), true);
2361 
2362   LIR_OprList* arg_list = cc->args();
2363   LIRItemList* args = invoke_visit_arguments(x);
2364   LIR_Opr receiver = LIR_OprFact::illegalOpr;
2365 
2366   // setup result register
2367   LIR_Opr result_register = LIR_OprFact::illegalOpr;
2368   if (x->type() != voidType) {
2369     result_register = result_register_for(x->type());
2370   }
2371 
2372   CodeEmitInfo* info = state_for(x, x->state());
2373 
2374   invoke_load_arguments(x, args, arg_list);
2375 
2376   if (x->has_receiver()) {
2377     args->at(0)->load_item_force(LIR_Assembler::receiverOpr());
2378     receiver = args->at(0)->result();
2379   }
2380 
2381   // emit invoke code
2382   bool optimized = x->target_is_loaded() && x->target_is_final();
2383   assert(receiver->is_illegal() || receiver->is_equal(LIR_Assembler::receiverOpr()), "must match");
2384 
2385   switch (x->code()) {
2386     case Bytecodes::_invokestatic:
2387       __ call_static(x->target(), result_register,
2388                      SharedRuntime::get_resolve_static_call_stub(),
2389                      arg_list, info);
2390       break;
2391     case Bytecodes::_invokespecial:
2392     case Bytecodes::_invokevirtual:
2393     case Bytecodes::_invokeinterface:
2394       // for a final target we still produce an inline cache, in order
2395       // to be able to call in mixed mode
2396       if (x->code() == Bytecodes::_invokespecial || optimized) {
2397         __ call_opt_virtual(x->target(), receiver, result_register,
2398                             SharedRuntime::get_resolve_opt_virtual_call_stub(),
2399                             arg_list, info);
2400       } else if (x->vtable_index() < 0) {
2401         __ call_icvirtual(x->target(), receiver, result_register,
2402                           SharedRuntime::get_resolve_virtual_call_stub(),
2403                           arg_list, info);
2404       } else {
2405         int entry_offset = instanceKlass::vtable_start_offset() + x->vtable_index() * vtableEntry::size();
2406         int vtable_offset = entry_offset * wordSize + vtableEntry::method_offset_in_bytes();
2407         __ call_virtual(x->target(), receiver, result_register, vtable_offset, arg_list, info);
2408       }
2409       break;
2410     default:
2411       ShouldNotReachHere();
2412       break;
2413   }
2414 
2415   if (x->type()->is_float() || x->type()->is_double()) {
2416     // Force rounding of results from non-strictfp callees when in a strictfp
2417     // scope (or when we don't know the strictness of the callee, to
2418     // be safe).
2419     if (method()->is_strict()) {
2420       if (!x->target_is_loaded() || !x->target_is_strictfp()) {
2421         result_register = round_item(result_register);
2422       }
2423     }
2424   }
2425 
2426   if (result_register->is_valid()) {
2427     LIR_Opr result = rlock_result(x);
2428     __ move(result_register, result);
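
The rounding step near the end of do_Invoke exists because a strictfp caller must observe float and double results rounded to their IEEE formats, while an unknown or non-strict callee may have produced an extended-precision value (for example in an 80-bit x87 register). round_item forces the narrowing; conceptually it is a store and reload through memory, as in this sketch (illustrative only, not the emitted LIR):

    // Narrow an extended-precision result to a true 64-bit double.
    double force_double_rounding(long double extended) {
      volatile double narrowed = static_cast<double>(extended);  // the store narrows
      return narrowed;                                           // reload the rounded value
    }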
2429   }





New version (with invokedynamic support):

2267 
2268   // increment invocation counters if needed
2269   increment_invocation_counter(new CodeEmitInfo(0, scope()->start()->state(), NULL));
2270 
2271   // all blocks with a successor must end with an unconditional jump
2272   // to the successor even if they are consecutive
2273   __ jump(x->default_sux());
2274 }
2275 
2276 
2277 void LIRGenerator::do_OsrEntry(OsrEntry* x) {
2278   // construct our frame and model the production of the incoming pointer
2279   // to the OSR buffer.
2280   __ osr_entry(LIR_Assembler::osrBufferPointer());
2281   LIR_Opr result = rlock_result(x);
2282   __ move(LIR_Assembler::osrBufferPointer(), result);
2283 }
2284 
2285 
2286 void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) {
2287   int i = (x->has_receiver() || x->is_invokedynamic()) ? 1 : 0;
2288   for (; i < args->length(); i++) {
2289     LIRItem* param = args->at(i);
2290     LIR_Opr loc = arg_list->at(i);
2291     if (loc->is_register()) {
2292       param->load_item_force(loc);
2293     } else {
2294       LIR_Address* addr = loc->as_address_ptr();
2295       param->load_for_store(addr->type());
2296       if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
2297         __ unaligned_move(param->result(), addr);
2298       } else {
2299         __ move(param->result(), addr);
2300       }
2301     }
2302   }
2303 
2304   if (x->has_receiver()) {
2305     LIRItem* receiver = args->at(0);
2306     LIR_Opr loc = arg_list->at(0);
2307     if (loc->is_register()) {
2308       receiver->load_item_force(loc);
2309     } else {
2310       assert(loc->is_address(), "just checking");
2311       receiver->load_for_store(T_OBJECT);
2312       __ move(receiver->result(), loc);
2313     }
2314   }
2315 }
2316 
2317 
2318 // Visits all arguments, returns appropriate items without loading them
2319 LIRItemList* LIRGenerator::invoke_visit_arguments(Invoke* x) {
2320   LIRItemList* argument_items = new LIRItemList();
2321   if (x->has_receiver()) {
2322     LIRItem* receiver = new LIRItem(x->receiver(), this);
2323     argument_items->append(receiver);
2324   }
2325   if (x->is_invokedynamic()) {
2326     // Insert a dummy for the synthetic MethodHandle argument.
2327     argument_items->append(NULL);
2328   }
2329   int idx = x->has_receiver() ? 1 : 0;
2330   for (int i = 0; i < x->number_of_arguments(); i++) {
2331     LIRItem* param = new LIRItem(x->argument_at(i), this);
2332     argument_items->append(param);
2333     idx += (param->type()->is_double_word() ? 2 : 1);
2334   }
2335   return argument_items;
2336 }
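
The NULL dummy keeps the item list index-aligned with the calling convention's operand list, whose slot 0 is the receiver register. For an invokedynamic call with two bytecode arguments the two lists line up as follows (illustrative):

    //   argument_items: [ NULL dummy,    arg0,      arg1      ]
    //   arg_list:       [ receiverOpr(), loc(arg0), loc(arg1) ]
    //
    // invoke_load_arguments starts at index 1 for invokedynamic, so the
    // dummy is never loaded; do_Invoke later materializes the target
    // MethodHandle directly into receiverOpr().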
2337 
2338 
2339 // The invoke with receiver has the following phases:
2340 //   a) traverse and load/lock receiver;
2341 //   b) traverse all arguments -> item-array (invoke_visit_arguments)
2342 //   c) push receiver on stack
2343 //   d) load each of the items and push on stack
2344 //   e) unlock receiver
2345 //   f) move receiver into receiver-register %o0
2346 //   g) lock result registers and emit call operation
2347 //
2348 // Before issuing a call, we must spill-save all values on stack
2349 // that are in caller-save registers. "spill-save" moves those registers
2350 // either into a free callee-save register or spills them if no free
2351 // callee-save register is available.
2352 //
2353 // The problem is where to invoke spill-save.
2354 // - if invoked between e) and f), we may lock callee-save
2355 //   registers in "spill-save", destroying the receiver register
2356 //   before f) is executed
2357 // - if we rearrange f) to be earlier, by loading %o0, it
2358 //   may destroy a value on the stack that is currently in %o0
2359 //   and is waiting to be spilled
2360 // - if we keep the receiver locked while doing spill-save,
2361 //   we cannot spill it as it is spill-locked
2362 //
2363 void LIRGenerator::do_Invoke(Invoke* x) {
2364   CallingConvention* cc = frame_map()->java_calling_convention(x->signature(), true);
2365 
2366   LIR_OprList* arg_list = cc->args();
2367   LIRItemList* args = invoke_visit_arguments(x);
2368   LIR_Opr receiver = LIR_OprFact::illegalOpr;
2369 
2370   // setup result register
2371   LIR_Opr result_register = LIR_OprFact::illegalOpr;
2372   if (x->type() != voidType) {
2373     result_register = result_register_for(x->type());
2374   }
2375 
2376   CodeEmitInfo* info = state_for(x, x->state());
2377 
2378   // invokedynamics can deoptimize.
2379   bool is_invokedynamic = x->code() == Bytecodes::_invokedynamic;
2380   CodeEmitInfo* deopt_info = is_invokedynamic ? state_for(x, x->state_before()) : NULL;
2381 
2382   invoke_load_arguments(x, args, arg_list);
2383 
2384   if (x->has_receiver()) {
2385     args->at(0)->load_item_force(LIR_Assembler::receiverOpr());
2386     receiver = args->at(0)->result();
2387   }
2388 
2389   // emit invoke code
2390   bool optimized = x->target_is_loaded() && x->target_is_final();
2391   assert(receiver->is_illegal() || receiver->is_equal(LIR_Assembler::receiverOpr()), "must match");
2392 
2393   switch (x->code()) {
2394     case Bytecodes::_invokestatic:
2395       __ call_static(x->target(), result_register,
2396                      SharedRuntime::get_resolve_static_call_stub(),
2397                      arg_list, info);
2398       break;
2399     case Bytecodes::_invokespecial:
2400     case Bytecodes::_invokevirtual:
2401     case Bytecodes::_invokeinterface:
2402       // for a final target we still produce an inline cache, in order
2403       // to be able to call in mixed mode
2404       if (x->code() == Bytecodes::_invokespecial || optimized) {
2405         __ call_opt_virtual(x->target(), receiver, result_register,
2406                             SharedRuntime::get_resolve_opt_virtual_call_stub(),
2407                             arg_list, info);
2408       } else if (x->vtable_index() < 0) {
2409         __ call_icvirtual(x->target(), receiver, result_register,
2410                           SharedRuntime::get_resolve_virtual_call_stub(),
2411                           arg_list, info);
2412       } else {
2413         int entry_offset = instanceKlass::vtable_start_offset() + x->vtable_index() * vtableEntry::size();
2414         int vtable_offset = entry_offset * wordSize + vtableEntry::method_offset_in_bytes();
2415         __ call_virtual(x->target(), receiver, result_register, vtable_offset, arg_list, info);
2416       }
2417       break;
2418     case Bytecodes::_invokedynamic: {
2419       ciBytecodeStream bcs(x->scope()->method());
2420       bcs.force_bci(x->bci());
2421       assert(bcs.cur_bc() == Bytecodes::_invokedynamic, "wrong stream");
2422       ciCPCache* cpcache = bcs.get_cpcache();
2423 
2424       // Get CallSite offset from constant pool cache pointer.
2425       int index = bcs.get_method_index();
2426       size_t call_site_offset = cpcache->get_f1_offset(index);
2427 
2428       // If this invokedynamic call site hasn't been executed yet in
2429       // the interpreter, the CallSite object in the constant pool
2430       // cache is still null and we need to deoptimize.
2431       if (cpcache->is_f1_null_at(index)) {
2432         // Cannot re-use the same xhandlers for multiple CodeEmitInfos, so
2433         // clone all handlers.  This is handled transparently in other
2434         // places by the CodeEmitInfo cloning logic but is handled
2435         // specially here because a stub isn't being used.
2436         x->set_exception_handlers(new XHandlers(x->exception_handlers()));
2437 
2438         DeoptimizeStub* deopt_stub = new DeoptimizeStub(deopt_info);
2439         __ jump(deopt_stub);
2440       }
2441 
2442       // Use the receiver register for the synthetic MethodHandle
2443       // argument.
2444       receiver = LIR_Assembler::receiverOpr();
2445       LIR_Opr tmp = new_register(objectType);
2446 
2447       // Load CallSite object from constant pool cache.
2448       __ oop2reg(cpcache->constant_encoding(), tmp);
2449       __ load(new LIR_Address(tmp, call_site_offset, T_OBJECT), tmp);
2450 
2451       // Load target MethodHandle from CallSite object.
2452       __ load(new LIR_Address(tmp, java_dyn_CallSite::target_offset_in_bytes(), T_OBJECT), receiver);
2453 
2454       __ call_dynamic(x->target(), receiver, result_register,
2455                       SharedRuntime::get_resolve_opt_virtual_call_stub(),
2456                       arg_list, info);
2457       break;
2458     }
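    // The two loads above walk this chain (the f1 slot naming follows the
    // ciCPCache accessors used here; CallSite and MethodHandle are the
    // java.dyn types of this era):
    //
    //   cpcache oop  -- f1 slot at call_site_offset -->  CallSite oop
    //   CallSite oop -- target_offset_in_bytes()    -->  MethodHandle oop
    //
    // roughly: receiver = cpcache->f1(index)->target(), with the
    // MethodHandle handed to the call stub in the receiver register.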
2459     default:
2460       ShouldNotReachHere();
2461       break;
2462   }
2463 
2464   if (x->type()->is_float() || x->type()->is_double()) {
2465     // Force rounding of results from non-strictfp callees when in a strictfp
2466     // scope (or when we don't know the strictness of the callee, to
2467     // be safe).
2468     if (method()->is_strict()) {
2469       if (!x->target_is_loaded() || !x->target_is_strictfp()) {
2470         result_register = round_item(result_register);
2471       }
2472     }
2473   }
2474 
2475   if (result_register->is_valid()) {
2476     LIR_Opr result = rlock_result(x);
2477     __ move(result_register, result);
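
One computation above that benefits from a worked instance is the vtable dispatch offset in the call_virtual branch. With hypothetical 32-bit values (the real constants come from instanceKlass and vtableEntry and differ by platform and build):

    // Hypothetical constants for a 32-bit VM; illustrative only.
    constexpr int vtable_start_offset = 7;  // words from klass pointer to vtable
    constexpr int vtable_entry_size   = 1;  // words per entry (one methodOop each)
    constexpr int word_size           = 4;  // bytes per word
    constexpr int method_offset       = 0;  // byte offset of methodOop in an entry

    // Mirrors the two lines in do_Invoke:
    //   entry_offset  = vtable_start_offset() + vtable_index * vtableEntry::size()
    //   vtable_offset = entry_offset * wordSize + method_offset_in_bytes()
    constexpr int vtable_byte_offset(int vtable_index) {
      return (vtable_start_offset + vtable_index * vtable_entry_size) * word_size
             + method_offset;
    }

    static_assert(vtable_byte_offset(3) == 40, "index 3 is 40 bytes past the klass pointer");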
2478   }

