src/share/vm/c1/c1_LIRGenerator.cpp
Sdiff for 6930772

Old version:

2372   if (x->type() != voidType) {
2373     result_register = result_register_for(x->type());
2374   }
2375 
2376   CodeEmitInfo* info = state_for(x, x->state());
2377 
2378   // invokedynamics can deoptimize.
2379   CodeEmitInfo* deopt_info = x->is_invokedynamic() ? state_for(x, x->state_before()) : NULL;
2380 
2381   invoke_load_arguments(x, args, arg_list);
2382 
2383   if (x->has_receiver()) {
2384     args->at(0)->load_item_force(LIR_Assembler::receiverOpr());
2385     receiver = args->at(0)->result();
2386   }
2387 
2388   // emit invoke code
2389   bool optimized = x->target_is_loaded() && x->target_is_final();
2390   assert(receiver->is_illegal() || receiver->is_equal(LIR_Assembler::receiverOpr()), "must match");
2391 
2392   switch (x->code()) {
2393     case Bytecodes::_invokestatic:
2394       __ call_static(x->target(), result_register,
2395                      SharedRuntime::get_resolve_static_call_stub(),
2396                      arg_list, info);
2397       break;
2398     case Bytecodes::_invokespecial:
2399     case Bytecodes::_invokevirtual:
2400     case Bytecodes::_invokeinterface:
2401       // for final target we still produce an inline cache, in order
2402       // to be able to call mixed mode
2403       if (x->code() == Bytecodes::_invokespecial || optimized) {
2404         __ call_opt_virtual(x->target(), receiver, result_register,
2405                             SharedRuntime::get_resolve_opt_virtual_call_stub(),
2406                             arg_list, info);
2407       } else if (x->vtable_index() < 0) {
2408         __ call_icvirtual(x->target(), receiver, result_register,
2409                           SharedRuntime::get_resolve_virtual_call_stub(),
2410                           arg_list, info);
2411       } else {
2412         int entry_offset = instanceKlass::vtable_start_offset() + x->vtable_index() * vtableEntry::size();
2413         int vtable_offset = entry_offset * wordSize + vtableEntry::method_offset_in_bytes();
2414         __ call_virtual(x->target(), receiver, result_register, vtable_offset, arg_list, info);
2415       }
2416       break;
2417     case Bytecodes::_invokedynamic: {
2418       ciBytecodeStream bcs(x->scope()->method());
2419       bcs.force_bci(x->bci());
2420       assert(bcs.cur_bc() == Bytecodes::_invokedynamic, "wrong stream");
2421       ciCPCache* cpcache = bcs.get_cpcache();
2422 
2423       // Get CallSite offset from constant pool cache pointer.
2424       int index = bcs.get_method_index();
2425       size_t call_site_offset = cpcache->get_f1_offset(index);
2426 
2427       // If this invokedynamic call site hasn't been executed yet in
2428       // the interpreter, the CallSite object in the constant pool
2429       // cache is still null and we need to deoptimize.
2430       if (cpcache->is_f1_null_at(index)) {
2431         // Cannot re-use same xhandlers for multiple CodeEmitInfos, so
2432         // clone all handlers.  This is handled transparently in other
2433         // places by the CodeEmitInfo cloning logic but is handled
2434         // specially here because a stub isn't being used.
2435         x->set_exception_handlers(new XHandlers(x->exception_handlers()));
2436 
2437         DeoptimizeStub* deopt_stub = new DeoptimizeStub(deopt_info);
2438         __ jump(deopt_stub);
2439       }
2440 
2441       // Use the receiver register for the synthetic MethodHandle
2442       // argument.
2443       receiver = LIR_Assembler::receiverOpr();
2444       LIR_Opr tmp = new_register(objectType);
2445 
2446       // Load CallSite object from constant pool cache.
2447       __ oop2reg(cpcache->constant_encoding(), tmp);
2448       __ load(new LIR_Address(tmp, call_site_offset, T_OBJECT), tmp);
2449 
2450       // Load target MethodHandle from CallSite object.
2451       __ load(new LIR_Address(tmp, java_dyn_CallSite::target_offset_in_bytes(), T_OBJECT), receiver);
2452 
2453       __ call_dynamic(x->target(), receiver, result_register,
2454                       SharedRuntime::get_resolve_opt_virtual_call_stub(),
2455                       arg_list, info);
2456       break;
2457     }
2458     default:
2459       ShouldNotReachHere();
2460       break;
2461   }
2462 
2463   if (x->type()->is_float() || x->type()->is_double()) {
2464     // Force rounding of results from non-strictfp when in strictfp
2465     // scope (or when we don't know the strictness of the callee, to
2466     // be safe.)
2467     if (method()->is_strict()) {
2468       if (!x->target_is_loaded() || !x->target_is_strictfp()) {
2469         result_register = round_item(result_register);
2470       }
2471     }
2472   }
2473 
2474   if (result_register->is_valid()) {
2475     LIR_Opr result = rlock_result(x);
2476     __ move(result_register, result);
2477   }
2478 }
2479 
2480 
2481 void LIRGenerator::do_FPIntrinsics(Intrinsic* x) {
2482   assert(x->number_of_arguments() == 1, "wrong type");
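In the _invokedynamic arm above, is_f1_null_at() is consulted while the method is being compiled, not when the compiled code later runs: a call site the interpreter has never linked compiles to an unconditional jump to a DeoptimizeStub. A minimal standalone sketch of that decision follows; CPCacheEntry, Emitted and emit_invokedynamic are illustrative stand-ins, not HotSpot types.

#include <cstdio>

// Hypothetical stand-ins; the real types are ciCPCache and LIR_List.
struct CPCacheEntry { void* f1; };  // f1 slot: the linked CallSite, or null

enum class Emitted { DeoptJump, LoadTargetAndCall };

// Models the generator's choice. is_f1_null_at() is queried once, at
// compile time, so an unlinked site compiles to an unconditional jump to
// the deopt stub. (The real generator still emits the load/call sequence
// after the jump; it is simply unreachable.)
Emitted emit_invokedynamic(const CPCacheEntry& e) {
  if (e.f1 == nullptr) {
    // Never executed in the interpreter: there is no CallSite to call
    // through yet, so compiled code must fall back to the interpreter.
    return Emitted::DeoptJump;
  }
  // Linked: load the CallSite oop from the cp-cache, load CallSite.target
  // into the receiver register, then call through the MethodHandle.
  return Emitted::LoadTargetAndCall;
}

int main() {
  CPCacheEntry unlinked = { nullptr };
  std::printf("unlinked site -> %s\n",
              emit_invokedynamic(unlinked) == Emitted::DeoptJump
                  ? "deopt jump" : "load target and call");
  return 0;
}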

New version:

2372   if (x->type() != voidType) {
2373     result_register = result_register_for(x->type());
2374   }
2375 
2376   CodeEmitInfo* info = state_for(x, x->state());
2377 
2378   // invokedynamics can deoptimize.
2379   CodeEmitInfo* deopt_info = x->is_invokedynamic() ? state_for(x, x->state_before()) : NULL;
2380 
2381   invoke_load_arguments(x, args, arg_list);
2382 
2383   if (x->has_receiver()) {
2384     args->at(0)->load_item_force(LIR_Assembler::receiverOpr());
2385     receiver = args->at(0)->result();
2386   }
2387 
2388   // emit invoke code
2389   bool optimized = x->target_is_loaded() && x->target_is_final();
2390   assert(receiver->is_illegal() || receiver->is_equal(LIR_Assembler::receiverOpr()), "must match");
2391 
2392   // JSR 292
2393   // Preserve the SP over MethodHandle call sites.
2394   ciMethod* target = x->target();
2395   if (target->is_method_handle_invoke()) {
2396     info->set_is_method_handle_invoke(true);
2397     __ move(FrameMap::stack_pointer(), FrameMap::method_handle_invoke_SP_save_opr());
2398   }
2399 
2400   switch (x->code()) {
2401     case Bytecodes::_invokestatic:
2402       __ call_static(target, result_register,
2403                      SharedRuntime::get_resolve_static_call_stub(),
2404                      arg_list, info);
2405       break;
2406     case Bytecodes::_invokespecial:
2407     case Bytecodes::_invokevirtual:
2408     case Bytecodes::_invokeinterface:
2409       // for final target we still produce an inline cache, in order
2410       // to be able to call mixed mode
2411       if (x->code() == Bytecodes::_invokespecial || optimized) {
2412         __ call_opt_virtual(target, receiver, result_register,
2413                             SharedRuntime::get_resolve_opt_virtual_call_stub(),
2414                             arg_list, info);
2415       } else if (x->vtable_index() < 0) {
2416         __ call_icvirtual(target, receiver, result_register,
2417                           SharedRuntime::get_resolve_virtual_call_stub(),
2418                           arg_list, info);
2419       } else {
2420         int entry_offset = instanceKlass::vtable_start_offset() + x->vtable_index() * vtableEntry::size();
2421         int vtable_offset = entry_offset * wordSize + vtableEntry::method_offset_in_bytes();
2422         __ call_virtual(target, receiver, result_register, vtable_offset, arg_list, info);
2423       }
2424       break;
2425     case Bytecodes::_invokedynamic: {
2426       ciBytecodeStream bcs(x->scope()->method());
2427       bcs.force_bci(x->bci());
2428       assert(bcs.cur_bc() == Bytecodes::_invokedynamic, "wrong stream");
2429       ciCPCache* cpcache = bcs.get_cpcache();
2430 
2431       // Get CallSite offset from constant pool cache pointer.
2432       int index = bcs.get_method_index();
2433       size_t call_site_offset = cpcache->get_f1_offset(index);
2434 
2435       // If this invokedynamic call site hasn't been executed yet in
2436       // the interpreter, the CallSite object in the constant pool
2437       // cache is still null and we need to deoptimize.
2438       if (cpcache->is_f1_null_at(index)) {
2439         // Cannot re-use same xhandlers for multiple CodeEmitInfos, so
2440         // clone all handlers.  This is handled transparently in other
2441         // places by the CodeEmitInfo cloning logic but is handled
2442         // specially here because a stub isn't being used.
2443         x->set_exception_handlers(new XHandlers(x->exception_handlers()));
2444 
2445         DeoptimizeStub* deopt_stub = new DeoptimizeStub(deopt_info);
2446         __ jump(deopt_stub);
2447       }
2448 
2449       // Use the receiver register for the synthetic MethodHandle
2450       // argument.
2451       receiver = LIR_Assembler::receiverOpr();
2452       LIR_Opr tmp = new_register(objectType);
2453 
2454       // Load CallSite object from constant pool cache.
2455       __ oop2reg(cpcache->constant_encoding(), tmp);
2456       __ load(new LIR_Address(tmp, call_site_offset, T_OBJECT), tmp);
2457 
2458       // Load target MethodHandle from CallSite object.
2459       __ load(new LIR_Address(tmp, java_dyn_CallSite::target_offset_in_bytes(), T_OBJECT), receiver);
2460 
2461       __ call_dynamic(target, receiver, result_register,
2462                       SharedRuntime::get_resolve_opt_virtual_call_stub(),
2463                       arg_list, info);
2464       break;
2465     }
2466     default:
2467       ShouldNotReachHere();
2468       break;
2469   }
2470 
2471   // JSR 292
2472   // Restore the SP after MethodHandle call sites.
2473   if (target->is_method_handle_invoke()) {
2474     __ move(FrameMap::method_handle_invoke_SP_save_opr(), FrameMap::stack_pointer());
2475   }
2476 
2477   if (x->type()->is_float() || x->type()->is_double()) {
2478     // Force rounding of results from non-strictfp when in strictfp
2479     // scope (or when we don't know the strictness of the callee, to
2480     // be safe.)
2481     if (method()->is_strict()) {
2482       if (!x->target_is_loaded() || !x->target_is_strictfp()) {
2483         result_register = round_item(result_register);
2484       }
2485     }
2486   }
2487 
2488   if (result_register->is_valid()) {
2489     LIR_Opr result = rlock_result(x);
2490     __ move(result_register, result);
2491   }
2492 }
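The JSR 292 bracketing added at new lines 2392-2398 and 2471-2475 saves the stack pointer before a MethodHandle call site and restores it afterwards, and set_is_method_handle_invoke(true) records the fact on the call's debug info (presumably so stack walking knows to use the saved SP). A rough standalone model of the save/restore pairing, under the assumption that MethodHandle adapter code may move SP during the call; Frame and invoke_method_handle are illustrative, not FrameMap's real operands.

#include <cassert>

// Illustrative model only; the real operands are FrameMap::stack_pointer()
// and FrameMap::method_handle_invoke_SP_save_opr().
struct Frame {
  long sp;        // models the machine stack pointer
  long saved_sp;  // models the dedicated SP-save location
};

void invoke_method_handle(Frame& f, void (*mh_entry)(Frame&)) {
  f.saved_sp = f.sp;  // __ move(stack_pointer, SP_save) before the call
  mh_entry(f);        // adapter code may shift sp while threading the call
  f.sp = f.saved_sp;  // __ move(SP_save, stack_pointer) after the call
}

int main() {
  Frame f = { 1024, 0 };
  invoke_method_handle(f, [](Frame& g) { g.sp -= 64; });  // adapter moves sp
  assert(f.sp == 1024);  // the caller's sp is intact after the call
  return 0;
}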
2493 
2494 
2495 void LIRGenerator::do_FPIntrinsics(Intrinsic* x) {
2496   assert(x->number_of_arguments() == 1, "wrong type");
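In the _invokevirtual arm, when a vtable index is known the generator computes the byte offset of the vtable slot directly. A worked sketch of that arithmetic with assumed layout constants; the real values come from instanceKlass::vtable_start_offset(), vtableEntry::size() and vtableEntry::method_offset_in_bytes(), and vary by build and word size.

#include <cstdio>

int main() {
  // Assumed layout constants, for illustration only.
  const int vtable_start_offset    = 28;  // words from the klass base (assumed)
  const int vtable_entry_size      = 1;   // words per vtable entry (assumed)
  const int method_offset_in_bytes = 0;   // method slot within an entry (assumed)
  const int word_size              = 8;   // 64-bit word

  int vtable_index = 5;  // example index of the resolved target

  // Mirrors the two offset lines in the _invokevirtual arm above:
  int entry_offset  = vtable_start_offset + vtable_index * vtable_entry_size;
  int vtable_offset = entry_offset * word_size + method_offset_in_bytes;

  std::printf("vtable slot %d is at byte offset %d from the klass\n",
              vtable_index, vtable_offset);  // (28 + 5) * 8 + 0 = 264
  return 0;
}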