src/share/vm/c1/c1_LIRGenerator.cpp
Sdiff for bug 6930772

Old:

  if (x->type() != voidType) {
    result_register = result_register_for(x->type());
  }

  CodeEmitInfo* info = state_for(x, x->state());

  // invokedynamics can deoptimize.
  CodeEmitInfo* deopt_info = x->is_invokedynamic() ? state_for(x, x->state_before()) : NULL;

  invoke_load_arguments(x, args, arg_list);

  if (x->has_receiver()) {
    args->at(0)->load_item_force(LIR_Assembler::receiverOpr());
    receiver = args->at(0)->result();
  }

  // emit invoke code
  bool optimized = x->target_is_loaded() && x->target_is_final();
  assert(receiver->is_illegal() || receiver->is_equal(LIR_Assembler::receiverOpr()), "must match");

  switch (x->code()) {
    case Bytecodes::_invokestatic:
      __ call_static(x->target(), result_register,
                     SharedRuntime::get_resolve_static_call_stub(),
                     arg_list, info);
      break;
    case Bytecodes::_invokespecial:
    case Bytecodes::_invokevirtual:
    case Bytecodes::_invokeinterface:
      // for final target we still produce an inline cache, in order
      // to be able to call mixed mode
      if (x->code() == Bytecodes::_invokespecial || optimized) {
        __ call_opt_virtual(x->target(), receiver, result_register,
                            SharedRuntime::get_resolve_opt_virtual_call_stub(),
                            arg_list, info);
      } else if (x->vtable_index() < 0) {
        __ call_icvirtual(x->target(), receiver, result_register,
                          SharedRuntime::get_resolve_virtual_call_stub(),
                          arg_list, info);
      } else {
        int entry_offset = instanceKlass::vtable_start_offset() + x->vtable_index() * vtableEntry::size();
        int vtable_offset = entry_offset * wordSize + vtableEntry::method_offset_in_bytes();
        __ call_virtual(x->target(), receiver, result_register, vtable_offset, arg_list, info);
      }
      break;
    case Bytecodes::_invokedynamic: {
      ciBytecodeStream bcs(x->scope()->method());
      bcs.force_bci(x->bci());
      assert(bcs.cur_bc() == Bytecodes::_invokedynamic, "wrong stream");
      ciCPCache* cpcache = bcs.get_cpcache();

      // Get CallSite offset from constant pool cache pointer.
      int index = bcs.get_method_index();
      size_t call_site_offset = cpcache->get_f1_offset(index);

      // If this invokedynamic call site hasn't been executed yet in
      // the interpreter, the CallSite object in the constant pool
      // cache is still null and we need to deoptimize.
      if (cpcache->is_f1_null_at(index)) {
        // Cannot re-use same xhandlers for multiple CodeEmitInfos, so
        // clone all handlers.  This is handled transparently in other
        // places by the CodeEmitInfo cloning logic but is handled
        // specially here because a stub isn't being used.
        x->set_exception_handlers(new XHandlers(x->exception_handlers()));

        DeoptimizeStub* deopt_stub = new DeoptimizeStub(deopt_info);
        __ jump(deopt_stub);
      }

      // Use the receiver register for the synthetic MethodHandle
      // argument.
      receiver = LIR_Assembler::receiverOpr();
      LIR_Opr tmp = new_register(objectType);

      // Load CallSite object from constant pool cache.
      __ oop2reg(cpcache->constant_encoding(), tmp);
      __ load(new LIR_Address(tmp, call_site_offset, T_OBJECT), tmp);

      // Load target MethodHandle from CallSite object.
      __ load(new LIR_Address(tmp, java_dyn_CallSite::target_offset_in_bytes(), T_OBJECT), receiver);

      __ call_dynamic(x->target(), receiver, result_register,
                      SharedRuntime::get_resolve_opt_virtual_call_stub(),
                      arg_list, info);
      break;
    }
    default:
      ShouldNotReachHere();
      break;
  }

  if (x->type()->is_float() || x->type()->is_double()) {
    // Force rounding of results from non-strictfp when in strictfp
    // scope (or when we don't know the strictness of the callee, to
    // be safe.)
    if (method()->is_strict()) {
      if (!x->target_is_loaded() || !x->target_is_strictfp()) {
        result_register = round_item(result_register);
      }
    }
  }

  if (result_register->is_valid()) {
    LIR_Opr result = rlock_result(x);
    __ move(result_register, result);
  }
}


void LIRGenerator::do_FPIntrinsics(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");

New:

  if (x->type() != voidType) {
    result_register = result_register_for(x->type());
  }

  CodeEmitInfo* info = state_for(x, x->state());

  // invokedynamics can deoptimize.
  CodeEmitInfo* deopt_info = x->is_invokedynamic() ? state_for(x, x->state_before()) : NULL;

  invoke_load_arguments(x, args, arg_list);

  if (x->has_receiver()) {
    args->at(0)->load_item_force(LIR_Assembler::receiverOpr());
    receiver = args->at(0)->result();
  }

  // emit invoke code
  bool optimized = x->target_is_loaded() && x->target_is_final();
  assert(receiver->is_illegal() || receiver->is_equal(LIR_Assembler::receiverOpr()), "must match");

  // JSR 292
  // Preserve the SP over MethodHandle call sites.
  ciMethod* target = x->target();
  if (target->is_method_handle_invoke()) {
    info->set_is_method_handle_invoke(true);
    __ move(FrameMap::stack_pointer(), FrameMap::method_handle_invoke_SP_save_opr());
  }
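  // (Hedged editorial note, not part of the original change: MethodHandle
  //  adapters can adjust SP while adapting arguments, so the caller's SP
  //  is saved here and restored after the call sequence below.)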

  switch (x->code()) {
    case Bytecodes::_invokestatic:
      __ call_static(target, result_register,
                     SharedRuntime::get_resolve_static_call_stub(),
                     arg_list, info);
      break;
    case Bytecodes::_invokespecial:
    case Bytecodes::_invokevirtual:
    case Bytecodes::_invokeinterface:
      // for final target we still produce an inline cache, in order
      // to be able to call mixed mode
      if (x->code() == Bytecodes::_invokespecial || optimized) {
        __ call_opt_virtual(target, receiver, result_register,
                            SharedRuntime::get_resolve_opt_virtual_call_stub(),
                            arg_list, info);
      } else if (x->vtable_index() < 0) {
        __ call_icvirtual(target, receiver, result_register,
                          SharedRuntime::get_resolve_virtual_call_stub(),
                          arg_list, info);
      } else {
        int entry_offset = instanceKlass::vtable_start_offset() + x->vtable_index() * vtableEntry::size();
        int vtable_offset = entry_offset * wordSize + vtableEntry::method_offset_in_bytes();
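        // (Hedged note on units: vtable_start_offset() and
        //  vtableEntry::size() are word-scaled, so entry_offset is a
        //  word index; multiplying by wordSize converts it to bytes
        //  before adding the byte offset of the method field.)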
        __ call_virtual(target, receiver, result_register, vtable_offset, arg_list, info);
      }
      break;
    case Bytecodes::_invokedynamic: {
      ciBytecodeStream bcs(x->scope()->method());
      bcs.force_bci(x->bci());
      assert(bcs.cur_bc() == Bytecodes::_invokedynamic, "wrong stream");
      ciCPCache* cpcache = bcs.get_cpcache();

      // Get CallSite offset from constant pool cache pointer.
      int index = bcs.get_method_index();
      size_t call_site_offset = cpcache->get_f1_offset(index);
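      // (Hedged note: f1 is the first word of the constant pool cache
      //  entry; for invokedynamic it holds the CallSite oop once the
      //  site is linked, hence the null check below.)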

      // If this invokedynamic call site hasn't been executed yet in
      // the interpreter, the CallSite object in the constant pool
      // cache is still null and we need to deoptimize.
      if (cpcache->is_f1_null_at(index)) {
        // Cannot re-use same xhandlers for multiple CodeEmitInfos, so
        // clone all handlers.  This is handled transparently in other
        // places by the CodeEmitInfo cloning logic but is handled
        // specially here because a stub isn't being used.
        x->set_exception_handlers(new XHandlers(x->exception_handlers()));

        DeoptimizeStub* deopt_stub = new DeoptimizeStub(deopt_info);
        __ jump(deopt_stub);
      }

      // Use the receiver register for the synthetic MethodHandle
      // argument.
      receiver = LIR_Assembler::receiverOpr();
      LIR_Opr tmp = new_register(objectType);

      // Load CallSite object from constant pool cache.
      __ oop2reg(cpcache->constant_encoding(), tmp);
      __ load(new LIR_Address(tmp, call_site_offset, T_OBJECT), tmp);

      // Load target MethodHandle from CallSite object.
      __ load(new LIR_Address(tmp, java_dyn_CallSite::target_offset_in_bytes(), T_OBJECT), receiver);

      __ call_dynamic(target, receiver, result_register,
                      SharedRuntime::get_resolve_opt_virtual_call_stub(),
                      arg_list, info);
      break;
    }
    default:
      ShouldNotReachHere();
      break;
  }

  // JSR 292
  // Restore the SP after MethodHandle call sites.
  if (target->is_method_handle_invoke()) {
    __ move(FrameMap::method_handle_invoke_SP_save_opr(), FrameMap::stack_pointer());
  }

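  // (Hedged note: round_item() forces the value through memory so a
  //  result computed with excess precision, e.g. in x87 registers, is
  //  rounded back to its declared float/double width.)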
  if (x->type()->is_float() || x->type()->is_double()) {
    // Force rounding of results from non-strictfp when in strictfp
    // scope (or when we don't know the strictness of the callee, to
    // be safe.)
    if (method()->is_strict()) {
      if (!x->target_is_loaded() || !x->target_is_strictfp()) {
        result_register = round_item(result_register);
      }
    }
  }

  if (result_register->is_valid()) {
    LIR_Opr result = rlock_result(x);
    __ move(result_register, result);
  }
}


void LIRGenerator::do_FPIntrinsics(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");
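
For readers following the invokedynamic path above, here is a minimal standalone sketch of the two loads the generator emits: constant pool cache entry f1, then the CallSite's target MethodHandle. The struct layouts and names below are hypothetical stand-ins for the real oop layouts addressed via get_f1_offset() and java_dyn_CallSite::target_offset_in_bytes(); they are not HotSpot code.

#include <cstdio>

// Hypothetical stand-ins for the heap layouts the LIR above addresses.
struct MethodHandleOop { const char* name; };
struct CallSiteOop     { MethodHandleOop* target; };  // CallSite.target
struct CPCacheEntry    { CallSiteOop* f1; };          // null until linked

// Mirrors the emitted code: load f1 (the CallSite), then its target.
// A null f1 corresponds to the is_f1_null_at() deoptimization path.
MethodHandleOop* invokedynamic_target(CPCacheEntry* entry) {
  CallSiteOop* call_site = entry->f1;   // __ load(tmp + call_site_offset)
  if (call_site == NULL) return NULL;   // unlinked: real code jumps to DeoptimizeStub
  return call_site->target;             // __ load(tmp + target_offset_in_bytes)
}

int main() {
  MethodHandleOop mh = { "bootstrap-installed target" };
  CallSiteOop cs = { &mh };
  CPCacheEntry linked = { &cs };
  CPCacheEntry unlinked = { NULL };
  std::printf("linked:   %s\n", invokedynamic_target(&linked)->name);
  std::printf("unlinked: %s\n", invokedynamic_target(&unlinked) ? "?" : "deoptimize");
  return 0;
}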

