src/share/vm/c1/c1_LIRGenerator.cpp

Old version (before the change):

 903 }
 904 
 905 
 906 LIR_Opr LIRGenerator::round_item(LIR_Opr opr) {
 907   assert(opr->is_register(), "why spill if item is not register?");
 908 
 909   if (RoundFPResults && UseSSE < 1 && opr->is_single_fpu()) {
 910     LIR_Opr result = new_register(T_FLOAT);
 911     set_vreg_flag(result, must_start_in_memory);
 912     assert(opr->is_register(), "only a register can be spilled");
 913     assert(opr->value_type()->is_float(), "rounding only for floats available");
 914     __ roundfp(opr, LIR_OprFact::illegalOpr, result);
 915     return result;
 916   }
 917   return opr;
 918 }
 919 
 920 
 921 LIR_Opr LIRGenerator::force_to_spill(LIR_Opr value, BasicType t) {
 922   assert(type2size[t] == type2size[value->type()],
 923          err_msg_res("size mismatch: t=%s, value->type()=%s", type2name(t), type2name(value->type())));
 924   if (!value->is_register()) {
 925     // force into a register
 926     LIR_Opr r = new_register(value->type());
 927     __ move(value, r);
 928     value = r;
 929   }
 930 
 931   // create a spill location
 932   LIR_Opr tmp = new_register(t);
 933   set_vreg_flag(tmp, LIRGenerator::must_start_in_memory);
 934 
 935   // move from register to spill
 936   __ move(value, tmp);
 937   return tmp;
 938 }
 939 
 940 void LIRGenerator::profile_branch(If* if_instr, If::Condition cond) {
 941   if (if_instr->should_profile()) {
 942     ciMethod* method = if_instr->profiled_method();
 943     assert(method != NULL, "method should be set if branch is profiled");


2812     increment_invocation_counter(info);
2813   }
2814 
2815   // all blocks with a successor must end with an unconditional jump
2816   // to the successor even if they are consecutive
2817   __ jump(x->default_sux());
2818 }
2819 
2820 
2821 void LIRGenerator::do_OsrEntry(OsrEntry* x) {
2822   // construct our frame and model the production of incoming pointer
2823   // to the OSR buffer.
2824   __ osr_entry(LIR_Assembler::osrBufferPointer());
2825   LIR_Opr result = rlock_result(x);
2826   __ move(LIR_Assembler::osrBufferPointer(), result);
2827 }
2828 
2829 
2830 void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) {
2831   assert(args->length() == arg_list->length(),
2832          err_msg_res("args=%d, arg_list=%d", args->length(), arg_list->length()));
2833   for (int i = x->has_receiver() ? 1 : 0; i < args->length(); i++) {
2834     LIRItem* param = args->at(i);
2835     LIR_Opr loc = arg_list->at(i);
2836     if (loc->is_register()) {
2837       param->load_item_force(loc);
2838     } else {
2839       LIR_Address* addr = loc->as_address_ptr();
2840       param->load_for_store(addr->type());
2841       if (addr->type() == T_OBJECT) {
2842         __ move_wide(param->result(), addr);
2843       } else
2844         if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
2845           __ unaligned_move(param->result(), addr);
2846         } else {
2847           __ move(param->result(), addr);
2848         }
2849     }
2850   }
2851 
2852   if (x->has_receiver()) {


2956         __ call_opt_virtual(target, receiver, result_register,
2957                             SharedRuntime::get_resolve_opt_virtual_call_stub(),
2958                             arg_list, info);
2959       } else if (x->vtable_index() < 0) {
2960         __ call_icvirtual(target, receiver, result_register,
2961                           SharedRuntime::get_resolve_virtual_call_stub(),
2962                           arg_list, info);
2963       } else {
2964         int entry_offset = InstanceKlass::vtable_start_offset() + x->vtable_index() * vtableEntry::size();
2965         int vtable_offset = entry_offset * wordSize + vtableEntry::method_offset_in_bytes();
2966         __ call_virtual(target, receiver, result_register, vtable_offset, arg_list, info);
2967       }
2968       break;
2969     case Bytecodes::_invokedynamic: {
2970       __ call_dynamic(target, receiver, result_register,
2971                       SharedRuntime::get_resolve_static_call_stub(),
2972                       arg_list, info);
2973       break;
2974     }
2975     default:
2976       fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(x->code())));
2977       break;
2978   }
2979 
2980   // JSR 292
2981   // Restore the SP after MethodHandle call sites, if needed.
2982   if (is_method_handle_invoke
2983       && FrameMap::method_handle_invoke_SP_save_opr() != LIR_OprFact::illegalOpr) {
2984     __ move(FrameMap::method_handle_invoke_SP_save_opr(), FrameMap::stack_pointer());
2985   }
2986 
2987   if (x->type()->is_float() || x->type()->is_double()) {
2988     // Force rounding of results from non-strictfp when in strictfp
2989     // scope (or when we don't know the strictness of the callee, to
2990     // be safe.)
2991     if (method()->is_strict()) {
2992       if (!x->target_is_loaded() || !x->target_is_strictfp()) {
2993         result_register = round_item(result_register);
2994       }
2995     }
2996   }


New version (after the change; the err_msg_res and err_msg wrappers are dropped in favor of printf-style assert and fatal):

 903 }
 904 
 905 
 906 LIR_Opr LIRGenerator::round_item(LIR_Opr opr) {
 907   assert(opr->is_register(), "why spill if item is not register?");
 908 
 909   if (RoundFPResults && UseSSE < 1 && opr->is_single_fpu()) {
 910     LIR_Opr result = new_register(T_FLOAT);
 911     set_vreg_flag(result, must_start_in_memory);
 912     assert(opr->is_register(), "only a register can be spilled");
 913     assert(opr->value_type()->is_float(), "rounding only for floats available");
 914     __ roundfp(opr, LIR_OprFact::illegalOpr, result);
 915     return result;
 916   }
 917   return opr;
 918 }
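
A note on what this models: with x87 floating point (the UseSSE < 1 case), intermediate results are held in 80-bit registers, and the only way to obtain a correctly rounded 32-bit value is to force a store to memory; that is why the result vreg is flagged must_start_in_memory. A minimal standalone sketch of the same narrowing, in plain C++ rather than LIR (mul_rounded is a hypothetical name):

    // Illustrative only: assumes x87 evaluation (UseSSE < 1), where a * b is
    // computed in 80-bit extended precision. Forcing the product through a
    // 32-bit memory slot rounds it to IEEE single precision -- the same
    // narrowing that roundfp plus must_start_in_memory arranges for the result.
    float mul_rounded(float a, float b) {
      volatile float slot = a * b;  // forced store narrows the 80-bit result
      return slot;                  // reload yields the correctly rounded float
    }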
 919 
 920 
 921 LIR_Opr LIRGenerator::force_to_spill(LIR_Opr value, BasicType t) {
 922   assert(type2size[t] == type2size[value->type()],
 923          "size mismatch: t=%s, value->type()=%s", type2name(t), type2name(value->type()));
 924   if (!value->is_register()) {
 925     // force into a register
 926     LIR_Opr r = new_register(value->type());
 927     __ move(value, r);
 928     value = r;
 929   }
 930 
 931   // create a spill location
 932   LIR_Opr tmp = new_register(t);
 933   set_vreg_flag(tmp, LIRGenerator::must_start_in_memory);
 934 
 935   // move from register to spill
 936   __ move(value, tmp);
 937   return tmp;
 938 }
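
This hunk shows the actual change in this webrev: the err_msg_res wrapper is dropped because assert accepts printf-style format arguments directly; the same pattern appears at lines 2831-2832 and (for fatal/err_msg) at line 2976. Side by side:

    // Before: the message is assembled through the err_msg_res helper.
    assert(type2size[t] == type2size[value->type()],
           err_msg_res("size mismatch: t=%s, value->type()=%s",
                       type2name(t), type2name(value->type())));

    // After: the format string and its arguments go straight to assert.
    assert(type2size[t] == type2size[value->type()],
           "size mismatch: t=%s, value->type()=%s",
           type2name(t), type2name(value->type()));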
 939 
 940 void LIRGenerator::profile_branch(If* if_instr, If::Condition cond) {
 941   if (if_instr->should_profile()) {
 942     ciMethod* method = if_instr->profiled_method();
 943     assert(method != NULL, "method should be set if branch is profiled");


2812     increment_invocation_counter(info);
2813   }
2814 
2815   // all blocks with a successor must end with an unconditional jump
2816   // to the successor even if they are consecutive
2817   __ jump(x->default_sux());
2818 }
2819 
2820 
2821 void LIRGenerator::do_OsrEntry(OsrEntry* x) {
2822   // construct our frame and model the production of incoming pointer
2823   // to the OSR buffer.
2824   __ osr_entry(LIR_Assembler::osrBufferPointer());
2825   LIR_Opr result = rlock_result(x);
2826   __ move(LIR_Assembler::osrBufferPointer(), result);
2827 }
2828 
2829 
2830 void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) {
2831   assert(args->length() == arg_list->length(),
2832          "args=%d, arg_list=%d", args->length(), arg_list->length());
2833   for (int i = x->has_receiver() ? 1 : 0; i < args->length(); i++) {
2834     LIRItem* param = args->at(i);
2835     LIR_Opr loc = arg_list->at(i);
2836     if (loc->is_register()) {
2837       param->load_item_force(loc);
2838     } else {
2839       LIR_Address* addr = loc->as_address_ptr();
2840       param->load_for_store(addr->type());
2841       if (addr->type() == T_OBJECT) {
2842         __ move_wide(param->result(), addr);
2843       } else
2844         if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
2845           __ unaligned_move(param->result(), addr);
2846         } else {
2847           __ move(param->result(), addr);
2848         }
2849     }
2850   }
2851 
2852   if (x->has_receiver()) {
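
The loop above matches each outgoing argument with the location the calling convention assigned it: register locations are force-loaded into that exact register, stack locations get a store (move_wide for oops, unaligned_move for 64-bit values, plain move otherwise). When the call has a receiver it sits at index 0 and is skipped here; it is loaded separately starting at line 2852. A toy model of the register-vs-stack dispatch, with all names hypothetical rather than HotSpot API:

    #include <cstdio>

    // A location is either a register number or a stack offset.
    struct Loc { bool is_register; int where; };

    void load_argument(int value, const Loc& loc) {
      if (loc.is_register) {
        std::printf("load %d into r%d\n", value, loc.where);     // cf. param->load_item_force(loc)
      } else {
        std::printf("store %d to [sp+%d]\n", value, loc.where);  // cf. __ move(param->result(), addr)
      }
    }

    int main() {
      const Loc locs[] = { {true, 0}, {true, 1}, {false, 16} };  // two register args, one stack arg
      for (int i = 0; i < 3; i++) load_argument(i * 10, locs[i]);
      return 0;
    }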


2956         __ call_opt_virtual(target, receiver, result_register,
2957                             SharedRuntime::get_resolve_opt_virtual_call_stub(),
2958                             arg_list, info);
2959       } else if (x->vtable_index() < 0) {
2960         __ call_icvirtual(target, receiver, result_register,
2961                           SharedRuntime::get_resolve_virtual_call_stub(),
2962                           arg_list, info);
2963       } else {
2964         int entry_offset = InstanceKlass::vtable_start_offset() + x->vtable_index() * vtableEntry::size();
2965         int vtable_offset = entry_offset * wordSize + vtableEntry::method_offset_in_bytes();
2966         __ call_virtual(target, receiver, result_register, vtable_offset, arg_list, info);
2967       }
2968       break;
2969     case Bytecodes::_invokedynamic: {
2970       __ call_dynamic(target, receiver, result_register,
2971                       SharedRuntime::get_resolve_static_call_stub(),
2972                       arg_list, info);
2973       break;
2974     }
2975     default:
2976       fatal("unexpected bytecode: %s", Bytecodes::name(x->code()));
2977       break;
2978   }
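
For the vtable branch (lines 2964-2965), the slot offset is computed in words and then converted to bytes. A worked sketch with hypothetical constants (the real values are platform- and version-dependent):

    // All numbers illustrative, not the actual HotSpot constants.
    int vtable_slot_offset(int vtable_index) {
      const int vtable_start_words = 14; // InstanceKlass::vtable_start_offset(), in words
      const int entry_size_words   = 1;  // vtableEntry::size(), in words
      const int word_size          = 8;  // wordSize on a 64-bit VM
      const int method_off_bytes   = 0;  // vtableEntry::method_offset_in_bytes()

      int entry_offset = vtable_start_words + vtable_index * entry_size_words;
      return entry_offset * word_size + method_off_bytes; // index 3 -> 17 words -> 136 bytes
    }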
2979 
2980   // JSR 292
2981   // Restore the SP after MethodHandle call sites, if needed.
2982   if (is_method_handle_invoke
2983       && FrameMap::method_handle_invoke_SP_save_opr() != LIR_OprFact::illegalOpr) {
2984     __ move(FrameMap::method_handle_invoke_SP_save_opr(), FrameMap::stack_pointer());
2985   }
2986 
2987   if (x->type()->is_float() || x->type()->is_double()) {
2988     // Force rounding of results from non-strictfp when in strictfp
2989     // scope (or when we don't know the strictness of the callee, to
2990     // be safe.)
2991     if (method()->is_strict()) {
2992       if (!x->target_is_loaded() || !x->target_is_strictfp()) {
2993         result_register = round_item(result_register);
2994       }
2995     }
2996   }
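
Restating the rounding condition above as a predicate: a strictfp caller must round a float or double result unless the callee is both loaded and itself strict, because only then is the result guaranteed to be narrowed already. A minimal restatement (needs_result_rounding is a hypothetical helper, not HotSpot code):

    // Mirrors the nesting at lines 2991-2993: round when the caller is strict
    // and the callee is either not yet loaded (strictness unknown) or known
    // to be non-strict.
    bool needs_result_rounding(bool caller_strict, bool callee_loaded, bool callee_strict) {
      return caller_strict && (!callee_loaded || !callee_strict);
    }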

