
src/hotspot/share/c1/c1_LIRGenerator.cpp


*** 2999,3031 ****
    __ osr_entry(LIR_Assembler::osrBufferPointer());
    LIR_Opr result = rlock_result(x);
    __ move(LIR_Assembler::osrBufferPointer(), result);
  }
  
! 
! void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) {
!   assert(args->length() == arg_list->length(),
!          "args=%d, arg_list=%d", args->length(), arg_list->length());
!   for (int i = x->has_receiver() ? 1 : 0; i < args->length(); i++) {
!     LIRItem* param = args->at(i);
!     LIR_Opr loc = arg_list->at(i);
      if (loc->is_register()) {
        param->load_item_force(loc);
      } else {
        LIR_Address* addr = loc->as_address_ptr();
        param->load_for_store(addr->type());
        assert(addr->type() != T_VALUETYPE, "not supported yet");
        if (addr->type() == T_OBJECT) {
          __ move_wide(param->result(), addr);
!       } else
          if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
            __ unaligned_move(param->result(), addr);
          } else {
            __ move(param->result(), addr);
          }
      }
    }
  
    if (x->has_receiver()) {
      LIRItem* receiver = args->at(0);
      LIR_Opr loc = arg_list->at(0);
      if (loc->is_register()) {
--- 2999,3035 ----
    __ osr_entry(LIR_Assembler::osrBufferPointer());
    LIR_Opr result = rlock_result(x);
    __ move(LIR_Assembler::osrBufferPointer(), result);
  }
  
! void LIRGenerator::invoke_load_one_argument(LIRItem* param, LIR_Opr loc) {
    if (loc->is_register()) {
      param->load_item_force(loc);
    } else {
      LIR_Address* addr = loc->as_address_ptr();
      param->load_for_store(addr->type());
      assert(addr->type() != T_VALUETYPE, "not supported yet");
      if (addr->type() == T_OBJECT) {
        __ move_wide(param->result(), addr);
!     } else {
        if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
          __ unaligned_move(param->result(), addr);
        } else {
          __ move(param->result(), addr);
        }
      }
    }
+ }
+ 
+ void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) {
+   assert(args->length() == arg_list->length(),
+          "args=%d, arg_list=%d", args->length(), arg_list->length());
+   for (int i = x->has_receiver() ? 1 : 0; i < args->length(); i++) {
+     LIRItem* param = args->at(i);
+     LIR_Opr loc = arg_list->at(i);
+     invoke_load_one_argument(param, loc);
+   }
  
    if (x->has_receiver()) {
      LIRItem* receiver = args->at(0);
      LIR_Opr loc = arg_list->at(0);
      if (loc->is_register()) {
*** 3201,3224 ****
  #endif
  
    LIRItem left(x->x(), this);
    LIRItem right(x->y(), this);
    left.load_item();
!   if (can_inline_as_constant(right.value())) {
      right.dont_load_item();
    } else {
      right.load_item();
    }
  
    LIRItem t_val(x->tval(), this);
    LIRItem f_val(x->fval(), this);
    t_val.dont_load_item();
    f_val.dont_load_item();
-   LIR_Opr reg = rlock_result(x);
  
    __ cmp(lir_cond(x->cond()), left.result(), right.result());
    __ cmove(lir_cond(x->cond()), t_val.result(), f_val.result(), reg, as_BasicType(x->x()->type()));
  }
  
  #ifdef JFR_HAVE_INTRINSICS
  void LIRGenerator::do_ClassIDIntrinsic(Intrinsic* x) {
    CodeEmitInfo* info = state_for(x);
--- 3205,3353 ----
  #endif
  
    LIRItem left(x->x(), this);
    LIRItem right(x->y(), this);
    left.load_item();
!   if (can_inline_as_constant(right.value()) && !x->substituability_check()) {
      right.dont_load_item();
    } else {
+     // substituability_check() needs to use right as a base register.
      right.load_item();
    }
  
    LIRItem t_val(x->tval(), this);
    LIRItem f_val(x->fval(), this);
    t_val.dont_load_item();
    f_val.dont_load_item();
+   if (x->substituability_check()) {
+     substituability_check(x, left, right, t_val, f_val);
+   } else {
+     LIR_Opr reg = rlock_result(x);
  
      __ cmp(lir_cond(x->cond()), left.result(), right.result());
      __ cmove(lir_cond(x->cond()), t_val.result(), f_val.result(), reg, as_BasicType(x->x()->type()));
+   }
+ }
+ 
+ void LIRGenerator::substituability_check(IfOp* x, LIRItem& left, LIRItem& right, LIRItem& t_val, LIRItem& f_val) {
+   assert(x->cond() == If::eql || x->cond() == If::neq, "must be");
+   bool is_acmpeq = (x->cond() == If::eql);
+   LIR_Opr reg = rlock_result(x);
+   LIR_Opr equal_result = is_acmpeq ? t_val.result() : f_val.result();
+   LIR_Opr not_equal_result = is_acmpeq ? f_val.result() : t_val.result();
+ 
+   LabelObj* L_oops_equal = new LabelObj();
+   LabelObj* L_oops_not_equal = new LabelObj();
+   LabelObj* L_do_subst_check = new LabelObj();
+   LabelObj* L_end = new LabelObj();
+ 
+   __ cmp(lir_cond_equal, left.result(), right.result());
+   __ branch(lir_cond_equal, T_ILLEGAL, L_oops_equal->label());
+ 
+   // The two operands are not the same reference. Do a more costly substitutability check.
+ 
+   ciKlass* left_klass = x->x()->as_loaded_klass_or_null();
+   ciKlass* right_klass = x->y()->as_loaded_klass_or_null();
+ 
+   // (1) Null check -- if one of the operands is null, the other must not be null (because
+   //     the two references are not equal), so they are not substitutable.
+   //     FIXME: do null check only if the operand is nullable
+   {
+     __ cmp(lir_cond_equal, left.result(), LIR_OprFact::oopConst(NULL));
+     __ branch(lir_cond_equal, T_ILLEGAL, L_oops_not_equal->label());
+ 
+     __ cmp(lir_cond_equal, right.result(), LIR_OprFact::oopConst(NULL));
+     __ branch(lir_cond_equal, T_ILLEGAL, L_oops_not_equal->label());
+   }
+ 
+   // (2) Value object check -- if either of the operands is not a value object,
+   //     they are not substitutable. We do this only if we are not sure that the
+   //     operands are value objects.
+   if ((left_klass == NULL || right_klass == NULL) || // The klass is still unloaded, or came from a Phi node.
+       !left_klass->is_valuetype() || !right_klass->is_valuetype()) {
+     // FIXME: on x64, this can be optimized as:
+     //   mov $0x405, %r10d
+     //   and (%left), %r10d   /* if need to check left */
+     //   and (%right), %r10d  /* if need to check right */
+     //   cmp $0x405, %r10d
+     //   jne L_oops_not_equal
+ 
+     LIR_Opr mark = new_register(T_LONG);
+     LIR_Opr always_locked_pattern = new_register(T_LONG);
+     __ move(LIR_OprFact::longConst(markOopDesc::always_locked_pattern), always_locked_pattern);
+ 
+     if (left_klass == NULL || !left_klass->is_valuetype()) {
+       __ move(new LIR_Address(left.result(), oopDesc::mark_offset_in_bytes(), T_LONG), mark);
+       __ logical_and(mark, always_locked_pattern, mark);
+       __ cmp(lir_cond_notEqual, mark, always_locked_pattern);
+       __ branch(lir_cond_notEqual, T_ILLEGAL, L_oops_not_equal->label());
+     }
+ 
+     if (right_klass == NULL || !right_klass->is_valuetype()) {
+       __ move(new LIR_Address(right.result(), oopDesc::mark_offset_in_bytes(), T_LONG), mark);
+       __ logical_and(mark, always_locked_pattern, mark);
+       __ cmp(lir_cond_notEqual, mark, always_locked_pattern);
+       __ branch(lir_cond_notEqual, T_ILLEGAL, L_oops_not_equal->label());
+     }
+   }
+ 
+   // (3) Same klass check: if the operands are of different klasses, they are not substitutable.
+   if (left_klass != NULL && left_klass->is_valuetype() && left_klass == right_klass) {
+     // No need to check -- they are known to be the same value klass.
+     __ branch(lir_cond_always, T_ILLEGAL, L_do_subst_check->label());
+   } else {
+     BasicType t_klass = UseCompressedOops ? T_INT : T_METADATA;
+     BasicType t_addr = UseCompressedOops ? T_INT : T_ADDRESS;
+ 
+     LIR_Opr left_klass = new_register(t_klass);
+     LIR_Opr right_klass = new_register(t_klass);
+     __ move(new LIR_Address(left.result(), oopDesc::klass_offset_in_bytes(), t_addr), left_klass);
+     __ move(new LIR_Address(right.result(), oopDesc::klass_offset_in_bytes(), t_addr), right_klass);
+     __ cmp(lir_cond_equal, left_klass, right_klass);
+     __ branch(lir_cond_equal, T_ILLEGAL, L_do_subst_check->label());
+ 
+     // fall through to L_oops_not_equal
+   }
+ 
+   __ branch_destination(L_oops_not_equal->label());
+   __ move(not_equal_result, reg);
+   __ branch(lir_cond_always, T_ILLEGAL, L_end->label());
+ 
+ 
+   // FIXME -- for simple case (no non-flattened value fields), do a per-field comparison
+ 
+   __ branch_destination(L_do_subst_check->label());
+   {
+     // Call into ValueBootstrapMethods::isSubstitutable()
+     ciMethod* subst_method = ciEnv::current()->ValueBootstrapMethods_klass()->find_method(ciSymbol::isSubstitutable_name(), ciSymbol::object_object_boolean_signature());
+     assert(method() != subst_method, "cannot recurse!");
+ 
+     const LIR_Opr result = result_register_for(x->type());
+     CodeEmitInfo* info = state_for(x, x->state_before());
+     BasicTypeList signature(2);
+     signature.append(T_OBJECT);
+     signature.append(T_OBJECT);
+ 
+     CallingConvention* cc = frame_map()->java_calling_convention(&signature, true);
+     LIR_OprList* arg_list = cc->args();
+ 
+     left.set_destroys_register();
+     right.set_destroys_register();
+     invoke_load_one_argument(&left, arg_list->at(0));
+     invoke_load_one_argument(&right, arg_list->at(1));
+     __ call_static(subst_method, result,
+                    SharedRuntime::get_resolve_static_call_stub(),
+                    arg_list, info);
+     __ cmp(lir_cond_notEqual, result, LIR_OprFact::intConst(1));
+   }
+   __ move(not_equal_result, reg);
+   __ branch(lir_cond_notEqual, T_ILLEGAL, L_end->label());
+ 
+   __ branch_destination(L_oops_equal->label());
+   __ move(equal_result, reg);
+ 
+   __ branch_destination(L_end->label());
  }
  
  #ifdef JFR_HAVE_INTRINSICS
  void LIRGenerator::do_ClassIDIntrinsic(Intrinsic* x) {
    CodeEmitInfo* info = state_for(x);
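A minimal C++ sketch of the decision ladder that the LIR emitted by the new substituability_check() walks for an acmp on potential value objects; it is not part of the webrev. The helpers is_value_object(), same_klass() and is_substitutable_slow() are hypothetical stand-ins for the mark-word test against markOopDesc::always_locked_pattern, the klass-pointer compare, and the call into ValueBootstrapMethods::isSubstitutable().

    #include <cstddef>

    // Hypothetical stand-ins for the checks that the hunk above emits as LIR.
    bool is_value_object(const void* obj);                     // mark-word test
    bool same_klass(const void* a, const void* b);             // klass-pointer compare
    bool is_substitutable_slow(const void* a, const void* b);  // ValueBootstrapMethods::isSubstitutable()

    // Decision ladder mirrored by the generated code: acmpeq selects equal_result
    // when this returns true, acmpne selects it when this returns false.
    bool acmp_substitutable(const void* left, const void* right) {
      if (left == right)                  return true;   // same reference -> L_oops_equal
      if (left == NULL || right == NULL)  return false;  // (1) exactly one operand is null
      if (!is_value_object(left) ||
          !is_value_object(right))        return false;  // (2) not a value object
      if (!same_klass(left, right))       return false;  // (3) different klasses
      return is_substitutable_slow(left, right);          // slow path: per-field comparison
    }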