src/cpu/x86/vm/c1_LIRAssembler_x86.cpp


*** 329,349 ****
  // inline cache check; done before the frame is built.
  int LIR_Assembler::check_icache() {
    Register receiver = FrameMap::receiver_opr->as_register();
    Register ic_klass = IC_Klass;
    const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9);
! 
!   if (!VerifyOops) {
      // insert some nops so that the verified entry point is aligned on CodeEntryAlignment
      while ((__ offset() + ic_cmp_size) % CodeEntryAlignment != 0) {
        __ nop();
      }
    }
    int offset = __ offset();
    __ inline_cache_check(receiver, IC_Klass);
!   assert(__ offset() % CodeEntryAlignment == 0 || VerifyOops, "alignment must be correct");
!   if (VerifyOops) {
      // force alignment after the cache check.
      // It's been verified to be aligned if !VerifyOops
      __ align(CodeEntryAlignment);
    }
    return offset;
--- 329,349 ----
  // inline cache check; done before the frame is built.
  int LIR_Assembler::check_icache() {
    Register receiver = FrameMap::receiver_opr->as_register();
    Register ic_klass = IC_Klass;
    const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9);
!   const bool do_post_padding = VerifyOops || UseCompressedOops;
!   if (!do_post_padding) {
      // insert some nops so that the verified entry point is aligned on CodeEntryAlignment
      while ((__ offset() + ic_cmp_size) % CodeEntryAlignment != 0) {
        __ nop();
      }
    }
    int offset = __ offset();
    __ inline_cache_check(receiver, IC_Klass);
!   assert(__ offset() % CodeEntryAlignment == 0 || do_post_padding, "alignment must be correct");
!   if (do_post_padding) {
      // force alignment after the cache check.
      // It's been verified to be aligned if !VerifyOops
      __ align(CodeEntryAlignment);
    }
    return offset;

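Note on the hunk above: without post-padding, nops are emitted before the inline cache check so that the code following the fixed-size compare sequence starts on a CodeEntryAlignment boundary; with VerifyOops or UseCompressedOops the check is presumably no longer exactly ic_cmp_size bytes, so alignment is instead forced after the check. A minimal standalone sketch of the pre-padding arithmetic (illustrative C++ only, not HotSpot code; the one-byte nop and the example values are assumptions):

    #include <cstdio>

    // How many one-byte nops must precede a compare sequence of
    // 'ic_cmp_size' bytes, starting at 'offset', so that the code
    // emitted after it begins on an 'alignment'-byte boundary.
    static int pre_padding(int offset, int ic_cmp_size, int alignment) {
      int nops = 0;
      while ((offset + nops + ic_cmp_size) % alignment != 0) {
        ++nops;
      }
      return nops;
    }

    int main() {
      // e.g. current offset 3, the 10-byte LP64 compare, 32-byte alignment
      std::printf("%d\n", pre_padding(3, 10, 32));  // prints 19: 3 + 19 + 10 = 32
      return 0;
    }
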
*** 545,562 ****
  void LIR_Assembler::emit_string_compare(LIR_Opr arg0, LIR_Opr arg1, LIR_Opr dst, CodeEmitInfo* info) {
    __ movptr (rbx, rcx); // receiver is in rcx
    __ movptr (rax, arg1->as_register());
  
    // Get addresses of first characters from both Strings
!   __ movptr (rsi, Address(rax, java_lang_String::value_offset_in_bytes()));
    __ movptr (rcx, Address(rax, java_lang_String::offset_offset_in_bytes()));
    __ lea    (rsi, Address(rsi, rcx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
  
    // rbx, may be NULL
    add_debug_info_for_null_check_here(info);
!   __ movptr (rdi, Address(rbx, java_lang_String::value_offset_in_bytes()));
    __ movptr (rcx, Address(rbx, java_lang_String::offset_offset_in_bytes()));
    __ lea    (rdi, Address(rdi, rcx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
  
    // compute minimum length (in rax) and difference of lengths (on top of stack)
    if (VM_Version::supports_cmov()) {
--- 545,562 ----
  void LIR_Assembler::emit_string_compare(LIR_Opr arg0, LIR_Opr arg1, LIR_Opr dst, CodeEmitInfo* info) {
    __ movptr (rbx, rcx); // receiver is in rcx
    __ movptr (rax, arg1->as_register());
  
    // Get addresses of first characters from both Strings
!   __ load_heap_oop(rsi, Address(rax, java_lang_String::value_offset_in_bytes()));
    __ movptr (rcx, Address(rax, java_lang_String::offset_offset_in_bytes()));
    __ lea    (rsi, Address(rsi, rcx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
  
    // rbx, may be NULL
    add_debug_info_for_null_check_here(info);
!   __ load_heap_oop(rdi, Address(rbx, java_lang_String::value_offset_in_bytes()));
    __ movptr (rcx, Address(rbx, java_lang_String::offset_offset_in_bytes()));
    __ lea    (rdi, Address(rdi, rcx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
  
    // compute minimum length (in rax) and difference of lengths (on top of stack)
    if (VM_Version::supports_cmov()) {

*** 682,698 ****
    assert(src->is_constant(), "should not call otherwise");
    assert(dest->is_register(), "should not call otherwise");
    LIR_Const* c = src->as_constant_ptr();
  
    switch (c->type()) {
!     case T_INT:
!     case T_ADDRESS: {
        assert(patch_code == lir_patch_none, "no patching handled here");
        __ movl(dest->as_register(), c->as_jint());
        break;
      }
  
      case T_LONG: {
        assert(patch_code == lir_patch_none, "no patching handled here");
  #ifdef _LP64
        __ movptr(dest->as_register_lo(), (intptr_t)c->as_jlong());
  #else
--- 682,703 ----
    assert(src->is_constant(), "should not call otherwise");
    assert(dest->is_register(), "should not call otherwise");
    LIR_Const* c = src->as_constant_ptr();
  
    switch (c->type()) {
!     case T_INT: {
        assert(patch_code == lir_patch_none, "no patching handled here");
        __ movl(dest->as_register(), c->as_jint());
        break;
      }
  
+     case T_ADDRESS: {
+       assert(patch_code == lir_patch_none, "no patching handled here");
+       __ movptr(dest->as_register(), c->as_jint());
+       break;
+     }
+ 
      case T_LONG: {
        assert(patch_code == lir_patch_none, "no patching handled here");
  #ifdef _LP64
        __ movptr(dest->as_register_lo(), (intptr_t)c->as_jlong());
  #else

*** 766,779 ****
    LIR_Const* c = src->as_constant_ptr();
  
    switch (c->type()) {
      case T_INT:  // fall through
      case T_FLOAT:
-     case T_ADDRESS:
        __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
        break;
  
      case T_OBJECT:
        __ movoop(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jobject());
        break;
  
      case T_LONG:  // fall through
--- 771,787 ----
    LIR_Const* c = src->as_constant_ptr();
  
    switch (c->type()) {
      case T_INT:  // fall through
      case T_FLOAT:
        __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
        break;
  
+     case T_ADDRESS:
+       __ movptr(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
+       break;
+ 
      case T_OBJECT:
        __ movoop(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jobject());
        break;
  
      case T_LONG:  // fall through

*** 792,828 ****
      default:
        ShouldNotReachHere();
    }
  }
  
! void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info ) {
    assert(src->is_constant(), "should not call otherwise");
    assert(dest->is_address(), "should not call otherwise");
    LIR_Const* c = src->as_constant_ptr();
    LIR_Address* addr = dest->as_address_ptr();
  
    int null_check_here = code_offset();
    switch (type) {
      case T_INT:    // fall through
      case T_FLOAT:
-     case T_ADDRESS:
        __ movl(as_Address(addr), c->as_jint_bits());
        break;
  
      case T_OBJECT:  // fall through
      case T_ARRAY:
        if (c->as_jobject() == NULL) {
          __ movptr(as_Address(addr), NULL_WORD);
        } else {
          if (is_literal_address(addr)) {
            ShouldNotReachHere();
            __ movoop(as_Address(addr, noreg), c->as_jobject());
          } else {
  #ifdef _LP64
            __ movoop(rscratch1, c->as_jobject());
            null_check_here = code_offset();
            __ movptr(as_Address_lo(addr), rscratch1);
  #else
            __ movoop(as_Address(addr), c->as_jobject());
  #endif
          }
        }
--- 800,853 ----
      default:
        ShouldNotReachHere();
    }
  }
  
! void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
    assert(src->is_constant(), "should not call otherwise");
    assert(dest->is_address(), "should not call otherwise");
    LIR_Const* c = src->as_constant_ptr();
    LIR_Address* addr = dest->as_address_ptr();
  
    int null_check_here = code_offset();
    switch (type) {
      case T_INT:    // fall through
      case T_FLOAT:
        __ movl(as_Address(addr), c->as_jint_bits());
        break;
  
+     case T_ADDRESS:
+       __ movptr(as_Address(addr), c->as_jint_bits());
+       break;
+ 
      case T_OBJECT:  // fall through
      case T_ARRAY:
        if (c->as_jobject() == NULL) {
+ #ifdef _LP64
+         if (UseCompressedOops && !wide) {
+           __ movl(as_Address(addr), (int32_t)NULL_WORD);
+         } else {
          __ movptr(as_Address(addr), NULL_WORD);
+         }
+ #else
+         __ movptr(as_Address(addr), NULL_WORD);
+ #endif
        } else {
          if (is_literal_address(addr)) {
            ShouldNotReachHere();
            __ movoop(as_Address(addr, noreg), c->as_jobject());
          } else {
  #ifdef _LP64
            __ movoop(rscratch1, c->as_jobject());
+           if (UseCompressedOops && !wide) {
+             __ encode_heap_oop(rscratch1);
+             null_check_here = code_offset();
+             __ movl(as_Address_lo(addr), rscratch1);
+           } else {
            null_check_here = code_offset();
            __ movptr(as_Address_lo(addr), rscratch1);
+           }
  #else
            __ movoop(as_Address(addr), c->as_jobject());
  #endif
          }
        }

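Several hunks in this file follow the same pattern as the one above: when UseCompressedOops is set and the access is not "wide", an oop is written as a 32-bit narrow value (encode_heap_oop followed by movl) and read back with movl followed by decode_heap_oop. A standalone model of that encoding arithmetic, assuming a non-zero narrow-oop base and power-of-two object alignment (the names and the NULL handling here are illustrative, not HotSpot's implementation):

    #include <cstdint>

    // Model of compressing a 64-bit heap reference into 32 bits and back:
    // narrow = (oop - base) >> shift, oop = base + (narrow << shift).
    static uint32_t encode_heap_oop(uintptr_t oop, uintptr_t base, unsigned shift) {
      if (oop == 0) return 0;                        // NULL encodes to 0
      return (uint32_t)((oop - base) >> shift);      // fits while the heap stays in range
    }

    static uintptr_t decode_heap_oop(uint32_t narrow, uintptr_t base, unsigned shift) {
      if (narrow == 0) return 0;                     // 0 decodes back to NULL
      return base + ((uintptr_t)narrow << shift);
    }

This is also why the narrow NULL store above is a 32-bit movl of (int32_t)NULL_WORD rather than a full-width movptr.
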
*** 995,1020 ****
        ShouldNotReachHere();
    }
  }
  
  
! void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool /* unaligned */) {
    LIR_Address* to_addr = dest->as_address_ptr();
    PatchingStub* patch = NULL;
  
    if (type == T_ARRAY || type == T_OBJECT) {
      __ verify_oop(src->as_register());
    }
    if (patch_code != lir_patch_none) {
      patch = new PatchingStub(_masm, PatchingStub::access_field_id);
      Address toa = as_Address(to_addr);
      assert(toa.disp() != 0, "must have");
    }
-   if (info != NULL) {
-     add_debug_info_for_null_check_here(info);
-   }
  
    switch (type) {
      case T_FLOAT: {
        if (src->is_single_xmm()) {
          __ movflt(as_Address(to_addr), src->as_xmm_float_reg());
        } else {
--- 1020,1054 ----
        ShouldNotReachHere();
    }
  }
  
  
! void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool /* unaligned */, bool wide) {
    LIR_Address* to_addr = dest->as_address_ptr();
    PatchingStub* patch = NULL;
+ #ifdef _LP64
+   Register compressed_src = rscratch1;
+ #endif
  
    if (type == T_ARRAY || type == T_OBJECT) {
      __ verify_oop(src->as_register());
+ #ifdef _LP64
+     if (UseCompressedOops && !wide) {
+       __ movptr(compressed_src, src->as_register());
+       __ encode_heap_oop(compressed_src);
    }
+ #endif
+   }
+ 
    if (patch_code != lir_patch_none) {
      patch = new PatchingStub(_masm, PatchingStub::access_field_id);
      Address toa = as_Address(to_addr);
      assert(toa.disp() != 0, "must have");
    }
+   int null_check_here = code_offset();
  
    switch (type) {
      case T_FLOAT: {
        if (src->is_single_xmm()) {
          __ movflt(as_Address(to_addr), src->as_xmm_float_reg());
        } else {

*** 1036,1052 ****
          else                    __ fst_d (as_Address(to_addr));
        }
        break;
      }
  
-     case T_ADDRESS: // fall through
      case T_ARRAY:   // fall through
      case T_OBJECT:  // fall through
  #ifdef _LP64
        __ movptr(as_Address(to_addr), src->as_register());
        break;
  #endif // _LP64
      case T_INT:
        __ movl(as_Address(to_addr), src->as_register());
        break;
  
      case T_LONG: {
--- 1070,1092 ----
          else                    __ fst_d (as_Address(to_addr));
        }
        break;
      }
  
      case T_ARRAY:   // fall through
      case T_OBJECT:  // fall through
  #ifdef _LP64
+       if (UseCompressedOops && !wide) {
+         __ movl(as_Address(to_addr), compressed_src);
+       } else {
        __ movptr(as_Address(to_addr), src->as_register());
+       }
        break;
  #endif // _LP64
+     case T_ADDRESS:
+       __ movptr(as_Address(to_addr), src->as_register());
+       break;
      case T_INT:
        __ movl(as_Address(to_addr), src->as_register());
        break;
  
      case T_LONG: {

*** 1099,1108 ****
--- 1139,1151 ----
        break;
  
      default:
        ShouldNotReachHere();
    }
+   if (info != NULL) {
+     add_debug_info_for_null_check(null_check_here, info);
+   }
  
    if (patch_code != lir_patch_none) {
      patching_epilog(patch, patch_code, to_addr->base()->as_register(), info);
    }
  }

*** 1182,1192 ****
        ShouldNotReachHere();
    }
  }
  
  
! void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool /* unaligned */) {
    assert(src->is_address(), "should not call otherwise");
    assert(dest->is_register(), "should not call otherwise");
  
    LIR_Address* addr = src->as_address_ptr();
    Address from_addr = as_Address(addr);
--- 1225,1235 ----
        ShouldNotReachHere();
    }
  }
  
  
! void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool /* unaligned */, bool wide) {
    assert(src->is_address(), "should not call otherwise");
    assert(dest->is_register(), "should not call otherwise");
  
    LIR_Address* addr = src->as_address_ptr();
    Address from_addr = as_Address(addr);

*** 1236,1252 ****
          __ fld_d(from_addr);
        }
        break;
      }
  
-     case T_ADDRESS: // fall through
      case T_OBJECT:  // fall through
      case T_ARRAY:   // fall through
  #ifdef _LP64
        __ movptr(dest->as_register(), from_addr);
        break;
  #endif // _L64
      case T_INT:
        __ movl(dest->as_register(), from_addr);
        break;
  
      case T_LONG: {
--- 1279,1301 ----
          __ fld_d(from_addr);
        }
        break;
      }
  
      case T_OBJECT:  // fall through
      case T_ARRAY:   // fall through
  #ifdef _LP64
+       if (UseCompressedOops && !wide) {
+         __ movl(dest->as_register(), from_addr);
+       } else {
        __ movptr(dest->as_register(), from_addr);
+       }
        break;
  #endif // _L64
+     case T_ADDRESS:
+       __ movptr(dest->as_register(), from_addr);
+       break;
      case T_INT:
        __ movl(dest->as_register(), from_addr);
        break;
  
      case T_LONG: {

*** 1337,1346 ****
--- 1386,1400 ----
  
    if (patch != NULL) {
      patching_epilog(patch, patch_code, addr->base()->as_register(), info);
    }
  
    if (type == T_ARRAY || type == T_OBJECT) {
+ #ifdef _LP64
+     if (UseCompressedOops && !wide) {
+       __ decode_heap_oop(dest->as_register());
+     }
+ #endif
      __ verify_oop(dest->as_register());
    }
  }

*** 1676,1686 ****
      if (obj == k_RInfo) {
        k_RInfo = dst;
      } else if (obj == klass_RInfo) {
        klass_RInfo = dst;
      }
!     if (k->is_loaded()) {
        select_different_registers(obj, dst, k_RInfo, klass_RInfo);
      } else {
        Rtmp1 = op->tmp3()->as_register();
        select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
      }
--- 1730,1740 ----
      if (obj == k_RInfo) {
        k_RInfo = dst;
      } else if (obj == klass_RInfo) {
        klass_RInfo = dst;
      }
!     if (k->is_loaded() && LP64_ONLY(!UseCompressedOops) NOT_LP64(true)) {
        select_different_registers(obj, dst, k_RInfo, klass_RInfo);
      } else {
        Rtmp1 = op->tmp3()->as_register();
        select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
      }

*** 1713,1737 ****
    __ verify_oop(obj);
  
    if (op->fast_check()) {
      // get object class
      // not a safepoint as obj null check happens earlier
-     if (k->is_loaded()) {
  #ifdef _LP64
        __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
  #else
        __ cmpoop(Address(obj, oopDesc::klass_offset_in_bytes()), k->constant_encoding());
- #endif // _LP64
      } else {
        __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
      }
      __ jcc(Assembler::notEqual, *failure_target);
      // successful cast, fall through to profile or jump
  
    } else {
      // get object class
      // not a safepoint as obj null check happens earlier
!     __ movptr(klass_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
      if (k->is_loaded()) {
        // See if we get an immediate positive hit
  #ifdef _LP64
        __ cmpptr(k_RInfo, Address(klass_RInfo, k->super_check_offset()));
  #else
--- 1767,1796 ----
    __ verify_oop(obj);
  
    if (op->fast_check()) {
      // get object class
      // not a safepoint as obj null check happens earlier
  #ifdef _LP64
+     if (UseCompressedOops) {
+       __ load_klass(Rtmp1, obj);
+       __ cmpl(k_RInfo, Rtmp1);
+     } else {
        __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
+     }
  #else
+     if (k->is_loaded()) {
        __ cmpoop(Address(obj, oopDesc::klass_offset_in_bytes()), k->constant_encoding());
      } else {
        __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
      }
+ #endif
      __ jcc(Assembler::notEqual, *failure_target);
      // successful cast, fall through to profile or jump
  
    } else {
      // get object class
      // not a safepoint as obj null check happens earlier
!     __ load_klass(klass_RInfo, obj);
      if (k->is_loaded()) {
        // See if we get an immediate positive hit
  #ifdef _LP64
        __ cmpptr(k_RInfo, Address(klass_RInfo, k->super_check_offset()));
  #else
--- 1767,1796 ----

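The type-check paths above now go through load_klass instead of reading the klass word with a plain movptr. The idea, sketched below under the assumption that the object header stores the klass reference in compressed form when UseCompressedOops is on (layout and names are simplified stand-ins, not HotSpot's real structures):

    #include <cstdint>

    // Simplified object header model: the klass reference kept as a
    // 32-bit narrow value (compressed-oops case only).
    struct ObjHeaderModel {
      uint64_t mark;
      uint32_t narrow_klass;
    };

    // load_klass, modeled: read the 32-bit field and widen it back to a
    // full pointer before it is used for type checks or profiling.
    static uintptr_t load_klass_model(const ObjHeaderModel* obj,
                                      uintptr_t base, unsigned shift) {
      return base + ((uintptr_t)obj->narrow_klass << shift);
    }
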
*** 1782,1792 ****
      }
      if (op->should_profile()) {
        Register mdo  = klass_RInfo, recv = k_RInfo;
        __ bind(profile_cast_success);
        __ movoop(mdo, md->constant_encoding());
!       __ movptr(recv, Address(obj, oopDesc::klass_offset_in_bytes()));
        Label update_done;
        type_profile_helper(mdo, md, data, recv, success);
        __ jmp(*success);
  
        __ bind(profile_cast_failure);
--- 1841,1851 ----
      }
      if (op->should_profile()) {
        Register mdo  = klass_RInfo, recv = k_RInfo;
        __ bind(profile_cast_success);
        __ movoop(mdo, md->constant_encoding());
!       __ load_klass(recv, obj);
        Label update_done;
        type_profile_helper(mdo, md, data, recv, success);
        __ jmp(*success);
  
        __ bind(profile_cast_failure);

*** 1846,1859 ****
      } else {
        __ jcc(Assembler::equal, done);
      }
  
      add_debug_info_for_null_check_here(op->info_for_exception());
!     __ movptr(k_RInfo, Address(array, oopDesc::klass_offset_in_bytes()));
!     __ movptr(klass_RInfo, Address(value, oopDesc::klass_offset_in_bytes()));
!     // get instance klass
      __ movptr(k_RInfo, Address(k_RInfo, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc)));
      // perform the fast part of the checking logic
      __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
      // call out-of-line instance of __ check_klass_subtype_slow_path(...):
      __ push(klass_RInfo);
--- 1905,1918 ----
      } else {
        __ jcc(Assembler::equal, done);
      }
  
      add_debug_info_for_null_check_here(op->info_for_exception());
!     __ load_klass(k_RInfo, array);
!     __ load_klass(klass_RInfo, value);
!     // get instance klass (it's already uncompressed)
      __ movptr(k_RInfo, Address(k_RInfo, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc)));
      // perform the fast part of the checking logic
      __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
      // call out-of-line instance of __ check_klass_subtype_slow_path(...):
      __ push(klass_RInfo);

*** 1868,1878 ****
      }
      if (op->should_profile()) {
        Register mdo  = klass_RInfo, recv = k_RInfo;
        __ bind(profile_cast_success);
        __ movoop(mdo, md->constant_encoding());
!       __ movptr(recv, Address(value, oopDesc::klass_offset_in_bytes()));
        Label update_done;
        type_profile_helper(mdo, md, data, recv, &done);
        __ jmpb(done);
  
        __ bind(profile_cast_failure);
--- 1927,1937 ----
      }
      if (op->should_profile()) {
        Register mdo  = klass_RInfo, recv = k_RInfo;
        __ bind(profile_cast_success);
        __ movoop(mdo, md->constant_encoding());
!       __ load_klass(recv, value);
        Label update_done;
        type_profile_helper(mdo, md, data, recv, &done);
        __ jmpb(done);
  
        __ bind(profile_cast_failure);

*** 1932,1947 ****
      assert(cmpval == rax, "wrong register");
      assert(newval != NULL, "new val must be register");
      assert(cmpval != newval, "cmp and new values must be in different registers");
      assert(cmpval != addr, "cmp and addr must be in different registers");
      assert(newval != addr, "new value and addr must be in different registers");
      if (os::is_MP()) {
        __ lock();
      }
-     if ( op->code() == lir_cas_obj) {
        __ cmpxchgptr(newval, Address(addr, 0));
!     } else if (op->code() == lir_cas_int) {
        __ cmpxchgl(newval, Address(addr, 0));
      }
  #ifdef _LP64
    } else if (op->code() == lir_cas_long) {
      Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
--- 1991,2026 ----
      assert(cmpval == rax, "wrong register");
      assert(newval != NULL, "new val must be register");
      assert(cmpval != newval, "cmp and new values must be in different registers");
      assert(cmpval != addr, "cmp and addr must be in different registers");
      assert(newval != addr, "new value and addr must be in different registers");
+ 
+     if ( op->code() == lir_cas_obj) {
+ #ifdef _LP64
+       if (UseCompressedOops) {
+         __ mov(rscratch1, cmpval);
+         __ encode_heap_oop(cmpval);
+         __ mov(rscratch2, newval);
+         __ encode_heap_oop(rscratch2);
+         if (os::is_MP()) {
+           __ lock();
+         }
+         __ cmpxchgl(rscratch2, Address(addr, 0));
+         __ mov(cmpval, rscratch1);
+       } else
+ #endif
+       {
      if (os::is_MP()) {
        __ lock();
      }
        __ cmpxchgptr(newval, Address(addr, 0));
!       }
!     } else {
!       assert(op->code() == lir_cas_int, "lir_cas_int expected");
!       if (os::is_MP()) {
!         __ lock();
!       }
        __ cmpxchgl(newval, Address(addr, 0));
      }
  #ifdef _LP64
    } else if (op->code() == lir_cas_long) {
      Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());

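For lir_cas_obj with compressed oops, the hunk above first encodes both the expected and the new reference, performs the compare-and-swap on the 32-bit field (lock cmpxchgl), and then restores the original expected value into cmpval. A plain C++ sketch of the same operation on a modeled narrow-oop field (the helper names are assumptions, not HotSpot APIs):

    #include <atomic>
    #include <cstdint>

    static uint32_t encode(uintptr_t oop, uintptr_t base, unsigned shift) {
      return oop == 0 ? 0 : (uint32_t)((oop - base) >> shift);
    }

    // CAS on a field that holds a compressed oop: compress both sides,
    // then perform a 32-bit atomic compare-and-exchange.
    static bool cas_obj_field(std::atomic<uint32_t>& field,
                              uintptr_t expected_oop, uintptr_t new_oop,
                              uintptr_t base, unsigned shift) {
      uint32_t expected = encode(expected_oop, base, shift);
      uint32_t desired  = encode(new_oop, base, shift);
      return field.compare_exchange_strong(expected, desired);
    }
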
*** 3179,3190 ****
--- 3258,3277 ----
      __ cmpl(tmp, dst_length_addr);
      __ jcc(Assembler::above, *stub->entry());
    }
  
    if (flags & LIR_OpArrayCopy::type_check) {
+ #ifdef _LP64
+     if (UseCompressedOops) {
+       __ movl(tmp, src_klass_addr);
+       __ cmpl(tmp, dst_klass_addr);
+     } else
+ #endif
+     {
      __ movptr(tmp, src_klass_addr);
      __ cmpptr(tmp, dst_klass_addr);
+     }
      __ jcc(Assembler::notEqual, *stub->entry());
    }
  
  #ifdef ASSERT
    if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {

*** 3195,3210 ****
--- 3282,3315 ----
      // dst type is exactly the expected type and the src type is a
      // subtype which we can't check or src is the same array as dst
      // but not necessarily exactly of type default_type.
      Label known_ok, halt;
      __ movoop(tmp, default_type->constant_encoding());
+ #ifdef _LP64
+     if (UseCompressedOops) {
+       __ encode_heap_oop(tmp);
+     }
+ #endif
+ 
      if (basic_type != T_OBJECT) {
+ #ifdef _LP64
+       if (UseCompressedOops) __ cmpl(tmp, dst_klass_addr);
+       else
+ #endif
        __ cmpptr(tmp, dst_klass_addr);
        __ jcc(Assembler::notEqual, halt);
+ #ifdef _LP64
+       if (UseCompressedOops) __ cmpl(tmp, src_klass_addr);
+       else
+ #endif
        __ cmpptr(tmp, src_klass_addr);
        __ jcc(Assembler::equal, known_ok);
      } else {
+ #ifdef _LP64
+       if (UseCompressedOops) __ cmpl(tmp, dst_klass_addr);
+       else
+ #endif
        __ cmpptr(tmp, dst_klass_addr);
        __ jcc(Assembler::equal, known_ok);
        __ cmpptr(src, dst);
        __ jcc(Assembler::equal, known_ok);
      }

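The arraycopy type checks above can stay 32-bit under compressed oops: the expected klass constant is encoded once (encode_heap_oop on tmp) and then compared against the compressed klass words, and because the encoding is one-to-one for aligned in-heap addresses, narrow values compare equal exactly when the full pointers do. A small self-contained check of that property (the base and shift values below are made up for the example):

    #include <cassert>
    #include <cstdint>

    static uint32_t encode(uintptr_t p, uintptr_t base, unsigned shift) {
      return (uint32_t)((p - base) >> shift);
    }

    int main() {
      const uintptr_t base  = 0x800000000ULL;  // hypothetical narrow-oop base
      const unsigned  shift = 3;               // 8-byte object alignment
      uintptr_t k1 = base + 0x1000;
      uintptr_t k2 = base + 0x2000;
      // Distinct aligned pointers keep distinct encodings; equal ones stay equal.
      assert(encode(k1, base, shift) != encode(k2, base, shift));
      assert(encode(k1, base, shift) == encode(base + 0x1000, base, shift));
      return 0;
    }
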
*** 3330,3340 ****
            __ addptr(data_addr, DataLayout::counter_increment);
            return;
          }
        }
      } else {
!       __ movptr(recv, Address(recv, oopDesc::klass_offset_in_bytes()));
        Label update_done;
        type_profile_helper(mdo, md, data, recv, &update_done);
        // Receiver did not match any saved receiver and there is no empty row for it.
        // Increment total counter to indicate polymorphic case.
        __ addptr(counter_addr, DataLayout::counter_increment);
--- 3435,3445 ----
            __ addptr(data_addr, DataLayout::counter_increment);
            return;
          }
        }
      } else {
!       __ load_klass(recv, recv);
        Label update_done;
        type_profile_helper(mdo, md, data, recv, &update_done);
        // Receiver did not match any saved receiver and there is no empty row for it.
        // Increment total counter to indicate polymorphic case.
        __ addptr(counter_addr, DataLayout::counter_increment);