
src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp


        

*** 38,47 ****
--- 38,48 ----
  #include "gc/shared/barrierSet.hpp"
  #include "gc/shared/cardTableBarrierSet.hpp"
  #include "gc/shared/collectedHeap.hpp"
  #include "nativeInst_aarch64.hpp"
  #include "oops/objArrayKlass.hpp"
+ #include "oops/oop.inline.hpp"
  #include "runtime/frame.inline.hpp"
  #include "runtime/sharedRuntime.hpp"
  #include "vmreg_aarch64.inline.hpp"
*** 240,250 ****
    // r2: osr buffer
    //
    // build frame
    ciMethod* m = compilation()->method();
!   __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
  
    // OSR buffer is
    //
    // locals[nlocals-1..0]
    // monitors[0..number_of_locks]
--- 241,251 ----
    // r2: osr buffer
    //
    // build frame
    ciMethod* m = compilation()->method();
!   __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes(), needs_stack_repair(), NULL);
  
    // OSR buffer is
    //
    // locals[nlocals-1..0]
    // monitors[0..number_of_locks]
*** 450,460 ****
      __ mov(r0, r19);  // Restore the exception
    }
  
    // remove the activation and dispatch to the unwind handler
    __ block_comment("remove_frame and dispatch to the unwind handler");
!   __ remove_frame(initial_frame_size_in_bytes());
    __ far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
  
    // Emit the slow path assembly
    if (stub != NULL) {
      stub->emit_code(this);
--- 451,461 ----
      __ mov(r0, r19);  // Restore the exception
    }
  
    // remove the activation and dispatch to the unwind handler
    __ block_comment("remove_frame and dispatch to the unwind handler");
!   __ remove_frame(initial_frame_size_in_bytes(), needs_stack_repair());
    __ far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
  
    // Emit the slow path assembly
    if (stub != NULL) {
      stub->emit_code(this);
*** 501,522 ****
  }
  
  void LIR_Assembler::return_op(LIR_Opr result) {
    assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == r0, "word returns are in r0,");
  
    // Pop the stack before the safepoint code
!   __ remove_frame(initial_frame_size_in_bytes());
  
    if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
      __ reserved_stack_check();
    }
  
    address polling_page(os::get_polling_page());
    __ read_polling_page(rscratch1, polling_page, relocInfo::poll_return_type);
    __ ret(lr);
  }
  
  int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
    address polling_page(os::get_polling_page());
    guarantee(info != NULL, "Shouldn't be NULL");
    assert(os::is_poll_address(polling_page), "should be");
    __ get_polling_page(rscratch1, polling_page, relocInfo::poll_type);
--- 502,528 ----
  }
  
  void LIR_Assembler::return_op(LIR_Opr result) {
    assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == r0, "word returns are in r0,");
+   ciMethod* method = compilation()->method();
  
    // Pop the stack before the safepoint code
!   __ remove_frame(initial_frame_size_in_bytes(), needs_stack_repair());
  
    if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
      __ reserved_stack_check();
    }
  
    address polling_page(os::get_polling_page());
    __ read_polling_page(rscratch1, polling_page, relocInfo::poll_return_type);
    __ ret(lr);
  }
  
+ void LIR_Assembler::store_value_type_fields_to_buf(ciValueKlass* vk) {
+   __ store_value_type_fields_to_buf(vk);
+ }
+ 
  int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
    address polling_page(os::get_polling_page());
    guarantee(info != NULL, "Shouldn't be NULL");
    assert(os::is_poll_address(polling_page), "should be");
    __ get_polling_page(rscratch1, polling_page, relocInfo::poll_type);
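For context on the new store_value_type_fields_to_buf hook: when a method returns a value type whose fields have been scalarized into registers, the runtime may still need to buffer those fields into a heap object for a caller that expects a reference. Below is a minimal, standalone C++ sketch of that buffering idea only; every name in it is illustrative, and the real work happens in the MacroAssembler routine this hook delegates to.

#include <cstdint>
#include <cstdio>

// Toy model: a value type returned as two scalarized "register" fields.
struct FieldsInRegs { int64_t x; int64_t y; };

// Toy stand-in for a heap-allocated, buffered value object.
struct Buffered { int64_t x; int64_t y; };

// Hypothetical analogue of the buffering step: pack the scalarized
// fields into a buffer when the caller needs a reference.
Buffered* buffer_fields(const FieldsInRegs& regs) {
  Buffered* obj = new Buffered;   // stands in for a heap allocation
  obj->x = regs.x;
  obj->y = regs.y;
  return obj;
}

int main() {
  FieldsInRegs r{1, 2};
  Buffered* b = buffer_fields(r);
  std::printf("%lld %lld\n", (long long)b->x, (long long)b->y);
  delete b;
  return 0;
}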
*** 560,574 ****
        assert(patch_code == lir_patch_none, "no patching handled here");
        __ mov(dest->as_register_lo(), (intptr_t)c->as_jlong());
        break;
      }
  
      case T_OBJECT: {
!       if (patch_code == lir_patch_none) {
!         jobject2reg(c->as_jobject(), dest->as_register());
!       } else {
          jobject2reg_with_patching(dest->as_register(), info);
        }
        break;
      }
  
      case T_METADATA: {
--- 566,581 ----
        assert(patch_code == lir_patch_none, "no patching handled here");
        __ mov(dest->as_register_lo(), (intptr_t)c->as_jlong());
        break;
      }
  
+     case T_VALUETYPE:
      case T_OBJECT: {
!       if (patch_code != lir_patch_none) {
          jobject2reg_with_patching(dest->as_register(), info);
+       } else {
+         jobject2reg(c->as_jobject(), dest->as_register());
        }
        break;
      }
  
      case T_METADATA: {
*** 606,615 ****
--- 613,623 ----
  }
  
  void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
    LIR_Const* c = src->as_constant_ptr();
    switch (c->type()) {
+   case T_VALUETYPE:
    case T_OBJECT: {
      if (! c->as_jobject())
        __ str(zr, frame_map()->address_for_slot(dest->single_stack_ix()));
      else {
*** 672,681 ****
--- 680,690 ----
      break;
    case T_INT:
      assert(c->as_jint() == 0, "should be");
      insn = &Assembler::strw;
      break;
+   case T_VALUETYPE: // DMS CHECK: the code differs significantly from x86
    case T_OBJECT:
    case T_ARRAY:
      assert(c->as_jobject() == 0, "should be");
      if (UseCompressedOops && !wide) {
        insn = &Assembler::strw;
*** 712,728 ****
        // Can do LONG -> OBJECT
        move_regs(src->as_register_lo(), dest->as_register());
        return;
      }
      assert(src->is_single_cpu(), "must match");
!     if (src->type() == T_OBJECT) {
        __ verify_oop(src->as_register());
      }
      move_regs(src->as_register(), dest->as_register());
    } else if (dest->is_double_cpu()) {
!     if (src->type() == T_OBJECT || src->type() == T_ARRAY) {
        // Surprising to me but we can see move of a long to t_object
        __ verify_oop(src->as_register());
        move_regs(src->as_register(), dest->as_register_lo());
        return;
      }
--- 721,737 ----
        // Can do LONG -> OBJECT
        move_regs(src->as_register_lo(), dest->as_register());
        return;
      }
      assert(src->is_single_cpu(), "must match");
!     if (src->type() == T_OBJECT || src->type() == T_VALUETYPE) {
        __ verify_oop(src->as_register());
      }
      move_regs(src->as_register(), dest->as_register());
    } else if (dest->is_double_cpu()) {
!     if (src->type() == T_OBJECT || src->type() == T_ARRAY || src->type() == T_VALUETYPE) {
        // Surprising to me but we can see move of a long to t_object
        __ verify_oop(src->as_register());
        move_regs(src->as_register(), dest->as_register_lo());
        return;
      }
*** 746,756 ****
    }
  }
  
  void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
    if (src->is_single_cpu()) {
!     if (type == T_ARRAY || type == T_OBJECT) {
        __ str(src->as_register(), frame_map()->address_for_slot(dest->single_stack_ix()));
        __ verify_oop(src->as_register());
      } else if (type == T_METADATA || type == T_DOUBLE) {
        __ str(src->as_register(), frame_map()->address_for_slot(dest->single_stack_ix()));
      } else {
--- 755,765 ----
    }
  }
  
  void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
    if (src->is_single_cpu()) {
!     if (type == T_ARRAY || type == T_OBJECT || type == T_VALUETYPE) {
        __ str(src->as_register(), frame_map()->address_for_slot(dest->single_stack_ix()));
        __ verify_oop(src->as_register());
      } else if (type == T_METADATA || type == T_DOUBLE) {
        __ str(src->as_register(), frame_map()->address_for_slot(dest->single_stack_ix()));
      } else {
*** 784,794 ****
    if (patch_code != lir_patch_none) {
      deoptimize_trap(info);
      return;
    }
  
!   if (type == T_ARRAY || type == T_OBJECT) {
      __ verify_oop(src->as_register());
  
      if (UseCompressedOops && !wide) {
        __ encode_heap_oop(compressed_src, src->as_register());
      } else {
--- 793,803 ----
    if (patch_code != lir_patch_none) {
      deoptimize_trap(info);
      return;
    }
  
!   if (type == T_ARRAY || type == T_OBJECT || type == T_VALUETYPE) {
      __ verify_oop(src->as_register());
  
      if (UseCompressedOops && !wide) {
        __ encode_heap_oop(compressed_src, src->as_register());
      } else {
*** 806,815 ****
--- 815,825 ----
    case T_DOUBLE: {
      __ strd(src->as_double_reg(), as_Address(to_addr));
      break;
    }
  
+   case T_VALUETYPE: // fall through
    case T_ARRAY:     // fall through
    case T_OBJECT:    // fall through
      if (UseCompressedOops && !wide) {
        __ strw(compressed_src, as_Address(to_addr, rscratch2));
      } else {
*** 859,869 ****
  void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
    assert(src->is_stack(), "should not call otherwise");
    assert(dest->is_register(), "should not call otherwise");
  
    if (dest->is_single_cpu()) {
!     if (type == T_ARRAY || type == T_OBJECT) {
        __ ldr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
        __ verify_oop(dest->as_register());
      } else if (type == T_METADATA) {
        __ ldr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
      } else {
--- 869,879 ----
  void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
    assert(src->is_stack(), "should not call otherwise");
    assert(dest->is_register(), "should not call otherwise");
  
    if (dest->is_single_cpu()) {
!     if (type == T_ARRAY || type == T_OBJECT || type == T_VALUETYPE) {
        __ ldr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
        __ verify_oop(dest->as_register());
      } else if (type == T_METADATA) {
        __ ldr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
      } else {
*** 931,941 ****
  void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type,
                              LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool /* unaligned */) {
    LIR_Address* addr = src->as_address_ptr();
    LIR_Address* from_addr = src->as_address_ptr();
  
!   if (addr->base()->type() == T_OBJECT) {
      __ verify_oop(addr->base()->as_pointer_register());
    }
  
    if (patch_code != lir_patch_none) {
      deoptimize_trap(info);
--- 941,951 ----
  void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type,
                              LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool /* unaligned */) {
    LIR_Address* addr = src->as_address_ptr();
    LIR_Address* from_addr = src->as_address_ptr();
  
!   if (addr->base()->type() == T_OBJECT || addr->base()->type() == T_VALUETYPE) {
      __ verify_oop(addr->base()->as_pointer_register());
    }
  
    if (patch_code != lir_patch_none) {
      deoptimize_trap(info);
*** 955,964 ****
--- 965,975 ----
    case T_DOUBLE: {
      __ ldrd(dest->as_double_reg(), as_Address(from_addr));
      break;
    }
  
+   case T_VALUETYPE: // fall through
    case T_ARRAY:     // fall through
    case T_OBJECT:    // fall through
      if (UseCompressedOops && !wide) {
        __ ldrw(dest->as_register(), as_Address(from_addr));
      } else {
*** 1009,1034 ****
    default:
      ShouldNotReachHere();
    }
  
!   if (type == T_ARRAY || type == T_OBJECT) {
      if (UseCompressedOops && !wide) {
        __ decode_heap_oop(dest->as_register());
      }
  
      if (!UseZGC) {
        // Load barrier has not yet been applied, so ZGC can't verify the oop here
        __ verify_oop(dest->as_register());
      }
    } else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) {
      if (UseCompressedClassPointers) {
        __ decode_klass_not_null(dest->as_register());
      }
    }
  }
  
  int LIR_Assembler::array_element_size(BasicType type) const {
    int elem_size = type2aelembytes(type);
    return exact_log2(elem_size);
  }
--- 1020,1062 ----
    default:
      ShouldNotReachHere();
    }
  
!   if (type == T_ARRAY || type == T_OBJECT || type == T_VALUETYPE) {
      if (UseCompressedOops && !wide) {
        __ decode_heap_oop(dest->as_register());
      }
  
      if (!UseZGC) {
        // Load barrier has not yet been applied, so ZGC can't verify the oop here
        __ verify_oop(dest->as_register());
      }
    } else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) {
      if (UseCompressedClassPointers) {
+       __ andr(dest->as_register(), dest->as_register(), oopDesc::compressed_klass_mask());
        __ decode_klass_not_null(dest->as_register());
+     } else {
+       __ ubfm(dest->as_register(), dest->as_register(), 0, 63 - oopDesc::storage_props_nof_bits);
      }
    }
  }
  
+ void LIR_Assembler::move(LIR_Opr src, LIR_Opr dst) {
+   assert(dst->is_cpu_register(), "must be");
+   assert(dst->type() == src->type(), "must be");
+ 
+   if (src->is_cpu_register()) {
+     reg2reg(src, dst);
+   } else if (src->is_stack()) {
+     stack2reg(src, dst, dst->type());
+   } else if (src->is_constant()) {
+     const2reg(src, dst, lir_patch_none, NULL);
+   } else {
+     ShouldNotReachHere();
+   }
+ }
+ 
  int LIR_Assembler::array_element_size(BasicType type) const {
    int elem_size = type2aelembytes(type);
    return exact_log2(elem_size);
  }
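The new T_ADDRESS branch above masks array storage-property bits out of the klass word before decoding: andr with oopDesc::compressed_klass_mask() in the compressed case, and ubfm(dst, dst, 0, 63 - storage_props_nof_bits) (an unsigned bit-field move that keeps only the low bits) otherwise. A standalone sketch of the same arithmetic, assuming the property bits sit in the top bits of a 64-bit word; the bit count below is illustrative, not HotSpot's actual constant:

#include <cstdint>
#include <cstdio>

// Illustrative: pretend the top 3 bits of the klass word hold array
// storage properties, in the spirit of oopDesc::storage_props_nof_bits.
constexpr unsigned kPropsBits = 3;

// Equivalent of ubfm(dst, src, 0, 63 - kPropsBits): keep bits
// [0, 63 - kPropsBits], clearing the top kPropsBits bits.
uint64_t strip_storage_props(uint64_t klass_word) {
  return klass_word & (~uint64_t{0} >> kPropsBits);
}

int main() {
  uint64_t word = 0xE000000000001234ULL;  // props in top 3 bits + klass ptr
  std::printf("%#llx\n", (unsigned long long)strip_storage_props(word));
  // prints 0x1234
  return 0;
}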
*** 1216,1226 ****
  void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
    Register len = op->len()->as_register();
    __ uxtw(len, len);
  
!   if (UseSlowPath ||
        (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
        (!UseFastNewTypeArray && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
      __ b(*op->stub()->entry());
    } else {
      Register tmp1 = op->tmp1()->as_register();
--- 1244,1254 ----
  void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
    Register len = op->len()->as_register();
    __ uxtw(len, len);
  
!   if (UseSlowPath || op->type() == T_VALUETYPE ||
        (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
        (!UseFastNewTypeArray && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
      __ b(*op->stub()->entry());
    } else {
      Register tmp1 = op->tmp1()->as_register();
*** 1528,1537 ****
--- 1556,1681 ----
    } else {
      ShouldNotReachHere();
    }
  }
  
+ void LIR_Assembler::emit_opFlattenedArrayCheck(LIR_OpFlattenedArrayCheck* op) {
+   // We are loading/storing an array that *may* be a flattened array (the declared type is
+   // Object[], interface[], or VT?[]). If this array is flattened, take the slow path.
+   __ load_storage_props(op->tmp()->as_register(), op->array()->as_register());
+   __ tst(op->tmp()->as_register(), ArrayStorageProperties::flattened_value);
+   __ br(Assembler::NE, *op->stub()->entry());
+   if (!op->value()->is_illegal()) {
+     // We are storing into the array.
+     Label skip;
+     __ tst(op->tmp()->as_register(), ArrayStorageProperties::null_free_value);
+     __ br(Assembler::EQ, skip);
+     // The array is not flattened, but it is null-free. If we are storing
+     // a null, take the slow path (which will throw an NPE).
+     __ cbz(op->value()->as_register(), *op->stub()->entry());
+     __ bind(skip);
+   }
+ }
+ 
+ void LIR_Assembler::emit_opNullFreeArrayCheck(LIR_OpNullFreeArrayCheck* op) {
+   // This is called when we use aastore into an array declared as "[LVT;",
+   // where we know VT is not flattenable (due to ValueArrayElemMaxFlatOops, etc).
+   // However, we need to do a NULL check if the actual array is a "[QVT;".
+   __ load_storage_props(op->tmp()->as_register(), op->array()->as_register());
+   __ mov(rscratch1, (uint64_t) ArrayStorageProperties::null_free_value);
+   __ cmp(op->tmp()->as_register(), rscratch1);
+ }
+ 
+ void LIR_Assembler::emit_opSubstitutabilityCheck(LIR_OpSubstitutabilityCheck* op) {
+   Label L_oops_equal;
+   Label L_oops_not_equal;
+   Label L_end;
+ 
+   Register left = op->left()->as_register();
+   Register right = op->right()->as_register();
+ 
+   __ cmp(left, right);
+   __ br(Assembler::EQ, L_oops_equal);
+ 
+   // (1) Null check -- if one of the operands is null, the other must not be null (because
+   //     the two references are not equal), so they are not substitutable.
+   //     FIXME: do the null check only if the operand is nullable.
+   {
+     __ cbz(left, L_oops_not_equal);
+     __ cbz(right, L_oops_not_equal);
+   }
+ 
+   ciKlass* left_klass = op->left_klass();
+   ciKlass* right_klass = op->right_klass();
+ 
+   // (2) Value object check -- if either of the operands is not a value object,
+   //     they are not substitutable. We do this only if we are not sure that the
+   //     operands are value objects.
+   if ((left_klass == NULL || right_klass == NULL) || // The klass is still unloaded, or came from a Phi node.
+       !left_klass->is_valuetype() || !right_klass->is_valuetype()) {
+     Register tmp1 = rscratch1; /* op->tmp1()->as_register(); */
+     Register tmp2 = rscratch2; /* op->tmp2()->as_register(); */
+ 
+     __ mov(tmp1, (intptr_t)markOopDesc::always_locked_pattern);
+ 
+     __ ldr(tmp2, Address(left, oopDesc::mark_offset_in_bytes()));
+     __ andr(tmp1, tmp1, tmp2);
+ 
+     __ ldr(tmp2, Address(right, oopDesc::mark_offset_in_bytes()));
+     __ andr(tmp1, tmp1, tmp2);
+ 
+     __ mov(tmp2, (intptr_t)markOopDesc::always_locked_pattern);
+     __ cmp(tmp1, tmp2);
+     __ br(Assembler::NE, L_oops_not_equal);
+   }
+ 
+   // (3) Same klass check: if the operands are of different klasses, they are not substitutable.
+   if (left_klass != NULL && left_klass->is_valuetype() && left_klass == right_klass) {
+     // No need to load the klass -- the operands are statically known to be the same value klass.
+     __ b(*op->stub()->entry());
+   } else {
+     Register left_klass_op = op->left_klass_op()->as_register();
+     Register right_klass_op = op->right_klass_op()->as_register();
+ 
+     // DMS CHECK: likely an x86 bug; make the aarch64 implementation correct.
+     __ load_klass(left_klass_op, left);
+     __ load_klass(right_klass_op, right);
+     __ cmp(left_klass_op, right_klass_op);
+     __ br(Assembler::EQ, *op->stub()->entry()); // same klass -> do slow check
+     // fall through to L_oops_not_equal
+   }
+ 
+   __ bind(L_oops_not_equal);
+   move(op->not_equal_result(), op->result_opr());
+   __ b(L_end);
+ 
+   __ bind(L_oops_equal);
+   move(op->equal_result(), op->result_opr());
+   __ b(L_end);
+ 
+   // We've returned from the stub. op->result_opr() contains 0x0 IFF the two
+   // operands are not substitutable. (Don't compare against 0x1 in case the
+   // C compiler is naughty.)
+   __ bind(*op->stub()->continuation());
+ 
+   if (op->result_opr()->type() == T_LONG) {
+     __ cbzw(op->result_opr()->as_register(), L_oops_not_equal); // (call_stub() == 0x0) -> not_equal
+   } else {
+     __ cbz(op->result_opr()->as_register(), L_oops_not_equal);  // (call_stub() == 0x0) -> not_equal
+   }
+ 
+   move(op->equal_result(), op->result_opr()); // (call_stub() != 0x0) -> equal
+   // fall-through
+   __ bind(L_end);
+ }
+ 
  void LIR_Assembler::casw(Register addr, Register newval, Register cmpval) {
    __ cmpxchg(addr, cmpval, newval, Assembler::word, /* acquire*/ true, /* release*/ true,
               /* weak*/ false, rscratch1);
    __ cset(rscratch1, Assembler::NE);
    __ membar(__ AnyAny);
  }
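Taken together, emit_opSubstitutabilityCheck implements the substitutability ladder for acmp on value objects: reference equality, then null checks, then a mark-word test that both operands are value objects (the always_locked pattern), then a klass comparison, and only then the runtime stub for the field-by-field slow check. A compact C++ model of that decision ladder, using toy types in place of mark words, klass pointers, and the stub:

// Toy object model; names are illustrative only.
struct Obj {
  bool is_value;  // stands in for the markOopDesc::always_locked_pattern test
  int  klass_id;  // stands in for the klass pointer comparison
  long payload;   // stands in for the value object's field contents
};

// Stand-in for the runtime stub: the field-by-field slow comparison.
static bool slow_substitutability(const Obj* a, const Obj* b) {
  return a->payload == b->payload;
}

bool is_substitutable(const Obj* left, const Obj* right) {
  if (left == right) return true;              // same reference
  if (left == nullptr || right == nullptr)     // (1) one operand is null
    return false;
  if (!left->is_value || !right->is_value)     // (2) not both value objects
    return false;
  if (left->klass_id != right->klass_id)       // (3) different klasses
    return false;
  return slow_substitutability(left, right);   // slow path, like the stub
}

int main() {
  Obj a{true, 7, 42}, b{true, 7, 42}, c{true, 8, 42};
  return (is_substitutable(&a, &b) && !is_substitutable(&a, &c)) ? 0 : 1;
}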
*** 1938,1951 ****
  } else if (opr1->is_single_cpu() || opr1->is_double_cpu()) {
    Register reg1 = as_reg(opr1);
    if (opr2->is_single_cpu()) {
      // cpu register - cpu register
      Register reg2 = opr2->as_register();
!     if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
        __ cmpoop(reg1, reg2);
      } else {
!       assert(opr2->type() != T_OBJECT && opr2->type() != T_ARRAY, "cmp int, oop?");
        __ cmpw(reg1, reg2);
      }
      return;
    }
    if (opr2->is_double_cpu()) {
--- 2082,2095 ----
  } else if (opr1->is_single_cpu() || opr1->is_double_cpu()) {
    Register reg1 = as_reg(opr1);
    if (opr2->is_single_cpu()) {
      // cpu register - cpu register
      Register reg2 = opr2->as_register();
!     if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY || opr1->type() == T_VALUETYPE) {
        __ cmpoop(reg1, reg2);
      } else {
!       assert(opr2->type() != T_OBJECT && opr2->type() != T_ARRAY && opr2->type() != T_VALUETYPE, "cmp int, oop?");
        __ cmpw(reg1, reg2);
      }
      return;
    }
    if (opr2->is_double_cpu()) {
*** 1968,1977 ****
--- 2112,2122 ----
        imm = opr2->as_constant_ptr()->as_jlong();
        break;
      case T_ADDRESS:
        imm = opr2->as_constant_ptr()->as_jint();
        break;
+     case T_VALUETYPE:
      case T_OBJECT:
      case T_ARRAY:
        jobject2reg(opr2->as_constant_ptr()->as_jobject(), rscratch1);
        __ cmpoop(reg1, rscratch1);
        return;
*** 2134,2143 ****
--- 2279,2289 ----
        ShouldNotReachHere();
        break;
      }
      break;
    case T_LONG:
+   case T_VALUETYPE:
    case T_ADDRESS:
    case T_OBJECT:
      switch (code) {
      case lir_shl: __ lslv (dreg, lreg, count->as_register()); break;
      case lir_shr: __ asrv (dreg, lreg, count->as_register()); break;
*** 2170,2179 ****
--- 2316,2326 ----
        break;
      }
      break;
    case T_LONG:
    case T_ADDRESS:
+   case T_VALUETYPE:
    case T_OBJECT:
      switch (code) {
      case lir_shl: __ lsl (dreg, lreg, count); break;
      case lir_shr: __ asr (dreg, lreg, count); break;
      case lir_ushr: __ lsr (dreg, lreg, count); break;
*** 2214,2223 ****
--- 2361,2383 ----
    assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
    __ lea(rscratch1, __ constant_oop_address(o));
    __ str(rscratch1, Address(sp, offset_from_rsp_in_bytes));
  }
  
+ void LIR_Assembler::arraycopy_valuetype_check(Register obj, Register tmp, CodeStub* slow_path, bool is_dest) {
+   __ load_storage_props(tmp, obj);
+   if (is_dest) {
+     // We also take the slow path if it's a null-free destination array, just in case
+     // the source array contains NULLs.
+     __ tst(tmp, ArrayStorageProperties::flattened_value | ArrayStorageProperties::null_free_value);
+   } else {
+     __ tst(tmp, ArrayStorageProperties::flattened_value);
+   }
+   __ br(Assembler::NE, *slow_path->entry());
+ }
+ 
  // This code replaces a call to arraycopy; no exception may
  // be thrown in this code, they must be thrown in the System.arraycopy
  // activation frame; we could save some checks if this would not be the case
  void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
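arraycopy_valuetype_check sends a copy to the slow path when the source array is flattened, or when the destination is flattened or null-free (a null-free destination must reject nulls possibly present in the source). The same flag test, sketched with the storage properties modeled as a plain bitmask; the flag values here are illustrative, not HotSpot's:

#include <cstdint>

// Illustrative storage-property flags, modeled on
// ArrayStorageProperties::flattened_value and null_free_value.
enum : uint8_t {
  kFlattened = 1 << 0,
  kNullFree  = 1 << 1,
};

// Mirrors the tst/br sequence: take the slow path when any masked bit is set.
bool needs_slow_path(uint8_t props, bool is_dest) {
  uint8_t mask = is_dest ? uint8_t(kFlattened | kNullFree) : uint8_t(kFlattened);
  return (props & mask) != 0;
}

int main() {
  // A null-free destination must go slow; a merely null-free source need not.
  return (needs_slow_path(kNullFree, true) && !needs_slow_path(kNullFree, false)) ? 0 : 1;
}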
*** 2233,2243 ****
    __ resolve(ACCESS_WRITE, dst);
  
    CodeStub* stub = op->stub();
    int flags = op->flags();
    BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
!   if (basic_type == T_ARRAY) basic_type = T_OBJECT;
  
    // if we don't know anything, just go through the generic arraycopy
    if (default_type == NULL // || basic_type == T_OBJECT
        ) {
      Label done;
--- 2393,2419 ----
    __ resolve(ACCESS_WRITE, dst);
  
    CodeStub* stub = op->stub();
    int flags = op->flags();
    BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
!   if (basic_type == T_ARRAY || basic_type == T_VALUETYPE) basic_type = T_OBJECT;
! 
!   if (flags & LIR_OpArrayCopy::always_slow_path) {
!     __ b(*stub->entry());
!     __ bind(*stub->continuation());
!     return;
!   }
! 
!   if (flags & LIR_OpArrayCopy::src_valuetype_check) {
!     arraycopy_valuetype_check(src, tmp, stub, false);
!   }
! 
!   if (flags & LIR_OpArrayCopy::dst_valuetype_check) {
!     arraycopy_valuetype_check(dst, tmp, stub, true);
!   }
  
    // if we don't know anything, just go through the generic arraycopy
    if (default_type == NULL // || basic_type == T_OBJECT
        ) {
      Label done;
*** 2902,2911 ****
--- 3078,3088 ----
      type = 0;
      break;
    case T_INT:
    case T_LONG:
    case T_OBJECT:
+   case T_VALUETYPE:
      type = 1;
      break;
    case T_FLOAT:
      type = 2;
      break;
*** 3168,3177 ****
--- 3345,3355 ----
      break;
    case T_LONG:
      xchg = &MacroAssembler::atomic_xchgal;
      add = &MacroAssembler::atomic_addal;
      break;
+   case T_VALUETYPE:
    case T_OBJECT:
    case T_ARRAY:
      if (UseCompressedOops) {
        xchg = &MacroAssembler::atomic_xchgalw;
        add = &MacroAssembler::atomic_addalw;
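In the atomic-op hunk above, the object cases (now including T_VALUETYPE) select the 32-bit primitives (atomic_xchgalw/atomic_addalw) under UseCompressedOops because a narrow oop occupies a 32-bit word, while plain oops and T_LONG use the 64-bit forms. A small std::atomic sketch of just that operand-width distinction; the HotSpot primitives themselves are hand-written assembler, not std::atomic:

#include <atomic>
#include <cstdint>

// 32-bit exchange: the shape needed for narrow (compressed) oops.
uint32_t xchg_word(std::atomic<uint32_t>& slot, uint32_t nv) {
  return slot.exchange(nv, std::memory_order_acq_rel);
}

// 64-bit exchange: the shape needed for plain oops and T_LONG.
uint64_t xchg_dword(std::atomic<uint64_t>& slot, uint64_t nv) {
  return slot.exchange(nv, std::memory_order_acq_rel);
}

int main() {
  std::atomic<uint32_t> narrow{0x1234u};
  std::atomic<uint64_t> wide{0x123456789ULL};
  xchg_word(narrow, 0x5678u);
  xchg_dword(wide, 0x987654321ULL);
  return 0;
}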