/*
 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "runtime/jniHandles.hpp"

// Shorthand used throughout HotSpot assembler stubs: "__ foo(...)" emits
// an instruction via the MacroAssembler.
#define __ masm->

// Emit a decorated load of a value of the given BasicType from memory at
// `src` into `dst` (integral/oop types) or into v0 (T_FLOAT/T_DOUBLE —
// `dst` is ignored for those cases).
//
// For oop loads (T_OBJECT/T_ARRAY):
//   - IN_HEAP with UseCompressedOops: loads a 32-bit narrow oop and decodes
//     it; OOP_NOT_NULL selects the cheaper decode_heap_oop_not_null path.
//   - otherwise (asserted to be IN_ROOT): a plain 64-bit load, i.e. roots
//     are never stored compressed.
//
// `tmp1` and `tmp_thread` are unused in this base implementation; they are
// presumably scratch registers for GC-specific subclasses — confirm against
// the overriding barrier-set assemblers.
void BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, Register dst, Address src, Register tmp1, Register tmp_thread) {
  // LR is live. It must be saved around calls.
  bool on_heap = (decorators & IN_HEAP) != 0;
  bool on_root = (decorators & IN_ROOT) != 0;
  bool oop_not_null = (decorators & OOP_NOT_NULL) != 0;
  switch (type) {
  case T_OBJECT:
  case T_ARRAY: {
    if (on_heap) {
      if (UseCompressedOops) {
        // Narrow (32-bit) oop load, then decode to a full oop in dst.
        __ ldrw(dst, src);
        if (oop_not_null) {
          __ decode_heap_oop_not_null(dst);
        } else {
          __ decode_heap_oop(dst);
        }
      } else {
        __ ldr(dst, src);
      }
    } else {
      // Only heap and root locations are expected here.
      assert(on_root, "why else?");
      __ ldr(dst, src);
    }
    break;
  }
  // Sub-word loads pick the signedness mandated by the Java type:
  // boolean/char are zero-extended, byte/short are sign-extended.
  case T_BOOLEAN: __ load_unsigned_byte (dst, src); break;
  case T_BYTE:    __ load_signed_byte   (dst, src); break;
  case T_CHAR:    __ load_unsigned_short(dst, src); break;
  case T_SHORT:   __ load_signed_short  (dst, src); break;
  case T_INT:     __ ldrw               (dst, src); break;
  case T_LONG:    __ ldr                (dst, src); break;
  case T_ADDRESS: __ ldr                (dst, src); break;
  // Floating-point results go to v0 by convention; `dst` is not used.
  case T_FLOAT:   __ ldrs               (v0, src); break;
  case T_DOUBLE:  __ ldrd               (v0, src); break;
  default: Unimplemented();
  }
}

// Emit a decorated store of `val` (or v0 for T_FLOAT/T_DOUBLE) of the given
// BasicType to memory at `dst`.
//
// For oop stores (T_OBJECT/T_ARRAY):
//   - a `noreg` value means "store NULL" and is emitted as a store of zr;
//   - IN_HEAP with UseCompressedOops: encodes the oop in place (NOTE: this
//     clobbers `val` unless it is zr) and stores the 32-bit narrow form;
//   - otherwise (asserted to be IN_ROOT): plain 64-bit store.
//
// `tmp1` and `tmp2` are unused here; presumably scratch for GC-specific
// subclasses — confirm against the overriding barrier-set assemblers.
void BarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, Address dst, Register val, Register tmp1, Register tmp2) {
  bool on_heap = (decorators & IN_HEAP) != 0;
  bool on_root = (decorators & IN_ROOT) != 0;
  switch (type) {
  case T_OBJECT:
  case T_ARRAY: {
    // noreg encodes a NULL store; use the zero register instead.
    val = val == noreg ? zr : val;
    if (on_heap) {
      if (UseCompressedOops) {
        // encode_heap_oop rewrites val, so the address must not use it.
        assert(!dst.uses(val), "not enough registers");
        if (val != zr) {
          __ encode_heap_oop(val);
        }
        __ strw(val, dst);
      } else {
        __ str(val, dst);
      }
    } else {
      // Only heap and root locations are expected here.
      assert(on_root, "why else?");
      __ str(val, dst);
    }
    break;
  }
  case T_BOOLEAN:
    __ andw(val, val, 0x1);  // boolean is true if LSB is 1
    __ strb(val, dst);
    break;
  case T_BYTE:    __ strb(val, dst); break;
  case T_CHAR:    __ strh(val, dst); break;
  case T_SHORT:   __ strh(val, dst); break;
  case T_INT:     __ strw(val, dst); break;
  case T_LONG:    __ str (val, dst); break;
  case T_ADDRESS: __ str (val, dst); break;
  // Floating-point values come in v0 by convention; `val` is not used.
  case T_FLOAT:   __ strs(v0, dst); break;
  case T_DOUBLE:  __ strd(v0, dst); break;
  default: Unimplemented();
  }
}

// Compare two oops for equality: a plain register compare here; GC-specific
// subclasses may need more (e.g. resolving forwarded objects).
void BarrierSetAssembler::obj_equals(MacroAssembler* masm, DecoratorSet decorators, Register obj1, Register obj2) {
  __ cmp(obj1, obj2);
}

// Resolve a jobject handle in `obj` to the oop it refers to, for code
// running in native state: clear the weak-tag bit and dereference the
// handle. `tmp` and `slowpath` are unused in this base implementation;
// presumably GC subclasses branch to `slowpath` when a barrier is needed —
// confirm against the overriding barrier-set assemblers.
void BarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env, Register obj, Register tmp, Label& slowpath) {
  // If mask changes we need to ensure that the inverse is still encodable as an immediate
  STATIC_ASSERT(JNIHandles::weak_tag_mask == 1);
  __ andr(obj, obj, ~JNIHandles::weak_tag_mask);
  __ ldr(obj, Address(obj, 0));             // *obj
}