29 #include "jvm.h"
30 #include "asm/assembler.hpp"
31 #include "asm/assembler.inline.hpp"
32 #include "gc/shared/barrierSet.hpp"
33 #include "gc/shared/cardTable.hpp"
34 #include "gc/shared/barrierSetAssembler.hpp"
35 #include "gc/shared/cardTableBarrierSet.hpp"
36 #include "interpreter/interpreter.hpp"
37 #include "compiler/disassembler.hpp"
38 #include "memory/resourceArea.hpp"
39 #include "memory/universe.hpp"
40 #include "nativeInst_aarch64.hpp"
41 #include "oops/accessDecorators.hpp"
42 #include "oops/compressedOops.inline.hpp"
43 #include "oops/klass.inline.hpp"
44 #include "runtime/biasedLocking.hpp"
45 #include "runtime/icache.hpp"
46 #include "runtime/interfaceSupport.inline.hpp"
47 #include "runtime/jniHandles.inline.hpp"
48 #include "runtime/sharedRuntime.hpp"
49 #include "runtime/thread.hpp"
50 #ifdef COMPILER1
51 #include "c1/c1_LIRAssembler.hpp"
52 #endif
53 #ifdef COMPILER2
54 #include "oops/oop.hpp"
55 #include "opto/compile.hpp"
56 #include "opto/intrinsicnode.hpp"
57 #include "opto/node.hpp"
58 #endif
59
60 #ifdef PRODUCT
61 #define BLOCK_COMMENT(str) /* nothing */
62 #define STOP(error) stop(error)
63 #else
64 #define BLOCK_COMMENT(str) block_comment(str)
65 #define STOP(error) block_comment(error); stop(error)
66 #endif
67
68 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
4071 movz(dst, (nk >> 16), 16);
4072 movk(dst, nk & 0xffff);
4073 }
4074
4075 void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators,
4076 Register dst, Address src,
4077 Register tmp1, Register thread_tmp) {
4078 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
4079 decorators = AccessInternal::decorator_fixup(decorators);
4080 bool as_raw = (decorators & AS_RAW) != 0;
4081 if (as_raw) {
4082 bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1, thread_tmp);
4083 } else {
4084 bs->load_at(this, decorators, type, dst, src, tmp1, thread_tmp);
4085 }
4086 }
4087
4088 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators,
4089 Address dst, Register src,
4090 Register tmp1, Register thread_tmp, Register tmp3) {
4091 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
4092 decorators = AccessInternal::decorator_fixup(decorators);
4093 bool as_raw = (decorators & AS_RAW) != 0;
4094 if (as_raw) {
4095 bs->BarrierSetAssembler::store_at(this, decorators, type, dst, src, tmp1, thread_tmp, tmp3);
4096 } else {
4097 bs->store_at(this, decorators, type, dst, src, tmp1, thread_tmp, tmp3);
4098 }
4099 }
4100
4101 void MacroAssembler::resolve(DecoratorSet decorators, Register obj) {
4102 // Use stronger ACCESS_WRITE|ACCESS_READ by default.
4103 if ((decorators & (ACCESS_READ | ACCESS_WRITE)) == 0) {
4104 decorators |= ACCESS_READ | ACCESS_WRITE;
4105 }
4106 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
4107 return bs->resolve(this, decorators, obj);
4108 }
4109
4110 void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1,
5939
5940 // insert a nop at the start of the prolog so we can patch in a
5941 // branch if we need to invalidate the method later
5942 nop();
5943
5944 int bangsize = C->bang_size_in_bytes();
5945 if (C->need_stack_bang(bangsize) && UseStackBanging)
5946 generate_stack_overflow_check(bangsize);
5947
5948 build_frame(framesize);
5949
5950 if (NotifySimulator) {
5951 notify(Assembler::method_entry);
5952 }
5953
5954 if (VerifyStackAtCalls) {
5955 Unimplemented();
5956 }
5957 }
5958
// Scalarized value-type argument passing is not supported by this version of
// the port: emit an unimplemented trap instead of unpacking.
void MacroAssembler::unpack_value_args(Compile* C, bool receiver_only) {
  // Called from MachVEP node
  unimplemented("Support for ValueTypePassFieldsAsArgs and ValueTypeReturnedAsFields is not implemented");
}
5963
// Buffer the fields of a returned value type by delegating entirely to the
// shared runtime stub; no inline fast path is emitted in this version.
// NOTE(review): vk is unused here — presumably the stub recovers the klass
// from the return registers itself; confirm against the stub implementation.
void MacroAssembler::store_value_type_fields_to_buf(ciValueKlass* vk) {
  super_call_VM_leaf(StubRoutines::store_value_type_fields_to_buf());
}
|
29 #include "jvm.h"
30 #include "asm/assembler.hpp"
31 #include "asm/assembler.inline.hpp"
32 #include "gc/shared/barrierSet.hpp"
33 #include "gc/shared/cardTable.hpp"
34 #include "gc/shared/barrierSetAssembler.hpp"
35 #include "gc/shared/cardTableBarrierSet.hpp"
36 #include "interpreter/interpreter.hpp"
37 #include "compiler/disassembler.hpp"
38 #include "memory/resourceArea.hpp"
39 #include "memory/universe.hpp"
40 #include "nativeInst_aarch64.hpp"
41 #include "oops/accessDecorators.hpp"
42 #include "oops/compressedOops.inline.hpp"
43 #include "oops/klass.inline.hpp"
44 #include "runtime/biasedLocking.hpp"
45 #include "runtime/icache.hpp"
46 #include "runtime/interfaceSupport.inline.hpp"
47 #include "runtime/jniHandles.inline.hpp"
48 #include "runtime/sharedRuntime.hpp"
49 #include "runtime/signature_cc.hpp"
50 #include "runtime/thread.hpp"
51 #ifdef COMPILER1
52 #include "c1/c1_LIRAssembler.hpp"
53 #endif
54 #ifdef COMPILER2
55 #include "oops/oop.hpp"
56 #include "opto/compile.hpp"
57 #include "opto/intrinsicnode.hpp"
58 #include "opto/node.hpp"
59 #endif
60
61 #ifdef PRODUCT
62 #define BLOCK_COMMENT(str) /* nothing */
63 #define STOP(error) stop(error)
64 #else
65 #define BLOCK_COMMENT(str) block_comment(str)
66 #define STOP(error) block_comment(error); stop(error)
67 #endif
68
69 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
4072 movz(dst, (nk >> 16), 16);
4073 movk(dst, nk & 0xffff);
4074 }
4075
4076 void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators,
4077 Register dst, Address src,
4078 Register tmp1, Register thread_tmp) {
4079 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
4080 decorators = AccessInternal::decorator_fixup(decorators);
4081 bool as_raw = (decorators & AS_RAW) != 0;
4082 if (as_raw) {
4083 bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1, thread_tmp);
4084 } else {
4085 bs->load_at(this, decorators, type, dst, src, tmp1, thread_tmp);
4086 }
4087 }
4088
4089 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators,
4090 Address dst, Register src,
4091 Register tmp1, Register thread_tmp, Register tmp3) {
4092
4093 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
4094 decorators = AccessInternal::decorator_fixup(decorators);
4095 bool as_raw = (decorators & AS_RAW) != 0;
4096 if (as_raw) {
4097 bs->BarrierSetAssembler::store_at(this, decorators, type, dst, src, tmp1, thread_tmp, tmp3);
4098 } else {
4099 bs->store_at(this, decorators, type, dst, src, tmp1, thread_tmp, tmp3);
4100 }
4101 }
4102
4103 void MacroAssembler::resolve(DecoratorSet decorators, Register obj) {
4104 // Use stronger ACCESS_WRITE|ACCESS_READ by default.
4105 if ((decorators & (ACCESS_READ | ACCESS_WRITE)) == 0) {
4106 decorators |= ACCESS_READ | ACCESS_WRITE;
4107 }
4108 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
4109 return bs->resolve(this, decorators, obj);
4110 }
4111
4112 void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1,
5941
5942 // insert a nop at the start of the prolog so we can patch in a
5943 // branch if we need to invalidate the method later
5944 nop();
5945
5946 int bangsize = C->bang_size_in_bytes();
5947 if (C->need_stack_bang(bangsize) && UseStackBanging)
5948 generate_stack_overflow_check(bangsize);
5949
5950 build_frame(framesize);
5951
5952 if (NotifySimulator) {
5953 notify(Assembler::method_entry);
5954 }
5955
5956 if (VerifyStackAtCalls) {
5957 Unimplemented();
5958 }
5959 }
5960
5961 int MacroAssembler::store_value_type_fields_to_buf(ciValueKlass* vk, bool from_interpreter) {
5962 // A value type might be returned. If fields are in registers we
5963 // need to allocate a value type instance and initialize it with
5964 // the value of the fields.
5965 Label skip;
5966 // We only need a new buffered value if a new one is not returned
5967 cmp(r0, (u1) 1);
5968 br(Assembler::EQ, skip);
5969 int call_offset = -1;
5970
5971 Label slow_case;
5972
5973 // Try to allocate a new buffered value (from the heap)
5974 if (UseTLAB) {
5975
5976 if (vk != NULL) {
5977 // Called from C1, where the return type is statically known.
5978 mov(r1, (intptr_t)vk->get_ValueKlass());
5979 jint lh = vk->layout_helper();
5980 assert(lh != Klass::_lh_neutral_value, "inline class in return type must have been resolved");
5981 mov(r14, lh);
5982 } else {
5983 // Call from interpreter. R0 contains ((the ValueKlass* of the return type) | 0x01)
5984 andr(r1, r0, -2);
5985 // get obj size
5986 ldrw(r14, Address(rscratch1 /*klass*/, Klass::layout_helper_offset()));
5987 }
5988
5989 ldr(r13, Address(rthread, in_bytes(JavaThread::tlab_top_offset())));
5990
5991 // check whether we have space in TLAB,
5992 // rscratch1 contains pointer to just allocated obj
5993 lea(r14, Address(r13, r14));
5994 ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_end_offset())));
5995
5996 cmp(r14, rscratch1);
5997 br(Assembler::GT, slow_case);
5998
5999 // OK we have room in TLAB,
6000 // Set new TLAB top
6001 str(r14, Address(rthread, in_bytes(JavaThread::tlab_top_offset())));
6002
6003 // Set new class always locked
6004 mov(rscratch1, (uint64_t) markOopDesc::always_locked_prototype());
6005 str(rscratch1, Address(r13, oopDesc::mark_offset_in_bytes()));
6006
6007 store_klass_gap(r13, zr); // zero klass gap for compressed oops
6008 if (vk == NULL) {
6009 // store_klass corrupts rbx, so save it in rax for later use (interpreter case only).
6010 mov(r0, r1);
6011 }
6012
6013 store_klass(r13, r1); // klass
6014
6015 if (vk != NULL) {
6016 // FIXME -- do the packing in-line to avoid the runtime call
6017 mov(r0, r13);
6018 far_call(RuntimeAddress(vk->pack_handler())); // no need for call info as this will not safepoint.
6019 } else {
6020
6021 // We have our new buffered value, initialize its fields with a
6022 // value class specific handler
6023 ldr(r1, Address(r0, InstanceKlass::adr_valueklass_fixed_block_offset()));
6024 ldr(r1, Address(r1, ValueKlass::pack_handler_offset()));
6025
6026 // Mov new class to r0 and call pack_handler
6027 mov(r0, r13);
6028 blr(r1);
6029 }
6030 b(skip);
6031 }
6032
6033 bind(slow_case);
6034 // We failed to allocate a new value, fall back to a runtime
6035 // call. Some oop field may be live in some registers but we can't
6036 // tell. That runtime call will take care of preserving them
6037 // across a GC if there's one.
6038
6039
6040 if (from_interpreter) {
6041 super_call_VM_leaf(StubRoutines::store_value_type_fields_to_buf());
6042 } else {
6043 ldr(rscratch1, RuntimeAddress(StubRoutines::store_value_type_fields_to_buf()));
6044 blr(rscratch1);
6045 call_offset = offset();
6046 }
6047
6048 bind(skip);
6049 return call_offset;
6050 }
6051
// Move a value between registers/stack slots and update the reg_state
// bookkeeping used while shuffling scalarized value-type arguments.
// Returns true when the destination now holds its final value, false when
// the destination is still marked read-only (its current contents have not
// been consumed yet) and the caller must retry in a later pass.
// Note: ret_off is accepted for signature uniformity but unused here.
bool MacroAssembler::move_helper(VMReg from, VMReg to, BasicType bt, RegState reg_state[], int ret_off, int extra_stack_offset) {
  if (reg_state[to->value()] == reg_written) {
    return true; // Already written
  }

  // A self-move or a T_VOID slot needs no code; only the state update below.
  if (from != to && bt != T_VOID) {
    if (reg_state[to->value()] == reg_readonly) {
      return false; // Not yet writable
    }
    if (from->is_reg()) {
      if (to->is_reg()) {
        // Register -> register
        mov(to->as_Register(), from->as_Register());
      } else {
        // Register -> stack slot
        int st_off = to->reg2stack() * VMRegImpl::stack_slot_size + extra_stack_offset;
        Address to_addr = Address(sp, st_off);
        if (from->is_FloatRegister()) {
          if (bt == T_DOUBLE) {
            strd(from->as_FloatRegister(), to_addr);
          } else {
            assert(bt == T_FLOAT, "must be float");
            strs(from->as_FloatRegister(), to_addr);
          }
        } else {
          str(from->as_Register(), to_addr);
        }
      }
    } else {
      // Stack slot source
      Address from_addr = Address(sp, from->reg2stack() * VMRegImpl::stack_slot_size + extra_stack_offset);
      if (to->is_reg()) {
        // Stack slot -> register
        if (to->is_FloatRegister()) {
          if (bt == T_DOUBLE) {
            ldrd(to->as_FloatRegister(), from_addr);
          } else {
            assert(bt == T_FLOAT, "must be float");
            ldrs(to->as_FloatRegister(), from_addr);
          }
        } else {
          ldr(to->as_Register(), from_addr);
        }
      } else {
        // Stack slot -> stack slot: bounce through rscratch1
        int st_off = to->reg2stack() * VMRegImpl::stack_slot_size + extra_stack_offset;
        ldr(rscratch1, from_addr);
        str(rscratch1, Address(sp, st_off));
      }
    }
  }

  // Update register states: the source becomes reusable, the destination
  // is final.
  reg_state[from->value()] = reg_writable;
  reg_state[to->value()] = reg_written;
  return true;
}
6105
// Read all fields from a value type oop and store the values in registers/stack slots
// as dictated by the scalarized calling convention. Walks the signature
// backwards from an end delimiter (T_VOID), using 'vt' as a nesting counter
// for embedded value types. Returns true when every destination was written;
// false when some destination was still read-only so another pass is needed.
// Note: ret_off is accepted for signature uniformity but unused here.
bool MacroAssembler::unpack_value_helper(const GrowableArray<SigEntry>* sig, int& sig_index, VMReg from, VMRegPair* regs_to,
                                         int& to_index, RegState reg_state[], int ret_off, int extra_stack_offset) {
  // fromReg will hold the oop we read fields from; resolved lazily below if
  // the source currently lives on the stack.
  Register fromReg = from->is_reg() ? from->as_Register() : noreg;
  assert(sig->at(sig_index)._bt == T_VOID, "should be at end delimiter");

  int vt = 1;            // value-type nesting depth; 0 terminates the walk
  bool done = true;      // all destinations written this pass?
  bool mark_done = true; // safe to release the source slot afterwards?
  do {
    sig_index--;
    BasicType bt = sig->at(sig_index)._bt;
    if (bt == T_VALUETYPE) {
      vt--;
    } else if (bt == T_VOID &&
               sig->at(sig_index-1)._bt != T_LONG &&
               sig->at(sig_index-1)._bt != T_DOUBLE) {
      // A T_VOID that is not the upper half of a long/double marks a nested
      // value type delimiter.
      vt++;
    } else if (SigEntry::is_reserved_entry(sig, sig_index)) {
      to_index--; // Ignore this
    } else {
      assert(to_index >= 0, "invalid to_index");
      VMRegPair pair_to = regs_to[to_index--];
      VMReg to = pair_to.first();

      if (bt == T_VOID) continue;

      int idx = (int) to->value();
      if (reg_state[idx] == reg_readonly) {
        // Destination not yet consumable; retry in a later pass (unless the
        // conflict is with the source itself).
        if (idx != from->value()) {
          mark_done = false;
        }
        done = false;
        continue;
      } else if (reg_state[idx] == reg_written) {
        continue;
      } else {
        assert(reg_state[idx] == reg_writable, "must be writable");
        reg_state[idx] = reg_written;
      }

      // Lazily load the source oop from its stack slot into rscratch2.
      if (fromReg == noreg) {
        int st_off = from->reg2stack() * VMRegImpl::stack_slot_size + extra_stack_offset;
        ldr(rscratch2, Address(sp, st_off));
        fromReg = rscratch2;
      }

      int off = sig->at(sig_index)._offset;
      assert(off > 0, "offset in object should be positive");
      bool is_oop = (bt == T_OBJECT || bt == T_ARRAY);

      Address fromAddr = Address(fromReg, off);
      // Sub-word loads are sign-extended except for char/boolean.
      bool is_signed = (bt != T_CHAR) && (bt != T_BOOLEAN);

      if (!to->is_FloatRegister()) {

        // Stack destinations stage the value through rscratch1.
        Register dst = to->is_stack() ? rscratch1 : to->as_Register();

        if (is_oop) {
          load_heap_oop(dst, fromAddr);
        } else {
          load_sized_value(dst, fromAddr, type2aelembytes(bt), is_signed);
        }
        if (to->is_stack()) {
          int st_off = to->reg2stack() * VMRegImpl::stack_slot_size + extra_stack_offset;
          str(dst, Address(sp, st_off));
        }
      } else {
        if (bt == T_DOUBLE) {
          ldrd(to->as_FloatRegister(), fromAddr);
        } else {
          assert(bt == T_FLOAT, "must be float");
          ldrs(to->as_FloatRegister(), fromAddr);
        }
      }

    }

  } while (vt != 0);

  if (mark_done && reg_state[from->value()] != reg_written) {
    // This is okay because no one else will write to that slot
    reg_state[from->value()] = reg_writable;
  }
  return done;
}
6193
// Pack fields back into a value type oop: loads the pre-allocated buffered
// value for argument vtarg_index from the value array (passed in r0), stores
// each scalarized field from its register/stack slot into the object, then
// moves the object oop into its final location 'to'. Returns false when 'to'
// is still read-only and cannot be overwritten yet (caller retries later).
bool MacroAssembler::pack_value_helper(const GrowableArray<SigEntry>* sig, int& sig_index, int vtarg_index,
                                       VMReg to, VMRegPair* regs_from, int regs_from_count, int& from_index, RegState reg_state[],
                                       int ret_off, int extra_stack_offset) {
  assert(sig->at(sig_index)._bt == T_VALUETYPE, "should be at end delimiter");
  assert(to->is_valid(), "must be");

  if (reg_state[to->value()] == reg_written) {
    // Already handled; just advance past this argument's field entries.
    skip_unpacked_fields(sig, sig_index, regs_from, regs_from_count, from_index);
    return true; // Already written
  }

  // Fixed register roles for this routine.
  Register val_array = r0;      // array of buffered values
  Register val_obj_tmp = r11;   // temp for the buffered value oop
  Register from_reg_tmp = r10;  // temp for fields loaded from stack slots
  Register tmp1 = r14;
  Register tmp2 = r13;
  Register tmp3 = r1;
  Register val_obj = to->is_stack() ? val_obj_tmp : to->as_Register();

  if (reg_state[to->value()] == reg_readonly) {
    if (!is_reg_in_unpacked_fields(sig, sig_index, to, regs_from, regs_from_count, from_index)) {
      skip_unpacked_fields(sig, sig_index, regs_from, regs_from_count, from_index);
      return false; // Not yet writable
    }
    // 'to' is one of this argument's own field sources: build the object in
    // the temp register and defer the final move until the end.
    val_obj = val_obj_tmp;
  }

  // Load the buffered value for this argument from the value array.
  int index = arrayOopDesc::base_offset_in_bytes(T_OBJECT) + vtarg_index * type2aelembytes(T_VALUETYPE);
  load_heap_oop(val_obj, Address(val_array, index));

  ScalarizedValueArgsStream stream(sig, sig_index, regs_from, regs_from_count, from_index);
  VMRegPair from_pair;
  BasicType bt;

  while (stream.next(from_pair, bt)) {
    int off = sig->at(stream.sig_cc_index())._offset;
    assert(off > 0, "offset in object should be positive");
    bool is_oop = (bt == T_OBJECT || bt == T_ARRAY);
    size_t size_in_bytes = is_java_primitive(bt) ? type2aelembytes(bt) : wordSize;

    VMReg from_r1 = from_pair.first();
    VMReg from_r2 = from_pair.second();

    // Pack the scalarized field into the value object.
    Address dst(val_obj, off);

    if (!from_r1->is_FloatRegister()) {
      Register from_reg;
      if (from_r1->is_stack()) {
        // Field currently on the stack: stage it through from_reg_tmp.
        from_reg = from_reg_tmp;
        int ld_off = from_r1->reg2stack() * VMRegImpl::stack_slot_size + extra_stack_offset;
        load_sized_value(from_reg, Address(sp, ld_off), size_in_bytes, /* is_signed */ false);
      } else {
        from_reg = from_r1->as_Register();
      }

      if (is_oop) {
        // Oop fields need the GC write barrier.
        DecoratorSet decorators = IN_HEAP | ACCESS_WRITE;
        store_heap_oop(dst, from_reg, tmp1, tmp2, tmp3, decorators);
      } else {
        store_sized_value(dst, from_reg, size_in_bytes);
      }
    } else {
      // Float fields: a valid second half means a 64-bit (double) value.
      if (from_r2->is_valid()) {
        strd(from_r1->as_FloatRegister(), dst);
      } else {
        strs(from_r1->as_FloatRegister(), dst);
      }
    }

    // The consumed source slot can now be reused.
    reg_state[from_r1->value()] = reg_writable;
  }
  sig_index = stream.sig_cc_index();
  from_index = stream.regs_cc_index();

  // Finally place the packed object oop into its destination.
  assert(reg_state[to->value()] == reg_writable, "must have already been read");
  bool success = move_helper(val_obj->as_VMReg(), to, T_OBJECT, reg_state, ret_off, extra_stack_offset);
  assert(success, "to register must be writeable");

  return true;
}
6276
// Unpack all value type arguments passed as oops
// into their scalarized registers/stack slots (shared logic lives in
// unpack_value_args_common), then emit the verified entry point.
void MacroAssembler::unpack_value_args(Compile* C, bool receiver_only) {
  // sp_inc is the stack extension performed for the scalarized convention.
  int sp_inc = unpack_value_args_common(C, receiver_only);
  // Emit code for verified entry and save increment for stack repair on return
  verified_entry(C, sp_inc);
}
6283
// Shuffle arguments between the scalarized and unscalarized calling
// conventions: grows the stack if the destination convention needs more
// slots, decides where the return address is kept safe during shuffling,
// and delegates the actual moves to shuffle_value_args_common.
// Returns the stack increment (sp_inc) applied, for later stack repair.
int MacroAssembler::shuffle_value_args(bool is_packing, bool receiver_only, int extra_stack_offset,
                                       BasicType* sig_bt, const GrowableArray<SigEntry>* sig_cc,
                                       int args_passed, int args_on_stack, VMRegPair* regs,       // from
                                       int args_passed_to, int args_on_stack_to, VMRegPair* regs_to) {  // to
  // Check if we need to extend the stack for packing/unpacking
  int sp_inc = (args_on_stack_to - args_on_stack) * VMRegImpl::stack_slot_size;
  if (sp_inc > 0) {
    sp_inc = align_up(sp_inc, StackAlignmentInBytes);
    if (!is_packing) {
      // Save the return address, adjust the stack (make sure it is properly
      // 16-byte aligned) and copy the return address to the new top of the stack.
      // (Note: C1 does this in C1_MacroAssembler::scalarized_entry).
      // FIXME: We need not to preserve return address on aarch64
      pop(rscratch1);
      sub(sp, sp, sp_inc);
      push(rscratch1);
    }
  } else {
    // The scalarized calling convention needs less stack space than the unscalarized one.
    // No need to extend the stack, the caller will take care of these adjustments.
    sp_inc = 0;
  }

  int ret_off; // make sure we don't overwrite the return address
  if (is_packing) {
    // For C1 code, the VVEP doesn't have reserved slots, so we store the returned address at
    // rsp[0] during shuffling.
    ret_off = 0;
  } else {
    // C2 code ensures that sp_inc is a reserved slot.
    ret_off = sp_inc;
  }

  return shuffle_value_args_common(is_packing, receiver_only, extra_stack_offset,
                                   sig_bt, sig_cc,
                                   args_passed, args_on_stack, regs,
                                   args_passed_to, args_on_stack_to, regs_to,
                                   sp_inc, ret_off);
}
6323
6324 VMReg MacroAssembler::spill_reg_for(VMReg reg) {
6325 return (reg->is_FloatRegister()) ? v0->as_VMReg() : r14->as_VMReg();
6326 }
|