/*
 * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/interpreter.hpp"
#include "oops/arrayOop.hpp"
#include "oops/markOop.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"

int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr, Register scratch, Label& slow_case) {
  const int aligned_mask = BytesPerWord - 1;
  const int hdr_offset = oopDesc::mark_offset_in_bytes();
  assert(hdr == rax, "hdr must be rax, for the cmpxchg instruction");
  assert(hdr != obj && hdr != disp_hdr && obj != disp_hdr, "registers must be different");
  Label done;
  int null_check_offset = -1;

  verify_oop(obj);

  // save object being locked into the BasicObjectLock
  movptr(Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()), obj);

  if (UseBiasedLocking) {
    assert(scratch != noreg, "should have scratch register at this point");
    null_check_offset = biased_locking_enter(disp_hdr, obj, hdr, scratch, false, done, &slow_case);
  } else {
    null_check_offset = offset();
  }

  // Load object header
  movptr(hdr, Address(obj, hdr_offset));
  // and mark it as unlocked
  orptr(hdr, markOopDesc::unlocked_value);
  // save unlocked object header into the displaced header location on the stack
  movptr(Address(disp_hdr, 0), hdr);
  // test if object header is still the same (i.e. unlocked), and if so, store the
  // displaced header address in the object header - if it is not the same, get the
  // object header instead
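  // note: cmpxchg implicitly compares against rax, which is why hdr (holding the
  //       unlocked mark word) must be rax; on success the address of the displaced
  //       header (disp_hdr) is installed into the object's mark word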
  MacroAssembler::lock(); // must be immediately before cmpxchg!
  cmpxchgptr(disp_hdr, Address(obj, hdr_offset));
  // if the object header was the same, we're done
  if (PrintBiasedLockingStatistics) {
    cond_inc32(Assembler::equal,
               ExternalAddress((address)BiasedLocking::fast_path_entry_count_addr()));
  }
  jcc(Assembler::equal, done);
  // if the object header was not the same, it is now in the hdr register
  // => test if it is a stack pointer into the same stack (recursive locking), i.e.:
  //
  // 1) (hdr & aligned_mask) == 0
  // 2) rsp <= hdr
  // 3) hdr <= rsp + page_size
  //
  // these 3 tests can be done by evaluating the following expression:
  //
  // (hdr - rsp) & (aligned_mask - page_size)
  //
  // assuming both the stack pointer and page_size have their least
  // significant 2 bits cleared and page_size is a power of 2
  subptr(hdr, rsp);
  andptr(hdr, aligned_mask - os::vm_page_size());
  // for recursive locking, the result is zero => save it in the displaced header
  // location (NULL in the displaced hdr location indicates recursive locking)
  movptr(Address(disp_hdr, 0), hdr);
  // otherwise we don't care about the result and handle locking via runtime call
  jcc(Assembler::notZero, slow_case);
  // done
  bind(done);
  return null_check_offset;
}


void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case) {
  const int aligned_mask = BytesPerWord - 1;
  const int hdr_offset = oopDesc::mark_offset_in_bytes();
  assert(disp_hdr == rax, "disp_hdr must be rax, for the cmpxchg instruction");
  assert(hdr != obj && hdr != disp_hdr && obj != disp_hdr, "registers must be different");
  Label done;

  if (UseBiasedLocking) {
    // load object
    movptr(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
    biased_locking_exit(obj, hdr, done);
  }

  // load displaced header
  movptr(hdr, Address(disp_hdr, 0));
  // if the loaded hdr is NULL we had recursive locking
  testptr(hdr, hdr);
  // if we had recursive locking, we are done
  jcc(Assembler::zero, done);
  if (!UseBiasedLocking) {
    // load object
    movptr(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
  }
  verify_oop(obj);
  // test if object header is pointing to the displaced header, and if so, restore
  // the displaced header in the object - if the object header is not pointing to
  // the displaced header, get the object header instead
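  // note: cmpxchg again compares against rax (disp_hdr, the address of the on-stack
  //       lock record); only if the mark word still points to our lock record is the
  //       saved displaced header (hdr) written back into the object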
  MacroAssembler::lock(); // must be immediately before cmpxchg!
  cmpxchgptr(hdr, Address(obj, hdr_offset));
  // if the object header was not pointing to the displaced header,
  // we do unlocking via runtime call
  jcc(Assembler::notEqual, slow_case);
  // done
  bind(done);
}


// Defines obj, preserves var_size_in_bytes
void C1_MacroAssembler::try_allocate(Register obj, Register var_size_in_bytes, int con_size_in_bytes, Register t1, Register t2, Label& slow_case) {
  if (UseTLAB) {
    tlab_allocate(noreg, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
  } else {
    eden_allocate(noreg, obj, var_size_in_bytes, con_size_in_bytes, t1, slow_case);
  }
}


void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register t1, Register t2) {
  assert_different_registers(obj, klass, len);
  if (UseBiasedLocking && !len->is_valid()) {
    assert_different_registers(obj, klass, len, t1, t2);
    movptr(t1, Address(klass, Klass::prototype_header_offset()));
    movptr(Address(obj, oopDesc::mark_offset_in_bytes()), t1);
  } else {
    // This assumes that all prototype bits fit in an int32_t
    movptr(Address(obj, oopDesc::mark_offset_in_bytes()), (int32_t)(intptr_t)markOopDesc::prototype());
  }
#ifdef _LP64
  if (UseCompressedClassPointers) { // Take care not to kill klass
    movptr(t1, klass);
    encode_klass_not_null(t1);
    movl(Address(obj, oopDesc::klass_offset_in_bytes()), t1);
  } else
#endif
  {
    movptr(Address(obj, oopDesc::klass_offset_in_bytes()), klass);
  }

  if (len->is_valid()) {
    movl(Address(obj, arrayOopDesc::length_offset_in_bytes()), len);
  }
#ifdef _LP64
  else if (UseCompressedClassPointers) {
    xorptr(t1, t1);
    store_klass_gap(obj, t1);
  }
#endif
}


// preserves obj, destroys len_in_bytes
void C1_MacroAssembler::initialize_body(Register obj, Register len_in_bytes, int hdr_size_in_bytes, Register t1) {
  assert(hdr_size_in_bytes >= 0, "header size must be positive or 0");
  Label done;

  // len_in_bytes is positive and ptr sized
  subptr(len_in_bytes, hdr_size_in_bytes);
  jcc(Assembler::zero, done);
  zero_memory(obj, len_in_bytes, hdr_size_in_bytes, t1);
  bind(done);
}


void C1_MacroAssembler::allocate_object(Register obj, Register t1, Register t2, int header_size, int object_size, Register klass, Label& slow_case) {
  assert(obj == rax, "obj must be in rax, for cmpxchg");
  assert_different_registers(obj, t1, t2); // XXX really?
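  // header_size and object_size are given in words; the allocation and
  // initialization helpers below take byte sizes, hence the scaling by
  // BytesPerWord/HeapWordSize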
  assert(header_size >= 0 && object_size >= header_size, "illegal sizes");

  try_allocate(obj, noreg, object_size * BytesPerWord, t1, t2, slow_case);

  initialize_object(obj, klass, noreg, object_size * HeapWordSize, t1, t2, UseTLAB);
}

void C1_MacroAssembler::initialize_object(Register obj, Register klass, Register var_size_in_bytes, int con_size_in_bytes, Register t1, Register t2, bool is_tlab_allocated) {
  assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0,
         "con_size_in_bytes is not multiple of alignment");
  const int hdr_size_in_bytes = instanceOopDesc::header_size() * HeapWordSize;

  initialize_header(obj, klass, noreg, t1, t2);

  if (!(UseTLAB && ZeroTLAB && is_tlab_allocated)) {
    // clear rest of allocated space
    const Register t1_zero = t1;
    const Register index = t2;
    const int threshold = 6 * BytesPerWord;   // approximate break even point for code size (see comments below)
    if (var_size_in_bytes != noreg) {
      mov(index, var_size_in_bytes);
      initialize_body(obj, index, hdr_size_in_bytes, t1_zero);
    } else if (con_size_in_bytes <= threshold) {
      // use explicit null stores
      // code size = 2 + 3*n bytes (n = number of fields to clear)
      xorptr(t1_zero, t1_zero); // use t1_zero reg to clear memory (shorter code)
      for (int i = hdr_size_in_bytes; i < con_size_in_bytes; i += BytesPerWord)
        movptr(Address(obj, i), t1_zero);
    } else if (con_size_in_bytes > hdr_size_in_bytes) {
      // use loop to null out the fields
      // code size = 16 bytes for even n (n = number of fields to clear)
      // initialize last object field first if odd number of fields
      xorptr(t1_zero, t1_zero); // use t1_zero reg to clear memory (shorter code)
      movptr(index, (con_size_in_bytes - hdr_size_in_bytes) >> 3);
      // initialize last object field if constant size is odd
      if (((con_size_in_bytes - hdr_size_in_bytes) & 4) != 0)
        movptr(Address(obj, con_size_in_bytes - (1*BytesPerWord)), t1_zero);
      // initialize remaining object fields: rdx is a multiple of 2
      { Label loop;
        bind(loop);
        movptr(Address(obj, index, Address::times_8, hdr_size_in_bytes - (1*BytesPerWord)),
               t1_zero);
        NOT_LP64(movptr(Address(obj, index, Address::times_8, hdr_size_in_bytes - (2*BytesPerWord)),
               t1_zero);)
        decrement(index);
        jcc(Assembler::notZero, loop);
      }
    }
  }

  if (CURRENT_ENV->dtrace_alloc_probes()) {
    assert(obj == rax, "must be");
    call(RuntimeAddress(Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)));
  }

  verify_oop(obj);
}

void C1_MacroAssembler::allocate_array(Register obj, Register len, Register t1, Register t2, int header_size, Address::ScaleFactor f, Register klass, Label& slow_case) {
  assert(obj == rax, "obj must be in rax, for cmpxchg");
  assert_different_registers(obj, len, t1, t2, klass);

  // determine alignment mask
  assert(!(BytesPerWord & 1), "must be a multiple of 2 for masking code to work");

  // check for negative or excessive length
  cmpptr(len, (int32_t)max_array_allocation_length);
  jcc(Assembler::above, slow_case);

  const Register arr_size = t2; // okay to be the same
  // align object end
  movptr(arr_size, (int32_t)header_size * BytesPerWord + MinObjAlignmentInBytesMask);
  lea(arr_size, Address(arr_size, len, f));
  andptr(arr_size, ~MinObjAlignmentInBytesMask);

  try_allocate(obj, arr_size, 0, t1, t2, slow_case);

  initialize_header(obj, klass, len, t1, t2);

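  // arr_size still holds the aligned total allocation size in bytes;
  // initialize_body subtracts the header size and zeroes the remainder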
  // clear rest of allocated space
  const Register len_zero = len;
  initialize_body(obj, arr_size, header_size * BytesPerWord, len_zero);

  if (CURRENT_ENV->dtrace_alloc_probes()) {
    assert(obj == rax, "must be");
    call(RuntimeAddress(Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)));
  }

  verify_oop(obj);
}



void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
  verify_oop(receiver);
  // explicit NULL check not needed since load from [klass_offset] causes a trap
  // check against inline cache
  assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()), "must add explicit null check");
  int start_offset = offset();

  if (UseCompressedClassPointers) {
    load_klass(rscratch1, receiver);
    cmpptr(rscratch1, iCache);
  } else {
    cmpptr(iCache, Address(receiver, oopDesc::klass_offset_in_bytes()));
  }
  // if icache check fails, then jump to runtime routine
  // Note: RECEIVER must still contain the receiver!
  jump_cc(Assembler::notEqual,
          RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9);
  assert(UseCompressedClassPointers || offset() - start_offset == ic_cmp_size, "check alignment in emit_method_entry");
}


void C1_MacroAssembler::build_frame(int frame_size_in_bytes, int bang_size_in_bytes) {
  assert(bang_size_in_bytes >= frame_size_in_bytes, "stack bang size incorrect");
  // Make sure there is enough stack space for this method's activation.
  // Note that we do this before doing an enter(). This matches the
  // ordering of C2's stack overflow check / rsp decrement and allows
  // the SharedRuntime stack overflow handling to be consistent
  // between the two compilers.
  generate_stack_overflow_check(bang_size_in_bytes);

  push(rbp);
  if (PreserveFramePointer) {
    mov(rbp, rsp);
  }
#ifdef TIERED
  // c2 leaves fpu stack dirty. Clean it on entry
  if (UseSSE < 2) {
    empty_FPU_stack();
  }
#endif // TIERED
  decrement(rsp, frame_size_in_bytes); // does not emit code for frame_size == 0

  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
  bs->nmethod_entry_barrier(this);
}


void C1_MacroAssembler::remove_frame(int frame_size_in_bytes) {
  increment(rsp, frame_size_in_bytes); // Does not emit code for frame_size == 0
  pop(rbp);
}


void C1_MacroAssembler::verified_entry() {
  if (C1Breakpoint || VerifyFPU || !UseStackBanging) {
    // Verified Entry first instruction should be 5 bytes long for correct
    // patching by patch_verified_entry().
    //
    // C1Breakpoint and VerifyFPU have one byte first instruction.
    // Also first instruction will be one byte "push(rbp)" if stack banging
    // code is not generated (see build_frame() above).
    // For all these cases generate long instruction first.
    fat_nop();
  }
  if (C1Breakpoint) int3();
  // build frame
  verify_FPU(0, "method_entry");
}

void C1_MacroAssembler::load_parameter(int offset_in_words, Register reg) {
  // rbp, + 0: link
  //      + 1: return address
  //      + 2: argument with offset 0
  //      + 3: argument with offset 1
  //      + 4: ...
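  //
  // the "+ 2" in the address computation below skips the saved rbp (link)
  // and the return address to reach the first incoming argument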
  movptr(reg, Address(rbp, (offset_in_words + 2) * BytesPerWord));
}

#ifndef PRODUCT

void C1_MacroAssembler::verify_stack_oop(int stack_offset) {
  if (!VerifyOops) return;
  verify_oop_addr(Address(rsp, stack_offset));
}

void C1_MacroAssembler::verify_not_null_oop(Register r) {
  if (!VerifyOops) return;
  Label not_null;
  testptr(r, r);
  jcc(Assembler::notZero, not_null);
  stop("non-null oop required");
  bind(not_null);
  verify_oop(r);
}

void C1_MacroAssembler::invalidate_registers(bool inv_rax, bool inv_rbx, bool inv_rcx, bool inv_rdx, bool inv_rsi, bool inv_rdi) {
#ifdef ASSERT
  if (inv_rax) movptr(rax, 0xDEAD);
  if (inv_rbx) movptr(rbx, 0xDEAD);
  if (inv_rcx) movptr(rcx, 0xDEAD);
  if (inv_rdx) movptr(rdx, 0xDEAD);
  if (inv_rsi) movptr(rsi, 0xDEAD);
  if (inv_rdi) movptr(rdi, 0xDEAD);
#endif
}

#endif // ifndef PRODUCT