/*
 * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/interpreter.hpp"
#include "oops/arrayOop.hpp"
#include "oops/markOop.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"

int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr, Register scratch, Label& slow_case, bool check_always_locked) {
  const int aligned_mask = BytesPerWord - 1;
  const int hdr_offset = oopDesc::mark_offset_in_bytes();
  assert(hdr == rax, "hdr must be rax, for the cmpxchg instruction");
  assert(hdr != obj && hdr != disp_hdr && obj != disp_hdr, "registers must be different");
  Label done;
  int null_check_offset = -1;

  verify_oop(obj);

  // save object being locked into the BasicObjectLock
  movptr(Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()), obj);

  if (UseBiasedLocking) {
    assert(scratch != noreg, "should have scratch register at this point");
    null_check_offset = biased_locking_enter(disp_hdr, obj, hdr, scratch, false, done, &slow_case);
  } else {
    null_check_offset = offset();
  }

  // Load object header
  movptr(hdr, Address(obj, hdr_offset));
  if (check_always_locked) {
    testl(hdr, markOopDesc::always_locked_pattern);
    jcc(Assembler::notZero, slow_case);
  }
  // and mark it as unlocked
  orptr(hdr, markOopDesc::unlocked_value);
  // save unlocked object header into the displaced header location on the stack
  movptr(Address(disp_hdr, 0), hdr);
  // test if object header is still the same (i.e. unlocked), and if so, store the
  // displaced header address in the object header - if it is not the same, get the
  // object header instead
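  // cmpxchgptr compares rax (hdr, holding the expected unlocked mark word)
  // with the object's mark word; if they match, the address of the displaced
  // header (disp_hdr) is installed and ZF is set. On failure rax receives
  // the current mark word.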
  MacroAssembler::lock(); // must be immediately before cmpxchg!
  cmpxchgptr(disp_hdr, Address(obj, hdr_offset));
  // if the object header was the same, we're done
  if (PrintBiasedLockingStatistics) {
    cond_inc32(Assembler::equal,
               ExternalAddress((address)BiasedLocking::fast_path_entry_count_addr()));
  }
  jcc(Assembler::equal, done);
  // if the object header was not the same, it is now in the hdr register
  // => test if it is a stack pointer into the same stack (recursive locking), i.e.:
  //
  // 1) (hdr & aligned_mask) == 0
  // 2) rsp <= hdr
  // 3) hdr <= rsp + page_size
  //
  // these 3 tests can be done by evaluating the following expression:
  //
  // (hdr - rsp) & (aligned_mask - page_size)
  //
  // assuming both the stack pointer and page_size have their least
  // significant 2 bits cleared and page_size is a power of 2
  // (e.g. with 8-byte words and 4K pages the mask is ~0xff8: the low bits
  //  check word alignment, the remaining bits check the distance from rsp)
  subptr(hdr, rsp);
  andptr(hdr, aligned_mask - os::vm_page_size());
  // for recursive locking, the result is zero => save it in the displaced header
  // location (NULL in the displaced hdr location indicates recursive locking)
  movptr(Address(disp_hdr, 0), hdr);
  // otherwise we don't care about the result and handle locking via runtime call
  jcc(Assembler::notZero, slow_case);
  // done
  bind(done);
  return null_check_offset;
}


void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case) {
  const int aligned_mask = BytesPerWord - 1;
  const int hdr_offset = oopDesc::mark_offset_in_bytes();
  assert(disp_hdr == rax, "disp_hdr must be rax, for the cmpxchg instruction");
  assert(hdr != obj && hdr != disp_hdr && obj != disp_hdr, "registers must be different");
  Label done;

  if (UseBiasedLocking) {
    // load object
    movptr(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
    biased_locking_exit(obj, hdr, done);
  }

  // load displaced header
  movptr(hdr, Address(disp_hdr, 0));
  // if the loaded hdr is NULL we had recursive locking
  testptr(hdr, hdr);
  // if we had recursive locking, we are done
  jcc(Assembler::zero, done);
  if (!UseBiasedLocking) {
    // load object
    movptr(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
  }
  verify_oop(obj);
  // test if object header is pointing to the displaced header, and if so, restore
  // the displaced header in the object - if the object header is not pointing to
  // the displaced header, get the object header instead
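  // here disp_hdr (rax) holds the expected value: the mark word should still
  // point at our BasicLock. If it does, the saved displaced header in hdr is
  // swapped back into the object; otherwise ZF is cleared.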
  MacroAssembler::lock(); // must be immediately before cmpxchg!
  cmpxchgptr(hdr, Address(obj, hdr_offset));
  // if the object header was not pointing to the displaced header,
  // we do unlocking via runtime call
  jcc(Assembler::notEqual, slow_case);
  // done
  bind(done);
}


// Defines obj, preserves var_size_in_bytes
void C1_MacroAssembler::try_allocate(Register obj, Register var_size_in_bytes, int con_size_in_bytes, Register t1, Register t2, Label& slow_case) {
  if (UseTLAB) {
    tlab_allocate(noreg, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
  } else {
    eden_allocate(noreg, obj, var_size_in_bytes, con_size_in_bytes, t1, slow_case);
  }
}


void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register t1, Register t2) {
  assert_different_registers(obj, klass, len);
  if (UseBiasedLocking && !len->is_valid()) {
    assert_different_registers(obj, klass, len, t1, t2);
    movptr(t1, Address(klass, Klass::prototype_header_offset()));
    movptr(Address(obj, oopDesc::mark_offset_in_bytes()), t1);
  } else {
    // This assumes that all prototype bits fit in an int32_t
    movptr(Address(obj, oopDesc::mark_offset_in_bytes()), (int32_t)(intptr_t)markOopDesc::prototype());
  }
#ifdef _LP64
  if (UseCompressedClassPointers) { // Take care not to kill klass
    movptr(t1, klass);
    encode_klass_not_null(t1);
    movl(Address(obj, oopDesc::klass_offset_in_bytes()), t1);
  } else
#endif
  {
    movptr(Address(obj, oopDesc::klass_offset_in_bytes()), klass);
  }

  if (len->is_valid()) {
    movl(Address(obj, arrayOopDesc::length_offset_in_bytes()), len);
  }
#ifdef _LP64
  else if (UseCompressedClassPointers) {
    xorptr(t1, t1);
    store_klass_gap(obj, t1);
  }
#endif
}


// preserves obj, destroys len_in_bytes
void C1_MacroAssembler::initialize_body(Register obj, Register len_in_bytes, int hdr_size_in_bytes, Register t1) {
  assert(hdr_size_in_bytes >= 0, "header size must be positive or 0");
  Label done;

  // len_in_bytes is positive and ptr sized
  subptr(len_in_bytes, hdr_size_in_bytes);
  jcc(Assembler::zero, done);
  zero_memory(obj, len_in_bytes, hdr_size_in_bytes, t1);
  bind(done);
}

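// Note: header_size and object_size below are in words; on x86 a HeapWord is
// pointer-sized, so object_size * BytesPerWord and object_size * HeapWordSize
// compute the same byte count.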
void C1_MacroAssembler::allocate_object(Register obj, Register t1, Register t2, int header_size, int object_size, Register klass, Label& slow_case) {
  assert(obj == rax, "obj must be in rax, for cmpxchg");
  assert_different_registers(obj, t1, t2); // XXX really?
  assert(header_size >= 0 && object_size >= header_size, "illegal sizes");

  try_allocate(obj, noreg, object_size * BytesPerWord, t1, t2, slow_case);

  initialize_object(obj, klass, noreg, object_size * HeapWordSize, t1, t2, UseTLAB);
}

void C1_MacroAssembler::initialize_object(Register obj, Register klass, Register var_size_in_bytes, int con_size_in_bytes, Register t1, Register t2, bool is_tlab_allocated) {
  assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0,
         "con_size_in_bytes is not multiple of alignment");
  const int hdr_size_in_bytes = instanceOopDesc::header_size() * HeapWordSize;

  initialize_header(obj, klass, noreg, t1, t2);

  if (!(UseTLAB && ZeroTLAB && is_tlab_allocated)) {
    // clear rest of allocated space
    const Register t1_zero = t1;
    const Register index = t2;
    const int threshold = 6 * BytesPerWord; // approximate break even point for code size (see comments below)
    if (var_size_in_bytes != noreg) {
      mov(index, var_size_in_bytes);
      initialize_body(obj, index, hdr_size_in_bytes, t1_zero);
    } else if (con_size_in_bytes <= threshold) {
      // use explicit null stores
      // code size = 2 + 3*n bytes (n = number of fields to clear)
      xorptr(t1_zero, t1_zero); // use t1_zero reg to clear memory (shorter code)
      for (int i = hdr_size_in_bytes; i < con_size_in_bytes; i += BytesPerWord)
        movptr(Address(obj, i), t1_zero);
    } else if (con_size_in_bytes > hdr_size_in_bytes) {
      // use loop to null out the fields
      // code size = 16 bytes for even n (n = number of fields to clear)
      // initialize last object field first if odd number of fields
      xorptr(t1_zero, t1_zero); // use t1_zero reg to clear memory (shorter code)
      movptr(index, (con_size_in_bytes - hdr_size_in_bytes) >> 3);
      // initialize last object field if constant size is odd
      if (((con_size_in_bytes - hdr_size_in_bytes) & 4) != 0)
        movptr(Address(obj, con_size_in_bytes - (1*BytesPerWord)), t1_zero);
      // initialize remaining object fields: rdx is a multiple of 2
      { Label loop;
        bind(loop);
        movptr(Address(obj, index, Address::times_8, hdr_size_in_bytes - (1*BytesPerWord)),
               t1_zero);
        NOT_LP64(movptr(Address(obj, index, Address::times_8, hdr_size_in_bytes - (2*BytesPerWord)),
               t1_zero);)
        decrement(index);
        jcc(Assembler::notZero, loop);
      }
    }
  }

  if (CURRENT_ENV->dtrace_alloc_probes()) {
    assert(obj == rax, "must be");
    call(RuntimeAddress(Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)));
  }

  verify_oop(obj);
}

void C1_MacroAssembler::allocate_array(Register obj, Register len, Register t1, Register t2, int header_size, Address::ScaleFactor f, Register klass, Label& slow_case) {
  assert(obj == rax, "obj must be in rax, for cmpxchg");
  assert_different_registers(obj, len, t1, t2, klass);

  // determine alignment mask
  assert(!(BytesPerWord & 1), "must be a multiple of 2 for masking code to work");

  // check for negative or excessive length
  cmpptr(len, (int32_t)max_array_allocation_length);
  jcc(Assembler::above, slow_case);

  const Register arr_size = t2; // okay to be the same
  // align object end
  movptr(arr_size, (int32_t)header_size * BytesPerWord + MinObjAlignmentInBytesMask);
  lea(arr_size, Address(arr_size, len, f));
  andptr(arr_size, ~MinObjAlignmentInBytesMask);

  try_allocate(obj, arr_size, 0, t1, t2, slow_case);

  initialize_header(obj, klass, len, t1, t2);

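  // arr_size (t2) still holds the aligned allocation size in bytes and drives
  // the body clear below; len is dead past this point and is reused as the
  // temporary register for zeroing.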
  // clear rest of allocated space
  const Register len_zero = len;
  initialize_body(obj, arr_size, header_size * BytesPerWord, len_zero);

  if (CURRENT_ENV->dtrace_alloc_probes()) {
    assert(obj == rax, "must be");
    call(RuntimeAddress(Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)));
  }

  verify_oop(obj);
}


void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
  verify_oop(receiver);
  // explicit NULL check not needed since load from [klass_offset] causes a trap
  // check against inline cache
  assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()), "must add explicit null check");
  int start_offset = offset();

  if (UseCompressedClassPointers) {
    load_klass(rscratch1, receiver);
    cmpptr(rscratch1, iCache);
  } else {
    cmpptr(iCache, Address(receiver, oopDesc::klass_offset_in_bytes()));
  }
  // if icache check fails, then jump to runtime routine
  // Note: RECEIVER must still contain the receiver!
  jump_cc(Assembler::notEqual,
          RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9);
  assert(UseCompressedClassPointers || offset() - start_offset == ic_cmp_size, "check alignment in emit_method_entry");
}


void C1_MacroAssembler::build_frame(int frame_size_in_bytes, int bang_size_in_bytes) {
  assert(bang_size_in_bytes >= frame_size_in_bytes, "stack bang size incorrect");
  // Make sure there is enough stack space for this method's activation.
  // Note that we do this before doing an enter(). This matches the
  // ordering of C2's stack overflow check / rsp decrement and allows
  // the SharedRuntime stack overflow handling to be consistent
  // between the two compilers.
  generate_stack_overflow_check(bang_size_in_bytes);

  push(rbp);
  if (PreserveFramePointer) {
    mov(rbp, rsp);
  }
#ifdef TIERED
  // c2 leaves fpu stack dirty. Clean it on entry
  if (UseSSE < 2) {
    empty_FPU_stack();
  }
#endif // TIERED
  decrement(rsp, frame_size_in_bytes); // does not emit code for frame_size == 0

  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
  bs->nmethod_entry_barrier(this);
}


void C1_MacroAssembler::remove_frame(int frame_size_in_bytes) {
  increment(rsp, frame_size_in_bytes); // Does not emit code for frame_size == 0
  pop(rbp);
}


void C1_MacroAssembler::verified_entry() {
  if (C1Breakpoint || VerifyFPU || !UseStackBanging) {
    // Verified Entry first instruction should be 5 bytes long for correct
    // patching by patch_verified_entry().
    //
    // C1Breakpoint and VerifyFPU have one byte first instruction.
    // Also first instruction will be one byte "push(rbp)" if stack banging
    // code is not generated (see build_frame() above).
    // For all these cases generate long instruction first.
    fat_nop();
  }
  if (C1Breakpoint) int3();
  // build frame
  verify_FPU(0, "method_entry");
}

void C1_MacroAssembler::load_parameter(int offset_in_words, Register reg) {
  // rbp, + 0: link
  //     + 1: return address
  //     + 2: argument with offset 0
  //     + 3: argument with offset 1
  //     + 4: ...
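  // skip the saved rbp and the return address (2 words): e.g. on LP64,
  // offset_in_words == 0 loads the first argument from [rbp + 16]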
  movptr(reg, Address(rbp, (offset_in_words + 2) * BytesPerWord));
}

#ifndef PRODUCT

void C1_MacroAssembler::verify_stack_oop(int stack_offset) {
  if (!VerifyOops) return;
  verify_oop_addr(Address(rsp, stack_offset));
}

void C1_MacroAssembler::verify_not_null_oop(Register r) {
  if (!VerifyOops) return;
  Label not_null;
  testptr(r, r);
  jcc(Assembler::notZero, not_null);
  stop("non-null oop required");
  bind(not_null);
  verify_oop(r);
}

void C1_MacroAssembler::invalidate_registers(bool inv_rax, bool inv_rbx, bool inv_rcx, bool inv_rdx, bool inv_rsi, bool inv_rdi) {
#ifdef ASSERT
  if (inv_rax) movptr(rax, 0xDEAD);
  if (inv_rbx) movptr(rbx, 0xDEAD);
  if (inv_rcx) movptr(rcx, 0xDEAD);
  if (inv_rdx) movptr(rdx, 0xDEAD);
  if (inv_rsi) movptr(rsi, 0xDEAD);
  if (inv_rdi) movptr(rdi, 0xDEAD);
#endif
}

#endif // ifndef PRODUCT