/*
 * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/interpreter.hpp"
#include "oops/arrayOop.hpp"
#include "oops/markOop.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"

int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr, Register scratch, Label& slow_case) {
  const int aligned_mask = BytesPerWord - 1;
  const int hdr_offset = oopDesc::mark_offset_in_bytes();
  assert(hdr == rax, "hdr must be rax, for the cmpxchg instruction");
  assert(hdr != obj && hdr != disp_hdr && obj != disp_hdr, "registers must be different");
  Label done;
  int null_check_offset = -1;

  verify_oop(obj);

  // save object being locked into the BasicObjectLock
  movptr(Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()), obj);

  if (UseBiasedLocking) {
    assert(scratch != noreg, "should have scratch register at this point");
    null_check_offset = biased_locking_enter(disp_hdr, obj, hdr, scratch, false, done, &slow_case);
  } else {
    null_check_offset = offset();
  }

  // Load object header
  movptr(hdr, Address(obj, hdr_offset));
  // and mark it as unlocked
  orptr(hdr, markOopDesc::unlocked_value);
  // save unlocked object header into the displaced header location on the stack
  movptr(Address(disp_hdr, 0), hdr);
  // test if object header is still the same (i.e. unlocked), and if so, store the
  // displaced header address in the object header - if it is not the same, get the
  // object header instead
  if (os::is_MP()) MacroAssembler::lock(); // must be immediately before cmpxchg!
  cmpxchgptr(disp_hdr, Address(obj, hdr_offset));
  // if the object header was the same, we're done
  if (PrintBiasedLockingStatistics) {
    cond_inc32(Assembler::equal,
               ExternalAddress((address)BiasedLocking::fast_path_entry_count_addr()));
  }
  jcc(Assembler::equal, done);
  // if the object header was not the same, it is now in the hdr register
  // => test if it is a stack pointer into the same stack (recursive locking), i.e.:
  //
  // 1) (hdr & aligned_mask) == 0
  // 2) rsp <= hdr
  // 3) hdr <= rsp + page_size
  //
  // these 3 tests can be done by evaluating the following expression:
  //
  // (hdr - rsp) & (aligned_mask - page_size)
  //
  // assuming both the stack pointer and page_size have their least
  // significant 2 bits cleared and page_size is a power of 2
  subptr(hdr, rsp);
  andptr(hdr, aligned_mask - os::vm_page_size());
  // for recursive locking, the result is zero => save it in the displaced header
  // location (NULL in the displaced hdr location indicates recursive locking)
  movptr(Address(disp_hdr, 0), hdr);
  // otherwise we don't care about the result and handle locking via runtime call
  jcc(Assembler::notZero, slow_case);
  // done
  bind(done);
  return null_check_offset;
}


void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case) {
  const int aligned_mask = BytesPerWord - 1;
  const int hdr_offset = oopDesc::mark_offset_in_bytes();
  assert(disp_hdr == rax, "disp_hdr must be rax, for the cmpxchg instruction");
  assert(hdr != obj && hdr != disp_hdr && obj != disp_hdr, "registers must be different");
  Label done;

  if (UseBiasedLocking) {
    // load object
    movptr(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
    biased_locking_exit(obj, hdr, done);
  }

  // load displaced header
  movptr(hdr, Address(disp_hdr, 0));
  // if the loaded hdr is NULL we had recursive locking
  testptr(hdr, hdr);
  // if we had recursive locking, we are done
  jcc(Assembler::zero, done);
  if (!UseBiasedLocking) {
    // load object
    movptr(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
  }
  verify_oop(obj);
  // test if object header is pointing to the displaced header, and if so, restore
  // the displaced header in the object - if the object header is not pointing to
  // the displaced header, get the object header instead
  if (os::is_MP()) MacroAssembler::lock(); // must be immediately before cmpxchg!
  cmpxchgptr(hdr, Address(obj, hdr_offset));
  // if the object header was not pointing to the displaced header,
  // we do unlocking via runtime call
  jcc(Assembler::notEqual, slow_case);
  // done
  bind(done);
}


// Defines obj, preserves var_size_in_bytes
void C1_MacroAssembler::try_allocate(Register obj, Register var_size_in_bytes, int con_size_in_bytes, Register t1, Register t2, Label& slow_case) {
  if (UseTLAB) {
    tlab_allocate(obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
  } else {
    eden_allocate(obj, var_size_in_bytes, con_size_in_bytes, t1, slow_case);
    incr_allocated_bytes(noreg, var_size_in_bytes, con_size_in_bytes, t1);
  }
}


void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register t1, Register t2) {
  assert_different_registers(obj, klass, len);
  if (UseBiasedLocking && !len->is_valid()) {
    assert_different_registers(obj, klass, len, t1, t2);
    movptr(t1, Address(klass, Klass::prototype_header_offset()));
    movptr(Address(obj, oopDesc::mark_offset_in_bytes()), t1);
  } else {
    // This assumes that all prototype bits fit in an int32_t
    movptr(Address(obj, oopDesc::mark_offset_in_bytes()), (int32_t)(intptr_t)markOopDesc::prototype());
  }
#ifdef _LP64
  if (UseCompressedClassPointers) { // Take care not to kill klass
    movptr(t1, klass);
    encode_klass_not_null(t1);
    movl(Address(obj, oopDesc::klass_offset_in_bytes()), t1);
  } else
#endif
  {
    movptr(Address(obj, oopDesc::klass_offset_in_bytes()), klass);
  }

  if (len->is_valid()) {
    movl(Address(obj, arrayOopDesc::length_offset_in_bytes()), len);
  }
#ifdef _LP64
  else if (UseCompressedClassPointers) {
    xorptr(t1, t1);
    store_klass_gap(obj, t1);
  }
#endif
}


// preserves obj, destroys len_in_bytes
void C1_MacroAssembler::initialize_body(Register obj, Register len_in_bytes, int hdr_size_in_bytes, Register t1) {
  assert(hdr_size_in_bytes >= 0, "header size must be positive or 0");
  Label done;

  // len_in_bytes is positive and ptr sized
  subptr(len_in_bytes, hdr_size_in_bytes);
  jcc(Assembler::zero, done);
  zero_memory(obj, len_in_bytes, hdr_size_in_bytes, t1);
  bind(done);
}


void C1_MacroAssembler::allocate_object(Register obj, Register t1, Register t2, int header_size, int object_size, Register klass, Label& slow_case) {
  assert(obj == rax, "obj must be in rax, for cmpxchg");
  assert_different_registers(obj, t1, t2); // XXX really?
  assert(header_size >= 0 && object_size >= header_size, "illegal sizes");

  try_allocate(obj, noreg, object_size * BytesPerWord, t1, t2, slow_case);

  initialize_object(obj, klass, noreg, object_size * HeapWordSize, t1, t2, UseTLAB);
}

void C1_MacroAssembler::initialize_object(Register obj, Register klass, Register var_size_in_bytes, int con_size_in_bytes, Register t1, Register t2, bool is_tlab_allocated) {
  assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0,
         "con_size_in_bytes is not multiple of alignment");
  const int hdr_size_in_bytes = instanceOopDesc::header_size() * HeapWordSize;

  initialize_header(obj, klass, noreg, t1, t2);

  if (!(UseTLAB && ZeroTLAB && is_tlab_allocated)) {
    // clear rest of allocated space
    const Register t1_zero = t1;
    const Register index = t2;
    const int threshold = 6 * BytesPerWord; // approximate break even point for code size (see comments below)
    if (var_size_in_bytes != noreg) {
      mov(index, var_size_in_bytes);
      initialize_body(obj, index, hdr_size_in_bytes, t1_zero);
    } else if (con_size_in_bytes <= threshold) {
      // use explicit null stores
      // code size = 2 + 3*n bytes (n = number of fields to clear)
      xorptr(t1_zero, t1_zero); // use t1_zero reg to clear memory (shorter code)
      for (int i = hdr_size_in_bytes; i < con_size_in_bytes; i += BytesPerWord)
        movptr(Address(obj, i), t1_zero);
    } else if (con_size_in_bytes > hdr_size_in_bytes) {
      // use loop to null out the fields
      // code size = 16 bytes for even n (n = number of fields to clear)
      // initialize last object field first if odd number of fields
      xorptr(t1_zero, t1_zero); // use t1_zero reg to clear memory (shorter code)
      movptr(index, (con_size_in_bytes - hdr_size_in_bytes) >> 3);
      // initialize last object field if constant size is odd
      if (((con_size_in_bytes - hdr_size_in_bytes) & 4) != 0)
        movptr(Address(obj, con_size_in_bytes - (1*BytesPerWord)), t1_zero);
      // initialize remaining object fields: rdx is a multiple of 2
      { Label loop;
        bind(loop);
        movptr(Address(obj, index, Address::times_8, hdr_size_in_bytes - (1*BytesPerWord)),
               t1_zero);
        NOT_LP64(movptr(Address(obj, index, Address::times_8, hdr_size_in_bytes - (2*BytesPerWord)),
               t1_zero);)
        decrement(index);
        jcc(Assembler::notZero, loop);
      }
    }
  }

  if (CURRENT_ENV->dtrace_alloc_probes()) {
    assert(obj == rax, "must be");
    call(RuntimeAddress(Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)));
  }

  verify_oop(obj);
}

void C1_MacroAssembler::allocate_array(Register obj, Register len, Register t1, Register t2, int header_size, Address::ScaleFactor f, Register klass, Label& slow_case) {
  assert(obj == rax, "obj must be in rax, for cmpxchg");
  assert_different_registers(obj, len, t1, t2, klass);

  // determine alignment mask
  assert(!(BytesPerWord & 1), "must be a multiple of 2 for masking code to work");

  // check for negative or excessive length
  cmpptr(len, (int32_t)max_array_allocation_length);
  jcc(Assembler::above, slow_case);

  const Register arr_size = t2; // okay to be the same
  // align object end
  movptr(arr_size, (int32_t)header_size * BytesPerWord + MinObjAlignmentInBytesMask);
  lea(arr_size, Address(arr_size, len, f));
  andptr(arr_size, ~MinObjAlignmentInBytesMask);

  try_allocate(obj, arr_size, 0, t1, t2, slow_case);

  initialize_header(obj, klass, len, t1, t2);

  // clear rest of allocated space
  const Register len_zero = len;
  initialize_body(obj, arr_size, header_size * BytesPerWord, len_zero);

  if (CURRENT_ENV->dtrace_alloc_probes()) {
    assert(obj == rax, "must be");
    call(RuntimeAddress(Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)));
  }

  verify_oop(obj);
}


void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
  verify_oop(receiver);
  // explicit NULL check not needed since load from [klass_offset] causes a trap
  // check against inline cache
  assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()), "must add explicit null check");
  int start_offset = offset();

  if (UseCompressedClassPointers) {
    load_klass(rscratch1, receiver);
    cmpptr(rscratch1, iCache);
  } else {
    cmpptr(iCache, Address(receiver, oopDesc::klass_offset_in_bytes()));
  }
  // if icache check fails, then jump to runtime routine
  // Note: RECEIVER must still contain the receiver!
  jump_cc(Assembler::notEqual,
          RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9);
  assert(UseCompressedClassPointers || offset() - start_offset == ic_cmp_size, "check alignment in emit_method_entry");
}


void C1_MacroAssembler::build_frame(int frame_size_in_bytes, int bang_size_in_bytes) {
  assert(bang_size_in_bytes >= frame_size_in_bytes, "stack bang size incorrect");
  // Make sure there is enough stack space for this method's activation.
  // Note that we do this before doing an enter(). This matches the
  // ordering of C2's stack overflow check / rsp decrement and allows
  // the SharedRuntime stack overflow handling to be consistent
  // between the two compilers.
  generate_stack_overflow_check(bang_size_in_bytes);

  push(rbp);
  if (PreserveFramePointer) {
    mov(rbp, rsp);
  }
#ifdef TIERED
  // c2 leaves fpu stack dirty. Clean it on entry
  if (UseSSE < 2) {
    empty_FPU_stack();
  }
#endif // TIERED
  decrement(rsp, frame_size_in_bytes); // does not emit code for frame_size == 0
}


void C1_MacroAssembler::remove_frame(int frame_size_in_bytes) {
  increment(rsp, frame_size_in_bytes); // Does not emit code for frame_size == 0
  pop(rbp);
}


void C1_MacroAssembler::unverified_entry(Register receiver, Register ic_klass) {
  if (C1Breakpoint) int3();
  inline_cache_check(receiver, ic_klass);
}


void C1_MacroAssembler::verified_entry() {
  if (C1Breakpoint || VerifyFPU || !UseStackBanging) {
    // Verified Entry first instruction should be 5 bytes long for correct
    // patching by patch_verified_entry().
    //
    // C1Breakpoint and VerifyFPU have one byte first instruction.
    // Also first instruction will be one byte "push(rbp)" if stack banging
    // code is not generated (see build_frame() above).
    // For all these cases generate long instruction first.
    fat_nop();
  }
  if (C1Breakpoint) int3();
  // build frame
  verify_FPU(0, "method_entry");
}


#ifndef PRODUCT

void C1_MacroAssembler::verify_stack_oop(int stack_offset) {
  if (!VerifyOops) return;
  verify_oop_addr(Address(rsp, stack_offset));
}

void C1_MacroAssembler::verify_not_null_oop(Register r) {
  if (!VerifyOops) return;
  Label not_null;
  testptr(r, r);
  jcc(Assembler::notZero, not_null);
  stop("non-null oop required");
  bind(not_null);
  verify_oop(r);
}

void C1_MacroAssembler::invalidate_registers(bool inv_rax, bool inv_rbx, bool inv_rcx, bool inv_rdx, bool inv_rsi, bool inv_rdi) {
#ifdef ASSERT
  if (inv_rax) movptr(rax, 0xDEAD);
  if (inv_rbx) movptr(rbx, 0xDEAD);
  if (inv_rcx) movptr(rcx, 0xDEAD);
  if (inv_rdx) movptr(rdx, 0xDEAD);
  if (inv_rsi) movptr(rsi, 0xDEAD);
  if (inv_rdi) movptr(rdi, 0xDEAD);
#endif
}

#endif // ifndef PRODUCT