24
25 #include "precompiled.hpp"
26 #include "c1/c1_MacroAssembler.hpp"
27 #include "c1/c1_Runtime1.hpp"
28 #include "classfile/systemDictionary.hpp"
29 #include "gc_interface/collectedHeap.hpp"
30 #include "interpreter/interpreter.hpp"
31 #include "oops/arrayOop.hpp"
32 #include "oops/markOop.hpp"
33 #include "runtime/basicLock.hpp"
34 #include "runtime/biasedLocking.hpp"
35 #include "runtime/os.hpp"
36 #include "runtime/stubRoutines.hpp"
37
// Inline-cache check emitted at a compiled method's unverified entry:
// load the receiver's klass and compare it against the expected klass
// the caller placed in iCache.  On a match, fall through to the method
// body; on a mismatch, tail-jump to the shared IC-miss stub so the call
// site can be repatched.  Clobbers G3_scratch.
// NOTE(review): leading numbers on each line are diff-extraction
// artifacts preserved verbatim.
38 void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
39 Label L;
40 const Register temp_reg = G3_scratch;
41 // Note: needs more testing of out-of-line vs. inline slow case
42 verify_oop(receiver);
43 load_klass(receiver, temp_reg);
44 cmp(temp_reg, iCache);
// Annulled (annul bit = true), predicted-taken branch; delay slot is
// filled with a nop rather than a useful instruction.
45 brx(Assembler::equal, true, Assembler::pt, L);
46 delayed()->nop();
// Klass mismatch: jump to the shared IC-miss runtime stub (temp_reg is
// used as the scratch register for the far jump).
47 AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());
48 jump_to(ic_miss, temp_reg);
49 delayed()->nop();
// Align so the code following the check starts on a CodeEntryAlignment
// boundary.
50 align(CodeEntryAlignment);
51 bind(L);
52 }
53
54
// Explicit (software) null-check hook; not implemented on this platform
// — any caller reaching it trips Unimplemented().  Presumably null
// checks are performed by other means here (TODO confirm against the
// platform's implicit-null-check support).
55 void C1_MacroAssembler::explicit_null_check(Register base) {
56 Unimplemented();
57 }
58
59
// Builds the activation frame for a C1-compiled method: first bang the
// stack to detect overflow eagerly, then allocate the frame with the
// C1-specific save_frame variant.
60 void C1_MacroAssembler::build_frame(int frame_size_in_bytes) {
61
62 generate_stack_overflow_check(frame_size_in_bytes);
63 // Create the frame.
64 save_frame_c1(frame_size_in_bytes);
65 }
66
125 bind(done);
126 }
127
128
// Fast-path monitor exit.  Rbox holds the address of the on-stack
// BasicObjectLock; Roop receives the locked object; Rmark is scratch.
// Falls through on a successful lightweight/biased/recursive unlock,
// branches to slow_case when the lock must be released via the runtime.
129 void C1_MacroAssembler::unlock_object(Register Rmark, Register Roop, Register Rbox, Label& slow_case) {
130 assert_different_registers(Rmark, Roop, Rbox);
131
132 Label done;
133
134 Address mark_addr(Roop, oopDesc::mark_offset_in_bytes());
// The CAS below addresses the mark word through the base register only,
// so the mark offset must be zero.
135 assert(mark_addr.disp() == 0, "cas must take a zero displacement");
136
137 if (UseBiasedLocking) {
138 // load the object out of the BasicObjectLock
139 ld_ptr(Rbox, BasicObjectLock::obj_offset_in_bytes(), Roop);
140 verify_oop(Roop);
// If the object is biased, biased_locking_exit branches to 'done' —
// no mark-word update is needed on a biased exit.
141 biased_locking_exit(mark_addr, Rmark, done);
142 }
143 // Test first it it is a fast recursive unlock
// A NULL displaced header marks a recursive lock entry: nothing was
// displaced, so there is nothing to restore.
144 ld_ptr(Rbox, BasicLock::displaced_header_offset_in_bytes(), Rmark);
145 br_null(Rmark, false, Assembler::pt, done);
146 delayed()->nop();
147 if (!UseBiasedLocking) {
148 // load object
// Roop was not loaded above in this configuration, so load it now for
// the CAS base address below.
149 ld_ptr(Rbox, BasicObjectLock::obj_offset_in_bytes(), Roop);
150 verify_oop(Roop);
151 }
152
153 // Check if it is still a light weight lock, this is is true if we see
154 // the stack address of the basicLock in the markOop of the object
// Atomically swap the displaced header (Rmark) back into the mark word
// iff the mark still equals Rbox.  casx_under_lock serializes through a
// global lock on processors lacking a native 64-bit CAS.
155 casx_under_lock(mark_addr.base(), Rbox, Rmark, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
156 cmp(Rbox, Rmark);
157
// CAS failed => the lock was inflated or contended: go to the runtime.
158 brx(Assembler::notEqual, false, Assembler::pn, slow_case);
159 delayed()->nop();
160 // Done
161 bind(done);
162 }
163
164
165 void C1_MacroAssembler::try_allocate(
166 Register obj, // result: pointer to object after successful allocation
214 }
215
216
// Fast-path allocation of a fixed-size instance: reserve obj_size words
// via try_allocate, then initialize header and body.  Branches to
// slow_case (runtime allocation) when the fast path cannot be used or
// fails.  hdr_size is unused here; initialize_object derives the header
// size itself.
217 void C1_MacroAssembler::allocate_object(
218 Register obj, // result: pointer to object after successful allocation
219 Register t1, // temp register
220 Register t2, // temp register, must be a global register for try_allocate
221 Register t3, // temp register
222 int hdr_size, // object header size in words
223 int obj_size, // object size in words
224 Register klass, // object klass
225 Label& slow_case // continuation point if fast allocation fails
226 ) {
227 assert_different_registers(obj, t1, t2, t3, klass);
// The runtime stub called on the slow path expects the klass in G5.
228 assert(klass == G5, "must be G5");
229
230 // allocate space & initialize header
// The byte size is used as an immediate operand; if it does not fit in
// a signed 13-bit field we would need another register to materialize
// it, so punt straight to the slow path.
231 if (!is_simm13(obj_size * wordSize)) {
232 // would need to use extra register to load
233 // object size => go the slow case for now
234 br(Assembler::always, false, Assembler::pt, slow_case);
235 delayed()->nop();
236 return;
237 }
238 try_allocate(obj, noreg, obj_size * wordSize, t2, t3, slow_case);
239
// Constant-size initialization: pass noreg for the variable-size
// register and the size in bytes as the compile-time constant.
240 initialize_object(obj, klass, noreg, obj_size * HeapWordSize, t1, t2);
241 }
242
243 void C1_MacroAssembler::initialize_object(
244 Register obj, // result: pointer to object after successful allocation
245 Register klass, // object klass
246 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
247 int con_size_in_bytes, // object size in bytes if known at compile time
248 Register t1, // temp register
249 Register t2 // temp register
250 ) {
251 const int hdr_size_in_bytes = instanceOopDesc::header_size() * HeapWordSize;
252
253 initialize_header(obj, klass, noreg, t1, t2);
254
255 #ifdef ASSERT
256 {
257 Label ok;
258 ld(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes(), t1);
259 if (var_size_in_bytes != noreg) {
260 cmp(t1, var_size_in_bytes);
261 } else {
262 cmp(t1, con_size_in_bytes);
263 }
264 brx(Assembler::equal, false, Assembler::pt, ok);
265 delayed()->nop();
266 stop("bad size in initialize_object");
267 should_not_reach_here();
268
269 bind(ok);
270 }
271
272 #endif
273
274 // initialize body
275 const int threshold = 5 * HeapWordSize; // approximate break even point for code size
276 if (var_size_in_bytes != noreg) {
277 // use a loop
278 add(obj, hdr_size_in_bytes, t1); // compute address of first element
279 sub(var_size_in_bytes, hdr_size_in_bytes, t2); // compute size of body
280 initialize_body(t1, t2);
281 #ifndef _LP64
282 } else if (VM_Version::v9_instructions_work() && con_size_in_bytes < threshold * 2) {
283 // on v9 we can do double word stores to fill twice as much space.
284 assert(hdr_size_in_bytes % 8 == 0, "double word aligned");
285 assert(con_size_in_bytes % 8 == 0, "double word aligned");
370 if (CURRENT_ENV->dtrace_alloc_probes()) {
371 assert(obj == O0, "must be");
372 call(CAST_FROM_FN_PTR(address, Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)),
373 relocInfo::runtime_call_type);
374 delayed()->nop();
375 }
376
377 verify_oop(obj);
378 }
379
380
381 #ifndef PRODUCT
382
// Debug build only: emit a check of the oop stored on the stack at
// SP + stack_offset (adjusted by STACK_BIAS for the 64-bit ABI).
// No code is emitted unless -XX:+VerifyOops.
383 void C1_MacroAssembler::verify_stack_oop(int stack_offset) {
384 if (!VerifyOops) return;
385 verify_oop_addr(Address(SP, stack_offset + STACK_BIAS));
386 }
387
// Debug build only: emit code that stops the VM if r is NULL.  Note the
// null test is emitted unconditionally; only the full oop verification
// that follows is gated on -XX:+VerifyOops.
388 void C1_MacroAssembler::verify_not_null_oop(Register r) {
389 Label not_null;
390 br_notnull(r, false, Assembler::pt, not_null);
391 delayed()->nop();
392 stop("non-null oop required");
393 bind(not_null);
394 if (!VerifyOops) return;
395 verify_oop(r);
396 }
397
398 void C1_MacroAssembler::invalidate_registers(bool iregisters, bool lregisters, bool oregisters,
399 Register preserve1, Register preserve2) {
400 if (iregisters) {
401 for (int i = 0; i < 6; i++) {
402 Register r = as_iRegister(i);
403 if (r != preserve1 && r != preserve2) set(0xdead, r);
404 }
405 }
406 if (oregisters) {
407 for (int i = 0; i < 6; i++) {
408 Register r = as_oRegister(i);
409 if (r != preserve1 && r != preserve2) set(0xdead, r);
410 }
411 }
|
24
25 #include "precompiled.hpp"
26 #include "c1/c1_MacroAssembler.hpp"
27 #include "c1/c1_Runtime1.hpp"
28 #include "classfile/systemDictionary.hpp"
29 #include "gc_interface/collectedHeap.hpp"
30 #include "interpreter/interpreter.hpp"
31 #include "oops/arrayOop.hpp"
32 #include "oops/markOop.hpp"
33 #include "runtime/basicLock.hpp"
34 #include "runtime/biasedLocking.hpp"
35 #include "runtime/os.hpp"
36 #include "runtime/stubRoutines.hpp"
37
// Inline-cache check emitted at a compiled method's unverified entry:
// load the receiver's klass and compare it against the expected klass
// the caller placed in iCache.  On a match, fall through to the method
// body; on a mismatch, tail-jump to the shared IC-miss stub so the call
// site can be repatched.  Clobbers G3_scratch.
// NOTE(review): leading numbers on each line are diff-extraction
// artifacts preserved verbatim.
38 void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
39 Label L;
40 const Register temp_reg = G3_scratch;
41 // Note: needs more testing of out-of-line vs. inline slow case
42 verify_oop(receiver);
43 load_klass(receiver, temp_reg);
// Fused compare-and-branch pseudo: replaces the former cmp + brx +
// delay-slot-nop sequence, handling the delay slot internally.
44 cmp_and_brx_short(temp_reg, iCache, Assembler::equal, Assembler::pt, L);
// Klass mismatch: jump to the shared IC-miss runtime stub (temp_reg is
// used as the scratch register for the far jump).
45 AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());
46 jump_to(ic_miss, temp_reg);
47 delayed()->nop();
// Align so the code following the check starts on a CodeEntryAlignment
// boundary.
48 align(CodeEntryAlignment);
49 bind(L);
50 }
51
52
// Explicit (software) null-check hook; not implemented on this platform
// — any caller reaching it trips Unimplemented().  Presumably null
// checks are performed by other means here (TODO confirm against the
// platform's implicit-null-check support).
53 void C1_MacroAssembler::explicit_null_check(Register base) {
54 Unimplemented();
55 }
56
57
// Builds the activation frame for a C1-compiled method: first bang the
// stack to detect overflow eagerly, then allocate the frame with the
// C1-specific save_frame variant.
58 void C1_MacroAssembler::build_frame(int frame_size_in_bytes) {
59
60 generate_stack_overflow_check(frame_size_in_bytes);
61 // Create the frame.
62 save_frame_c1(frame_size_in_bytes);
63 }
64
123 bind(done);
124 }
125
126
// Fast-path monitor exit.  Rbox holds the address of the on-stack
// BasicObjectLock; Roop receives the locked object; Rmark is scratch.
// Falls through on a successful lightweight/biased/recursive unlock,
// branches to slow_case when the lock must be released via the runtime.
127 void C1_MacroAssembler::unlock_object(Register Rmark, Register Roop, Register Rbox, Label& slow_case) {
128 assert_different_registers(Rmark, Roop, Rbox);
129
130 Label done;
131
132 Address mark_addr(Roop, oopDesc::mark_offset_in_bytes());
// The CAS below addresses the mark word through the base register only,
// so the mark offset must be zero.
133 assert(mark_addr.disp() == 0, "cas must take a zero displacement");
134
135 if (UseBiasedLocking) {
136 // load the object out of the BasicObjectLock
137 ld_ptr(Rbox, BasicObjectLock::obj_offset_in_bytes(), Roop);
138 verify_oop(Roop);
// If the object is biased, biased_locking_exit branches to 'done' —
// no mark-word update is needed on a biased exit.
139 biased_locking_exit(mark_addr, Rmark, done);
140 }
141 // Test first it it is a fast recursive unlock
// A NULL displaced header marks a recursive lock entry: nothing was
// displaced, so there is nothing to restore.  br_null_short subsumes
// the old br_null + delay-slot-nop pair.
142 ld_ptr(Rbox, BasicLock::displaced_header_offset_in_bytes(), Rmark);
143 br_null_short(Rmark, Assembler::pt, done);
144 if (!UseBiasedLocking) {
145 // load object
// Roop was not loaded above in this configuration, so load it now for
// the CAS base address below.
146 ld_ptr(Rbox, BasicObjectLock::obj_offset_in_bytes(), Roop);
147 verify_oop(Roop);
148 }
149
150 // Check if it is still a light weight lock, this is is true if we see
151 // the stack address of the basicLock in the markOop of the object
// Atomically swap the displaced header (Rmark) back into the mark word
// iff the mark still equals Rbox.  casx_under_lock serializes through a
// global lock on processors lacking a native 64-bit CAS.
152 casx_under_lock(mark_addr.base(), Rbox, Rmark, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
153 cmp(Rbox, Rmark);
154
// CAS failed => the lock was inflated or contended: go to the runtime.
155 brx(Assembler::notEqual, false, Assembler::pn, slow_case);
156 delayed()->nop();
157 // Done
158 bind(done);
159 }
160
161
162 void C1_MacroAssembler::try_allocate(
163 Register obj, // result: pointer to object after successful allocation
211 }
212
213
// Fast-path allocation of a fixed-size instance: reserve obj_size words
// via try_allocate, then initialize header and body.  Branches to
// slow_case (runtime allocation) when the fast path cannot be used or
// fails.  hdr_size is unused here; initialize_object derives the header
// size itself.
214 void C1_MacroAssembler::allocate_object(
215 Register obj, // result: pointer to object after successful allocation
216 Register t1, // temp register
217 Register t2, // temp register, must be a global register for try_allocate
218 Register t3, // temp register
219 int hdr_size, // object header size in words
220 int obj_size, // object size in words
221 Register klass, // object klass
222 Label& slow_case // continuation point if fast allocation fails
223 ) {
224 assert_different_registers(obj, t1, t2, t3, klass);
// The runtime stub called on the slow path expects the klass in G5.
225 assert(klass == G5, "must be G5");
226
227 // allocate space & initialize header
// The byte size is used as an immediate operand; if it does not fit in
// a signed 13-bit field we would need another register to materialize
// it, so punt straight to the slow path (ba = branch always).
228 if (!is_simm13(obj_size * wordSize)) {
229 // would need to use extra register to load
230 // object size => go the slow case for now
231 ba(slow_case);
232 delayed()->nop();
233 return;
234 }
235 try_allocate(obj, noreg, obj_size * wordSize, t2, t3, slow_case);
236
// Constant-size initialization: pass noreg for the variable-size
// register and the size in bytes as the compile-time constant.
237 initialize_object(obj, klass, noreg, obj_size * HeapWordSize, t1, t2);
238 }
239
240 void C1_MacroAssembler::initialize_object(
241 Register obj, // result: pointer to object after successful allocation
242 Register klass, // object klass
243 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
244 int con_size_in_bytes, // object size in bytes if known at compile time
245 Register t1, // temp register
246 Register t2 // temp register
247 ) {
248 const int hdr_size_in_bytes = instanceOopDesc::header_size() * HeapWordSize;
249
250 initialize_header(obj, klass, noreg, t1, t2);
251
252 #ifdef ASSERT
253 {
254 Label ok;
255 ld(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes(), t1);
256 if (var_size_in_bytes != noreg) {
257 cmp_and_brx_short(t1, var_size_in_bytes, Assembler::equal, Assembler::pt, ok);
258 } else {
259 cmp_and_brx_short(t1, con_size_in_bytes, Assembler::equal, Assembler::pt, ok);
260 }
261 stop("bad size in initialize_object");
262 should_not_reach_here();
263
264 bind(ok);
265 }
266
267 #endif
268
269 // initialize body
270 const int threshold = 5 * HeapWordSize; // approximate break even point for code size
271 if (var_size_in_bytes != noreg) {
272 // use a loop
273 add(obj, hdr_size_in_bytes, t1); // compute address of first element
274 sub(var_size_in_bytes, hdr_size_in_bytes, t2); // compute size of body
275 initialize_body(t1, t2);
276 #ifndef _LP64
277 } else if (VM_Version::v9_instructions_work() && con_size_in_bytes < threshold * 2) {
278 // on v9 we can do double word stores to fill twice as much space.
279 assert(hdr_size_in_bytes % 8 == 0, "double word aligned");
280 assert(con_size_in_bytes % 8 == 0, "double word aligned");
365 if (CURRENT_ENV->dtrace_alloc_probes()) {
366 assert(obj == O0, "must be");
367 call(CAST_FROM_FN_PTR(address, Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)),
368 relocInfo::runtime_call_type);
369 delayed()->nop();
370 }
371
372 verify_oop(obj);
373 }
374
375
376 #ifndef PRODUCT
377
// Debug build only: emit a check of the oop stored on the stack at
// SP + stack_offset (adjusted by STACK_BIAS for the 64-bit ABI).
// No code is emitted unless -XX:+VerifyOops.
378 void C1_MacroAssembler::verify_stack_oop(int stack_offset) {
379 if (!VerifyOops) return;
380 verify_oop_addr(Address(SP, stack_offset + STACK_BIAS));
381 }
382
// Debug build only: emit code that stops the VM if r is NULL.  Note the
// null test is emitted unconditionally; only the full oop verification
// that follows is gated on -XX:+VerifyOops.  br_notnull_short subsumes
// the old br_notnull + delay-slot-nop pair.
383 void C1_MacroAssembler::verify_not_null_oop(Register r) {
384 Label not_null;
385 br_notnull_short(r, Assembler::pt, not_null);
386 stop("non-null oop required");
387 bind(not_null);
388 if (!VerifyOops) return;
389 verify_oop(r);
390 }
391
392 void C1_MacroAssembler::invalidate_registers(bool iregisters, bool lregisters, bool oregisters,
393 Register preserve1, Register preserve2) {
394 if (iregisters) {
395 for (int i = 0; i < 6; i++) {
396 Register r = as_iRegister(i);
397 if (r != preserve1 && r != preserve2) set(0xdead, r);
398 }
399 }
400 if (oregisters) {
401 for (int i = 0; i < 6; i++) {
402 Register r = as_oRegister(i);
403 if (r != preserve1 && r != preserve2) set(0xdead, r);
404 }
405 }
|