src/cpu/x86/vm/c1_MacroAssembler_x86.cpp

 138 // Defines obj, preserves var_size_in_bytes
 139 void C1_MacroAssembler::try_allocate(Register obj, Register var_size_in_bytes, int con_size_in_bytes, Register t1, Register t2, Label& slow_case) {
 140   if (UseTLAB) {
 141     tlab_allocate(obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
 142   } else {
 143     eden_allocate(obj, var_size_in_bytes, con_size_in_bytes, t1, slow_case);
 144   }
 145 }
 146 
 147 
 148 void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register t1, Register t2) {
 149   assert_different_registers(obj, klass, len);
 150   if (UseBiasedLocking && !len->is_valid()) {
 151     assert_different_registers(obj, klass, len, t1, t2);
 152     movptr(t1, Address(klass, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
 153     movptr(Address(obj, oopDesc::mark_offset_in_bytes()), t1);
 154   } else {
 155     // This assumes that all prototype bits fit in an int32_t
 156     movptr(Address(obj, oopDesc::mark_offset_in_bytes ()), (int32_t)(intptr_t)markOopDesc::prototype());
 157   }
 158 
 159   movptr(Address(obj, oopDesc::klass_offset_in_bytes()), klass);
 160   if (len->is_valid()) {
 161     movl(Address(obj, arrayOopDesc::length_offset_in_bytes()), len);
 162   }
 163 }
 164 
 165 
 166 // preserves obj, destroys len_in_bytes
 167 void C1_MacroAssembler::initialize_body(Register obj, Register len_in_bytes, int hdr_size_in_bytes, Register t1) {
 168   Label done;
 169   assert(obj != len_in_bytes && obj != t1 && t1 != len_in_bytes, "registers must be different");
 170   assert((hdr_size_in_bytes & (BytesPerWord - 1)) == 0, "header size is not a multiple of BytesPerWord");
 171   Register index = len_in_bytes;
 172   // index is positive and ptr sized
 173   subptr(index, hdr_size_in_bytes);
 174   jcc(Assembler::zero, done);
 175   // initialize topmost word, divide index by 2, check if odd and test if zero
 176   // note: for the remaining code to work, index must be a multiple of BytesPerWord
 177 #ifdef ASSERT
 178   { Label L;
 179     testptr(index, BytesPerWord - 1);
 180     jcc(Assembler::zero, L);
 181     stop("index is not a multiple of BytesPerWord");
 182     bind(L);


 213   }
 214 
 215   // done
 216   bind(done);
 217 }
 218 
 219 
 220 void C1_MacroAssembler::allocate_object(Register obj, Register t1, Register t2, int header_size, int object_size, Register klass, Label& slow_case) {
 221   assert(obj == rax, "obj must be in rax, for cmpxchg");
 222   assert(obj != t1 && obj != t2 && t1 != t2, "registers must be different"); // XXX really?
 223   assert(header_size >= 0 && object_size >= header_size, "illegal sizes");
 224 
 225   try_allocate(obj, noreg, object_size * BytesPerWord, t1, t2, slow_case);
 226 
 227   initialize_object(obj, klass, noreg, object_size * HeapWordSize, t1, t2);
 228 }
 229 
 230 void C1_MacroAssembler::initialize_object(Register obj, Register klass, Register var_size_in_bytes, int con_size_in_bytes, Register t1, Register t2) {
 231   assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0,
 232          "con_size_in_bytes is not multiple of alignment");
 233   const int hdr_size_in_bytes = instanceOopDesc::base_offset_in_bytes();
 234 
 235   initialize_header(obj, klass, noreg, t1, t2);
 236 
 237   // clear rest of allocated space
 238   const Register t1_zero = t1;
 239   const Register index = t2;
 240   const int threshold = 6 * BytesPerWord;   // approximate break even point for code size (see comments below)
 241   if (var_size_in_bytes != noreg) {
 242     mov(index, var_size_in_bytes);
 243     initialize_body(obj, index, hdr_size_in_bytes, t1_zero);
 244   } else if (con_size_in_bytes <= threshold) {
 245     // use explicit null stores
 246     // code size = 2 + 3*n bytes (n = number of fields to clear)
 247     xorptr(t1_zero, t1_zero); // use t1_zero reg to clear memory (shorter code)
 248     for (int i = hdr_size_in_bytes; i < con_size_in_bytes; i += BytesPerWord)
 249       movptr(Address(obj, i), t1_zero);
 250   } else if (con_size_in_bytes > hdr_size_in_bytes) {
 251     // use loop to null out the fields
 252     // code size = 16 bytes for even n (n = number of fields to clear)
 253     // initialize last object field first if odd number of fields


 300   // clear rest of allocated space
 301   const Register len_zero = len;
 302   initialize_body(obj, arr_size, header_size * BytesPerWord, len_zero);
 303 
 304   if (CURRENT_ENV->dtrace_alloc_probes()) {
 305     assert(obj == rax, "must be");
 306     call(RuntimeAddress(Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)));
 307   }
 308 
 309   verify_oop(obj);
 310 }
 311 
 312 
 313 
 314 void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
 315   verify_oop(receiver);
 316   // explicit NULL check not needed since load from [klass_offset] causes a trap
 317   // check against inline cache
 318   assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()), "must add explicit null check");
 319   int start_offset = offset();
 320   cmpptr(iCache, Address(receiver, oopDesc::klass_offset_in_bytes()));
 321   // if icache check fails, then jump to runtime routine
 322   // Note: RECEIVER must still contain the receiver!
 323   jump_cc(Assembler::notEqual,
 324           RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
 325   const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9);
 326   assert(offset() - start_offset == ic_cmp_size, "check alignment in emit_method_entry");
 327 }
 328 
 329 
 330 void C1_MacroAssembler::build_frame(int frame_size_in_bytes) {
 331   // Make sure there is enough stack space for this method's activation.
 332   // Note that we do this before doing an enter(). This matches the
 333   // ordering of C2's stack overflow check / rsp decrement and allows
 334   // the SharedRuntime stack overflow handling to be consistent
 335   // between the two compilers.
 336   generate_stack_overflow_check(frame_size_in_bytes);
 337 
 338   push(rbp);
 339 #ifdef TIERED
 340   // c2 leaves fpu stack dirty. Clean it on entry
 341   if (UseSSE < 2 ) {
 342     empty_FPU_stack();
 343   }
 344 #endif // TIERED
 345   decrement(rsp, frame_size_in_bytes); // does not emit code for frame_size == 0
 346 }




 138 // Defines obj, preserves var_size_in_bytes
 139 void C1_MacroAssembler::try_allocate(Register obj, Register var_size_in_bytes, int con_size_in_bytes, Register t1, Register t2, Label& slow_case) {
 140   if (UseTLAB) {
 141     tlab_allocate(obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
 142   } else {
 143     eden_allocate(obj, var_size_in_bytes, con_size_in_bytes, t1, slow_case);
 144   }
 145 }
 146 
 147 
 148 void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register t1, Register t2) {
 149   assert_different_registers(obj, klass, len);
 150   if (UseBiasedLocking && !len->is_valid()) {
 151     assert_different_registers(obj, klass, len, t1, t2);
 152     movptr(t1, Address(klass, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
 153     movptr(Address(obj, oopDesc::mark_offset_in_bytes()), t1);
 154   } else {
 155     // This assumes that all prototype bits fit in an int32_t
 156     movptr(Address(obj, oopDesc::mark_offset_in_bytes ()), (int32_t)(intptr_t)markOopDesc::prototype());
 157   }
 158 #ifdef _LP64
 159   if (UseCompressedOops) { // Take care not to kill klass
 160     movptr(t1, klass);
 161     encode_heap_oop_not_null(t1);
 162     movl(Address(obj, oopDesc::klass_offset_in_bytes()), t1);
 163   } else 
 164 #endif
 165   {
 166     movptr(Address(obj, oopDesc::klass_offset_in_bytes()), klass);
 167   }
 168 
 169   if (len->is_valid()) {
 170     movl(Address(obj, arrayOopDesc::length_offset_in_bytes()), len);
 171   }
 172 #ifdef _LP64 
 173   else if (UseCompressedOops) {
 174     xorptr(t1, t1);
 175     store_klass_gap(obj, t1);
 176   }
 177 #endif
 178 }
 179 
 180 
 181 // preserves obj, destroys len_in_bytes
 182 void C1_MacroAssembler::initialize_body(Register obj, Register len_in_bytes, int hdr_size_in_bytes, Register t1) {
 183   Label done;
 184   assert(obj != len_in_bytes && obj != t1 && t1 != len_in_bytes, "registers must be different");
 185   assert((hdr_size_in_bytes & (BytesPerWord - 1)) == 0, "header size is not a multiple of BytesPerWord");
 186   Register index = len_in_bytes;
 187   // index is positive and ptr sized
 188   subptr(index, hdr_size_in_bytes);
 189   jcc(Assembler::zero, done);
 190   // initialize topmost word, divide index by 2, check if odd and test if zero
 191   // note: for the remaining code to work, index must be a multiple of BytesPerWord
 192 #ifdef ASSERT
 193   { Label L;
 194     testptr(index, BytesPerWord - 1);
 195     jcc(Assembler::zero, L);
 196     stop("index is not a multiple of BytesPerWord");
 197     bind(L);


 228   }
 229 
 230   // done
 231   bind(done);
 232 }
 233 
 234 
 235 void C1_MacroAssembler::allocate_object(Register obj, Register t1, Register t2, int header_size, int object_size, Register klass, Label& slow_case) {
 236   assert(obj == rax, "obj must be in rax, for cmpxchg");
 237   assert(obj != t1 && obj != t2 && t1 != t2, "registers must be different"); // XXX really?
 238   assert(header_size >= 0 && object_size >= header_size, "illegal sizes");
 239 
 240   try_allocate(obj, noreg, object_size * BytesPerWord, t1, t2, slow_case);
 241 
 242   initialize_object(obj, klass, noreg, object_size * HeapWordSize, t1, t2);
 243 }
 244 
 245 void C1_MacroAssembler::initialize_object(Register obj, Register klass, Register var_size_in_bytes, int con_size_in_bytes, Register t1, Register t2) {
 246   assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0,
 247          "con_size_in_bytes is not multiple of alignment");
 248   const int hdr_size_in_bytes = instanceOopDesc::header_size() * HeapWordSize;
 249 
 250   initialize_header(obj, klass, noreg, t1, t2);
 251 
 252   // clear rest of allocated space
 253   const Register t1_zero = t1;
 254   const Register index = t2;
 255   const int threshold = 6 * BytesPerWord;   // approximate break even point for code size (see comments below)
 256   if (var_size_in_bytes != noreg) {
 257     mov(index, var_size_in_bytes);
 258     initialize_body(obj, index, hdr_size_in_bytes, t1_zero);
 259   } else if (con_size_in_bytes <= threshold) {
 260     // use explicit null stores
 261     // code size = 2 + 3*n bytes (n = number of fields to clear)
 262     xorptr(t1_zero, t1_zero); // use t1_zero reg to clear memory (shorter code)
 263     for (int i = hdr_size_in_bytes; i < con_size_in_bytes; i += BytesPerWord)
 264       movptr(Address(obj, i), t1_zero);
 265   } else if (con_size_in_bytes > hdr_size_in_bytes) {
 266     // use loop to null out the fields
 267     // code size = 16 bytes for even n (n = number of fields to clear)
 268     // initialize last object field first if odd number of fields


 315   // clear rest of allocated space
 316   const Register len_zero = len;
 317   initialize_body(obj, arr_size, header_size * BytesPerWord, len_zero);
 318 
 319   if (CURRENT_ENV->dtrace_alloc_probes()) {
 320     assert(obj == rax, "must be");
 321     call(RuntimeAddress(Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)));
 322   }
 323 
 324   verify_oop(obj);
 325 }
 326 
 327 
 328 
 329 void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
 330   verify_oop(receiver);
 331   // explicit NULL check not needed since load from [klass_offset] causes a trap
 332   // check against inline cache
 333   assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()), "must add explicit null check");
 334   int start_offset = offset();
 335 
 336   if (UseCompressedOops) {
 337     load_klass(rscratch1, receiver);
 338     cmpptr(rscratch1, iCache);
 339   } else {
 340     cmpptr(iCache, Address(receiver, oopDesc::klass_offset_in_bytes()));
 341   }
 342   // if icache check fails, then jump to runtime routine
 343   // Note: RECEIVER must still contain the receiver!
 344   jump_cc(Assembler::notEqual,
 345           RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
 346   const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9);
 347   assert(UseCompressedOops || offset() - start_offset == ic_cmp_size, "check alignment in emit_method_entry");
 348 }
 349 
 350 
 351 void C1_MacroAssembler::build_frame(int frame_size_in_bytes) {
 352   // Make sure there is enough stack space for this method's activation.
 353   // Note that we do this before doing an enter(). This matches the
 354   // ordering of C2's stack overflow check / rsp decrement and allows
 355   // the SharedRuntime stack overflow handling to be consistent
 356   // between the two compilers.
 357   generate_stack_overflow_check(frame_size_in_bytes);
 358 
 359   push(rbp);
 360 #ifdef TIERED
 361   // c2 leaves fpu stack dirty. Clean it on entry
 362   if (UseSSE < 2 ) {
 363     empty_FPU_stack();
 364   }
 365 #endif // TIERED
 366   decrement(rsp, frame_size_in_bytes); // does not emit code for frame_size == 0
 367 }
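
For context on the compressed-oops stores above (the 32-bit movl of the encoded klass and the store_klass_gap zeroing of the remaining 4 header bytes), the sketch below models what narrow-oop encoding does in principle. The heap base, shift value, and helper names are assumptions made for illustration only; they are not HotSpot's actual encode_heap_oop_not_null implementation.

// Illustrative model only (not HotSpot code): with compressed oops a 64-bit
// heap pointer is stored as a 32-bit offset from an assumed heap base,
// optionally shifted by the object alignment. This is why the new
// initialize_header path stores the klass with a 32-bit movl and then must
// zero the neighbouring 4-byte gap when no array length occupies it.
#include <cstdint>
#include <cassert>
#include <cstdio>

static const uintptr_t kHeapBase = 0x0000000700000000ULL; // assumed heap base
static const int       kShift    = 3;                     // assumed 8-byte object alignment

// Compress a non-null in-heap pointer to a 32-bit narrow value.
static uint32_t encode_not_null(uintptr_t oop) {
  assert(oop >= kHeapBase && "must be a non-null in-heap pointer");
  uintptr_t offset = oop - kHeapBase;
  assert((offset & ((1u << kShift) - 1)) == 0 && "pointer must be aligned");
  return (uint32_t)(offset >> kShift);
}

// Expand a narrow value back to the full 64-bit address.
static uintptr_t decode_not_null(uint32_t narrow) {
  return kHeapBase + ((uintptr_t)narrow << kShift);
}

int main() {
  uintptr_t p = kHeapBase + 0x12345678ULL * 8; // some aligned in-heap address
  uint32_t  n = encode_not_null(p);
  assert(decode_not_null(n) == p);
  printf("full: %#llx  narrow: %#x\n", (unsigned long long)p, n);
  return 0;
}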

