src/cpu/x86/vm/c1_Runtime1_x86.cpp
*** old/src/cpu/x86/vm/c1_Runtime1_x86.cpp	Wed Dec  7 12:03:22 2011
--- new/src/cpu/x86/vm/c1_Runtime1_x86.cpp	Wed Dec  7 12:03:22 2011

*** 1009,1027 **** --- 1009,1027 ----
        __ push(rdi);
        __ push(rbx);
        if (id == fast_new_instance_init_check_id) {
          // make sure the klass is initialized
-         __ cmpl(Address(klass, instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc)), instanceKlass::fully_initialized);
          __ jcc(Assembler::notEqual, slow_path);
        }
  #ifdef ASSERT
        // assert object can be fast path allocated
        {
          Label ok, not_ok;
-         __ movl(obj_size, Address(klass, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc)));
          __ cmpl(obj_size, 0);  // make sure it's an instance (LH > 0)
          __ jcc(Assembler::lessEqual, not_ok);
          __ testl(obj_size, Klass::_lh_instance_slow_path_bit);
          __ jcc(Assembler::zero, ok);
          __ bind(not_ok);
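
Both removed loads in this hunk add sizeof(oopDesc) (the klassOop header) to a field offset by hand: the first reads the klass's init_state to confirm the class is fully initialized, the second reads the layout helper that the ASSERT block uses to decide whether the object is fast-path allocatable. A minimal C++ sketch of that eligibility test, with an assumed value standing in for Klass::_lh_instance_slow_path_bit:

    // Hedged sketch, not HotSpot code: what the ASSERT block above verifies.
    // The bit value is an assumption for illustration; the real constant is
    // Klass::_lh_instance_slow_path_bit.
    static const int lh_instance_slow_path_bit = 0x01;

    static bool can_fast_path_allocate(int layout_helper) {
      // a positive layout helper encodes an instance size in bytes (LH > 0);
      // the slow-path bit forces allocation through the runtime instead
      return layout_helper > 0 &&
             (layout_helper & lh_instance_slow_path_bit) == 0;
    }
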
*** 1038,1048 **** --- 1038,1048 ----
        __ tlab_refill(retry_tlab, try_eden, slow_path); // does not destroy rdx (klass), returns rdi
        __ bind(retry_tlab);
        // get the instance size (size is postive so movl is fine for 64bit)
-       __ movl(obj_size, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes()));
        __ tlab_allocate(obj, obj_size, 0, t1, t2, slow_path);
        __ initialize_object(obj, klass, obj_size, 0, t1, t2);
        __ verify_oop(obj);
*** 1050,1060 **** --- 1050,1060 ----
        __ pop(rdi);
        __ ret(0);
        __ bind(try_eden);
        // get the instance size (size is postive so movl is fine for 64bit)
-       __ movl(obj_size, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes()));
        __ eden_allocate(obj, obj_size, 0, t1, slow_path);
        __ incr_allocated_bytes(thread, obj_size, 0);
        __ initialize_object(obj, klass, obj_size, 0, t1, t2);
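
The retry_tlab and try_eden paths differ only in the allocator used: both read the instance size out of the layout helper (already a positive 32-bit value, hence movl) before bump-allocating and handing off to initialize_object. A rough C++ sketch of the bump-pointer step performed by tlab_allocate()/eden_allocate(), with invented field names for the thread-local allocation buffer:

    #include <cstddef>

    // Hedged sketch, not the HotSpot implementation. Field names are invented;
    // the real stub works on the thread's TLAB top/end words.
    struct TlabSketch { char* top; char* end; };

    static void* tlab_allocate_sketch(TlabSketch& tlab, size_t obj_size_in_bytes) {
      char* obj = tlab.top;
      if (obj + obj_size_in_bytes > tlab.end) {
        return nullptr;                      // does not fit -> caller jumps to slow_path
      }
      tlab.top = obj + obj_size_in_bytes;    // bump the allocation pointer
      return obj;                            // header/field init still pending (initialize_object)
    }
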
*** 1117,1127 **** --- 1117,1127 ----
  #ifdef ASSERT
        // assert object type is really an array of the proper kind
        {
          Label ok;
          Register t0 = obj;
-         __ movl(t0, Address(klass, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc)));
          __ sarl(t0, Klass::_lh_array_tag_shift);
          int tag = ((id == new_type_array_id) ? Klass::_lh_array_tag_type_value : Klass::_lh_array_tag_obj_value);
          __ cmpl(t0, tag);
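
For arrays the layout helper is negative, with the array kind encoded in its top two bits; the assert shifts those bits down (sarl, an arithmetic shift) and compares them with the tag expected for this stub. A small sketch of that decoding, using assumed values in place of the Klass::_lh_array_tag_* constants:

    // Hedged sketch of the assert's tag check; constant values are assumptions
    // for illustration, the real ones are Klass::_lh_array_tag_shift and the
    // _lh_array_tag_type/obj_value enums.
    static const int lh_array_tag_shift      = 30;   // tag in the top two bits
    static const int lh_array_tag_type_value = -1;   // primitive (type) arrays
    static const int lh_array_tag_obj_value  = -2;   // object arrays

    static bool is_expected_array_kind(int layout_helper, bool type_array) {
      int tag = layout_helper >> lh_array_tag_shift;  // arithmetic shift, like sarl
      return tag == (type_array ? lh_array_tag_type_value
                                : lh_array_tag_obj_value);
    }
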
*** 1151,1161 **** --- 1151,1161 ----
        __ bind(retry_tlab);
        // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
        // since size is positive movl does right thing on 64bit
-       __ movl(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes()));
        // since size is postive movl does right thing on 64bit
        __ movl(arr_size, length);
        assert(t1 == rcx, "fixed register usage");
        __ shlptr(arr_size /* by t1=rcx, mod 32 */);
        __ shrptr(t1, Klass::_lh_header_size_shift);
*** 1165,1175 **** --- 1165,1175 ----
        __ andptr(arr_size, ~MinObjAlignmentInBytesMask);
        __ tlab_allocate(obj, arr_size, 0, t1, t2, slow_path);  // preserves arr_size
        __ initialize_header(obj, klass, length, t1, t2);
-       __ movb(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes() + (Klass::_lh_header_size_shift / BitsPerByte)));
        assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
        assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
        __ andptr(t1, Klass::_lh_header_size_mask);
        __ subptr(arr_size, t1);  // body length
        __ addptr(t1, obj);       // body start
*** 1178,1188 **** --- 1178,1188 ----
        __ ret(0);
        __ bind(try_eden);
        // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
        // since size is positive movl does right thing on 64bit
-       __ movl(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes()));
        // since size is postive movl does right thing on 64bit
        __ movl(arr_size, length);
        assert(t1 == rcx, "fixed register usage");
        __ shlptr(arr_size /* by t1=rcx, mod 32 */);
        __ shrptr(t1, Klass::_lh_header_size_shift);
*** 1193,1203 **** --- 1193,1203 ----
        __ eden_allocate(obj, arr_size, 0, t1, slow_path);  // preserves arr_size
        __ incr_allocated_bytes(thread, arr_size, 0);
        __ initialize_header(obj, klass, length, t1, t2);
-       __ movb(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes() + (Klass::_lh_header_size_shift / BitsPerByte)));
        assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
        assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
        __ andptr(t1, Klass::_lh_header_size_mask);
        __ subptr(arr_size, t1);  // body length
        __ addptr(t1, obj);       // body start
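
Both array hunks compute the allocation size exactly as the comment describes, round_up(hdr + length << (layout_helper & 0x1F)), and later re-read the header-size byte of the layout helper to locate the start of the body that must be cleared. A C++ sketch of the size arithmetic with assumed constants; the shift and mask mirror Klass::_lh_header_size_shift / _lh_header_size_mask, and the alignment mask stands in for MinObjAlignmentInBytesMask:

    #include <cstddef>

    // Hedged sketch of the size computation above; not HotSpot code.
    static const int    lh_log2_element_size_mask = 0x1F;  // low 5 bits: log2(element size)
    static const int    lh_header_size_shift      = 16;    // assumed; Klass::_lh_header_size_shift
    static const int    lh_header_size_mask       = 0xFF;  // assumed; Klass::_lh_header_size_mask
    static const size_t min_obj_alignment_mask    = 7;     // assumed 8-byte object alignment

    static size_t array_allocation_size(int layout_helper, int length) {
      size_t elements = (size_t)length << (layout_helper & lh_log2_element_size_mask);
      size_t header   = (layout_helper >> lh_header_size_shift) & lh_header_size_mask;
      // round the total up to the object alignment, like the andptr(~mask) above
      return (header + elements + min_obj_alignment_mask) & ~min_obj_alignment_mask;
    }
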
*** 1265,1275 **** --- 1265,1275 ----
        // load the klass and check the has finalizer flag
        Label register_finalizer;
        Register t = rsi;
        __ load_klass(t, rax);
-       __ movl(t, Address(t, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc)));
        __ testl(t, JVM_ACC_HAS_FINALIZER);
        __ jcc(Assembler::notZero, register_finalizer);
        __ ret(0);
        __ bind(register_finalizer);
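
The register_finalizer stub returns immediately unless the receiver's class carries the has-finalizer access flag that the removed line loads from the klass. A minimal sketch of that test, with an assumed bit value standing in for JVM_ACC_HAS_FINALIZER:

    // Hedged sketch, not HotSpot code: only classes that declare a finalizer
    // take the runtime call. The bit position is an assumption for illustration;
    // the real flag is JVM_ACC_HAS_FINALIZER.
    static const unsigned has_finalizer_flag = 1u << 30;

    static bool needs_finalizer_registration(unsigned klass_access_flags) {
      return (klass_access_flags & has_finalizer_flag) != 0;
    }
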
