src/cpu/x86/vm/c1_Runtime1_x86.cpp

Sdiff for bug 8086053: old version of the excerpt first, new version below.

Old version:

            __ bind(not_ok);
            __ stop("assert(can be fast path allocated)");
            __ should_not_reach_here();
            __ bind(ok);
          }
#endif // ASSERT

          // if we got here then the TLAB allocation failed, so try
          // refilling the TLAB or allocating directly from eden.
          Label retry_tlab, try_eden;
          const Register thread =
            __ tlab_refill(retry_tlab, try_eden, slow_path); // does not destroy rdx (klass), returns rdi

          __ bind(retry_tlab);

          // get the instance size (size is positive so movl is fine for 64bit)
          __ movl(obj_size, Address(klass, Klass::layout_helper_offset()));

          __ tlab_allocate(obj, obj_size, 0, t1, t2, slow_path);

          __ initialize_object(obj, klass, obj_size, 0, t1, t2);
          __ verify_oop(obj);
          __ pop(rbx);
          __ pop(rdi);
          __ ret(0);

          __ bind(try_eden);
          // get the instance size (size is positive so movl is fine for 64bit)
          __ movl(obj_size, Address(klass, Klass::layout_helper_offset()));

          __ eden_allocate(obj, obj_size, 0, t1, slow_path);
          __ incr_allocated_bytes(thread, obj_size, 0);

          __ initialize_object(obj, klass, obj_size, 0, t1, t2);
          __ verify_oop(obj);
          __ pop(rbx);
          __ pop(rdi);
          __ ret(0);

          __ bind(slow_path);
          __ pop(rbx);
          __ pop(rdi);
        }

        __ enter();
        OopMap* map = save_live_registers(sasm, 2);
        int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_rax(sasm);
        __ verify_oop(obj);
        __ leave();
        __ ret(0);
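
As context for the retry_tlab path above: tlab_allocate is a bump-pointer allocation that branches to slow_path when the TLAB has no room left. A minimal, self-contained C++ sketch of that idiom, using a hypothetical Tlab type that is not HotSpot code:

#include <cstddef>
#include <cstdio>

struct Tlab {
  char* top;
  char* end;
  // Bump-pointer allocation: return nullptr on overflow, which corresponds
  // to the jump to slow_path emitted by tlab_allocate in the stub above.
  void* allocate(std::size_t size_in_bytes) {
    if (size_in_bytes > std::size_t(end - top)) return nullptr;
    void* obj = top;
    top += size_in_bytes;
    return obj;
  }
};

int main() {
  alignas(8) char buffer[64];
  Tlab tlab{buffer, buffer + sizeof buffer};
  void* a = tlab.allocate(48);  // fits: fast path
  void* b = tlab.allocate(48);  // overflows: would take slow_path
  std::printf("a=%p b=%p\n", a, b);
  return 0;
}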


          __ movl(t1, Address(klass, Klass::layout_helper_offset()));
          // since size is positive movl does right thing on 64bit
          __ movl(arr_size, length);
          assert(t1 == rcx, "fixed register usage");
          __ shlptr(arr_size /* by t1=rcx, mod 32 */);
          __ shrptr(t1, Klass::_lh_header_size_shift);
          __ andptr(t1, Klass::_lh_header_size_mask);
          __ addptr(arr_size, t1);
          __ addptr(arr_size, MinObjAlignmentInBytesMask); // align up
          __ andptr(arr_size, ~MinObjAlignmentInBytesMask);

          __ tlab_allocate(obj, arr_size, 0, t1, t2, slow_path);  // preserves arr_size

          __ initialize_header(obj, klass, length, t1, t2);
          __ movb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte)));
          assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
          assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
          __ andptr(t1, Klass::_lh_header_size_mask);
          __ subptr(arr_size, t1);  // body length
          __ addptr(t1, obj);       // body start
          __ initialize_body(t1, arr_size, 0, t2);
          __ verify_oop(obj);
          __ ret(0);

          __ bind(try_eden);
          // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
          // since size is positive movl does right thing on 64bit
          __ movl(t1, Address(klass, Klass::layout_helper_offset()));
          // since size is positive movl does right thing on 64bit
          __ movl(arr_size, length);
          assert(t1 == rcx, "fixed register usage");
          __ shlptr(arr_size /* by t1=rcx, mod 32 */);
          __ shrptr(t1, Klass::_lh_header_size_shift);
          __ andptr(t1, Klass::_lh_header_size_mask);
          __ addptr(arr_size, t1);
          __ addptr(arr_size, MinObjAlignmentInBytesMask); // align up
          __ andptr(arr_size, ~MinObjAlignmentInBytesMask);

          __ eden_allocate(obj, arr_size, 0, t1, slow_path);  // preserves arr_size
          __ incr_allocated_bytes(thread, arr_size, 0);
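
The shift/mask sequence above decodes Klass::layout_helper: the low bits consumed by shlptr hold log2 of the element size, the byte at _lh_header_size_shift holds the header size, and the addptr/andptr pair rounds the total up to the object alignment. A self-contained sketch with a worked example; the constant values are assumptions chosen for illustration (they mirror the Klass::_lh_* packing but should not be read as authoritative):

#include <cstdint>
#include <cstdio>

int main() {
  // Assumed layout_helper packing for illustration:
  // low byte = log2(element size), byte 2 = header size in bytes.
  const uint32_t lh_header_size_shift   = 16;
  const uint32_t lh_header_size_mask    = 0xFF;
  const uint32_t min_obj_alignment_mask = 8 - 1;  // MinObjAlignmentInBytesMask

  uint32_t layout_helper = (16u << lh_header_size_shift) | 2;  // 16-byte header, 4-byte elements
  uint32_t length = 10;                                        // e.g. new int[10]

  uint32_t arr_size = length << (layout_helper & 0x1F);        // shlptr by t1=rcx, mod 32
  arr_size += (layout_helper >> lh_header_size_shift) & lh_header_size_mask;
  arr_size = (arr_size + min_obj_alignment_mask) & ~min_obj_alignment_mask;  // align up

  std::printf("arr_size = %u bytes\n", arr_size);  // 10*4 + 16 = 56, already 8-aligned
  return 0;
}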

New version:

            __ bind(not_ok);
            __ stop("assert(can be fast path allocated)");
            __ should_not_reach_here();
            __ bind(ok);
          }
#endif // ASSERT

          // if we got here then the TLAB allocation failed, so try
          // refilling the TLAB or allocating directly from eden.
          Label retry_tlab, try_eden;
          const Register thread =
            __ tlab_refill(retry_tlab, try_eden, slow_path); // does not destroy rdx (klass), returns rdi

          __ bind(retry_tlab);

          // get the instance size (size is positive so movl is fine for 64bit)
          __ movl(obj_size, Address(klass, Klass::layout_helper_offset()));

          __ tlab_allocate(obj, obj_size, 0, t1, t2, slow_path);

          __ initialize_object(obj, klass, obj_size, 0, t1, t2, /* is_tlab_allocated */ true);
          __ verify_oop(obj);
          __ pop(rbx);
          __ pop(rdi);
          __ ret(0);

          __ bind(try_eden);
          // get the instance size (size is positive so movl is fine for 64bit)
          __ movl(obj_size, Address(klass, Klass::layout_helper_offset()));

          __ eden_allocate(obj, obj_size, 0, t1, slow_path);
          __ incr_allocated_bytes(thread, obj_size, 0);

          __ initialize_object(obj, klass, obj_size, 0, t1, t2, /* is_tlab_allocated */ false);
          __ verify_oop(obj);
          __ pop(rbx);
          __ pop(rdi);
          __ ret(0);

          __ bind(slow_path);
          __ pop(rbx);
          __ pop(rdi);
        }

        __ enter();
        OopMap* map = save_live_registers(sasm, 2);
        int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_rax(sasm);
        __ verify_oop(obj);
        __ leave();
        __ ret(0);
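
The is_tlab_allocated argument added to initialize_object is the substance of this change: with -XX:+ZeroTLAB a TLAB is zeroed when it is refilled, so a TLAB-allocated instance already has an all-zero body and the clearing step can be skipped, while the eden path passes false and still clears. A hedged sketch of that decision; ZeroTLAB is the real HotSpot flag, but the function and types below are illustrative only:

#include <cstddef>
#include <cstring>

static bool ZeroTLAB = true;  // -XX:+ZeroTLAB pre-zeroes a TLAB at refill time

// Illustrative stand-in for the body-clearing step of initialize_object.
void clear_body(char* obj, std::size_t hdr_size, std::size_t obj_size,
                bool is_tlab_allocated) {
  // Memory handed out from a pre-zeroed TLAB is already all-zero, so the
  // explicit clear is only needed for eden memory or when ZeroTLAB is off.
  if (!(is_tlab_allocated && ZeroTLAB)) {
    std::memset(obj + hdr_size, 0, obj_size - hdr_size);
  }
}

int main() {
  char obj[56] = {};
  clear_body(obj, 16, sizeof obj, /* is_tlab_allocated */ true);   // clear skipped
  clear_body(obj, 16, sizeof obj, /* is_tlab_allocated */ false);  // memset runs
  return 0;
}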


          __ movl(t1, Address(klass, Klass::layout_helper_offset()));
          // since size is positive movl does right thing on 64bit
          __ movl(arr_size, length);
          assert(t1 == rcx, "fixed register usage");
          __ shlptr(arr_size /* by t1=rcx, mod 32 */);
          __ shrptr(t1, Klass::_lh_header_size_shift);
          __ andptr(t1, Klass::_lh_header_size_mask);
          __ addptr(arr_size, t1);
          __ addptr(arr_size, MinObjAlignmentInBytesMask); // align up
          __ andptr(arr_size, ~MinObjAlignmentInBytesMask);

          __ tlab_allocate(obj, arr_size, 0, t1, t2, slow_path);  // preserves arr_size

          __ initialize_header(obj, klass, length, t1, t2);
          __ movb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte)));
          assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
          assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
          __ andptr(t1, Klass::_lh_header_size_mask);
          __ subptr(arr_size, t1);  // body length
          __ addptr(t1, obj);       // body start
          if (!ZeroTLAB) {
            __ initialize_body(t1, arr_size, 0, t2);
          }
          __ verify_oop(obj);
          __ ret(0);

          __ bind(try_eden);
          // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
          // since size is positive movl does right thing on 64bit
          __ movl(t1, Address(klass, Klass::layout_helper_offset()));
          // since size is positive movl does right thing on 64bit
          __ movl(arr_size, length);
          assert(t1 == rcx, "fixed register usage");
          __ shlptr(arr_size /* by t1=rcx, mod 32 */);
          __ shrptr(t1, Klass::_lh_header_size_shift);
          __ andptr(t1, Klass::_lh_header_size_mask);
          __ addptr(arr_size, t1);
          __ addptr(arr_size, MinObjAlignmentInBytesMask); // align up
          __ andptr(arr_size, ~MinObjAlignmentInBytesMask);

          __ eden_allocate(obj, arr_size, 0, t1, slow_path);  // preserves arr_size
          __ incr_allocated_bytes(thread, arr_size, 0);
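
Note that incr_allocated_bytes appears only on the eden paths: TLAB allocations are credited to the thread in bulk when the TLAB is retired, so only a direct eden allocation must bump the per-thread counter itself. A minimal sketch with a hypothetical Thread struct (HotSpot keeps this counter on the thread object, but the code below is illustrative):

#include <cstdint>
#include <cstdio>

struct Thread { uint64_t allocated_bytes = 0; };  // illustrative, not HotSpot's

// Eden allocations bypass the TLAB, so the stub adds their size directly;
// TLAB allocations are accounted wholesale when the TLAB is retired/refilled.
void incr_allocated_bytes(Thread& t, uint64_t obj_size) {
  t.allocated_bytes += obj_size;
}

int main() {
  Thread t;
  incr_allocated_bytes(t, 56);  // e.g. the eden-allocated array from the example above
  std::printf("allocated_bytes = %llu\n",
              static_cast<unsigned long long>(t.allocated_bytes));
  return 0;
}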

