
src/hotspot/cpu/x86/c1_Runtime1_x86.cpp

rev 51052 : [mq]: eden


 996         __ ret(0);
 997       }
 998       break;
 999 
1000     case new_instance_id:
1001     case fast_new_instance_id:
1002     case fast_new_instance_init_check_id:
1003       {
1004         Register klass = rdx; // Incoming
1005         Register obj   = rax; // Result
1006 
1007         if (id == new_instance_id) {
1008           __ set_info("new_instance", dont_gc_arguments);
1009         } else if (id == fast_new_instance_id) {
1010           __ set_info("fast new_instance", dont_gc_arguments);
1011         } else {
1012           assert(id == fast_new_instance_init_check_id, "bad StubID");
1013           __ set_info("fast new_instance init check", dont_gc_arguments);
1014         }
1015 
1016         if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) && UseTLAB



1017             && Universe::heap()->supports_inline_contig_alloc()) {
1018           Label slow_path;
1019           Register obj_size = rcx;
1020           Register t1       = rbx;
1021           Register t2       = rsi;
1022           assert_different_registers(klass, obj, obj_size, t1, t2);
1023 
1024           __ push(rdi);
1025           __ push(rbx);
1026 
1027           if (id == fast_new_instance_init_check_id) {
1028             // make sure the klass is initialized
1029             __ cmpb(Address(klass, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized);
1030             __ jcc(Assembler::notEqual, slow_path);
1031           }
1032 
1033 #ifdef ASSERT
1034           // assert object can be fast path allocated
1035           {
1036             Label ok, not_ok;
1037             __ movl(obj_size, Address(klass, Klass::layout_helper_offset()));
1038             __ cmpl(obj_size, 0);  // make sure it's an instance (LH > 0)
1039             __ jcc(Assembler::lessEqual, not_ok);
1040             __ testl(obj_size, Klass::_lh_instance_slow_path_bit);
1041             __ jcc(Assembler::zero, ok);
1042             __ bind(not_ok);
1043             __ stop("assert(can be fast path allocated)");
1044             __ should_not_reach_here();
1045             __ bind(ok);
1046           }
1047 #endif // ASSERT
1048 
1049           // if we got here then the TLAB allocation failed, so try
1050           // refilling the TLAB or allocating directly from eden.
1051           Label retry_tlab, try_eden;
1052           const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);
1053           NOT_LP64(__ get_thread(thread));
1054 
1055           __ bind(try_eden);
1056           // get the instance size (size is positive so movl is fine for 64bit)
1057           __ movl(obj_size, Address(klass, Klass::layout_helper_offset()));
1058 
1059           __ eden_allocate(thread, obj, obj_size, 0, t1, slow_path);
1060 
1061           __ initialize_object(obj, klass, obj_size, 0, t1, t2, /* is_tlab_allocated */ false);
1062           __ verify_oop(obj);
1063           __ pop(rbx);
1064           __ pop(rdi);
1065           __ ret(0);
1066 
1067           __ bind(slow_path);
1068           __ pop(rbx);
1069           __ pop(rdi);
1070         }
1071 
1072         __ enter();
1073         OopMap* map = save_live_registers(sasm, 2);
1074         int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
1075         oop_maps = new OopMapSet();
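
The fast path above hinges on __ eden_allocate(), which emits an inline contiguous ("bump-the-pointer") allocation against the shared eden space and branches to slow_path when that is not possible. As a rough illustration only, the following plain C++ sketch shows what that fast path amounts to; the EdenSpace struct and its field names are simplified assumptions for this example, not HotSpot's actual data structures:

    // Hedged sketch (not HotSpot code): inline contiguous allocation as a
    // CAS-based bump of a shared top pointer, with fallback to the slow path.
    #include <atomic>
    #include <cstddef>

    struct EdenSpace {
      std::atomic<char*> top;   // current allocation cursor (shared by all threads)
      char*              end;   // end of the contiguous region
    };

    // Returns the start of the new object, or nullptr to signal "take the slow path".
    inline char* eden_allocate(EdenSpace& eden, size_t size_in_bytes) {
      for (;;) {
        char* obj     = eden.top.load(std::memory_order_relaxed);
        char* new_top = obj + size_in_bytes;
        if (new_top > eden.end) {
          return nullptr;                                  // would overflow eden -> slow_path
        }
        // Publish the new top; another thread may have raced us, so retry on failure.
        if (eden.top.compare_exchange_weak(obj, new_top, std::memory_order_relaxed)) {
          return obj;                                      // caller still has to initialize the object
        }
      }
    }

Because the top pointer is shared between threads, the compare-and-swap can fail under contention, which is why both the emitted code and this sketch retry before giving up and calling into the runtime.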


1116         }
1117 
1118 #ifdef ASSERT
1119         // assert object type is really an array of the proper kind
1120         {
1121           Label ok;
1122           Register t0 = obj;
1123           __ movl(t0, Address(klass, Klass::layout_helper_offset()));
1124           __ sarl(t0, Klass::_lh_array_tag_shift);
1125           int tag = ((id == new_type_array_id)
1126                      ? Klass::_lh_array_tag_type_value
1127                      : Klass::_lh_array_tag_obj_value);
1128           __ cmpl(t0, tag);
1129           __ jcc(Assembler::equal, ok);
1130           __ stop("assert(is an array klass)");
1131           __ should_not_reach_here();
1132           __ bind(ok);
1133         }
1134 #endif // ASSERT
1135 
1136         // If we got here, the TLAB allocation failed, so try allocating from
1137         // eden if inline contiguous allocations are supported.
1138         if (UseTLAB && Universe::heap()->supports_inline_contig_alloc()) {

1139           Register arr_size = rsi;
1140           Register t1       = rcx;  // must be rcx for use as shift count
1141           Register t2       = rdi;
1142           Label slow_path;
1143 
1144           // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
1145           // since size is positive movl does right thing on 64bit
1146           __ movl(t1, Address(klass, Klass::layout_helper_offset()));
1147           // since size is positive movl does right thing on 64bit
1148           __ movl(arr_size, length);
1149           assert(t1 == rcx, "fixed register usage");
1150           __ shlptr(arr_size /* by t1=rcx, mod 32 */);
1151           __ shrptr(t1, Klass::_lh_header_size_shift);
1152           __ andptr(t1, Klass::_lh_header_size_mask);
1153           __ addptr(arr_size, t1);
1154           __ addptr(arr_size, MinObjAlignmentInBytesMask); // align up
1155           __ andptr(arr_size, ~MinObjAlignmentInBytesMask);
1156 
1157           // Using t2 for non 64-bit.
1158           const Register thread = NOT_LP64(t2) LP64_ONLY(r15_thread);
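
For reference, the arr_size computation above (shift length by the log2 element size held in the low bits of layout_helper, add the header size taken from the next byte, then round up to the object alignment) can be written out in C++ roughly as below. This is a hedged sketch: the constants mirror Klass::layout_helper's documented encoding and an assumed 8-byte object alignment, restated here rather than taken from the headers:

    // Hedged sketch (not the stub): the array allocation size computed above.
    #include <cstddef>
    #include <cstdint>

    static const int    LH_HEADER_SIZE_SHIFT   = 16;    // assumption: matches Klass::_lh_header_size_shift
    static const int    LH_HEADER_SIZE_MASK    = 0xFF;  // assumption: matches Klass::_lh_header_size_mask
    static const size_t MIN_OBJ_ALIGNMENT_MASK = 8 - 1; // assumption: stands in for MinObjAlignmentInBytesMask

    inline size_t array_allocation_size(int32_t layout_helper, uint32_t length) {
      size_t elem_bytes  = (size_t)length << (layout_helper & 0x1F);                 // length << log2(element size)
      size_t header_size = (layout_helper >> LH_HEADER_SIZE_SHIFT) & LH_HEADER_SIZE_MASK;
      size_t raw         = header_size + elem_bytes;
      return (raw + MIN_OBJ_ALIGNMENT_MASK) & ~MIN_OBJ_ALIGNMENT_MASK;               // round up to alignment
    }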




 996         __ ret(0);
 997       }
 998       break;
 999 
1000     case new_instance_id:
1001     case fast_new_instance_id:
1002     case fast_new_instance_init_check_id:
1003       {
1004         Register klass = rdx; // Incoming
1005         Register obj   = rax; // Result
1006 
1007         if (id == new_instance_id) {
1008           __ set_info("new_instance", dont_gc_arguments);
1009         } else if (id == fast_new_instance_id) {
1010           __ set_info("fast new_instance", dont_gc_arguments);
1011         } else {
1012           assert(id == fast_new_instance_init_check_id, "bad StubID");
1013           __ set_info("fast new_instance init check", dont_gc_arguments);
1014         }
1015 
1016         // If TLAB is disabled, see if there is support for inlining contiguous
1017         // allocations.
1018         // Otherwise, just go to the slow path.
1019         if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) && !UseTLAB
1020             && Universe::heap()->supports_inline_contig_alloc()) {
1021           Label slow_path;
1022           Register obj_size = rcx;
1023           Register t1       = rbx;
1024           Register t2       = rsi;
1025           assert_different_registers(klass, obj, obj_size, t1, t2);
1026 
1027           __ push(rdi);
1028           __ push(rbx);
1029 
1030           if (id == fast_new_instance_init_check_id) {
1031             // make sure the klass is initialized
1032             __ cmpb(Address(klass, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized);
1033             __ jcc(Assembler::notEqual, slow_path);
1034           }
1035 
1036 #ifdef ASSERT
1037           // assert object can be fast path allocated
1038           {
1039             Label ok, not_ok;
1040             __ movl(obj_size, Address(klass, Klass::layout_helper_offset()));
1041             __ cmpl(obj_size, 0);  // make sure it's an instance (LH > 0)
1042             __ jcc(Assembler::lessEqual, not_ok);
1043             __ testl(obj_size, Klass::_lh_instance_slow_path_bit);
1044             __ jcc(Assembler::zero, ok);
1045             __ bind(not_ok);
1046             __ stop("assert(can be fast path allocated)");
1047             __ should_not_reach_here();
1048             __ bind(ok);
1049           }
1050 #endif // ASSERT
1051 



1052           const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);
1053           NOT_LP64(__ get_thread(thread));
1054 

1055           // get the instance size (size is positive so movl is fine for 64bit)
1056           __ movl(obj_size, Address(klass, Klass::layout_helper_offset()));
1057 
1058           __ eden_allocate(thread, obj, obj_size, 0, t1, slow_path);
1059 
1060           __ initialize_object(obj, klass, obj_size, 0, t1, t2, /* is_tlab_allocated */ false);
1061           __ verify_oop(obj);
1062           __ pop(rbx);
1063           __ pop(rdi);
1064           __ ret(0);
1065 
1066           __ bind(slow_path);
1067           __ pop(rbx);
1068           __ pop(rdi);
1069         }
1070 
1071         __ enter();
1072         OopMap* map = save_live_registers(sasm, 2);
1073         int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
1074         oop_maps = new OopMapSet();
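
With this change the stub only attempts inline eden allocation when TLABs are disabled (!UseTLAB) and the heap supports inline contiguous allocation; in every other case it falls through to the call_RT slow path above. The checks guarding that fast path (class fully initialized, layout_helper describing an instance, slow-path bit clear) can be summarized in C++ as the following sketch; KlassView and the two constants are illustrative assumptions, not HotSpot types:

    // Hedged sketch (not HotSpot code): eligibility for the inline instance fast path.
    #include <cstdint>

    static const int32_t LH_INSTANCE_SLOW_PATH_BIT = 0x01; // assumption: matches Klass::_lh_instance_slow_path_bit
    static const uint8_t FULLY_INITIALIZED         = 4;    // assumption: matches InstanceKlass::fully_initialized

    struct KlassView {
      int32_t layout_helper;  // > 0 for instances, < 0 for arrays, 0 means "slow path only"
      uint8_t init_state;     // class initialization state
    };

    // true  -> the stub may allocate and initialize the object inline
    // false -> jump to slow_path and let the runtime handle it
    inline bool can_inline_allocate_instance(const KlassView& k, bool check_init) {
      if (check_init && k.init_state != FULLY_INITIALIZED) return false; // class not yet initialized
      if (k.layout_helper <= 0)                            return false; // not an instance
      if (k.layout_helper & LH_INSTANCE_SLOW_PATH_BIT)     return false; // e.g. has a finalizer
      return true;
    }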


1115         }
1116 
1117 #ifdef ASSERT
1118         // assert object type is really an array of the proper kind
1119         {
1120           Label ok;
1121           Register t0 = obj;
1122           __ movl(t0, Address(klass, Klass::layout_helper_offset()));
1123           __ sarl(t0, Klass::_lh_array_tag_shift);
1124           int tag = ((id == new_type_array_id)
1125                      ? Klass::_lh_array_tag_type_value
1126                      : Klass::_lh_array_tag_obj_value);
1127           __ cmpl(t0, tag);
1128           __ jcc(Assembler::equal, ok);
1129           __ stop("assert(is an array klass)");
1130           __ should_not_reach_here();
1131           __ bind(ok);
1132         }
1133 #endif // ASSERT
1134 
1135         // If TLAB is disabled, see if there is support for inlining contiguous
1136         // allocations.
1137         // Otherwise, just go to the slow path.
1138         if (!UseTLAB && Universe::heap()->supports_inline_contig_alloc()) {
1139           Register arr_size = rsi;
1140           Register t1       = rcx;  // must be rcx for use as shift count
1141           Register t2       = rdi;
1142           Label slow_path;
1143 
1144           // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
1145           // since size is positive movl does right thing on 64bit
1146           __ movl(t1, Address(klass, Klass::layout_helper_offset()));
1148           // since size is positive movl does right thing on 64bit
1148           __ movl(arr_size, length);
1149           assert(t1 == rcx, "fixed register usage");
1150           __ shlptr(arr_size /* by t1=rcx, mod 32 */);
1151           __ shrptr(t1, Klass::_lh_header_size_shift);
1152           __ andptr(t1, Klass::_lh_header_size_mask);
1153           __ addptr(arr_size, t1);
1154           __ addptr(arr_size, MinObjAlignmentInBytesMask); // align up
1155           __ andptr(arr_size, ~MinObjAlignmentInBytesMask);
1156 
1157           // Using t2 for non 64-bit.
1158           const Register thread = NOT_LP64(t2) LP64_ONLY(r15_thread);
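
The ASSERT block above decodes the array tag from the top bits of layout_helper with an arithmetic shift and compares it against the expected type-array or object-array tag. A hedged C++ restatement of that check is sketched below; the shift and tag constants follow Klass' documented encoding but are assumptions here, not values read from the headers:

    // Hedged sketch (not HotSpot code): the debug check that klass really is
    // an array klass of the expected kind.
    #include <cstdint>

    static const int     LH_ARRAY_TAG_SHIFT      = 30;    // assumption: matches Klass::_lh_array_tag_shift
    static const int32_t LH_ARRAY_TAG_TYPE_VALUE = ~0x00; // assumption: primitive (type) array tag
    static const int32_t LH_ARRAY_TAG_OBJ_VALUE  = ~0x01; // assumption: object array tag

    // Arithmetic shift keeps the sign (like sarl in the stub), so the tag in
    // the top bits comes out as a small negative value that can be compared directly.
    inline bool is_expected_array_klass(int32_t layout_helper, bool expect_type_array) {
      int32_t tag = layout_helper >> LH_ARRAY_TAG_SHIFT;
      return tag == (expect_type_array ? LH_ARRAY_TAG_TYPE_VALUE
                                       : LH_ARRAY_TAG_OBJ_VALUE);
    }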

