994 } else if (id == fast_new_instance_id) {
995 __ set_info("fast new_instance", dont_gc_arguments);
996 } else {
997 assert(id == fast_new_instance_init_check_id, "bad StubID");
998 __ set_info("fast new_instance init check", dont_gc_arguments);
999 }
1000
1001 if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) &&
1002 UseTLAB && FastTLABRefill) {
1003 Label slow_path;
1004 Register obj_size = rcx;
1005 Register t1 = rbx;
1006 Register t2 = rsi;
1007 assert_different_registers(klass, obj, obj_size, t1, t2);
1008
1009 __ push(rdi);
1010 __ push(rbx);
1011
1012 if (id == fast_new_instance_init_check_id) {
1013 // make sure the klass is initialized
1014 __ cmpl(Address(klass, instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc)), instanceKlass::fully_initialized);
1015 __ jcc(Assembler::notEqual, slow_path);
1016 }
1017
1018 #ifdef ASSERT
1019 // assert object can be fast path allocated
1020 {
1021 Label ok, not_ok;
1022 __ movl(obj_size, Address(klass, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc)));
1023 __ cmpl(obj_size, 0); // make sure it's an instance (LH > 0)
1024 __ jcc(Assembler::lessEqual, not_ok);
1025 __ testl(obj_size, Klass::_lh_instance_slow_path_bit);
1026 __ jcc(Assembler::zero, ok);
1027 __ bind(not_ok);
1028 __ stop("assert(can be fast path allocated)");
1029 __ should_not_reach_here();
1030 __ bind(ok);
1031 }
1032 #endif // ASSERT
1033
1034 // if we got here then the TLAB allocation failed, so try
1035 // refilling the TLAB or allocating directly from eden.
1036 Label retry_tlab, try_eden;
1037 const Register thread =
1038 __ tlab_refill(retry_tlab, try_eden, slow_path); // does not destroy rdx (klass), returns rdi
1039
1040 __ bind(retry_tlab);
1041
1042 // get the instance size (size is positive so movl is fine for 64bit)
1043 __ movl(obj_size, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes()));
1044
1045 __ tlab_allocate(obj, obj_size, 0, t1, t2, slow_path);
1046
1047 __ initialize_object(obj, klass, obj_size, 0, t1, t2);
1048 __ verify_oop(obj);
1049 __ pop(rbx);
1050 __ pop(rdi);
1051 __ ret(0);
1052
1053 __ bind(try_eden);
1054 // get the instance size (size is positive so movl is fine for 64bit)
1055 __ movl(obj_size, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes()));
1056
1057 __ eden_allocate(obj, obj_size, 0, t1, slow_path);
1058 __ incr_allocated_bytes(thread, obj_size, 0);
1059
1060 __ initialize_object(obj, klass, obj_size, 0, t1, t2);
1061 __ verify_oop(obj);
1062 __ pop(rbx);
1063 __ pop(rdi);
1064 __ ret(0);
1065
1066 __ bind(slow_path);
1067 __ pop(rbx);
1068 __ pop(rdi);
1069 }
1070
1071 __ enter();
1072 OopMap* map = save_live_registers(sasm, 2);
1073 int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
1074 oop_maps = new OopMapSet();
1075 oop_maps->add_gc_map(call_offset, map);
1102 break;
1103
1104 case new_type_array_id:
1105 case new_object_array_id:
1106 {
1107 Register length = rbx; // Incoming
1108 Register klass = rdx; // Incoming
1109 Register obj = rax; // Result
1110
1111 if (id == new_type_array_id) {
1112 __ set_info("new_type_array", dont_gc_arguments);
1113 } else {
1114 __ set_info("new_object_array", dont_gc_arguments);
1115 }
1116
1117 #ifdef ASSERT
1118 // assert object type is really an array of the proper kind
1119 {
1120 Label ok;
1121 Register t0 = obj;
1122 __ movl(t0, Address(klass, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc)));
1123 __ sarl(t0, Klass::_lh_array_tag_shift);
1124 int tag = ((id == new_type_array_id)
1125 ? Klass::_lh_array_tag_type_value
1126 : Klass::_lh_array_tag_obj_value);
1127 __ cmpl(t0, tag);
1128 __ jcc(Assembler::equal, ok);
1129 __ stop("assert(is an array klass)");
1130 __ should_not_reach_here();
1131 __ bind(ok);
1132 }
1133 #endif // ASSERT
1134
1135 if (UseTLAB && FastTLABRefill) {
1136 Register arr_size = rsi;
1137 Register t1 = rcx; // must be rcx for use as shift count
1138 Register t2 = rdi;
1139 Label slow_path;
1140 assert_different_registers(length, klass, obj, arr_size, t1, t2);
1141
1142 // check that array length is small enough for fast path.
1143 __ cmpl(length, C1_MacroAssembler::max_array_allocation_length);
1144 __ jcc(Assembler::above, slow_path);
1145
1146 // if we got here then the TLAB allocation failed, so try
1147 // refilling the TLAB or allocating directly from eden.
1148 Label retry_tlab, try_eden;
1149 const Register thread =
1150 __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves rbx & rdx, returns rdi
1151
1152 __ bind(retry_tlab);
1153
1154 // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
1155 // since size is positive movl does right thing on 64bit
1156 __ movl(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes()));
1157 // since size is positive movl does right thing on 64bit
1158 __ movl(arr_size, length);
1159 assert(t1 == rcx, "fixed register usage");
1160 __ shlptr(arr_size /* by t1=rcx, mod 32 */);
1161 __ shrptr(t1, Klass::_lh_header_size_shift);
1162 __ andptr(t1, Klass::_lh_header_size_mask);
1163 __ addptr(arr_size, t1);
1164 __ addptr(arr_size, MinObjAlignmentInBytesMask); // align up
1165 __ andptr(arr_size, ~MinObjAlignmentInBytesMask);
1166
1167 __ tlab_allocate(obj, arr_size, 0, t1, t2, slow_path); // preserves arr_size
1168
1169 __ initialize_header(obj, klass, length, t1, t2);
1170 __ movb(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes() + (Klass::_lh_header_size_shift / BitsPerByte)));
1171 assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
1172 assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
1173 __ andptr(t1, Klass::_lh_header_size_mask);
1174 __ subptr(arr_size, t1); // body length
1175 __ addptr(t1, obj); // body start
1176 __ initialize_body(t1, arr_size, 0, t2);
1177 __ verify_oop(obj);
1178 __ ret(0);
1179
1180 __ bind(try_eden);
1181 // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
1182 // since size is positive movl does right thing on 64bit
1183 __ movl(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes()));
1184 // since size is positive movl does right thing on 64bit
1185 __ movl(arr_size, length);
1186 assert(t1 == rcx, "fixed register usage");
1187 __ shlptr(arr_size /* by t1=rcx, mod 32 */);
1188 __ shrptr(t1, Klass::_lh_header_size_shift);
1189 __ andptr(t1, Klass::_lh_header_size_mask);
1190 __ addptr(arr_size, t1);
1191 __ addptr(arr_size, MinObjAlignmentInBytesMask); // align up
1192 __ andptr(arr_size, ~MinObjAlignmentInBytesMask);
1193
1194 __ eden_allocate(obj, arr_size, 0, t1, slow_path); // preserves arr_size
1195 __ incr_allocated_bytes(thread, arr_size, 0);
1196
1197 __ initialize_header(obj, klass, length, t1, t2);
1198 __ movb(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes() + (Klass::_lh_header_size_shift / BitsPerByte)));
1199 assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
1200 assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
1201 __ andptr(t1, Klass::_lh_header_size_mask);
1202 __ subptr(arr_size, t1); // body length
1203 __ addptr(t1, obj); // body start
1204 __ initialize_body(t1, arr_size, 0, t2);
1205 __ verify_oop(obj);
1206 __ ret(0);
1207
1208 __ bind(slow_path);
1209 }
1210
1211 __ enter();
1212 OopMap* map = save_live_registers(sasm, 3);
1213 int call_offset;
1214 if (id == new_type_array_id) {
1215 call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
1216 } else {
1217 call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
1218 }
1250 {
1251 __ set_info("register_finalizer", dont_gc_arguments);
1252
1253 // This is called via call_runtime, so the arguments
1254 // will be placed in C ABI locations
1255
1256 #ifdef _LP64
1257 __ verify_oop(c_rarg0);
1258 __ mov(rax, c_rarg0);
1259 #else
1260 // The object is passed on the stack and we haven't pushed a
1261 // frame yet, so it's one word away from the top of the stack.
1262 __ movptr(rax, Address(rsp, 1 * BytesPerWord));
1263 __ verify_oop(rax);
1264 #endif // _LP64
1265
1266 // load the klass and check the has-finalizer flag
1267 Label register_finalizer;
1268 Register t = rsi;
1269 __ load_klass(t, rax);
1270 __ movl(t, Address(t, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc)));
1271 __ testl(t, JVM_ACC_HAS_FINALIZER);
1272 __ jcc(Assembler::notZero, register_finalizer);
1273 __ ret(0);
1274
1275 __ bind(register_finalizer);
1276 __ enter();
1277 OopMap* oop_map = save_live_registers(sasm, 2 /*num_rt_args */);
1278 int call_offset = __ call_RT(noreg, noreg,
1279 CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), rax);
1280 oop_maps = new OopMapSet();
1281 oop_maps->add_gc_map(call_offset, oop_map);
1282
1283 // Now restore all the live registers
1284 restore_live_registers(sasm);
1285
1286 __ leave();
1287 __ ret(0);
1288 }
1289 break;
1290
994 } else if (id == fast_new_instance_id) {
995 __ set_info("fast new_instance", dont_gc_arguments);
996 } else {
997 assert(id == fast_new_instance_init_check_id, "bad StubID");
998 __ set_info("fast new_instance init check", dont_gc_arguments);
999 }
1000
1001 if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) &&
1002 UseTLAB && FastTLABRefill) {
1003 Label slow_path;
1004 Register obj_size = rcx;
1005 Register t1 = rbx;
1006 Register t2 = rsi;
1007 assert_different_registers(klass, obj, obj_size, t1, t2);
1008
1009 __ push(rdi);
1010 __ push(rbx);
1011
1012 if (id == fast_new_instance_init_check_id) {
1013 // make sure the klass is initialized
1014 __ cmpl(Address(klass, instanceKlass::init_state_offset()), instanceKlass::fully_initialized);
1015 __ jcc(Assembler::notEqual, slow_path);
1016 }
1017
1018 #ifdef ASSERT
1019 // assert object can be fast path allocated
1020 {
1021 Label ok, not_ok;
1022 __ movl(obj_size, Address(klass, Klass::layout_helper_offset()));
1023 __ cmpl(obj_size, 0); // make sure it's an instance (LH > 0)
1024 __ jcc(Assembler::lessEqual, not_ok);
1025 __ testl(obj_size, Klass::_lh_instance_slow_path_bit);
1026 __ jcc(Assembler::zero, ok);
1027 __ bind(not_ok);
1028 __ stop("assert(can be fast path allocated)");
1029 __ should_not_reach_here();
1030 __ bind(ok);
1031 }
1032 #endif // ASSERT
1033
1034 // if we got here then the TLAB allocation failed, so try
1035 // refilling the TLAB or allocating directly from eden.
1036 Label retry_tlab, try_eden;
1037 const Register thread =
1038 __ tlab_refill(retry_tlab, try_eden, slow_path); // does not destroy rdx (klass), returns rdi
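     // tlab_refill branches one of three ways: to retry_tlab once a fresh
     // TLAB has been obtained, to try_eden when the allocation should go
     // directly to the shared eden, or to slow_path on failure.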
1039
1040 __ bind(retry_tlab);
1041
1042 // get the instance size (size is positive so movl is fine for 64bit)
1043 __ movl(obj_size, Address(klass, Klass::layout_helper_offset()));
1044
1045 __ tlab_allocate(obj, obj_size, 0, t1, t2, slow_path);
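     // tlab_allocate bumps the TLAB top by obj_size and branches to
     // slow_path if the object does not fit in the current TLAB.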
1046
1047 __ initialize_object(obj, klass, obj_size, 0, t1, t2);
1048 __ verify_oop(obj);
1049 __ pop(rbx);
1050 __ pop(rdi);
1051 __ ret(0);
1052
1053 __ bind(try_eden);
1054 // get the instance size (size is positive so movl is fine for 64bit)
1055 __ movl(obj_size, Address(klass, Klass::layout_helper_offset()));
1056
1057 __ eden_allocate(obj, obj_size, 0, t1, slow_path);
1058 __ incr_allocated_bytes(thread, obj_size, 0);
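     // eden allocation bypasses the TLAB, so the per-thread allocated-bytes
     // counter is credited explicitly here; on the TLAB path this accounting
     // happens when the TLAB is retired and refilled.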
1059
1060 __ initialize_object(obj, klass, obj_size, 0, t1, t2);
1061 __ verify_oop(obj);
1062 __ pop(rbx);
1063 __ pop(rdi);
1064 __ ret(0);
1065
1066 __ bind(slow_path);
1067 __ pop(rbx);
1068 __ pop(rdi);
1069 }
1070
1071 __ enter();
1072 OopMap* map = save_live_registers(sasm, 2);
1073 int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
1074 oop_maps = new OopMapSet();
1075 oop_maps->add_gc_map(call_offset, map);
1102 break;
1103
1104 case new_type_array_id:
1105 case new_object_array_id:
1106 {
1107 Register length = rbx; // Incoming
1108 Register klass = rdx; // Incoming
1109 Register obj = rax; // Result
1110
1111 if (id == new_type_array_id) {
1112 __ set_info("new_type_array", dont_gc_arguments);
1113 } else {
1114 __ set_info("new_object_array", dont_gc_arguments);
1115 }
1116
1117 #ifdef ASSERT
1118 // assert object type is really an array of the proper kind
1119 {
1120 Label ok;
1121 Register t0 = obj;
1122 __ movl(t0, Address(klass, Klass::layout_helper_offset()));
1123 __ sarl(t0, Klass::_lh_array_tag_shift);
1124 int tag = ((id == new_type_array_id)
1125 ? Klass::_lh_array_tag_type_value
1126 : Klass::_lh_array_tag_obj_value);
1127 __ cmpl(t0, tag);
1128 __ jcc(Assembler::equal, ok);
1129 __ stop("assert(is an array klass)");
1130 __ should_not_reach_here();
1131 __ bind(ok);
1132 }
1133 #endif // ASSERT
1134
1135 if (UseTLAB && FastTLABRefill) {
1136 Register arr_size = rsi;
1137 Register t1 = rcx; // must be rcx for use as shift count
1138 Register t2 = rdi;
1139 Label slow_path;
1140 assert_different_registers(length, klass, obj, arr_size, t1, t2);
1141
1142 // check that array length is small enough for fast path.
1143 __ cmpl(length, C1_MacroAssembler::max_array_allocation_length);
1144 __ jcc(Assembler::above, slow_path);
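     // note: 'above' is an unsigned comparison, so a negative length shows
     // up as a huge unsigned value and is routed to the slow path, where
     // the runtime call reports the error.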
1145
1146 // if we got here then the TLAB allocation failed, so try
1147 // refilling the TLAB or allocating directly from eden.
1148 Label retry_tlab, try_eden;
1149 const Register thread =
1150 __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves rbx & rdx, returns rdi
1151
1152 __ bind(retry_tlab);
1153
1154 // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
1155 // since size is positive movl does right thing on 64bit
1156 __ movl(t1, Address(klass, Klass::layout_helper_offset()));
1157 // since size is positive movl does right thing on 64bit
1158 __ movl(arr_size, length);
1159 assert(t1 == rcx, "fixed register usage");
1160 __ shlptr(arr_size /* by t1=rcx, mod 32 */);
1161 __ shrptr(t1, Klass::_lh_header_size_shift);
1162 __ andptr(t1, Klass::_lh_header_size_mask);
1163 __ addptr(arr_size, t1);
1164 __ addptr(arr_size, MinObjAlignmentInBytesMask); // align up
1165 __ andptr(arr_size, ~MinObjAlignmentInBytesMask);
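     // illustrative example (values are assumptions, not taken from this
     // code): for an int[] of length 9 on a 64-bit VM with a 16-byte array
     // header and 8-byte minimum alignment, the layout helper's low 5 bits
     // give log2(element size) = 2, so
     //   arr_size = round_up(16 + (9 << 2), 8) = round_up(52, 8) = 56 bytes.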
1166
1167 __ tlab_allocate(obj, arr_size, 0, t1, t2, slow_path); // preserves arr_size
1168
1169 __ initialize_header(obj, klass, length, t1, t2);
1170 __ movb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte)));
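     // a single byte load suffices here: the header-size field of the
     // layout helper is byte aligned and fits in one byte, as the two
     // asserts below verify.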
1171 assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
1172 assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
1173 __ andptr(t1, Klass::_lh_header_size_mask);
1174 __ subptr(arr_size, t1); // body length
1175 __ addptr(t1, obj); // body start
1176 __ initialize_body(t1, arr_size, 0, t2);
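     // initialize_body zero-fills the arr_size body bytes starting at t1
     // (the body start computed above).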
1177 __ verify_oop(obj);
1178 __ ret(0);
1179
1180 __ bind(try_eden);
1181 // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
1182 // since size is positive movl does right thing on 64bit
1183 __ movl(t1, Address(klass, Klass::layout_helper_offset()));
1184 // since size is positive movl does right thing on 64bit
1185 __ movl(arr_size, length);
1186 assert(t1 == rcx, "fixed register usage");
1187 __ shlptr(arr_size /* by t1=rcx, mod 32 */);
1188 __ shrptr(t1, Klass::_lh_header_size_shift);
1189 __ andptr(t1, Klass::_lh_header_size_mask);
1190 __ addptr(arr_size, t1);
1191 __ addptr(arr_size, MinObjAlignmentInBytesMask); // align up
1192 __ andptr(arr_size, ~MinObjAlignmentInBytesMask);
1193
1194 __ eden_allocate(obj, arr_size, 0, t1, slow_path); // preserves arr_size
1195 __ incr_allocated_bytes(thread, arr_size, 0);
1196
1197 __ initialize_header(obj, klass, length, t1, t2);
1198 __ movb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte)));
1199 assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
1200 assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
1201 __ andptr(t1, Klass::_lh_header_size_mask);
1202 __ subptr(arr_size, t1); // body length
1203 __ addptr(t1, obj); // body start
1204 __ initialize_body(t1, arr_size, 0, t2);
1205 __ verify_oop(obj);
1206 __ ret(0);
1207
1208 __ bind(slow_path);
1209 }
1210
1211 __ enter();
1212 OopMap* map = save_live_registers(sasm, 3);
1213 int call_offset;
1214 if (id == new_type_array_id) {
1215 call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
1216 } else {
1217 call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
1218 }
1250 {
1251 __ set_info("register_finalizer", dont_gc_arguments);
1252
1253 // This is called via call_runtime, so the arguments
1254 // will be placed in C ABI locations
1255
1256 #ifdef _LP64
1257 __ verify_oop(c_rarg0);
1258 __ mov(rax, c_rarg0);
1259 #else
1260 // The object is passed on the stack and we haven't pushed a
1261 // frame yet, so it's one word away from the top of the stack.
1262 __ movptr(rax, Address(rsp, 1 * BytesPerWord));
1263 __ verify_oop(rax);
1264 #endif // _LP64
1265
1266 // load the klass and check the has-finalizer flag
1267 Label register_finalizer;
1268 Register t = rsi;
1269 __ load_klass(t, rax);
1270 __ movl(t, Address(t, Klass::access_flags_offset()));
1271 __ testl(t, JVM_ACC_HAS_FINALIZER);
1272 __ jcc(Assembler::notZero, register_finalizer);
1273 __ ret(0);
1274
1275 __ bind(register_finalizer);
1276 __ enter();
1277 OopMap* oop_map = save_live_registers(sasm, 2 /*num_rt_args */);
1278 int call_offset = __ call_RT(noreg, noreg,
1279 CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), rax);
1280 oop_maps = new OopMapSet();
1281 oop_maps->add_gc_map(call_offset, oop_map);
1282
1283 // Now restore all the live registers
1284 restore_live_registers(sasm);
1285
1286 __ leave();
1287 __ ret(0);
1288 }
1289 break;
1290