1086 case counter_overflow_id:
1087 {
// Stub entered when a compiled method's invocation/backedge counter
// overflows. The bci and a Method* were placed on the stack by the
// caller; both are forwarded to Runtime1's counter_overflow entry
// (presumably so the compilation policy can react — confirm in
// c1_Runtime1.cpp).
1088 Register bci = rax, method = rbx;
1089 __ enter();
// Save every live register and record an oop map: the runtime call
// below may trigger a GC that needs to find oops in the saved frame.
1090 OopMap* map = save_live_registers(sasm, 3);
1091 // Retrieve bci
1092 __ movl(bci, Address(rbp, 2*BytesPerWord));
1093 // And a pointer to the Method*
1094 __ movptr(method, Address(rbp, 3*BytesPerWord));
1095 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method);
// Attach the oop map at the exact call pc so the GC can walk this frame.
1096 oop_maps = new OopMapSet();
1097 oop_maps->add_gc_map(call_offset, map);
1098 restore_live_registers(sasm);
1099 __ leave();
1100 __ ret(0);
1101 }
1102 break;
1103
1104 case new_type_array_id:
1105 case new_object_array_id:
1106 {
// Allocation stub shared by primitive-typed and object arrays.
// Register contract: length in rbx and klass in rdx on entry; the
// new array oop is returned in rax.
1107 Register length = rbx; // Incoming
1108 Register klass = rdx; // Incoming
1109 Register obj = rax; // Result
1110
1111 if (id == new_type_array_id) {
1112 __ set_info("new_type_array", dont_gc_arguments);
1113 } else {
1114 __ set_info("new_object_array", dont_gc_arguments);
1115 }
1116
1117 #ifdef ASSERT
1118 // assert object type is really an array of the proper kind
1119 {
1120 Label ok;
1121 Register t0 = obj;
// Extract the array tag bits from the klass' layout helper and check
// they match the kind of array this stub id is meant to allocate.
1122 __ movl(t0, Address(klass, Klass::layout_helper_offset()));
1123 __ sarl(t0, Klass::_lh_array_tag_shift);
1124 int tag = ((id == new_type_array_id)
1125 ? Klass::_lh_array_tag_type_value
1126 : Klass::_lh_array_tag_obj_value);
1127 __ cmpl(t0, tag);
1128 __ jcc(Assembler::equal, ok);
1129 __ stop("assert(is an array klass)");
1130 __ should_not_reach_here();
1131 __ bind(ok);
1132 }
1133 #endif // ASSERT
1134
1135 // If TLAB is disabled, see if there is support for inlining contiguous
1136 // allocations.
1137 // Otherwise, just go to the slow path.
1138 if (!UseTLAB && Universe::heap()->supports_inline_contig_alloc()) {
1139 Register arr_size = rsi;
1140 Register t1 = rcx; // must be rcx for use as shift count
1141 Register t2 = rdi;
1142 Label slow_path;
1143
1144 // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
1145 // since size is positive movl does right thing on 64bit
1146 __ movl(t1, Address(klass, Klass::layout_helper_offset()));
1147 // since size is positive movl does right thing on 64bit
// NOTE(review): this listing jumps from line 1147 to 1162 — the size
// computation and the eden allocation that define `arr_size` and `obj`
// are not visible in this chunk; verify against the full file.
// Install the array header, then locate the body (header size is the
// byte of the layout helper at _lh_header_size_shift) and zero it.
1162 __ initialize_header(obj, klass, length, t1, t2);
1163 __ movb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte)));
1164 assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
1165 assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
1166 __ andptr(t1, Klass::_lh_header_size_mask);
1167 __ subptr(arr_size, t1); // body length
1168 __ addptr(t1, obj); // body start
1169 __ initialize_body(t1, arr_size, 0, t2);
1170 __ verify_oop(obj);
// Fast-path success: return with the new array already in rax.
1171 __ ret(0);
1172
1173 __ bind(slow_path);
1174 }
1175
// Slow path: call into the runtime allocator (may GC), with live
// registers saved and an oop map recorded at the call site.
1176 __ enter();
1177 OopMap* map = save_live_registers(sasm, 3);
1178 int call_offset;
1179 if (id == new_type_array_id) {
1180 call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
1181 } else {
1182 call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
1183 }
1184
1185 oop_maps = new OopMapSet();
1186 oop_maps->add_gc_map(call_offset, map);
// Restore everything except rax, which carries the result oop back.
1187 restore_live_registers_except_rax(sasm);
1188
1189 __ verify_oop(obj);
1190 __ leave();
1191 __ ret(0);
1192
1193 // rax,: new array
1194 }
1195 break;
1196
1197 case new_multi_array_id:
1198 { StubFrame f(sasm, "new_multi_array", dont_gc_arguments);
1199 // rax,: klass
1200 // rbx,: rank
1201 // rcx: address of 1st dimension
|
1086 case counter_overflow_id:
1087 {
// Stub entered when a compiled method's invocation/backedge counter
// overflows. The bci and a Method* were placed on the stack by the
// caller; both are forwarded to Runtime1's counter_overflow entry
// (presumably so the compilation policy can react — confirm in
// c1_Runtime1.cpp).
1088 Register bci = rax, method = rbx;
1089 __ enter();
// Save every live register and record an oop map: the runtime call
// below may trigger a GC that needs to find oops in the saved frame.
1090 OopMap* map = save_live_registers(sasm, 3);
1091 // Retrieve bci
1092 __ movl(bci, Address(rbp, 2*BytesPerWord));
1093 // And a pointer to the Method*
1094 __ movptr(method, Address(rbp, 3*BytesPerWord));
1095 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method);
// Attach the oop map at the exact call pc so the GC can walk this frame.
1096 oop_maps = new OopMapSet();
1097 oop_maps->add_gc_map(call_offset, map);
1098 restore_live_registers(sasm);
1099 __ leave();
1100 __ ret(0);
1101 }
1102 break;
1103
1104 case new_type_array_id:
1105 case new_object_array_id:
1106 case new_value_array_id:
1107 {
// Allocation stub shared by primitive-typed, object, and value-type
// arrays (new_value_array_id is presumably the Valhalla flattened
// value array — confirm against the project's Klass tag definitions).
// Register contract: length in rbx and klass in rdx on entry; the
// new array oop is returned in rax.
1108 Register length = rbx; // Incoming
1109 Register klass = rdx; // Incoming
1110 Register obj = rax; // Result
1111
1112 if (id == new_type_array_id) {
1113 __ set_info("new_type_array", dont_gc_arguments);
1114 } else if (id == new_object_array_id) {
1115 __ set_info("new_object_array", dont_gc_arguments);
1116 } else {
1117 __ set_info("new_value_array", dont_gc_arguments);
1118 }
1119
1120 #ifdef ASSERT
1121 // assert object type is really an array of the proper kind
1122 {
1123 Label ok;
1124 Register t0 = obj;
// Extract the array tag bits from the klass' layout helper and check
// they match the kind of array this stub id is meant to allocate.
1125 __ movl(t0, Address(klass, Klass::layout_helper_offset()));
1126 __ sarl(t0, Klass::_lh_array_tag_shift);
// NOTE(review): no default case — unreachable in practice since only
// the three stub ids above dispatch here, but some compilers warn on
// an incomplete enum switch.
1127 switch (id) {
1128 case new_type_array_id: __ cmpl(t0, Klass::_lh_array_tag_type_value); break;
1129 case new_object_array_id: __ cmpl(t0, Klass::_lh_array_tag_obj_value); break;
1130 case new_value_array_id: __ cmpl(t0, Klass::_lh_array_tag_vt_value); break;
1131 }
1132 __ jcc(Assembler::equal, ok);
1133 __ stop("assert(is an array klass)");
1134 __ should_not_reach_here();
1135 __ bind(ok);
1136 }
1137 #endif // ASSERT
1138
1139 // If TLAB is disabled, see if there is support for inlining contiguous
1140 // allocations.
1141 // Otherwise, just go to the slow path.
1142 if (!UseTLAB && Universe::heap()->supports_inline_contig_alloc()) {
1143 Register arr_size = rsi;
1144 Register t1 = rcx; // must be rcx for use as shift count
1145 Register t2 = rdi;
1146 Label slow_path;
1147
1148 // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
1149 // since size is positive movl does right thing on 64bit
1150 __ movl(t1, Address(klass, Klass::layout_helper_offset()));
1151 // since size is positive movl does right thing on 64bit
// NOTE(review): this listing jumps from line 1151 to 1166 — the size
// computation and the eden allocation that define `arr_size` and `obj`
// are not visible in this chunk; verify against the full file.
// Install the array header, then locate the body (header size is the
// byte of the layout helper at _lh_header_size_shift) and zero it.
1166 __ initialize_header(obj, klass, length, t1, t2);
1167 __ movb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte)));
1168 assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
1169 assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
1170 __ andptr(t1, Klass::_lh_header_size_mask);
1171 __ subptr(arr_size, t1); // body length
1172 __ addptr(t1, obj); // body start
1173 __ initialize_body(t1, arr_size, 0, t2);
1174 __ verify_oop(obj);
// Fast-path success: return with the new array already in rax.
1175 __ ret(0);
1176
1177 __ bind(slow_path);
1178 }
1179
// Slow path: call into the runtime allocator (may GC), with live
// registers saved and an oop map recorded at the call site.
1180 __ enter();
1181 OopMap* map = save_live_registers(sasm, 3);
1182 int call_offset;
1183 if (id == new_type_array_id) {
1184 call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
1185 } else {
1186 // Runtime1::new_object_array handles both object and value arrays
1187 call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
1188 }
1189
1190 oop_maps = new OopMapSet();
1191 oop_maps->add_gc_map(call_offset, map);
// Restore everything except rax, which carries the result oop back.
1192 restore_live_registers_except_rax(sasm);
1193
1194 __ verify_oop(obj);
1195 __ leave();
1196 __ ret(0);
1197
1198 // rax,: new array
1199 }
1200 break;
1201
1202 case new_multi_array_id:
1203 { StubFrame f(sasm, "new_multi_array", dont_gc_arguments);
1204 // rax,: klass
1205 // rbx,: rank
1206 // rcx: address of 1st dimension
|