
src/hotspot/cpu/x86/c1_Runtime1_x86.cpp

--- old/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp


   1 /*
   2  * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *


1086     case counter_overflow_id:
1087       {
1088         Register bci = rax, method = rbx;
1089         __ enter();
1090         OopMap* map = save_live_registers(sasm, 3);
1091         // Retrieve bci
1092         __ movl(bci, Address(rbp, 2*BytesPerWord));
1093         // And a pointer to the Method*
1094         __ movptr(method, Address(rbp, 3*BytesPerWord));
1095         int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method);
1096         oop_maps = new OopMapSet();
1097         oop_maps->add_gc_map(call_offset, map);
1098         restore_live_registers(sasm);
1099         __ leave();
1100         __ ret(0);
1101       }
1102       break;
1103 
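A note on the two loads above: after __ enter(), rbp points at the saved rbp with the return address one word above it, so the two values the compiled caller stored with store_parameter land at rbp + 2 and rbp + 3 words. A minimal sketch of that frame, with illustrative slot names (the enum is not in the source):

    // Frame seen by the counter_overflow stub after __ enter();
    // offsets are in machine words, names are illustrative.
    enum counter_overflow_frame {
      saved_rbp_off   = 0,  // pushed by enter()
      return_addr_off = 1,  // pushed by the call into the stub
      bci_off         = 2,  // -> movl(bci, Address(rbp, 2*BytesPerWord))
      method_off      = 3   // -> movptr(method, Address(rbp, 3*BytesPerWord))
    };
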
1104     case new_type_array_id:
1105     case new_object_array_id:
1106       {
1107         Register length   = rbx; // Incoming
1108         Register klass    = rdx; // Incoming
1109         Register obj      = rax; // Result
1110 
1111         if (id == new_type_array_id) {
1112           __ set_info("new_type_array", dont_gc_arguments);
1113         } else {
1114           __ set_info("new_object_array", dont_gc_arguments);
1115         }
1116 
1117 #ifdef ASSERT
1118         // assert object type is really an array of the proper kind
1119         {
1120           Label ok;
1121           Register t0 = obj;
1122           __ movl(t0, Address(klass, Klass::layout_helper_offset()));
1123           __ sarl(t0, Klass::_lh_array_tag_shift);
1124           int tag = ((id == new_type_array_id)
1125                      ? Klass::_lh_array_tag_type_value
1126                      : Klass::_lh_array_tag_obj_value);
1127           __ cmpl(t0, tag);
1128           __ jcc(Assembler::equal, ok);
1129           __ stop("assert(is an array klass)");
1130           __ should_not_reach_here();
1131           __ bind(ok);
1132         }
1133 #endif // ASSERT
1134 
1135         // If TLAB is disabled, see if there is support for inlining contiguous
1136         // allocations.
1137         // Otherwise, just go to the slow path.
1138         if (!UseTLAB && Universe::heap()->supports_inline_contig_alloc()) {
1139           Register arr_size = rsi;
1140           Register t1       = rcx;  // must be rcx for use as shift count
1141           Register t2       = rdi;
1142           Label slow_path;
1143 
1144           // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
1145           // since size is positive movl does right thing on 64bit
1146           __ movl(t1, Address(klass, Klass::layout_helper_offset()));
1147           // since size is positive movl does right thing on 64bit
1148           __ movl(arr_size, length);
1149           assert(t1 == rcx, "fixed register usage");


1162           __ initialize_header(obj, klass, length, t1, t2);
1163           __ movb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte)));
1164           assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
1165           assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
1166           __ andptr(t1, Klass::_lh_header_size_mask);
1167           __ subptr(arr_size, t1);  // body length
1168           __ addptr(t1, obj);       // body start
1169           __ initialize_body(t1, arr_size, 0, t2);
1170           __ verify_oop(obj);
1171           __ ret(0);
1172 
1173           __ bind(slow_path);
1174         }
1175 
1176         __ enter();
1177         OopMap* map = save_live_registers(sasm, 3);
1178         int call_offset;
1179         if (id == new_type_array_id) {
1180           call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
1181         } else {
1182           call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
1183         }
1184 
1185         oop_maps = new OopMapSet();
1186         oop_maps->add_gc_map(call_offset, map);
1187         restore_live_registers_except_rax(sasm);
1188 
1189         __ verify_oop(obj);
1190         __ leave();
1191         __ ret(0);
1192 
1193         // rax,: new array
1194       }
1195       break;
1196 
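Both the tag check in the ASSERT block and the size formula round_up(hdr + length << (layout_helper & 0x1F)) decode Klass::layout_helper, a single jint that packs the array tag, header size, and log2 of the element size. A self-contained sketch of the size computation, using illustrative constants in place of the real Klass::_lh_* values (see klass.hpp for the authoritative encoding):

    #include <cstdint>

    // Illustrative stand-ins for Klass::_lh_*; arrays carry the tag in the
    // sign bits, so an array layout_helper is negative.
    const int lh_log2_element_size_mask = 0x1F; // low bits: log2(element size)
    const int lh_header_size_shift      = 16;   // next byte: header size in bytes
    const int lh_header_size_mask       = 0xFF;

    int array_allocation_size(int32_t layout_helper, int length, int align_bytes) {
      int elem_shift = layout_helper & lh_log2_element_size_mask;
      int hdr_bytes  = (layout_helper >> lh_header_size_shift) & lh_header_size_mask;
      int size       = hdr_bytes + (length << elem_shift);
      return (size + align_bytes - 1) & ~(align_bytes - 1); // round_up to alignment
    }
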
1197     case new_multi_array_id:
1198       { StubFrame f(sasm, "new_multi_array", dont_gc_arguments);
1199         // rax,: klass
1200         // rbx,: rank
1201         // rcx: address of 1st dimension
1202         OopMap* map = save_live_registers(sasm, 4);
1203         int call_offset = __ call_RT(rax, noreg, CAST_FROM_FN_PTR(address, new_multi_array), rax, rbx, rcx);
1204 
1205         oop_maps = new OopMapSet();
1206         oop_maps->add_gc_map(call_offset, map);
1207         restore_live_registers_except_rax(sasm);
1208 
1209         // rax,: new multi array
1210         __ verify_oop(rax);
1211       }
1212       break;
1213 
1214     case register_finalizer_id:
1215       {
1216         __ set_info("register_finalizer", dont_gc_arguments);
1217 
1218         // This is called via call_runtime so the arguments
1219         // will be placed in C ABI locations
1220 
1221 #ifdef _LP64
1222         __ verify_oop(c_rarg0);
1223         __ mov(rax, c_rarg0);
1224 #else
1225         // The object is passed on the stack and we haven't pushed a
1226         // frame yet, so it's one word away from the top of stack.
1227         __ movptr(rax, Address(rsp, 1 * BytesPerWord));
1228         __ verify_oop(rax);
1229 #endif // _LP64
1230 
1231         // load the klass and check the has_finalizer flag
1232         Label register_finalizer;
1233         Register t = rsi;


1295         //       activation and we are calling a leaf VM function only.
1296         generate_unwind_exception(sasm);
1297       }
1298       break;
1299 
1300     case throw_array_store_exception_id:
1301       { StubFrame f(sasm, "throw_array_store_exception", dont_gc_arguments);
1302         // tos + 0: link
1303         //     + 1: return address
1304         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
1305       }
1306       break;
1307 
1308     case throw_class_cast_exception_id:
1309       { StubFrame f(sasm, "throw_class_cast_exception", dont_gc_arguments);
1310         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
1311       }
1312       break;
1313 
1314     case throw_incompatible_class_change_error_id:
1315       { StubFrame f(sasm, "throw_incompatible_class_cast_exception", dont_gc_arguments);
1316         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
1317       }
1318       break;
1319 
1320     case slow_subtype_check_id:
1321       {
1322         // Typical calling sequence:
1323         // __ push(klass_RInfo);  // object klass or other subclass
1324         // __ push(sup_k_RInfo);  // array element klass or other superclass
1325         // __ call(slow_subtype_check);
1326         // Note that the subclass is pushed first, and is therefore deepest.
1327         // Previous versions of this code reversed the names 'sub' and 'super'.
1328         // This was operationally harmless but made the code unreadable.
1329         enum layout {
1330           rax_off, SLOT2(raxH_off)
1331           rcx_off, SLOT2(rcxH_off)
1332           rsi_off, SLOT2(rsiH_off)
1333           rdi_off, SLOT2(rdiH_off)
1334           // saved_rbp_off, SLOT2(saved_rbpH_off)
1335           return_off, SLOT2(returnH_off)
1336           sup_k_off, SLOT2(sup_kH_off)


+++ new/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp

   1 /*
   2  * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *


1086     case counter_overflow_id:
1087       {
1088         Register bci = rax, method = rbx;
1089         __ enter();
1090         OopMap* map = save_live_registers(sasm, 3);
1091         // Retrieve bci
1092         __ movl(bci, Address(rbp, 2*BytesPerWord));
1093         // And a pointer to the Method*
1094         __ movptr(method, Address(rbp, 3*BytesPerWord));
1095         int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method);
1096         oop_maps = new OopMapSet();
1097         oop_maps->add_gc_map(call_offset, map);
1098         restore_live_registers(sasm);
1099         __ leave();
1100         __ ret(0);
1101       }
1102       break;
1103 
1104     case new_type_array_id:
1105     case new_object_array_id:
1106     case new_value_array_id:
1107       {
1108         Register length   = rbx; // Incoming
1109         Register klass    = rdx; // Incoming
1110         Register obj      = rax; // Result
1111 
1112         if (id == new_type_array_id) {
1113           __ set_info("new_type_array", dont_gc_arguments);
1114         } else if (id == new_object_array_id) {
1115           __ set_info("new_object_array", dont_gc_arguments);
1116         } else {
1117           __ set_info("new_value_array", dont_gc_arguments);
1118         }
1119 
1120 #ifdef ASSERT
1121         // assert object type is really an array of the proper kind
1122         {
1123           Label ok;
1124           Register t0 = obj;
1125           __ movl(t0, Address(klass, Klass::layout_helper_offset()));
1126           __ sarl(t0, Klass::_lh_array_tag_shift);
1127           switch (id) {
1128           case new_type_array_id:
1129             __ cmpl(t0, Klass::_lh_array_tag_type_value);
1130             __ jcc(Assembler::equal, ok);
1131             __ stop("assert(is a type array klass)");
1132             break;
1133           case new_object_array_id:
1134           case new_value_array_id: // <-- needs to be renamed to new_non_null_array_id!
1135             // FIXME:
1136             // The VM currently does not distinguish between anewarray of
1137             // "[QV;" (elements are non-nullable) vs "[LV;" (elements may be null).
1138             // Instead, both are treated essentially as "[QV;". This code needs
1139             // to be reimplemented after proper support of "[LV;" is implemented in the VM.
1140             //
1141             __ cmpl(t0, Klass::_lh_array_tag_obj_value);
1142             __ jcc(Assembler::equal, ok);
1143             __ cmpl(t0, Klass::_lh_array_tag_vt_value);
1144             __ jcc(Assembler::equal, ok);
1145             __ stop("assert(is an object or value array klass)");
1146             break;
1147           default:  ShouldNotReachHere();
1148           }
1149           __ should_not_reach_here();
1150           __ bind(ok);
1151         }
1152 #endif // ASSERT
1153 
1154         // If TLAB is disabled, see if there is support for inlining contiguous
1155         // allocations.
1156         // Otherwise, just go to the slow path.
1157         if (!UseTLAB && Universe::heap()->supports_inline_contig_alloc()) {
1158           Register arr_size = rsi;
1159           Register t1       = rcx;  // must be rcx for use as shift count
1160           Register t2       = rdi;
1161           Label slow_path;
1162 
1163           // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
1164           // since size is positive movl does right thing on 64bit
1165           __ movl(t1, Address(klass, Klass::layout_helper_offset()));
1166           // since size is positive movl does right thing on 64bit
1167           __ movl(arr_size, length);
1168           assert(t1 == rcx, "fixed register usage");


1181           __ initialize_header(obj, klass, length, t1, t2);
1182           __ movb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte)));
1183           assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
1184           assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
1185           __ andptr(t1, Klass::_lh_header_size_mask);
1186           __ subptr(arr_size, t1);  // body length
1187           __ addptr(t1, obj);       // body start
1188           __ initialize_body(t1, arr_size, 0, t2);
1189           __ verify_oop(obj);
1190           __ ret(0);
1191 
1192           __ bind(slow_path);
1193         }
1194 
1195         __ enter();
1196         OopMap* map = save_live_registers(sasm, 3);
1197         int call_offset;
1198         if (id == new_type_array_id) {
1199           call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
1200         } else {
1201           // Runtime1::new_object_array handles both object and value arrays.
1202           // See comments in the ASSERT block above.
1203           call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
1204         }
1205 
1206         oop_maps = new OopMapSet();
1207         oop_maps->add_gc_map(call_offset, map);
1208         restore_live_registers_except_rax(sasm);
1209 
1210         __ verify_oop(obj);
1211         __ leave();
1212         __ ret(0);
1213 
1214         // rax,: new array
1215       }
1216       break;
1217 
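The array-allocation cases above all share one slow-call shape: build a frame, spill the live registers while recording an OopMap that describes them, call into the runtime (which may GC or deoptimize), attach the map at the call's return offset, and restore everything except rax so the newly allocated oop survives as the stub's result. Condensed for comparison (n and entry stand for the per-stub save size and runtime entry point; this restates the code above rather than adding to it):

    __ enter();                                     // rbp-based frame, walkable by GC
    OopMap* map = save_live_registers(sasm, n);     // spill + describe registers
    int off = __ call_RT(rax, noreg, entry, ...);   // runtime call; result in rax
    oop_maps = new OopMapSet();
    oop_maps->add_gc_map(off, map);                 // map keyed by return offset
    restore_live_registers_except_rax(sasm);        // keep rax: the new oop
    __ leave();
    __ ret(0);
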
1218     case new_multi_array_id:
1219       { StubFrame f(sasm, "new_multi_array", dont_gc_arguments);
1220         // rax,: klass
1221         // rbx,: rank
1222         // rcx: address of 1st dimension
1223         OopMap* map = save_live_registers(sasm, 4);
1224         int call_offset = __ call_RT(rax, noreg, CAST_FROM_FN_PTR(address, new_multi_array), rax, rbx, rcx);
1225 
1226         oop_maps = new OopMapSet();
1227         oop_maps->add_gc_map(call_offset, map);
1228         restore_live_registers_except_rax(sasm);
1229 
1230         // rax,: new multi array
1231         __ verify_oop(rax);
1232       }
1233       break;
1234 
1235     case load_flattened_array_id:
1236       {
1237         StubFrame f(sasm, "load_flattened_array", dont_gc_arguments);
1238         OopMap* map = save_live_registers(sasm, 3);
1239 
1240         // Called with store_parameter and not C abi
1241 
1242         f.load_argument(1, rax); // rax,: array
1243         f.load_argument(0, rbx); // rbx,: index
1244         int call_offset = __ call_RT(rax, noreg, CAST_FROM_FN_PTR(address, load_flattened_array), rax, rbx);
1245 
1246         oop_maps = new OopMapSet();
1247         oop_maps->add_gc_map(call_offset, map);
1248         restore_live_registers_except_rax(sasm);
1249 
1250         // rax,: loaded element at array[index]
1251         __ verify_oop(rax);
1252       }
1253       break;
1254 
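"Called with store_parameter and not C abi" means the compiled caller pushed the operands into stub-argument slots before the call; f.load_argument(i, reg) then reads argument i back out of the caller's frame. On this port that plausibly reduces to a load at rbp + (i + 2) words, skipping the saved rbp and the return address, with argument 0 being the value stored last (hence index before array above; the store_flattened_array case below follows the same convention). A hypothetical helper showing the assumed addressing:

    // Assumed shape of StubFrame::load_argument's addressing on x86:
    // slot 0 = saved rbp, slot 1 = return pc, arguments start at slot 2.
    Address stub_argument(int i) {
      return Address(rbp, (i + 2) * BytesPerWord);
    }
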
1255     case store_flattened_array_id:
1256       {
1257         StubFrame f(sasm, "store_flattened_array", dont_gc_arguments);
1258         OopMap* map = save_live_registers(sasm, 4);
1259 
1260         // Called with store_parameter and not C abi
1261 
1262         f.load_argument(2, rax); // rax,: array
1263         f.load_argument(1, rbx); // rbx,: index
1264         f.load_argument(0, rcx); // rcx,: value
1265         int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, store_flattened_array), rax, rbx, rcx);
1266 
1267         oop_maps = new OopMapSet();
1268         oop_maps->add_gc_map(call_offset, map);
1269         restore_live_registers_except_rax(sasm);
1270       }
1271       break;
1272 
1273     case register_finalizer_id:
1274       {
1275         __ set_info("register_finalizer", dont_gc_arguments);
1276 
1277         // This is called via call_runtime so the arguments
1278         // will be placed in C ABI locations
1279 
1280 #ifdef _LP64
1281         __ verify_oop(c_rarg0);
1282         __ mov(rax, c_rarg0);
1283 #else
1284         // The object is passed on the stack and we haven't pushed a
1285         // frame yet, so it's one word away from the top of stack.
1286         __ movptr(rax, Address(rsp, 1 * BytesPerWord));
1287         __ verify_oop(rax);
1288 #endif // _LP64
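For readers outside HotSpot: c_rarg0 names the first integer argument register of the native C ABI, so this mov is rax <- rdi on System V x86-64 and rax <- rcx on Windows x64.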
1289 
1290         // load the klass and check the has_finalizer flag
1291         Label register_finalizer;
1292         Register t = rsi;


1354         //       activation and we are calling a leaf VM function only.
1355         generate_unwind_exception(sasm);
1356       }
1357       break;
1358 
1359     case throw_array_store_exception_id:
1360       { StubFrame f(sasm, "throw_array_store_exception", dont_gc_arguments);
1361         // tos + 0: link
1362         //     + 1: return address
1363         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
1364       }
1365       break;
1366 
1367     case throw_class_cast_exception_id:
1368       { StubFrame f(sasm, "throw_class_cast_exception", dont_gc_arguments);
1369         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
1370       }
1371       break;
1372 
1373     case throw_incompatible_class_change_error_id:
1374       { StubFrame f(sasm, "throw_incompatible_class_change_error", dont_gc_arguments);
1375         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
1376       }
1377       break;
1378 
1379     case throw_illegal_monitor_state_exception_id:
1380       { StubFrame f(sasm, "throw_illegal_monitor_state_exception", dont_gc_arguments);
1381         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_illegal_monitor_state_exception), false);
1382       }
1383       break;
1384 
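The trailing boolean in these generate_exception_throw calls is its has_argument parameter: true when the runtime entry receives the offending value (the array-store and class-cast throws report the problematic object), false for argument-less throws such as the two above. Roughly the two entry shapes this implies, with signatures assumed rather than copied from the runtime:

    // has_argument == true: the stub forwards the offending value from its
    // incoming stack slot to an entry of this shape.
    static void throw_class_cast_exception(JavaThread* thread, oopDesc* object);

    // has_argument == false: the entry needs only the current thread.
    static void throw_incompatible_class_change_error(JavaThread* thread);
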
1385     case slow_subtype_check_id:
1386       {
1387         // Typical calling sequence:
1388         // __ push(klass_RInfo);  // object klass or other subclass
1389         // __ push(sup_k_RInfo);  // array element klass or other superclass
1390         // __ call(slow_subtype_check);
1391         // Note that the subclass is pushed first, and is therefore deepest.
1392         // Previous versions of this code reversed the names 'sub' and 'super'.
1393         // This was operationally harmless but made the code unreadable.
1394         enum layout {
1395           rax_off, SLOT2(raxH_off)
1396           rcx_off, SLOT2(rcxH_off)
1397           rsi_off, SLOT2(rsiH_off)
1398           rdi_off, SLOT2(rdiH_off)
1399           // saved_rbp_off, SLOT2(saved_rbpH_off)
1400           return_off, SLOT2(returnH_off)
1401           sup_k_off, SLOT2(sup_kH_off)

