1164 for ( int i = arg_count - 1 ; i >= first_arg ; i-- ) {
1165 if (args[i].first()->is_Register()) {
1166 __ pop(args[i].first()->as_Register());
1167 } else if (args[i].first()->is_XMMRegister()) {
1168 __ movdbl(args[i].first()->as_XMMRegister(), Address(rsp, 0));
1169 __ addptr(rsp, 2*wordSize);
1170 }
1171 }
1172 }
1173
1174
1175 static void save_or_restore_arguments(MacroAssembler* masm,
1176 const int stack_slots,
1177 const int total_in_args,
1178 const int arg_save_area,
1179 OopMap* map,
1180 VMRegPair* in_regs,
1181 BasicType* in_sig_bt) {
1182 // if map is non-NULL then the code should store the values,
1183 // otherwise it should load them.
1184 int handle_index = 0;
1185 // Save down double-word arguments first
1186 for ( int i = 0; i < total_in_args; i++) {
1187 if (in_regs[i].first()->is_XMMRegister() && in_sig_bt[i] == T_DOUBLE) {
1188 int slot = handle_index * VMRegImpl::slots_per_word + arg_save_area;
1189 int offset = slot * VMRegImpl::stack_slot_size;
1190 handle_index += 2;
1191 assert(handle_index <= stack_slots, "overflow");
1192 if (map != NULL) {
1193 __ movdbl(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
1194 } else {
1195 __ movdbl(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
1196 }
1197 }
1198 if (in_regs[i].first()->is_Register() &&
1199 (in_sig_bt[i] == T_LONG || in_sig_bt[i] == T_ARRAY)) {
1200 int slot = handle_index * VMRegImpl::slots_per_word + arg_save_area;
1201 int offset = slot * VMRegImpl::stack_slot_size;
1202 handle_index += 2;
1203 assert(handle_index <= stack_slots, "overflow");
1204 if (map != NULL) {
1205 __ movq(Address(rsp, offset), in_regs[i].first()->as_Register());
1206 if (in_sig_bt[i] == T_ARRAY) {
1207 map->set_oop(VMRegImpl::stack2reg(slot));
1208 }
1209 } else {
1210 __ movq(in_regs[i].first()->as_Register(), Address(rsp, offset));
1211 }
1212 }
1213 }
1214 // Save or restore single word registers
1215 for ( int i = 0; i < total_in_args; i++) {
1216 if (in_regs[i].first()->is_Register()) {
1217 int slot = handle_index++ * VMRegImpl::slots_per_word + arg_save_area;
1218 int offset = slot * VMRegImpl::stack_slot_size;
1219 assert(handle_index <= stack_slots, "overflow");
1220
1221 // Value is in an input register; we must flush it to the stack
1222 const Register reg = in_regs[i].first()->as_Register();
1223 switch (in_sig_bt[i]) {
1224 case T_BOOLEAN:
1225 case T_CHAR:
1226 case T_BYTE:
1227 case T_SHORT:
1228 case T_INT:
1229 if (map != NULL) {
1230 __ movl(Address(rsp, offset), reg);
1231 } else {
1232 __ movl(reg, Address(rsp, offset));
1233 }
1234 break;
1235 case T_ARRAY:
1236 case T_LONG:
1237 // handled above
1238 break;
1239 case T_OBJECT:
1240 default: ShouldNotReachHere();
1241 }
1242 } else if (in_regs[i].first()->is_XMMRegister()) {
1243 if (in_sig_bt[i] == T_FLOAT) {
1244 int slot = handle_index++ * VMRegImpl::slots_per_word + arg_save_area;
1245 int offset = slot * VMRegImpl::stack_slot_size;
1246 assert(handle_index <= stack_slots, "overflow");
1247 if (map != NULL) {
1248 __ movflt(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
1249 } else {
1250 __ movflt(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
1251 }
1252 }
1253 } else if (in_regs[i].first()->is_stack()) {
1254 if (in_sig_bt[i] == T_ARRAY && map != NULL) {
1255 int offset_in_older_frame = in_regs[i].first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
1256 map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots));
1257 }
1258 }
1259 }
1260 }
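// Usage sketch (illustrative; mirrors how check_needs_gc_for_critical_native
// below is expected to drive this helper): call once with a real OopMap to
// spill the register arguments and describe any oops in them, make the
// runtime call that may trigger a GC, then call again with map == NULL to
// reload them:
//
//   save_or_restore_arguments(masm, stack_slots, total_in_args,
//                             arg_save_area, map, in_regs, in_sig_bt);
//   // ... safepoint / runtime call that may move oops ...
//   save_or_restore_arguments(masm, stack_slots, total_in_args,
//                             arg_save_area, NULL, in_regs, in_sig_bt);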
1261
1262
1263 // Check GC_locker::needs_gc and enter the runtime if it's true. This
1264 // keeps a new JNI critical region from starting until a GC has been
1265 // forced. Save down any oops in registers and describe them in an
1266 // OopMap.
1351 move_ptr(masm, reg, tmp);
1352 reg = tmp;
1353 }
1354 __ testptr(reg.first()->as_Register(), reg.first()->as_Register());
1355 __ jccb(Assembler::equal, is_null);
1356 __ lea(tmp_reg, Address(reg.first()->as_Register(), arrayOopDesc::base_offset_in_bytes(in_elem_type)));
1357 move_ptr(masm, tmp, body_arg);
1358 // load the length relative to the body.
1359 __ movl(tmp_reg, Address(tmp_reg, arrayOopDesc::length_offset_in_bytes() -
1360 arrayOopDesc::base_offset_in_bytes(in_elem_type)));
1361 move32_64(masm, tmp, length_arg);
1362 __ jmpb(done);
1363 __ bind(is_null);
1364 // Pass zeros
1365 __ xorptr(tmp_reg, tmp_reg);
1366 move_ptr(masm, tmp, body_arg);
1367 move32_64(masm, tmp, length_arg);
1368 __ bind(done);
1369 }
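// Note on the convention above (illustrative): each Java array argument is
// expanded into a (length, body) pair for the critical native -- the length
// as a jint in out_regs[c_arg] and the address of the first element in
// out_regs[c_arg + 1]. A NULL array is passed as length 0 with a NULL body,
// which is what the is_null path arranges.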
1370
1371 // ---------------------------------------------------------------------------
1372 // Generate a native wrapper for a given method. The method takes arguments
1373 // in the Java compiled code convention, marshals them to the native
1374 // convention (handlizes oops, etc), transitions to native, makes the call,
1375 // returns to java state (possibly blocking), unhandlizes any result and
1376 // returns.
1377 nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
1378 methodHandle method,
1379 int compile_id,
1380 int total_in_args,
1381 int comp_args_on_stack,
1382 BasicType *in_sig_bt,
1383 VMRegPair *in_regs,
1384 BasicType ret_type) {
1385 bool is_critical_native = true;
1386 address native_func = method->critical_native_function();
1387 if (native_func == NULL) {
1388 native_func = method->native_function();
1389 is_critical_native = false;
1390 }
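// Illustrative example of the lookup (assumed naming, following the
// JavaCritical_ convention for critical natives): for
//   class C { static native int sum(int[] a); }
// critical_native_function() would return an entry point such as
//   jint JavaCritical_C_sum(jint length, jint* body);
// which takes no JNIEnv* or jclass. If no such entry was bound, native_func
// falls back to the ordinary Java_C_sum JNI entry and is_critical_native
// stays false.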
1471
1472 // Compute framesize for the wrapper. We need to handlize all oops in
1473 // incoming registers
1474
1475 // Calculate the total number of stack slots we will need.
1476
1477 // First count the abi requirement plus all of the outgoing args
1478 int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
1479
1480 // Now the space for the inbound oop handle area
1481 int total_save_slots = 6 * VMRegImpl::slots_per_word; // 6 arguments passed in registers
1482 if (is_critical_native) {
1483 // Critical natives may have to call out so they need a save area
1484 // for register arguments.
1485 int double_slots = 0;
1486 int single_slots = 0;
1487 for ( int i = 0; i < total_in_args; i++) {
1488 if (in_regs[i].first()->is_Register()) {
1489 const Register reg = in_regs[i].first()->as_Register();
1490 switch (in_sig_bt[i]) {
1491 case T_ARRAY:
1492 case T_BOOLEAN:
1493 case T_BYTE:
1494 case T_SHORT:
1495 case T_CHAR:
1496 case T_INT: single_slots++; break;
1497 case T_LONG: double_slots++; break;
1498 default: ShouldNotReachHere();
1499 }
1500 } else if (in_regs[i].first()->is_XMMRegister()) {
1501 switch (in_sig_bt[i]) {
1502 case T_FLOAT: single_slots++; break;
1503 case T_DOUBLE: double_slots++; break;
1504 default: ShouldNotReachHere();
1505 }
1506 } else if (in_regs[i].first()->is_FloatRegister()) {
1507 ShouldNotReachHere();
1508 }
1509 }
1510 total_save_slots = double_slots * 2 + single_slots;
1511 // align the save area
1512 if (double_slots != 0) {
1513 stack_slots = round_to(stack_slots, 2);
1514 }
1515 }
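// Worked example (illustrative signature): for a critical native taking
// (jlong, jint, jfloat, jdouble) entirely in registers, double_slots == 2
// (T_LONG, T_DOUBLE) and single_slots == 2 (T_INT, T_FLOAT), so
// total_save_slots == 2 * 2 + 2 == 6; stack_slots is rounded to an even
// slot count so the 8-byte saves stay 8-byte aligned.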
1516
1673
1674 // Mark location of rbp (someday)
1675 // map->set_callee_saved(VMRegImpl::stack2reg( stack_slots - 2), stack_slots * 2, 0, vmreg(rbp));
1676
1677 // Use eax, ebx as temporaries during any memory-memory moves we have to do
1678 // All inbound args are referenced based on rbp and all outbound args via rsp.
1679
1680
1681 #ifdef ASSERT
1682 bool reg_destroyed[RegisterImpl::number_of_registers];
1683 bool freg_destroyed[XMMRegisterImpl::number_of_registers];
1684 for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) {
1685 reg_destroyed[r] = false;
1686 }
1687 for ( int f = 0 ; f < XMMRegisterImpl::number_of_registers ; f++ ) {
1688 freg_destroyed[f] = false;
1689 }
1690
1691 #endif /* ASSERT */
1692
1693 if (is_critical_native) {
1694 // The mapping of Java and C arguments passed in registers is
1695 // rotated by one, which helps when passing arguments to a regular
1696 // Java method, but for critical natives it creates a cycle which
1697 // can cause arguments to be killed before they are used. Break
1698 // the cycle by moving the first argument into a temporary
1699 // register.
1700 for (int i = 0; i < total_c_args; i++) {
1701 if (in_regs[i].first()->is_Register() &&
1702 in_regs[i].first()->as_Register() == rdi) {
1703 __ mov(rbx, rdi);
1704 in_regs[i].set1(rbx->as_VMReg());
1705 }
1706 }
1707 }
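// Concretely (System V x86_64 registers, as used elsewhere in this file):
// j_rarg0..j_rarg5 are rsi, rdx, rcx, r8, r9, rdi while c_rarg0..c_rarg5
// are rdi, rsi, rdx, rcx, r8, r9. With no JNIEnv* inserted, a critical
// native with six word arguments needs rsi->rdi, rdx->rsi, ..., rdi->r9:
// rdi is the destination of the first move but still the unread source of
// the last, so copying rdi into rbx first breaks the cycle.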
1708
1709 // This may iterate in two different directions depending on the
1710 // kind of native it is. The reason is that for regular JNI natives
1711 // the incoming and outgoing registers are offset upwards and for
1712 // critical natives they are offset down.
1713 int c_arg = total_c_args - 1;
1714 int stride = -1;
1715 int init = total_in_args - 1;
1716 if (is_critical_native) {
1717 // stride forwards
1718 c_arg = 0;
1719 stride = 1;
1720 init = 0;
1721 }
1722 for (int i = init, count = 0; count < total_in_args; i += stride, c_arg += stride, count++ ) {
1723 #ifdef ASSERT
1724 if (in_regs[i].first()->is_Register()) {
1725 assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "destroyed reg!");
1726 } else if (in_regs[i].first()->is_XMMRegister()) {
1727 assert(!freg_destroyed[in_regs[i].first()->as_XMMRegister()->encoding()], "destroyed reg!");
1728 }
1729 if (out_regs[c_arg].first()->is_Register()) {
1730 reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
1731 } else if (out_regs[c_arg].first()->is_XMMRegister()) {
1732 freg_destroyed[out_regs[c_arg].first()->as_XMMRegister()->encoding()] = true;
1733 }
1734 #endif /* ASSERT */
1735 switch (in_sig_bt[i]) {
1736 case T_ARRAY:
1737 if (is_critical_native) {
1738 unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
1739 c_arg++;
1740 #ifdef ASSERT
1741 if (out_regs[c_arg].first()->is_Register()) {
1742 reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
1762 case T_DOUBLE:
1763 assert( i + 1 < total_in_args &&
1764 in_sig_bt[i + 1] == T_VOID &&
1765 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
1766 double_move(masm, in_regs[i], out_regs[c_arg]);
1767 break;
1768
1769 case T_LONG :
1770 long_move(masm, in_regs[i], out_regs[c_arg]);
1771 break;
1772
1773 case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
1774
1775 default:
1776 move32_64(masm, in_regs[i], out_regs[c_arg]);
1777 }
1778 }
1779
1780 // point c_arg at the first arg that is already loaded in case we
1781 // need to spill before we call out
1782 c_arg++;
1783
1784 // Pre-load a static method's oop into r14. Used both by locking code and
1785 // the normal JNI call code.
1786 if (method->is_static() && !is_critical_native) {
1787
1788 // load oop into a register
1789 __ movoop(oop_handle_reg, JNIHandles::make_local(Klass::cast(method->method_holder())->java_mirror()));
1790
1791 // Now handlize the static class mirror; it's known not-null.
1792 __ movptr(Address(rsp, klass_offset), oop_handle_reg);
1793 map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
1794
1795 // Now get the handle
1796 __ lea(oop_handle_reg, Address(rsp, klass_offset));
1797 // store the klass handle as second argument
1798 __ movptr(c_rarg1, oop_handle_reg);
1799 // and protect the arg if we must spill
1800 c_arg--;
1801 }
1802
1164 for ( int i = arg_count - 1 ; i >= first_arg ; i-- ) {
1165 if (args[i].first()->is_Register()) {
1166 __ pop(args[i].first()->as_Register());
1167 } else if (args[i].first()->is_XMMRegister()) {
1168 __ movdbl(args[i].first()->as_XMMRegister(), Address(rsp, 0));
1169 __ addptr(rsp, 2*wordSize);
1170 }
1171 }
1172 }
1173
1174
1175 static void save_or_restore_arguments(MacroAssembler* masm,
1176 const int stack_slots,
1177 const int total_in_args,
1178 const int arg_save_area,
1179 OopMap* map,
1180 VMRegPair* in_regs,
1181 BasicType* in_sig_bt) {
1182 // if map is non-NULL then the code should store the values,
1183 // otherwise it should load them.
1184 int slot = arg_save_area;
1185 // Save down double-word arguments first
1186 for ( int i = 0; i < total_in_args; i++) {
1187 if (in_regs[i].first()->is_XMMRegister() && in_sig_bt[i] == T_DOUBLE) {
1188 int offset = slot * VMRegImpl::stack_slot_size;
1189 slot += VMRegImpl::slots_per_word;
1190 assert(slot <= stack_slots, "overflow");
1191 if (map != NULL) {
1192 __ movdbl(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
1193 } else {
1194 __ movdbl(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
1195 }
1196 }
1197 if (in_regs[i].first()->is_Register() &&
1198 (in_sig_bt[i] == T_LONG || in_sig_bt[i] == T_ARRAY)) {
1199 int offset = slot * VMRegImpl::stack_slot_size;
1200 if (map != NULL) {
1201 __ movq(Address(rsp, offset), in_regs[i].first()->as_Register());
1202 if (in_sig_bt[i] == T_ARRAY) {
1203 map->set_oop(VMRegImpl::stack2reg(slot));
1204 }
1205 } else {
1206 __ movq(in_regs[i].first()->as_Register(), Address(rsp, offset));
1207 }
1208 slot += VMRegImpl::slots_per_word;
1209 }
1210 }
1211 // Save or restore single word registers
1212 for ( int i = 0; i < total_in_args; i++) {
1213 if (in_regs[i].first()->is_Register()) {
1214 int offset = slot * VMRegImpl::stack_slot_size;
1215 slot++;
1216 assert(slot <= stack_slots, "overflow");
1217
1218 // Value is in an input register; we must flush it to the stack
1219 const Register reg = in_regs[i].first()->as_Register();
1220 switch (in_sig_bt[i]) {
1221 case T_BOOLEAN:
1222 case T_CHAR:
1223 case T_BYTE:
1224 case T_SHORT:
1225 case T_INT:
1226 if (map != NULL) {
1227 __ movl(Address(rsp, offset), reg);
1228 } else {
1229 __ movl(reg, Address(rsp, offset));
1230 }
1231 break;
1232 case T_ARRAY:
1233 case T_LONG:
1234 // handled above
1235 break;
1236 case T_OBJECT:
1237 default: ShouldNotReachHere();
1238 }
1239 } else if (in_regs[i].first()->is_XMMRegister()) {
1240 if (in_sig_bt[i] == T_FLOAT) {
1241 int offset = slot * VMRegImpl::stack_slot_size;
1242 slot++;
1243 assert(slot <= stack_slots, "overflow");
1244 if (map != NULL) {
1245 __ movflt(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
1246 } else {
1247 __ movflt(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
1248 }
1249 }
1250 } else if (in_regs[i].first()->is_stack()) {
1251 if (in_sig_bt[i] == T_ARRAY && map != NULL) {
1252 int offset_in_older_frame = in_regs[i].first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
1253 map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots));
1254 }
1255 }
1256 }
1257 }
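// Layout sketch (illustrative signature): with in_sig_bt = { T_DOUBLE,
// T_ARRAY, T_INT } all in registers and VMRegImpl::slots_per_word == 2,
// the first pass stores the double at slots [arg_save_area + 0, +1] and
// the array oop at slots [arg_save_area + 2, +3] (the latter recorded in
// the OopMap when saving), and the second pass stores the int in slot
// arg_save_area + 4.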
1258
1259
1260 // Check GC_locker::needs_gc and enter the runtime if it's true. This
1261 // keeps a new JNI critical region from starting until a GC has been
1262 // forced. Save down any oops in registers and describe them in an
1263 // OopMap.
1348 move_ptr(masm, reg, tmp);
1349 reg = tmp;
1350 }
1351 __ testptr(reg.first()->as_Register(), reg.first()->as_Register());
1352 __ jccb(Assembler::equal, is_null);
1353 __ lea(tmp_reg, Address(reg.first()->as_Register(), arrayOopDesc::base_offset_in_bytes(in_elem_type)));
1354 move_ptr(masm, tmp, body_arg);
1355 // load the length relative to the body.
1356 __ movl(tmp_reg, Address(tmp_reg, arrayOopDesc::length_offset_in_bytes() -
1357 arrayOopDesc::base_offset_in_bytes(in_elem_type)));
1358 move32_64(masm, tmp, length_arg);
1359 __ jmpb(done);
1360 __ bind(is_null);
1361 // Pass zeros
1362 __ xorptr(tmp_reg, tmp_reg);
1363 move_ptr(masm, tmp, body_arg);
1364 move32_64(masm, tmp, length_arg);
1365 __ bind(done);
1366 }
1367
1368
1369 class ComputeMoveOrder: public StackObj {
1370 class MoveOperation: public ResourceObj {
1371 friend class ComputeMoveOrder;
1372 private:
1373 VMRegPair _src;
1374 VMRegPair _dst;
1375 int _src_index;
1376 int _dst_index;
1377 bool _processed;
1378 MoveOperation* _next;
1379 MoveOperation* _prev;
1380
1381 static int get_id(VMRegPair r) {
1382 return r.first()->value();
1383 }
1384
1385 public:
1386 MoveOperation(int src_index, VMRegPair src, int dst_index, VMRegPair dst):
1387 _src(src)
1388 , _dst(dst)
1389 , _src_index(src_index)
1390 , _dst_index(dst_index)
1391 , _processed(false)
1392 , _next(NULL)
1393 , _prev(NULL) {
1394 }
1395
1396 VMRegPair src() const { return _src; }
1397 int src_id() const { return get_id(src()); }
1398 int src_index() const { return _src_index; }
1399 VMRegPair dst() const { return _dst; }
1400 void set_dst(int i, VMRegPair dst) { _dst_index = i; _dst = dst; }
1401 int dst_index() const { return _dst_index; }
1402 int dst_id() const { return get_id(dst()); }
1403 MoveOperation* next() const { return _next; }
1404 MoveOperation* prev() const { return _prev; }
1405 void set_processed() { _processed = true; }
1406 bool is_processed() const { return _processed; }
1407
1408 // Break a cycle of moves by routing this store through the temp register.
1409 void break_cycle(VMRegPair temp_register) {
1410 // create a new store following the last store
1411 // to move from the temp_register to the original
1412 MoveOperation* new_store = new MoveOperation(-1, temp_register, dst_index(), dst());
1413
1414 // break the cycle of links and insert new_store at the end
1415 // break the reverse link.
1416 MoveOperation* p = prev();
1417 if (p->_next != NULL) {
1418 p->_next->_prev = NULL;
1419 }
1420 p->_next = new_store;
1421 new_store->_prev = p;
1422
1423 // change the original store to save its value in the temp.
1424 set_dst(-1, temp_register);
1425 }
1426
1427 void link(GrowableArray<MoveOperation*>& killer) {
1428 // link this store in front of the store that it depends on
1429 MoveOperation* n = killer.at_grow(src_id(), NULL);
1430 if (n != NULL) {
1431 assert(_next == NULL && n->_prev == NULL, "shouldn't have been set yet");
1432 _next = n;
1433 n->_prev = this;
1434 }
1435 }
1436 };
1437
1438 private:
1439 GrowableArray<MoveOperation*> edges;
1440
1441 public:
1442 ComputeMoveOrder(int total_in_args, VMRegPair* in_regs, int total_c_args, VMRegPair* out_regs,
1443 BasicType* in_sig_bt, GrowableArray<int>& arg_order, VMRegPair tmp_vmreg) {
1444 // Move operations where the dest is the stack can all be
1445 // scheduled first since they can't interfere with the other moves.
1446 for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
1447 if (in_sig_bt[i] == T_ARRAY) {
1448 c_arg--;
1449 if (out_regs[c_arg].first()->is_stack() &&
1450 out_regs[c_arg + 1].first()->is_stack()) {
1451 arg_order.push(i);
1452 arg_order.push(c_arg);
1453 } else {
1454 if (out_regs[c_arg].first()->is_stack() ||
1455 in_regs[i].first() == out_regs[c_arg].first()) {
1456 add_edge(i, in_regs[i].first(), c_arg, out_regs[c_arg + 1]);
1457 } else {
1458 add_edge(i, in_regs[i].first(), c_arg, out_regs[c_arg]);
1459 }
1460 }
1461 } else if (in_sig_bt[i] == T_VOID) {
1462 arg_order.push(i);
1463 arg_order.push(c_arg);
1464 } else {
1465 if (out_regs[c_arg].first()->is_stack() ||
1466 in_regs[i].first() == out_regs[c_arg].first()) {
1467 arg_order.push(i);
1468 arg_order.push(c_arg);
1469 } else {
1470 add_edge(i, in_regs[i].first(), c_arg, out_regs[c_arg]);
1471 }
1472 }
1473 }
1474 // Break any cycles in the register moves and emit them in the
1475 // proper order.
1476 GrowableArray<MoveOperation*>* stores = get_store_order(tmp_vmreg);
1477 for (int i = 0; i < stores->length(); i++) {
1478 arg_order.push(stores->at(i)->src_index());
1479 arg_order.push(stores->at(i)->dst_index());
1480 }
1481 }
1482
1483 // Collect all the move operations
1484 void add_edge(int src_index, VMRegPair src, int dst_index, VMRegPair dst) {
1485 if (src.first() == dst.first()) return;
1486 edges.append(new MoveOperation(src_index, src, dst_index, dst));
1487 }
1488
1489 // Walk the edges breaking cycles between moves. The result list
1490 // can be walked in order to produce the proper set of loads
1491 GrowableArray<MoveOperation*>* get_store_order(VMRegPair temp_register) {
1492 // Record which moves kill which values
1493 GrowableArray<MoveOperation*> killer;
1494 for (int i = 0; i < edges.length(); i++) {
1495 MoveOperation* s = edges.at(i);
1496 assert(killer.at_grow(s->dst_id(), NULL) == NULL, "only one killer");
1497 killer.at_put_grow(s->dst_id(), s, NULL);
1498 }
1499 assert(killer.at_grow(MoveOperation::get_id(temp_register), NULL) == NULL,
1500 "make sure temp isn't in the registers that are killed");
1501
1502 // create links between loads and stores
1503 for (int i = 0; i < edges.length(); i++) {
1504 edges.at(i)->link(killer);
1505 }
1506
1507 // at this point, all the move operations are chained together
1508 // in a doubly linked list. Processing it backwards finds
1509 // the beginning of the chain, forwards finds the end. If there's
1510 // a cycle it can be broken at any point, so pick an edge and walk
1511 // backward until the list ends or we end where we started.
1512 GrowableArray<MoveOperation*>* stores = new GrowableArray<MoveOperation*>();
1513 for (int e = 0; e < edges.length(); e++) {
1514 MoveOperation* s = edges.at(e);
1515 if (!s->is_processed()) {
1516 MoveOperation* start = s;
1517 // search for the beginning of the chain or cycle
1518 while (start->prev() != NULL && start->prev() != s) {
1519 start = start->prev();
1520 }
1521 if (start->prev() == s) {
1522 start->break_cycle(temp_register);
1523 }
1524 // walk the chain forward inserting to store list
1525 while (start != NULL) {
1526 stores->append(start);
1527 start->set_processed();
1528 start = start->next();
1529 }
1530 }
1531 }
1532 return stores;
1533 }
1534 };
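// Illustrative trace (not part of the source): suppose the only register
// moves are
//   A: rdi -> rsi    B: rsi -> rdx    C: rdx -> rdi
// The killer table records A as the killer of rsi, B of rdx and C of rdi,
// so link() chains A before C, B before A and C before B -- a cycle.
// Walking prev() back from A reaches C, whose prev() is A again, so
// break_cycle() retargets C at the temp (rdx -> rbx) and appends a final
// rbx -> rdi store, giving the safe order:
//   rdx -> rbx, rsi -> rdx, rdi -> rsi, rbx -> rdi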
1535
1536
1537 // ---------------------------------------------------------------------------
1538 // Generate a native wrapper for a given method. The method takes arguments
1539 // in the Java compiled code convention, marshals them to the native
1540 // convention (handlizes oops, etc), transitions to native, makes the call,
1541 // returns to java state (possibly blocking), unhandlizes any result and
1542 // returns.
1543 nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
1544 methodHandle method,
1545 int compile_id,
1546 int total_in_args,
1547 int comp_args_on_stack,
1548 BasicType *in_sig_bt,
1549 VMRegPair *in_regs,
1550 BasicType ret_type) {
1551 bool is_critical_native = true;
1552 address native_func = method->critical_native_function();
1553 if (native_func == NULL) {
1554 native_func = method->native_function();
1555 is_critical_native = false;
1556 }
1637
1638 // Compute framesize for the wrapper. We need to handlize all oops in
1639 // incoming registers
1640
1641 // Calculate the total number of stack slots we will need.
1642
1643 // First count the abi requirement plus all of the outgoing args
1644 int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
1645
1646 // Now the space for the inbound oop handle area
1647 int total_save_slots = 6 * VMRegImpl::slots_per_word; // 6 arguments passed in registers
1648 if (is_critical_native) {
1649 // Critical natives may have to call out so they need a save area
1650 // for register arguments.
1651 int double_slots = 0;
1652 int single_slots = 0;
1653 for ( int i = 0; i < total_in_args; i++) {
1654 if (in_regs[i].first()->is_Register()) {
1655 const Register reg = in_regs[i].first()->as_Register();
1656 switch (in_sig_bt[i]) {
1657 case T_BOOLEAN:
1658 case T_BYTE:
1659 case T_SHORT:
1660 case T_CHAR:
1661 case T_INT: single_slots++; break;
1662 case T_ARRAY:
1663 case T_LONG: double_slots++; break;
1664 default: ShouldNotReachHere();
1665 }
1666 } else if (in_regs[i].first()->is_XMMRegister()) {
1667 switch (in_sig_bt[i]) {
1668 case T_FLOAT: single_slots++; break;
1669 case T_DOUBLE: double_slots++; break;
1670 default: ShouldNotReachHere();
1671 }
1672 } else if (in_regs[i].first()->is_FloatRegister()) {
1673 ShouldNotReachHere();
1674 }
1675 }
1676 total_save_slots = double_slots * 2 + single_slots;
1677 // align the save area
1678 if (double_slots != 0) {
1679 stack_slots = round_to(stack_slots, 2);
1680 }
1681 }
1682
1839
1840 // Mark location of rbp (someday)
1841 // map->set_callee_saved(VMRegImpl::stack2reg( stack_slots - 2), stack_slots * 2, 0, vmreg(rbp));
1842
1843 // Use eax, ebx as temporaries during any memory-memory moves we have to do
1844 // All inbound args are referenced based on rbp and all outbound args via rsp.
1845
1846
1847 #ifdef ASSERT
1848 bool reg_destroyed[RegisterImpl::number_of_registers];
1849 bool freg_destroyed[XMMRegisterImpl::number_of_registers];
1850 for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) {
1851 reg_destroyed[r] = false;
1852 }
1853 for ( int f = 0 ; f < XMMRegisterImpl::number_of_registers ; f++ ) {
1854 freg_destroyed[f] = false;
1855 }
1856
1857 #endif /* ASSERT */
1858
1859 // This may iterate in two different directions depending on the
1860 // kind of native it is. The reason is that for regular JNI natives
1861 // the incoming and outgoing registers are offset upwards and for
1862 // critical natives they are offset down.
1863 GrowableArray<int> arg_order(2 * total_in_args);
1864 VMRegPair tmp_vmreg;
1865 tmp_vmreg.set1(rbx->as_VMReg());
1866
1867 if (!is_critical_native) {
1868 for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
1869 arg_order.push(i);
1870 arg_order.push(c_arg);
1871 }
1872 } else {
1873 // Compute a valid move order, using tmp_vmreg to break any cycles
1874 ComputeMoveOrder cmo(total_in_args, in_regs, total_c_args, out_regs, in_sig_bt, arg_order, tmp_vmreg);
1875 }
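// Illustrative count (regular instance native): with two Java arguments,
// total_in_args == 2 and total_c_args == 3 (JNIEnv* occupies C arg 0), so
// the loop above pushes [1, 2, 0, 1]: Java arg 1 moves to C arg 2, then
// Java arg 0 to C arg 1. Moving the highest-numbered argument first is
// what keeps a register from being overwritten before it has been read
// when everything shifts up by one.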
1876
1877 int temploc = -1;
1878 for (int ai = 0; ai < arg_order.length(); ai += 2) {
1879 int i = arg_order.at(ai);
1880 int c_arg = arg_order.at(ai + 1);
1881 __ block_comment(err_msg("move %d -> %d", i, c_arg));
1882 if (c_arg == -1) {
1883 assert(is_critical_native, "should only be required for critical natives");
1884 // This arg needs to be moved to a temporary
1885 __ mov(tmp_vmreg.first()->as_Register(), in_regs[i].first()->as_Register());
1886 in_regs[i] = tmp_vmreg;
1887 temploc = i;
1888 continue;
1889 } else if (i == -1) {
1890 assert(is_critical_native, "should only be required for critical natives");
1891 // Read from the temporary location
1892 assert(temploc != -1, "must be valid");
1893 i = temploc;
1894 temploc = -1;
1895 }
1896 #ifdef ASSERT
1897 if (in_regs[i].first()->is_Register()) {
1898 assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "destroyed reg!");
1899 } else if (in_regs[i].first()->is_XMMRegister()) {
1900 assert(!freg_destroyed[in_regs[i].first()->as_XMMRegister()->encoding()], "destroyed reg!");
1901 }
1902 if (out_regs[c_arg].first()->is_Register()) {
1903 reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
1904 } else if (out_regs[c_arg].first()->is_XMMRegister()) {
1905 freg_destroyed[out_regs[c_arg].first()->as_XMMRegister()->encoding()] = true;
1906 }
1907 #endif /* ASSERT */
1908 switch (in_sig_bt[i]) {
1909 case T_ARRAY:
1910 if (is_critical_native) {
1911 unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
1912 c_arg++;
1913 #ifdef ASSERT
1914 if (out_regs[c_arg].first()->is_Register()) {
1915 reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
1935 case T_DOUBLE:
1936 assert( i + 1 < total_in_args &&
1937 in_sig_bt[i + 1] == T_VOID &&
1938 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
1939 double_move(masm, in_regs[i], out_regs[c_arg]);
1940 break;
1941
1942 case T_LONG :
1943 long_move(masm, in_regs[i], out_regs[c_arg]);
1944 break;
1945
1946 case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
1947
1948 default:
1949 move32_64(masm, in_regs[i], out_regs[c_arg]);
1950 }
1951 }
1952
1953 // point c_arg at the first arg that is already loaded in case we
1954 // need to spill before we call out
1955 int c_arg = total_c_args - total_in_args;
1956
1957 // Pre-load a static method's oop into r14. Used both by locking code and
1958 // the normal JNI call code.
1959 if (method->is_static() && !is_critical_native) {
1960
1961 // load oop into a register
1962 __ movoop(oop_handle_reg, JNIHandles::make_local(Klass::cast(method->method_holder())->java_mirror()));
1963
1964 // Now handlize the static class mirror; it's known not-null.
1965 __ movptr(Address(rsp, klass_offset), oop_handle_reg);
1966 map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
1967
1968 // Now get the handle
1969 __ lea(oop_handle_reg, Address(rsp, klass_offset));
1970 // store the klass handle as second argument
1971 __ movptr(c_rarg1, oop_handle_reg);
1972 // and protect the arg if we must spill
1973 c_arg--;
1974 }
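// Handlizing note (illustrative): the jclass the native receives in
// c_rarg1 is not the mirror itself but the address rsp + klass_offset of
// the stack slot holding it; the OopMap entry for klass_slot_offset lets
// the GC update that slot, so the handle stays valid across a safepoint.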
1975