1 //
2 // Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
3 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 //
5 // This code is free software; you can redistribute it and/or modify it
6 // under the terms of the GNU General Public License version 2 only, as
7 // published by the Free Software Foundation.
8 //
9 // This code is distributed in the hope that it will be useful, but WITHOUT
10 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 // FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 // version 2 for more details (a copy is included in the LICENSE file that
13 // accompanied this code).
14 //
15 // You should have received a copy of the GNU General Public License version
16 // 2 along with this work; if not, write to the Free Software Foundation,
17 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 //
19 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 // or visit www.oracle.com if you need additional information or have any
21 // questions.
22 //
294 // R_G6: reserved by Solaris ABI to tools
295 // R_G7: reserved by Solaris ABI to libthread
296 // R_O7: Used as a temp in many encodings
297 reg_class int_reg(R_G1,R_G3,R_G4,R_G5,R_O0,R_O1,R_O2,R_O3,R_O4,R_O5,R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7,R_I0,R_I1,R_I2,R_I3,R_I4,R_I5);
298
299 // Class for all integer registers, except the G registers. This is used for
300 // encodings which use G registers as temps. The regular inputs to such
301 // instructions use a "notemp_" prefix, as a hack to ensure that the allocator
302 // will not put an input into a temp register.
303 reg_class notemp_int_reg(R_O0,R_O1,R_O2,R_O3,R_O4,R_O5,R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7,R_I0,R_I1,R_I2,R_I3,R_I4,R_I5);
304
// Single-register classes, used when an instruction encoding pins an
// integer operand to one specific register.
reg_class g1_regI(R_G1);
reg_class g3_regI(R_G3);
reg_class g4_regI(R_G4);
reg_class o0_regI(R_O0);
reg_class o7_regI(R_O7);
310
311 // ----------------------------
312 // Pointer Register Classes
313 // ----------------------------
314 #ifdef _LP64
315 // 64-bit build means 64-bit pointers means hi/lo pairs
316 reg_class ptr_reg( R_G1H,R_G1, R_G3H,R_G3, R_G4H,R_G4, R_G5H,R_G5,
317 R_O0H,R_O0, R_O1H,R_O1, R_O2H,R_O2, R_O3H,R_O3, R_O4H,R_O4, R_O5H,R_O5,
318 R_L0H,R_L0, R_L1H,R_L1, R_L2H,R_L2, R_L3H,R_L3, R_L4H,R_L4, R_L5H,R_L5, R_L6H,R_L6, R_L7H,R_L7,
319 R_I0H,R_I0, R_I1H,R_I1, R_I2H,R_I2, R_I3H,R_I3, R_I4H,R_I4, R_I5H,R_I5 );
320 // Lock encodings use G3 and G4 internally
321 reg_class lock_ptr_reg( R_G1H,R_G1, R_G5H,R_G5,
322 R_O0H,R_O0, R_O1H,R_O1, R_O2H,R_O2, R_O3H,R_O3, R_O4H,R_O4, R_O5H,R_O5,
323 R_L0H,R_L0, R_L1H,R_L1, R_L2H,R_L2, R_L3H,R_L3, R_L4H,R_L4, R_L5H,R_L5, R_L6H,R_L6, R_L7H,R_L7,
324 R_I0H,R_I0, R_I1H,R_I1, R_I2H,R_I2, R_I3H,R_I3, R_I4H,R_I4, R_I5H,R_I5 );
325 // Special class for storeP instructions, which can store SP or RPC to TLS.
326 // It is also used for memory addressing, allowing direct TLS addressing.
327 reg_class sp_ptr_reg( R_G1H,R_G1, R_G2H,R_G2, R_G3H,R_G3, R_G4H,R_G4, R_G5H,R_G5,
328 R_O0H,R_O0, R_O1H,R_O1, R_O2H,R_O2, R_O3H,R_O3, R_O4H,R_O4, R_O5H,R_O5, R_SPH,R_SP,
329 R_L0H,R_L0, R_L1H,R_L1, R_L2H,R_L2, R_L3H,R_L3, R_L4H,R_L4, R_L5H,R_L5, R_L6H,R_L6, R_L7H,R_L7,
330 R_I0H,R_I0, R_I1H,R_I1, R_I2H,R_I2, R_I3H,R_I3, R_I4H,R_I4, R_I5H,R_I5, R_FPH,R_FP );
331 // R_L7 is the lowest-priority callee-save (i.e., NS) register
332 // We use it to save R_G2 across calls out of Java.
333 reg_class l7_regP(R_L7H,R_L7);
334
335 // Other special pointer regs
336 reg_class g1_regP(R_G1H,R_G1);
337 reg_class g2_regP(R_G2H,R_G2);
338 reg_class g3_regP(R_G3H,R_G3);
339 reg_class g4_regP(R_G4H,R_G4);
340 reg_class g5_regP(R_G5H,R_G5);
341 reg_class i0_regP(R_I0H,R_I0);
342 reg_class o0_regP(R_O0H,R_O0);
343 reg_class o1_regP(R_O1H,R_O1);
344 reg_class o2_regP(R_O2H,R_O2);
345 reg_class o7_regP(R_O7H,R_O7);
346
347 #else // _LP64
348 // 32-bit build means 32-bit pointers means 1 register.
349 reg_class ptr_reg( R_G1, R_G3,R_G4,R_G5,
350 R_O0,R_O1,R_O2,R_O3,R_O4,R_O5,
351 R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7,
352 R_I0,R_I1,R_I2,R_I3,R_I4,R_I5);
353 // Lock encodings use G3 and G4 internally
354 reg_class lock_ptr_reg(R_G1, R_G5,
355 R_O0,R_O1,R_O2,R_O3,R_O4,R_O5,
356 R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7,
357 R_I0,R_I1,R_I2,R_I3,R_I4,R_I5);
358 // Special class for storeP instructions, which can store SP or RPC to TLS.
359 // It is also used for memory addressing, allowing direct TLS addressing.
360 reg_class sp_ptr_reg( R_G1,R_G2,R_G3,R_G4,R_G5,
361 R_O0,R_O1,R_O2,R_O3,R_O4,R_O5,R_SP,
362 R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7,
363 R_I0,R_I1,R_I2,R_I3,R_I4,R_I5,R_FP);
364 // R_L7 is the lowest-priority callee-save (i.e., NS) register
365 // We use it to save R_G2 across calls out of Java.
366 reg_class l7_regP(R_L7);
367
368 // Other special pointer regs
369 reg_class g1_regP(R_G1);
370 reg_class g2_regP(R_G2);
371 reg_class g3_regP(R_G3);
372 reg_class g4_regP(R_G4);
373 reg_class g5_regP(R_G5);
374 reg_class i0_regP(R_I0);
375 reg_class o0_regP(R_O0);
376 reg_class o1_regP(R_O1);
377 reg_class o2_regP(R_O2);
378 reg_class o7_regP(R_O7);
379 #endif // _LP64
380
381
382 // ----------------------------
383 // Long Register Classes
384 // ----------------------------
385 // Longs in 1 register. Aligned adjacent hi/lo pairs.
386 // Note: O7 is never in this class; it is sometimes used as an encoding temp.
387 reg_class long_reg( R_G1H,R_G1, R_G3H,R_G3, R_G4H,R_G4, R_G5H,R_G5
388 ,R_O0H,R_O0, R_O1H,R_O1, R_O2H,R_O2, R_O3H,R_O3, R_O4H,R_O4, R_O5H,R_O5
389 #ifdef _LP64
390 // 64-bit, longs in 1 register: use all 64-bit integer registers
391 // 32-bit, longs in 1 register: cannot use I's and L's. Restrict to O's and G's.
392 ,R_L0H,R_L0, R_L1H,R_L1, R_L2H,R_L2, R_L3H,R_L3, R_L4H,R_L4, R_L5H,R_L5, R_L6H,R_L6, R_L7H,R_L7
393 ,R_I0H,R_I0, R_I1H,R_I1, R_I2H,R_I2, R_I3H,R_I3, R_I4H,R_I4, R_I5H,R_I5
394 #endif // _LP64
395 );
396
// Single-register long classes (hi/lo pair per register), used when an
// encoding pins a long operand to one specific register.
reg_class g1_regL(R_G1H,R_G1);
reg_class g3_regL(R_G3H,R_G3);
reg_class o2_regL(R_O2H,R_O2);
reg_class o7_regL(R_O7H,R_O7);
401
402 // ----------------------------
403 // Special Class for Condition Code Flags Register
404 reg_class int_flags(CCR);
405 reg_class float_flags(FCC0,FCC1,FCC2,FCC3);
406 reg_class float_flag0(FCC0);
407
408
409 // ----------------------------
410 // Float Point Register Classes
411 // ----------------------------
412 // Skip F30/F31, they are reserved for mem-mem copies
413 reg_class sflt_reg(R_F0,R_F1,R_F2,R_F3,R_F4,R_F5,R_F6,R_F7,R_F8,R_F9,R_F10,R_F11,R_F12,R_F13,R_F14,R_F15,R_F16,R_F17,R_F18,R_F19,R_F20,R_F21,R_F22,R_F23,R_F24,R_F25,R_F26,R_F27,R_F28,R_F29);
414
516
517 // tertiary op of a LoadP or StoreP encoding
518 #define REGP_OP true
519
520 static FloatRegister reg_to_SingleFloatRegister_object(int register_encoding);
521 static FloatRegister reg_to_DoubleFloatRegister_object(int register_encoding);
522 static Register reg_to_register_object(int register_encoding);
523
524 // Used by the DFA in dfa_sparc.cpp.
525 // Check for being able to use a V9 branch-on-register. Requires a
526 // compare-vs-zero, equal/not-equal, of a value which was zero- or sign-
527 // extended. Doesn't work following an integer ADD, for example, because of
528 // overflow (-1 incremented yields 0 plus a carry in the high-order word). On
529 // 32-bit V9 systems, interrupts currently blow away the high-order 32 bits and
530 // replace them with zero, which could become sign-extension in a different OS
531 // release. There's no obvious reason why an interrupt will ever fill these
532 // bits with non-zero junk (the registers are reloaded with standard LD
533 // instructions which either zero-fill or sign-fill).
534 bool can_branch_register( Node *bol, Node *cmp ) {
535 if( !BranchOnRegister ) return false;
536 #ifdef _LP64
537 if( cmp->Opcode() == Op_CmpP )
538 return true; // No problems with pointer compares
539 #endif
540 if( cmp->Opcode() == Op_CmpL )
541 return true; // No problems with long compares
542
543 if( !SparcV9RegsHiBitsZero ) return false;
544 if( bol->as_Bool()->_test._test != BoolTest::ne &&
545 bol->as_Bool()->_test._test != BoolTest::eq )
546 return false;
547
548 // Check for comparing against a 'safe' value. Any operation which
549 // clears out the high word is safe. Thus, loads and certain shifts
550 // are safe, as are non-negative constants. Any operation which
551 // preserves zero bits in the high word is safe as long as each of its
552 // inputs are safe. Thus, phis and bitwise booleans are safe if their
553 // inputs are safe. At present, the only important case to recognize
554 // seems to be loads. Constants should fold away, and shifts &
555 // logicals can use the 'cc' forms.
556 Node *x = cmp->in(1);
557 if( x->is_Load() ) return true;
558 if( x->is_Phi() ) {
559 for( uint i = 1; i < x->req(); i++ )
600 int klass_load_size;
601 if (UseCompressedClassPointers) {
602 assert(Universe::heap() != NULL, "java heap should be initialized");
603 klass_load_size = MacroAssembler::instr_size_for_decode_klass_not_null() + 1*BytesPerInstWord;
604 } else {
605 klass_load_size = 1*BytesPerInstWord;
606 }
607 if (Assembler::is_simm13(v_off)) {
608 return klass_load_size +
609 (2*BytesPerInstWord + // ld_ptr, ld_ptr
610 NativeCall::instruction_size); // call; delay slot
611 } else {
612 return klass_load_size +
613 (4*BytesPerInstWord + // set_hi, set, ld_ptr, ld_ptr
614 NativeCall::instruction_size); // call; delay slot
615 }
616 }
617 }
618
619 int MachCallRuntimeNode::ret_addr_offset() {
620 #ifdef _LP64
621 if (MacroAssembler::is_far_target(entry_point())) {
622 return NativeFarCall::instruction_size;
623 } else {
624 return NativeCall::instruction_size;
625 }
626 #else
627 return NativeCall::instruction_size; // call; delay slot
628 #endif
629 }
630
// Indicate if the safepoint node needs the polling page as an input.
// Since Sparc does not have absolute addressing, it does: the poll address
// must be materialized in a register, so it is passed as a node input.
bool SafePointNode::needs_polling_address_input() {
  return true;
}
636
// emit an interrupt that is caught by the debugger (for debugging compiler)
void emit_break(CodeBuffer &cbuf) {
  MacroAssembler _masm(&cbuf);  // the '__' macro refers to this local assembler
  __ breakpoint_trap();         // single debugger-visible trap instruction
}
642
643 #ifndef PRODUCT
// Debug listing for the breakpoint node ("TA" -- presumably the SPARC
// trap-always mnemonic matching the emitted breakpoint trap).
void MachBreakpointNode::format( PhaseRegAlloc *, outputStream *st ) const {
  st->print("TA");
}
647 #endif
648
1007 MacroAssembler _masm(&cbuf);
1008 __ set_inst_mark();
1009
1010 // We flush the current window just so that there is a valid stack copy
1011 // the fact that the current window becomes active again instantly is
1012 // not a problem there is nothing live in it.
1013
1014 #ifdef ASSERT
1015 int startpos = __ offset();
1016 #endif /* ASSERT */
1017
1018 __ call((address)entry_point, rspec);
1019
1020 if (preserve_g2) __ delayed()->mov(G2, L7);
1021 else __ delayed()->nop();
1022
1023 if (preserve_g2) __ mov(L7, G2);
1024
1025 #ifdef ASSERT
1026 if (preserve_g2 && (VerifyCompiledCode || VerifyOops)) {
1027 #ifdef _LP64
1028 // Trash argument dump slots.
1029 __ set(0xb0b8ac0db0b8ac0d, G1);
1030 __ mov(G1, G5);
1031 __ stx(G1, SP, STACK_BIAS + 0x80);
1032 __ stx(G1, SP, STACK_BIAS + 0x88);
1033 __ stx(G1, SP, STACK_BIAS + 0x90);
1034 __ stx(G1, SP, STACK_BIAS + 0x98);
1035 __ stx(G1, SP, STACK_BIAS + 0xA0);
1036 __ stx(G1, SP, STACK_BIAS + 0xA8);
1037 #else // _LP64
1038 // this is also a native call, so smash the first 7 stack locations,
1039 // and the various registers
1040
1041 // Note: [SP+0x40] is sp[callee_aggregate_return_pointer_sp_offset],
1042 // while [SP+0x44..0x58] are the argument dump slots.
1043 __ set((intptr_t)0xbaadf00d, G1);
1044 __ mov(G1, G5);
1045 __ sllx(G1, 32, G1);
1046 __ or3(G1, G5, G1);
1047 __ mov(G1, G5);
1048 __ stx(G1, SP, 0x40);
1049 __ stx(G1, SP, 0x48);
1050 __ stx(G1, SP, 0x50);
1051 __ stw(G1, SP, 0x58); // Do not trash [SP+0x5C] which is a usable spill slot
1052 #endif // _LP64
1053 }
1054 #endif /*ASSERT*/
1055 }
1056
1057 //=============================================================================
1058 // REQUIRED FUNCTIONALITY for encoding
void emit_lo(CodeBuffer &cbuf, int val) { }  // intentionally empty on SPARC
void emit_hi(CodeBuffer &cbuf, int val) { }  // intentionally empty on SPARC
1061
1062
1063 //=============================================================================
1064 const RegMask& MachConstantBaseNode::_out_RegMask = PTR_REG_mask();
1065
1066 int Compile::ConstantTable::calculate_table_base_offset() const {
1067 if (UseRDPCForConstantTableBase) {
1068 // The table base offset might be less but then it fits into
1069 // simm13 anyway and we are good (cf. MachConstantBaseNode::emit).
1070 return Assembler::min_simm13();
1071 } else {
1072 int offset = -(size() / 2);
1245 Compile::ConstantTable& constant_table = C->constant_table();
1246 constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
1247 }
1248 }
1249
// Size of the emitted prologue; defers to the generic MachNode::size computation.
uint MachPrologNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
1253
// Upper bound on the number of relocation entries the prologue may emit.
int MachPrologNode::reloc() const {
  return 10; // a large enough number
}
1257
1258 //=============================================================================
1259 #ifndef PRODUCT
1260 void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
1261 Compile* C = ra_->C;
1262
1263 if(do_polling() && ra_->C->is_method_compilation()) {
1264 st->print("SETHI #PollAddr,L0\t! Load Polling address\n\t");
1265 #ifdef _LP64
1266 st->print("LDX [L0],G0\t!Poll for Safepointing\n\t");
1267 #else
1268 st->print("LDUW [L0],G0\t!Poll for Safepointing\n\t");
1269 #endif
1270 }
1271
1272 if(do_polling()) {
1273 if (UseCBCond && !ra_->C->is_method_compilation()) {
1274 st->print("NOP\n\t");
1275 }
1276 st->print("RET\n\t");
1277 }
1278
1279 st->print("RESTORE");
1280 }
1281 #endif
1282
1283 void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1284 MacroAssembler _masm(&cbuf);
1285 Compile* C = ra_->C;
1286
1287 __ verify_thread();
1288
1289 if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
1455 return;
1456 }
1457 impl_mov_helper(cbuf, src_first, dst_first, Assembler::mftoi_op3, Assembler::mstouw_opf, "MOVSTOUW", st);
1458 }
1459 // Check for int->float copy on T4
1460 if (src_first_rc == rc_int && dst_first_rc == rc_float && UseVIS >= 3) {
1461 // Further check for aligned-adjacent pair, so we can use a double move
1462 if ((src_first & 1) == 0 && src_first + 1 == src_second && (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
1463 impl_mov_helper(cbuf, src_first, dst_first, Assembler::mftoi_op3, Assembler::mxtod_opf, "MOVXTOD", st);
1464 return;
1465 }
1466 impl_mov_helper(cbuf, src_first, dst_first, Assembler::mftoi_op3, Assembler::mwtos_opf, "MOVWTOS", st);
1467 }
1468
1469 // --------------------------------------
1470 // In the 32-bit 1-reg-longs build ONLY, I see mis-aligned long destinations.
1471 // In such cases, I have to do the big-endian swap. For aligned targets, the
1472 // hardware does the flop for me. Doubles are always aligned, so no problem
1473 // there. Misaligned sources only come from native-long-returns (handled
1474 // special below).
1475 #ifndef _LP64
1476 if (src_first_rc == rc_int && // source is already big-endian
1477 src_second_rc != rc_bad && // 64-bit move
1478 ((dst_first & 1) != 0 || dst_second != dst_first + 1)) { // misaligned dst
1479 assert((src_first & 1) == 0 && src_second == src_first + 1, "source must be aligned");
1480 // Do the big-endian flop.
1481 OptoReg::Name tmp = dst_first ; dst_first = dst_second ; dst_second = tmp ;
1482 enum RC tmp_rc = dst_first_rc; dst_first_rc = dst_second_rc; dst_second_rc = tmp_rc;
1483 }
1484 #endif
1485
1486 // --------------------------------------
1487 // Check for integer reg-reg copy
1488 if (src_first_rc == rc_int && dst_first_rc == rc_int) {
1489 #ifndef _LP64
1490 if (src_first == R_O0_num && src_second == R_O1_num) { // Check for the evil O0/O1 native long-return case
1491 // Note: The _first and _second suffixes refer to the addresses of the the 2 halves of the 64-bit value
1492 // as stored in memory. On a big-endian machine like SPARC, this means that the _second
1493 // operand contains the least significant word of the 64-bit value and vice versa.
1494 OptoReg::Name tmp = OptoReg::Name(R_O7_num);
1495 assert((dst_first & 1) == 0 && dst_second == dst_first + 1, "return a native O0/O1 long to an aligned-adjacent 64-bit reg" );
1496 // Shift O0 left in-place, zero-extend O1, then OR them into the dst
1497 if ( cbuf ) {
1498 emit3_simm13(*cbuf, Assembler::arith_op, Matcher::_regEncode[tmp], Assembler::sllx_op3, Matcher::_regEncode[src_first], 0x1020);
1499 emit3_simm13(*cbuf, Assembler::arith_op, Matcher::_regEncode[src_second], Assembler::srl_op3, Matcher::_regEncode[src_second], 0x0000);
1500 emit3 (*cbuf, Assembler::arith_op, Matcher::_regEncode[dst_first], Assembler:: or_op3, Matcher::_regEncode[tmp], 0, Matcher::_regEncode[src_second]);
1501 #ifndef PRODUCT
1502 } else {
1503 print_helper(st, "SLLX R_%s,32,R_%s\t! Move O0-first to O7-high\n\t", OptoReg::regname(src_first), OptoReg::regname(tmp));
1504 print_helper(st, "SRL R_%s, 0,R_%s\t! Zero-extend O1\n\t", OptoReg::regname(src_second), OptoReg::regname(src_second));
1505 print_helper(st, "OR R_%s,R_%s,R_%s\t! spill",OptoReg::regname(tmp), OptoReg::regname(src_second), OptoReg::regname(dst_first));
1506 #endif
1507 }
1508 return;
1509 } else if (dst_first == R_I0_num && dst_second == R_I1_num) {
1510 // returning a long value in I0/I1
1511 // a SpillCopy must be able to target a return instruction's reg_class
1512 // Note: The _first and _second suffixes refer to the addresses of the the 2 halves of the 64-bit value
1513 // as stored in memory. On a big-endian machine like SPARC, this means that the _second
1514 // operand contains the least significant word of the 64-bit value and vice versa.
1515 OptoReg::Name tdest = dst_first;
1516
1517 if (src_first == dst_first) {
1518 tdest = OptoReg::Name(R_O7_num);
1519 }
1520
1521 if (cbuf) {
1522 assert((src_first & 1) == 0 && (src_first + 1) == src_second, "return value was in an aligned-adjacent 64-bit reg");
1523 // Shift value in upper 32-bits of src to lower 32-bits of I0; move lower 32-bits to I1
1524 // ShrL_reg_imm6
1525 emit3_simm13(*cbuf, Assembler::arith_op, Matcher::_regEncode[tdest], Assembler::srlx_op3, Matcher::_regEncode[src_second], 32 | 0x1000);
1526 // ShrR_reg_imm6 src, 0, dst
1527 emit3_simm13(*cbuf, Assembler::arith_op, Matcher::_regEncode[dst_second], Assembler::srl_op3, Matcher::_regEncode[src_first], 0x0000);
1528 if (tdest != dst_first) {
1529 emit3 (*cbuf, Assembler::arith_op, Matcher::_regEncode[dst_first], Assembler::or_op3, 0/*G0*/, 0/*op2*/, Matcher::_regEncode[tdest]);
1530 }
1531 }
1532 #ifndef PRODUCT
1533 else {
1534 print_helper(st, "SRLX R_%s,32,R_%s\t! Extract MSW\n\t",OptoReg::regname(src_second),OptoReg::regname(tdest));
1535 print_helper(st, "SRL R_%s, 0,R_%s\t! Extract LSW\n\t",OptoReg::regname(src_first),OptoReg::regname(dst_second));
1536 if (tdest != dst_first) {
1537 print_helper(st, "MOV R_%s,R_%s\t! spill\n\t", OptoReg::regname(tdest), OptoReg::regname(dst_first));
1538 }
1539 }
1540 #endif // PRODUCT
1541 return size+8;
1542 }
1543 #endif // !_LP64
1544 // Else normal reg-reg copy
1545 assert(src_second != dst_first, "smashed second before evacuating it");
1546 impl_mov_helper(cbuf, src_first, dst_first, Assembler::or_op3, 0, "MOV ", st);
1547 assert((src_first & 1) == 0 && (dst_first & 1) == 0, "never move second-halves of int registers");
1548 // This moves an aligned adjacent pair.
1549 // See if we are done.
1550 if (src_first + 1 == src_second && dst_first + 1 == dst_second) {
1551 return;
1552 }
1553 }
1554
1555 // Check for integer store
1556 if (src_first_rc == rc_int && dst_first_rc == rc_stack) {
1557 int offset = ra_->reg2offset(dst_first);
1558 // Further check for aligned-adjacent pair, so we can use a double store
1559 if ((src_first & 1) == 0 && src_first + 1 == src_second && (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
1560 impl_helper(mach, cbuf, ra_, false, offset, src_first, Assembler::stx_op3, "STX ", st);
1561 return;
1562 }
1563 impl_helper(mach, cbuf, ra_, false, offset, src_first, Assembler::stw_op3, "STW ", st);
1597
1598 // Check for float load
1599 if (dst_first_rc == rc_float && src_first_rc == rc_stack) {
1600 int offset = ra_->reg2offset(src_first);
1601 // Further check for aligned-adjacent pair, so we can use a double load
1602 if ((src_first & 1) == 0 && src_first + 1 == src_second && (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
1603 impl_helper(mach, cbuf, ra_, true, offset, dst_first, Assembler::lddf_op3, "LDDF", st);
1604 return;
1605 }
1606 impl_helper(mach, cbuf, ra_, true, offset, dst_first, Assembler::ldf_op3, "LDF ", st);
1607 }
1608
1609 // --------------------------------------------------------------------
1610 // Check for hi bits still needing moving. Only happens for misaligned
1611 // arguments to native calls.
1612 if (src_second == dst_second) {
1613 return; // Self copy; no move
1614 }
1615 assert(src_second_rc != rc_bad && dst_second_rc != rc_bad, "src_second & dst_second cannot be Bad");
1616
1617 #ifndef _LP64
1618 // In the LP64 build, all registers can be moved as aligned/adjacent
1619 // pairs, so there's never any need to move the high bits separately.
1620 // The 32-bit builds have to deal with the 32-bit ABI which can force
1621 // all sorts of silly alignment problems.
1622
1623 // Check for integer reg-reg copy. Hi bits are stuck up in the top
1624 // 32-bits of a 64-bit register, but are needed in low bits of another
1625 // register (else it's a hi-bits-to-hi-bits copy which should have
1626 // happened already as part of a 64-bit move)
1627 if (src_second_rc == rc_int && dst_second_rc == rc_int) {
1628 assert((src_second & 1) == 1, "its the evil O0/O1 native return case");
1629 assert((dst_second & 1) == 0, "should have moved with 1 64-bit move");
1630 // Shift src_second down to dst_second's low bits.
1631 if (cbuf) {
1632 emit3_simm13(*cbuf, Assembler::arith_op, Matcher::_regEncode[dst_second], Assembler::srlx_op3, Matcher::_regEncode[src_second-1], 0x1020);
1633 #ifndef PRODUCT
1634 } else {
1635 print_helper(st, "SRLX R_%s,32,R_%s\t! spill: Move high bits down low", OptoReg::regname(src_second - 1), OptoReg::regname(dst_second));
1636 #endif
1637 }
1638 return;
1639 }
1640
1641 // Check for high word integer store. Must down-shift the hi bits
1642 // into a temp register, then fall into the case of storing int bits.
1643 if (src_second_rc == rc_int && dst_second_rc == rc_stack && (src_second & 1) == 1) {
1644 // Shift src_second down to dst_second's low bits.
1645 if (cbuf) {
1646 emit3_simm13(*cbuf, Assembler::arith_op, Matcher::_regEncode[R_O7_num], Assembler::srlx_op3, Matcher::_regEncode[src_second-1], 0x1020);
1647 #ifndef PRODUCT
1648 } else {
1649 print_helper(st, "SRLX R_%s,32,R_%s\t! spill: Move high bits down low", OptoReg::regname(src_second-1), OptoReg::regname(R_O7_num));
1650 #endif
1651 }
1652 src_second = OptoReg::Name(R_O7_num); // Not R_O7H_num!
1653 }
1654
1655 // Check for high word integer load
1656 if (dst_second_rc == rc_int && src_second_rc == rc_stack)
1657 return impl_helper(this, cbuf, ra_, true, ra_->reg2offset(src_second), dst_second, Assembler::lduw_op3, "LDUW", size, st);
1658
1659 // Check for high word integer store
1660 if (src_second_rc == rc_int && dst_second_rc == rc_stack)
1661 return impl_helper(this, cbuf, ra_, false, ra_->reg2offset(dst_second), src_second, Assembler::stw_op3, "STW ", size, st);
1662
1663 // Check for high word float store
1664 if (src_second_rc == rc_float && dst_second_rc == rc_stack)
1665 return impl_helper(this, cbuf, ra_, false, ra_->reg2offset(dst_second), src_second, Assembler::stf_op3, "STF ", size, st);
1666
1667 #endif // !_LP64
1668
1669 Unimplemented();
1670 }
1671
// Emit (cbuf != NULL) or format (st != NULL) this spill copy; the real work
// is delegated to mach_spill_copy_implementation_helper. Size computation via
// do_size is not supported here, and the returned size is always 0.
uint MachSpillCopyNode::implementation(CodeBuffer *cbuf,
                                       PhaseRegAlloc *ra_,
                                       bool do_size,
                                       outputStream* st) const {
  assert(!do_size, "not supported");
  mach_spill_copy_implementation_helper(this, cbuf, ra_, st);
  return 0;
}
1680
1681 #ifndef PRODUCT
// Debug listing: run the shared implementation in print-only mode (no CodeBuffer).
void MachSpillCopyNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
  implementation( NULL, ra_, false, st );
}
1685 #endif
1686
1687 void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1688 implementation( &cbuf, ra_, false, NULL );
1726 int reg = ra_->get_encode(this);
1727
1728 if (Assembler::is_simm13(offset)) {
1729 __ add(SP, offset, reg_to_register_object(reg));
1730 } else {
1731 __ set(offset, O7);
1732 __ add(SP, O7, reg_to_register_object(reg));
1733 }
1734 }
1735
// Size of the code emitted for this BoxLock, measured by scratch emission.
uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_)
  assert(ra_ == ra_->C->regalloc(), "sanity");
  return ra_->C->scratch_emit_size(this);
}
1741
1742 //=============================================================================
1743 #ifndef PRODUCT
// Debug listing of the Unverified Entry Point: the pseudo-assembly for the
// inline-cache klass check (load receiver klass, compare with the expected
// klass in G3, trap on mismatch). Mirrors what MachUEPNode::emit produces.
void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
  st->print_cr("\nUEP:");
#ifdef _LP64
  if (UseCompressedClassPointers) {
    assert(Universe::heap() != NULL, "java heap should be initialized");
    // Compressed klass: narrow load, then decode (shift and/or add base).
    st->print_cr("\tLDUW [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check - compressed klass");
    if (Universe::narrow_klass_base() != 0) {
      st->print_cr("\tSET Universe::narrow_klass_base,R_G6_heap_base");
      if (Universe::narrow_klass_shift() != 0) {
        st->print_cr("\tSLL R_G5,Universe::narrow_klass_shift,R_G5");
      }
      st->print_cr("\tADD R_G5,R_G6_heap_base,R_G5");
      st->print_cr("\tSET Universe::narrow_ptrs_base,R_G6_heap_base");
    } else {
      st->print_cr("\tSLL R_G5,Universe::narrow_klass_shift,R_G5");
    }
  } else {
    st->print_cr("\tLDX [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check");
  }
  st->print_cr("\tCMP R_G5,R_G3" );
  st->print ("\tTne xcc,R_G0+ST_RESERVED_FOR_USER_0+2");
#else // _LP64
  st->print_cr("\tLDUW [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check");
  st->print_cr("\tCMP R_G5,R_G3" );
  st->print ("\tTne icc,R_G0+ST_RESERVED_FOR_USER_0+2");
#endif // _LP64
}
1771 #endif
1772
1773 void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1774 MacroAssembler _masm(&cbuf);
1775 Register G5_ic_reg = reg_to_register_object(Matcher::inline_cache_reg_encode());
1776 Register temp_reg = G3;
1777 assert( G5_ic_reg != temp_reg, "conflicting registers" );
1778
1779 // Load klass from receiver
1780 __ load_klass(O0, temp_reg);
1781 // Compare against expected klass
1782 __ cmp(temp_reg, G5_ic_reg);
1783 // Branch to miss code, checks xcc or icc depending
1784 __ trap(Assembler::notEqual, Assembler::ptr_cc, G0, ST_RESERVED_FOR_USER_0+2);
1785 }
1786
// Size of the emitted UEP code; defers to the generic MachNode::size computation.
uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
// Map an ADLC double-float register encoding back to its FloatRegister object.
// The asserts spot-check two representative registers to confirm the encoding
// scheme matches the register definitions.
static FloatRegister reg_to_DoubleFloatRegister_object(int register_encoding) {
  assert(F4->encoding(FloatRegisterImpl::D) == R_F4_enc, "right coding");
  assert(F32->encoding(FloatRegisterImpl::D) == R_D32_enc, "right coding");
  return as_DoubleFloatRegister(register_encoding);
}
1862
1863 const bool Matcher::match_rule_supported(int opcode) {
1864 if (!has_match_rule(opcode))
1865 return false;
1866
1867 switch (opcode) {
1868 case Op_CountLeadingZerosI:
1869 case Op_CountLeadingZerosL:
1870 case Op_CountTrailingZerosI:
1871 case Op_CountTrailingZerosL:
1872 case Op_PopCountI:
1873 case Op_PopCountL:
1874 if (!UsePopCountInstruction)
1875 return false;
1876 case Op_CompareAndSwapL:
1877 #ifdef _LP64
1878 case Op_CompareAndSwapP:
1879 #endif
1880 if (!VM_Version::supports_cx8())
1881 return false;
1882 break;
1883 }
1884
1885 return true; // Per default match rules are supported.
1886 }
1887
1888 const bool Matcher::match_rule_supported_vector(int opcode, int vlen) {
1889
1890 // TODO
1891 // identify extra cases that we might want to provide match rules for
1892 // e.g. Op_ vector nodes and other intrinsics while guarding with vlen
1893 bool ret_value = match_rule_supported(opcode);
1894 // Add rules here.
1895
1896 return ret_value; // Per default match rules are supported.
1897 }
1898
1899 const bool Matcher::has_predicated_vectors(void) {
2010 return true;
2011 }
2012
// Decide whether constant klasses should be materialized via
// ConNKlass+DecodeNKlass rather than a plain ConP constant.
bool Matcher::const_klass_prefer_decode() {
  // TODO: Check if loading ConP from TOC in heap-based mode is better:
  // Prefer ConNKlass+DecodeNKlass over ConP in simple compressed klass mode.
  // return Universe::narrow_klass_base() == NULL;
  return true;
}
2019
2020 // Is it better to copy float constants, or load them directly from memory?
2021 // Intel can load a float constant from a direct address, requiring no
2022 // extra registers. Most RISCs will have to materialize an address into a
2023 // register first, so they would do better to copy the constant from stack.
2024 const bool Matcher::rematerialize_float_constants = false;
2025
2026 // If CPU can load and store mis-aligned doubles directly then no fixup is
2027 // needed. Else we split the double into 2 integer pieces and move it
2028 // piece-by-piece. Only happens when passing doubles into C code as the
2029 // Java calling convention forces doubles to be aligned.
2030 #ifdef _LP64
2031 const bool Matcher::misaligned_doubles_ok = true;
2032 #else
2033 const bool Matcher::misaligned_doubles_ok = false;
2034 #endif
2035
// No-op on SPARC: no platform-specific fixup is needed for implicit null checks.
void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
}
2039
2040 // Advertise here if the CPU requires explicit rounding operations
2041 // to implement the UseStrictFP mode.
2042 const bool Matcher::strict_fp_requires_explicit_rounding = false;
2043
// Are floats converted to double when stored to stack during deoptimization?
// Sparc does not handle callee-save floats, so no conversion is needed.
bool Matcher::float_in_double() { return false; }
2047
2048 // Do ints take an entire long register or just half?
2049 // Note that we if-def off of _LP64.
2050 // The relevant question is how the int is callee-saved. In _LP64
2051 // the whole long is written but de-opt'ing will have to extract
2052 // the relevant 32 bits, in not-_LP64 only the low 32 bits is written.
2053 #ifdef _LP64
2054 const bool Matcher::int_in_long = true;
2055 #else
2056 const bool Matcher::int_in_long = false;
2057 #endif
2058
2059 // Return whether or not this register is ever used as an argument. This
2060 // function is used on startup to build the trampoline stubs in generateOptoStub.
2061 // Registers not mentioned will be killed by the VM call in the trampoline, and
2062 // arguments in those registers not be available to the callee.
2063 bool Matcher::can_be_java_arg( int reg ) {
2064 // Standard sparc 6 args in registers
2065 if( reg == R_I0_num ||
2066 reg == R_I1_num ||
2067 reg == R_I2_num ||
2068 reg == R_I3_num ||
2069 reg == R_I4_num ||
2070 reg == R_I5_num ) return true;
2071 #ifdef _LP64
2072 // 64-bit builds can pass 64-bit pointers and longs in
2073 // the high I registers
2074 if( reg == R_I0H_num ||
2075 reg == R_I1H_num ||
2076 reg == R_I2H_num ||
2077 reg == R_I3H_num ||
2078 reg == R_I4H_num ||
2079 reg == R_I5H_num ) return true;
2080
2081 if ((UseCompressedOops) && (reg == R_G6_num || reg == R_G6H_num)) {
2082 return true;
2083 }
2084
2085 #else
2086 // 32-bit builds with longs-in-one-entry pass longs in G1 & G4.
2087 // Longs cannot be passed in O regs, because O regs become I regs
2088 // after a 'save' and I regs get their high bits chopped off on
2089 // interrupt.
2090 if( reg == R_G1H_num || reg == R_G1_num ) return true;
2091 if( reg == R_G4H_num || reg == R_G4_num ) return true;
2092 #endif
2093 // A few float args in registers
2094 if( reg >= R_F0_num && reg <= R_F7_num ) return true;
2095
2096 return false;
2097 }
2098
// A register is a valid spill target for an argument exactly when it can
// carry a Java argument in the first place.
bool Matcher::is_spillable_arg( int reg ) {
  return can_be_java_arg(reg);
}
2102
2103 bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) {
2104 // Use the hardware SDIVX instruction when it is
2105 // faster than code which uses multiply.
// Note: the decision depends only on the CPU capability, not on the
// particular divisor value, so the parameter is intentionally unused.
2106 return VM_Version::has_fast_idiv();
2107 }
2108
2109 // Register for DIVI projection of divmodI
// Never expected to be called on SPARC (no divmodI match rule);
// guarded by ShouldNotReachHere, with a dummy return to satisfy the compiler.
2110 RegMask Matcher::divI_proj_mask() {
2111 ShouldNotReachHere();
2112 return RegMask();
2135 }
2136
2137
// The Matcher requires explicit type information on ConvI2L nodes.
2138 const bool Matcher::convi2l_type_required = true;
2139
2140 // Should the Matcher clone shifts on addressing modes, expecting them
2141 // to be subsumed into complex addressing expressions or compute them
2142 // into registers?
// SPARC only folds base+offset forms here; delegate to the shared helper.
2143 bool Matcher::clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
2144 return clone_base_plus_offset_address(m, mstack, address_visited);
2145 }
2146
// No address-expression reshaping is performed on SPARC (intentionally empty).
2147 void Compile::reshape_address(AddPNode* addp) {
2148 }
2149
2150 %}
2151
2152
2153 // The intptr_t operand types, defined by textual substitution.
2154 // (Cf. opto/type.hpp. This lets us avoid many, many other ifdefs.)
// Each "X" alias maps to the long (L) flavor on 64-bit and the int (I)
// flavor on 32-bit, so pointer-width rules can be written once.
2155 #ifdef _LP64
2156 #define immX immL
2157 #define immX13 immL13
2158 #define immX13m7 immL13m7
2159 #define iRegX iRegL
2160 #define g1RegX g1RegL
2161 #else
2162 #define immX immI
2163 #define immX13 immI13
2164 #define immX13m7 immI13m7
2165 #define iRegX iRegI
2166 #define g1RegX g1RegI
2167 #endif
2168
2169 //----------ENCODING BLOCK-----------------------------------------------------
2170 // This block specifies the encoding classes used by the compiler to output
2171 // byte streams. Encoding classes are parameterized macros used by
2172 // Machine Instruction Nodes in order to generate the bit encoding of the
2173 // instruction. Operands specify their base encoding interface with the
2174 // interface keyword. Four interfaces are currently supported:
2175 // REG_INTER, CONST_INTER, MEMORY_INTER, & COND_INTER. REG_INTER causes an
2176 // operand to generate a function which returns its register number when
2177 // queried. CONST_INTER causes an operand to generate a function which
2178 // returns the value of the constant when queried. MEMORY_INTER causes an
2179 // operand to generate four functions which return the Base Register, the
2180 // Index Register, the Scale Value, and the Offset Value of the operand when
2181 // queried. COND_INTER causes an operand to generate six functions which
2182 // return the encoding code (ie - encoding bits for the instruction)
2183 // associated with each basic boolean condition for a conditional instruction.
2184 //
2185 // Instructions specify two basic values for encoding. Again, a function
2186 // is available to check if the constant displacement is an oop. They use the
2187 // ins_encode keyword to specify their encoding classes (which must be
// Format-3 encoding: rd = rs1 <op> imm5 (shift count masked to 5 bits).
2309 enc_class form3_rs1_imm5_rd( iRegI rs1, immU5 imm5, iRegI rd ) %{
2310 emit3_simm13( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, $imm5$$constant & 0x1F );
2311 %}
2312
// Format-3 encoding for 64-bit shifts: 6-bit count, with bit 12 set
// (presumably the X bit selecting the extended 64-bit shift form — see
// the SPARC V9 manual for the shcnt64 encoding).
2313 enc_class form3_sd_rs1_imm6_rd( iRegL rs1, immU6 imm6, iRegL rd ) %{
2314 emit3_simm13( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, ($imm6$$constant & 0x3F) | 0x1000 );
2315 %}
2316
// Format-3 register-register encoding for 64-bit shift/double-word ops
// (0x80 in the opf/x field position).
2317 enc_class form3_sd_rs1_rs2_rd( iRegL rs1, iRegI rs2, iRegL rd ) %{
2318 emit3( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, 0x80, $rs2$$reg );
2319 %}
2320
// Format-3 encoding: rd = rs1 <op> simm13 (signed 13-bit immediate, unmasked).
2321 enc_class form3_rs1_simm13_rd( iRegI rs1, immI13 simm13, iRegI rd ) %{
2322 emit3_simm13( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, $simm13$$constant );
2323 %}
2324
// O1 = O7 + pc_return_offset: materialize the return PC for the runtime.
2325 enc_class move_return_pc_to_o1() %{
2326 emit3_simm13( cbuf, Assembler::arith_op, R_O1_enc, Assembler::add_op3, R_O7_enc, frame::pc_return_offset );
2327 %}
2328
2329 #ifdef _LP64
2330 /* %%% merge with enc_to_bool */
// Pointer -> boolean: conditionally move 1 into dst when src is non-zero.
// NOTE(review): movr only writes dst when the condition holds, so this
// assumes dst already contains 0 — verify against the matching instruct.
2331 enc_class enc_convP2B( iRegI dst, iRegP src ) %{
2332 MacroAssembler _masm(&cbuf);
2333
2334 Register src_reg = reg_to_register_object($src$$reg);
2335 Register dst_reg = reg_to_register_object($dst$$reg);
2336 __ movr(Assembler::rc_nz, src_reg, 1, dst_reg);
2337 %}
2338 #endif
2339
2340 enc_class enc_cadd_cmpLTMask( iRegI p, iRegI q, iRegI y, iRegI tmp ) %{
2341 // (Set p (AddI (AndI (CmpLTMask p q) y) (SubI p q)))
2342 MacroAssembler _masm(&cbuf);
2343
2344 Register p_reg = reg_to_register_object($p$$reg);
2345 Register q_reg = reg_to_register_object($q$$reg);
2346 Register y_reg = reg_to_register_object($y$$reg);
2347 Register tmp_reg = reg_to_register_object($tmp$$reg);
2348
// p = p - q, setting the integer condition codes;
// tmp = (p - q) + y; if (p - q) < 0 then p = tmp, else p keeps p - q.
// This realizes the masked-add pattern above without a branch.
2349 __ subcc( p_reg, q_reg, p_reg );
2350 __ add ( p_reg, y_reg, tmp_reg );
2351 __ movcc( Assembler::less, false, Assembler::icc, tmp_reg, p_reg );
2352 %}
2353
2354 enc_class form_d2i_helper(regD src, regF dst) %{
2355 // fcmp %fcc0,$src,$src
2356 emit3( cbuf, Assembler::arith_op , Assembler::fcc0, Assembler::fpop2_op3, $src$$reg, Assembler::fcmpd_opf, $src$$reg );
2357 // branch %fcc0 not-nan, predict taken
2358 emit2_19( cbuf, Assembler::branch_op, 0/*annul*/, Assembler::f_ordered, Assembler::fbp_op2, Assembler::fcc0, 1/*predict taken*/, 4 );
// Materialize an arbitrary 32-bit constant into rd (MacroAssembler::set
// picks the shortest sethi/or sequence).
2609 enc_class Set32( immI src, iRegI rd ) %{
2610 MacroAssembler _masm(&cbuf);
2611 __ set($src$$constant, reg_to_register_object($rd$$reg));
2612 %}
2613
// After a call, optionally verify that SP + frame_size still equals FP;
// traps (breakpoint) on mismatch. Only emitted under VerifyStackAtCalls.
2614 enc_class call_epilog %{
2615 if( VerifyStackAtCalls ) {
2616 MacroAssembler _masm(&cbuf);
2617 int framesize = ra_->C->frame_size_in_bytes();
2618 Register temp_reg = G3;
2619 __ add(SP, framesize, temp_reg);
2620 __ cmp(temp_reg, FP);
2621 __ breakpoint_trap(Assembler::notEqual, Assembler::ptr_cc);
2622 }
2623 %}
2624
2625 // Long values come back from native calls in O0:O1 in the 32-bit VM, copy the value
2626 // to G1 so the register allocator will not have to deal with the misaligned register
2627 // pair.
2628 enc_class adjust_long_from_native_call %{
2629 #ifndef _LP64
2630 if (returns_long()) {
// G1 = (O0 << 32) | zero_extend(O1): reassemble the 64-bit result.
// 0x1020 sets bit 12 (presumably the X bit for the 64-bit shift form)
// plus a shift count of 32 — TODO confirm against the V9 sllx encoding.
2631 // sllx O0,32,O0
2632 emit3_simm13( cbuf, Assembler::arith_op, R_O0_enc, Assembler::sllx_op3, R_O0_enc, 0x1020 );
2633 // srl O1,0,O1
2634 emit3_simm13( cbuf, Assembler::arith_op, R_O1_enc, Assembler::srl_op3, R_O1_enc, 0x0000 );
2635 // or O0,O1,G1
2636 emit3 ( cbuf, Assembler::arith_op, R_G1_enc, Assembler:: or_op3, R_O0_enc, 0, R_O1_enc );
2637 }
2638 #endif
2639 %}
2640
2641 enc_class Java_To_Runtime (method meth) %{ // CALL Java_To_Runtime
2642 // CALL directly to the runtime
2643 // The user of this is responsible for ensuring that R_L7 is empty (killed).
2644 emit_call_reloc(cbuf, $meth$$method, runtime_call_Relocation::spec(), /*preserve_g2=*/true);
2645 %}
2646
// Save SP in L7 around a method-handle call so it can be restored below.
2647 enc_class preserve_SP %{
2648 MacroAssembler _masm(&cbuf);
2649 __ mov(SP, L7_mh_SP_save);
2650 %}
2651
// Restore SP from the value saved by preserve_SP.
2652 enc_class restore_SP %{
2653 MacroAssembler _masm(&cbuf);
2654 __ mov(L7_mh_SP_save, SP);
2655 %}
2656
2657 enc_class Java_Static_Call (method meth) %{ // JAVA STATIC CALL
2658 // CALL to fixup routine. Fixup routine uses ScopeDesc info to determine
3085 // varargs C calling conventions.
3086 // Note 3: Region 0-3 is even aligned, with pad2 as needed. Region 3-5 is
3087 // even aligned with pad0 as needed.
3088 // Region 6 is even aligned. Region 6-7 is NOT even aligned;
3089 // region 6-11 is even aligned; it may be padded out more so that
3090 // the region from SP to FP meets the minimum stack alignment.
3091
// Frame layout and calling-convention description consumed by ADLC.
3092 frame %{
3093 // What direction does stack grow in (assumed to be same for native & Java)
3094 stack_direction(TOWARDS_LOW);
3095
3096 // These two registers define part of the calling convention
3097 // between compiled code and the interpreter.
3098 inline_cache_reg(R_G5); // Inline Cache Register or Method* for I2C
3099 interpreter_method_oop_reg(R_G5); // Method Oop Register when calling interpreter
3100
3101 // Optional: name the operand used by cisc-spilling to access [stack_pointer + offset]
3102 cisc_spilling_operand_name(indOffset);
3103
3104 // Number of stack slots consumed by a Monitor enter
3105 #ifdef _LP64
3106 sync_stack_slots(2);
3107 #else
3108 sync_stack_slots(1);
3109 #endif
3110
3111 // Compiled code's Frame Pointer
3112 frame_pointer(R_SP);
3113
3114 // Stack alignment requirement
3115 stack_alignment(StackAlignmentInBytes);
3116 // LP64: Alignment size in bytes (128-bit -> 16 bytes)
3117 // !LP64: Alignment size in bytes (64-bit -> 8 bytes)
3118
3119 // Number of stack slots between incoming argument block and the start of
3120 // a new frame. The PROLOG must add this many slots to the stack. The
3121 // EPILOG must remove this many slots.
3122 in_preserve_stack_slots(0);
3123
3124 // Number of outgoing stack slots killed above the out_preserve_stack_slots
3125 // for calls to C. Supports the var-args backing area for register parms.
3126 // ADLC doesn't support parsing expressions, so I folded the math by hand.
3127 #ifdef _LP64
3128 // (callee_register_argument_save_area_words (6) + callee_aggregate_return_pointer_words (0)) * 2-stack-slots-per-word
3129 varargs_C_out_slots_killed(12);
3130 #else
3131 // (callee_register_argument_save_area_words (6) + callee_aggregate_return_pointer_words (1)) * 1-stack-slots-per-word
3132 varargs_C_out_slots_killed( 7);
3133 #endif
3134
3135 // The after-PROLOG location of the return address. Location of
3136 // return address specifies a type (REG or STACK) and a number
3137 // representing the register number (i.e. - use a register name) or
3138 // stack slot.
3139 return_addr(REG R_I7); // Ret Addr is in register I7
3140
3141 // Body of function which returns an OptoRegs array locating
3142 // arguments either in registers or in stack slots for calling
3143 // java
3144 calling_convention %{
3145 (void) SharedRuntime::java_calling_convention(sig_bt, regs, length, is_outgoing);
3146
3147 %}
3148
3149 // Body of function which returns an OptoRegs array locating
3150 // arguments either in registers or in stack slots for calling
3151 // C.
3152 c_calling_convention %{
3153 // This is obviously always outgoing
3154 (void) SharedRuntime::c_calling_convention(sig_bt, regs, /*regs2=*/NULL, length);
3155 %}
3156
3157 // Location of native (C/C++) and interpreter return values. This is specified to
3158 // be the same as Java. In the 32-bit VM, long values are actually returned from
3159 // native calls in O0:O1 and returned to the interpreter in I0:I1. The copying
3160 // to and from the register pairs is done by the appropriate call and epilog
3161 // opcodes. This simplifies the register allocator.
3162 c_return_value %{
// Tables are indexed by ideal register type; the assert bounds the range.
3163 assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
3164 #ifdef _LP64
3165 static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_O0_num };
3166 static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_O0H_num, OptoReg::Bad, R_F1_num, R_O0H_num};
3167 static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_I0_num };
3168 static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_I0H_num, OptoReg::Bad, R_F1_num, R_I0H_num};
3169 #else // !_LP64
3170 static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_G1_num };
3171 static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num };
3172 static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_G1_num };
3173 static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num };
3174 #endif
3175 return OptoRegPair( (is_outgoing?hi_out:hi_in)[ideal_reg],
3176 (is_outgoing?lo_out:lo_in)[ideal_reg] );
3177 %}
3178
3179 // Location of compiled Java return values. Same as C
3180 return_value %{
3181 assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
3182 #ifdef _LP64
3183 static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_O0_num };
3184 static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_O0H_num, OptoReg::Bad, R_F1_num, R_O0H_num};
3185 static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_I0_num };
3186 static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_I0H_num, OptoReg::Bad, R_F1_num, R_I0H_num};
3187 #else // !_LP64
3188 static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_G1_num };
3189 static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num};
3190 static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_G1_num };
3191 static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num};
3192 #endif
3193 return OptoRegPair( (is_outgoing?hi_out:hi_in)[ideal_reg],
3194 (is_outgoing?lo_out:lo_in)[ideal_reg] );
3195 %}
3196
3197 %}
3198
3199
3200 //----------ATTRIBUTES---------------------------------------------------------
3201 //----------Operand Attributes-------------------------------------------------
3202 op_attrib op_cost(1); // Required cost attribute
3203
3204 //----------Instruction Attributes---------------------------------------------
3205 ins_attrib ins_cost(DEFAULT_COST); // Required cost attribute
3206 ins_attrib ins_size(32); // Required size attribute (in bits)
3207
3208 // avoid_back_to_back attribute is an expression that must return
3209 // one of the following values defined in MachNode:
3210 // AVOID_NONE - instruction can be placed anywhere
3211 // AVOID_BEFORE - instruction cannot be placed after an
3212 // instruction with MachNode::AVOID_AFTER
3427 // Long Immediate: the value FFFF
3428 operand immL_FFFF() %{
3429 predicate( n->get_long() == 0xFFFFL );
3430 match(ConL);
3431 op_cost(0);
3432
3433 format %{ %}
3434 interface(CONST_INTER);
3435 %}
3436
3437 // Pointer Immediate: 32 or 64-bit
3438 operand immP() %{
3439 match(ConP);
3440
3441 op_cost(5);
3442 // formats are generated automatically for constants and base registers
3443 format %{ %}
3444 interface(CONST_INTER);
3445 %}
3446
3447 #ifdef _LP64
3448 // Pointer Immediate: 64-bit
// Pre-Niagara2 CPUs: always materialize the constant with a set sequence.
3449 operand immP_set() %{
3450 predicate(!VM_Version::is_niagara_plus());
3451 match(ConP);
3452
3453 op_cost(5);
3454 // formats are generated automatically for constants and base registers
3455 format %{ %}
3456 interface(CONST_INTER);
3457 %}
3458
3459 // Pointer Immediate: 64-bit
3460 // From Niagara2 processors on a load should be better than materializing.
// Applies to oops, or to non-oops whose set sequence exceeds 3 instructions.
3461 operand immP_load() %{
3462 predicate(VM_Version::is_niagara_plus() && (n->bottom_type()->isa_oop_ptr() || (MacroAssembler::insts_for_set(n->get_ptr()) > 3)));
3463 match(ConP);
3464
3465 op_cost(5);
3466 // formats are generated automatically for constants and base registers
3467 format %{ %}
3468 interface(CONST_INTER);
3469 %}
3470
3471 // Pointer Immediate: 64-bit
// Niagara2+, non-oop, cheap to materialize (set sequence of <= 3 insts).
// Complement of immP_load so together they cover all Niagara2+ ConP nodes.
3472 operand immP_no_oop_cheap() %{
3473 predicate(VM_Version::is_niagara_plus() && !n->bottom_type()->isa_oop_ptr() && (MacroAssembler::insts_for_set(n->get_ptr()) <= 3));
3474 match(ConP);
3475
3476 op_cost(5);
3477 // formats are generated automatically for constants and base registers
3478 format %{ %}
3479 interface(CONST_INTER);
3480 %}
3481 #endif
3482
// Pointer immediate that fits a signed 13-bit field (simm13 range,
// conservatively excluding -4096 itself).
3483 operand immP13() %{
3484 predicate((-4096 < n->get_ptr()) && (n->get_ptr() <= 4095));
3485 match(ConP);
3486 op_cost(0);
3487
3488 format %{ %}
3489 interface(CONST_INTER);
3490 %}
3491
// Null pointer immediate.
3492 operand immP0() %{
3493 predicate(n->get_ptr() == 0);
3494 match(ConP);
3495 op_cost(0);
3496
3497 format %{ %}
3498 interface(CONST_INTER);
3499 %}
3500
3501 operand immP_poll() %{
3902 match(RegFlags);
3903
3904 format %{ "ccr" %} // both ICC and XCC
3905 interface(REG_INTER);
3906 %}
3907
3908 // Condition Code Register, unsigned comparisons.
3909 operand flagsRegU() %{
3910 constraint(ALLOC_IN_RC(int_flags));
3911 match(RegFlags);
3912
3913 format %{ "icc_U" %}
3914 interface(REG_INTER);
3915 %}
3916
3917 // Condition Code Register, pointer comparisons.
// Pointer compares use the 64-bit condition codes (xcc) on LP64,
// the 32-bit ones (icc) otherwise — only the printed format differs.
3918 operand flagsRegP() %{
3919 constraint(ALLOC_IN_RC(int_flags));
3920 match(RegFlags);
3921
3922 #ifdef _LP64
3923 format %{ "xcc_P" %}
3924 #else
3925 format %{ "icc_P" %}
3926 #endif
3927 interface(REG_INTER);
3928 %}
3929
3930 // Condition Code Register, long comparisons.
3931 operand flagsRegL() %{
3932 constraint(ALLOC_IN_RC(int_flags));
3933 match(RegFlags);
3934
3935 format %{ "xcc_L" %}
3936 interface(REG_INTER);
3937 %}
3938
3939 // Condition Code Register, floating comparisons, unordered same as "less".
3940 operand flagsRegF() %{
3941 constraint(ALLOC_IN_RC(float_flags));
3942 match(RegFlags);
3943 match(flagsRegF0);
3944
3945 format %{ %}
3946 interface(REG_INTER);
4483 src : R(read);
4484 IALU : R;
4485 %}
4486
4487 // Integer ALU reg conditional operation
4488 // This instruction has a 1 cycle stall, and cannot execute
4489 // in the same cycle as the instruction setting the condition
4490 // code. We kludge this by pretending to read the condition code
4491 // 1 cycle earlier, and by marking the functional units as busy
4492 // for 2 cycles with the result available 1 cycle later than
4493 // is really the case.
4494 pipe_class ialu_reg_flags( iRegI op2_out, iRegI op2_in, iRegI op1, flagsReg cr ) %{
4495 single_instruction;
4496 op2_out : C(write);
4497 op1 : R(read);
4498 cr : R(read); // This is really E, with a 1 cycle stall
4499 BR : R(2);
4500 MS : R(2);
4501 %}
4502
4503 #ifdef _LP64
// Clear-then-conditional-move pair used by LP64 pointer-to-bool idioms.
4504 pipe_class ialu_clr_and_mover( iRegI dst, iRegP src ) %{
4505 instruction_count(1); multiple_bundles;
4506 dst : C(write)+1;
4507 src : R(read)+1;
4508 IALU : R(1);
4509 BR : E(2);
4510 MS : E(2);
4511 %}
4512 #endif
4513
4514 // Integer ALU reg operation
4515 pipe_class ialu_move_reg_L_to_I(iRegI dst, iRegL src) %{
4516 single_instruction; may_have_no_code;
4517 dst : E(write);
4518 src : R(read);
4519 IALU : R;
4520 %}
// Same shape for the widening (I -> L) register move.
4521 pipe_class ialu_move_reg_I_to_L(iRegL dst, iRegI src) %{
4522 single_instruction; may_have_no_code;
4523 dst : E(write);
4524 src : R(read);
4525 IALU : R;
4526 %}
4527
4528 // Two integer ALU reg operations
4529 pipe_class ialu_reg_2(iRegL dst, iRegL src) %{
4530 instruction_count(2);
4531 dst : E(write);
4532 src : R(read);
4597 dst : E(write)+1;
4598 IALU : R(2);
4599 %}
4600
4601 // Long Constant
4602 pipe_class loadConL( iRegL dst, immL src ) %{
4603 instruction_count(2); multiple_bundles;
4604 dst : E(write)+1;
4605 IALU : R(2);
4606 IALU : R(2);
4607 %}
4608
4609 // Pointer Constant
4610 pipe_class loadConP( iRegP dst, immP src ) %{
4611 instruction_count(0); multiple_bundles;
4612 fixed_latency(6);
4613 %}
4614
4615 // Polling Address
// LP64 materializes the polling page like a general pointer constant;
// 32-bit needs only a single ALU op.
4616 pipe_class loadConP_poll( iRegP dst, immP_poll src ) %{
4617 #ifdef _LP64
4618 instruction_count(0); multiple_bundles;
4619 fixed_latency(6);
4620 #else
4621 dst : E(write);
4622 IALU : R;
4623 #endif
4624 %}
4625
4626 // Long Constant small
4627 pipe_class loadConLlo( iRegL dst, immL src ) %{
4628 instruction_count(2);
4629 dst : E(write);
4630 IALU : R;
4631 IALU : R;
4632 %}
4633
4634 // [PHH] This is wrong for 64-bit. See LdImmF/D.
4635 pipe_class loadConFD(regF dst, immF src, g3RegP tmp) %{
4636 instruction_count(1); multiple_bundles;
4637 src : R(read);
4638 dst : M(write)+1;
4639 IALU : R;
4640 MS : E;
4641 %}
4642
4643 // Integer ALU nop operation
5344 match(Set dst src);
5345
5346 ins_cost(MEMORY_REF_COST);
5347 format %{ "LDX $src,$dst\t! long" %}
5348 opcode(Assembler::ldx_op3);
5349 ins_encode(simple_form3_mem_reg( src, dst ) );
5350 ins_pipe(iload_mem);
5351 %}
5352
5353 // Store long to stack slot
5354 instruct regL_to_stkL(stackSlotL dst, iRegL src) %{
5355 match(Set dst src);
5356
5357 ins_cost(MEMORY_REF_COST);
5358 format %{ "STX $src,$dst\t! long" %}
5359 opcode(Assembler::stx_op3);
5360 ins_encode(simple_form3_mem_reg( dst, src ) );
5361 ins_pipe(istore_mem_reg);
5362 %}
5363
// Pointer spill/unspill rules: 64-bit builds use LDX/STX, 32-bit use LDUW/STW.
5364 #ifdef _LP64
5365 // Load pointer from stack slot, 64-bit encoding
5366 instruct stkP_to_regP( iRegP dst, stackSlotP src ) %{
5367 match(Set dst src);
5368 ins_cost(MEMORY_REF_COST);
5369 format %{ "LDX $src,$dst\t!ptr" %}
5370 opcode(Assembler::ldx_op3);
5371 ins_encode(simple_form3_mem_reg( src, dst ) );
5372 ins_pipe(iload_mem);
5373 %}
5374
5375 // Store pointer to stack slot
5376 instruct regP_to_stkP(stackSlotP dst, iRegP src) %{
5377 match(Set dst src);
5378 ins_cost(MEMORY_REF_COST);
5379 format %{ "STX $src,$dst\t!ptr" %}
5380 opcode(Assembler::stx_op3);
5381 ins_encode(simple_form3_mem_reg( dst, src ) );
5382 ins_pipe(istore_mem_reg);
5383 %}
5384 #else // _LP64
5385 // Load pointer from stack slot, 32-bit encoding
5386 instruct stkP_to_regP( iRegP dst, stackSlotP src ) %{
5387 match(Set dst src);
5388 ins_cost(MEMORY_REF_COST);
5389 format %{ "LDUW $src,$dst\t!ptr" %}
5390 opcode(Assembler::lduw_op3, Assembler::ldst_op);
5391 ins_encode(simple_form3_mem_reg( src, dst ) );
5392 ins_pipe(iload_mem);
5393 %}
5394
5395 // Store pointer to stack slot
5396 instruct regP_to_stkP(stackSlotP dst, iRegP src) %{
5397 match(Set dst src);
5398 ins_cost(MEMORY_REF_COST);
5399 format %{ "STW $src,$dst\t!ptr" %}
5400 opcode(Assembler::stw_op3, Assembler::ldst_op);
5401 ins_encode(simple_form3_mem_reg( dst, src ) );
5402 ins_pipe(istore_mem_reg);
5403 %}
5404 #endif // _LP64
5405
5406 //------------Special Nop instructions for bundling - no match rules-----------
5407 // Nop using the A0 functional unit
5408 instruct Nop_A0() %{
5409 ins_cost(0);
5410
5411 format %{ "NOP ! Alu Pipeline" %}
5412 opcode(Assembler::or_op3, Assembler::arith_op);
5413 ins_encode( form2_nop() );
5414 ins_pipe(ialu_nop_A0);
5415 %}
5416
5417 // Nop using the A1 functional unit
5418 instruct Nop_A1( ) %{
5419 ins_cost(0);
5420
5421 format %{ "NOP ! Alu Pipeline" %}
5422 opcode(Assembler::or_op3, Assembler::arith_op);
5423 ins_encode( form2_nop() );
5424 ins_pipe(ialu_nop_A1);
5841 ins_pipe(iload_mem);
5842 %}
5843
5844 // Load Integer into %f register (for fitos/fitod)
5845 instruct loadI_freg(regF dst, memory mem) %{
5846 match(Set dst (LoadI mem));
5847 ins_cost(MEMORY_REF_COST);
5848
5849 format %{ "LDF $mem,$dst\t! for fitos/fitod" %}
5850 opcode(Assembler::ldf_op3);
5851 ins_encode(simple_form3_mem_reg( mem, dst ) );
5852 ins_pipe(floadF_mem);
5853 %}
5854
5855 // Load Pointer
// 32-bit builds load a zero-extended word (LDUW); LP64 loads 64 bits (LDX).
5856 instruct loadP(iRegP dst, memory mem) %{
5857 match(Set dst (LoadP mem));
5858 ins_cost(MEMORY_REF_COST);
5859 size(4);
5860
5861 #ifndef _LP64
5862 format %{ "LDUW $mem,$dst\t! ptr" %}
5863 ins_encode %{
5864 __ lduw($mem$$Address, $dst$$Register);
5865 %}
5866 #else
5867 format %{ "LDX $mem,$dst\t! ptr" %}
5868 ins_encode %{
5869 __ ldx($mem$$Address, $dst$$Register);
5870 %}
5871 #endif
5872 ins_pipe(iload_mem);
5873 %}
5874
5875 // Load Compressed Pointer
// Compressed oops are 32 bits; LDUW zero-extends into the 64-bit register.
5876 instruct loadN(iRegN dst, memory mem) %{
5877 match(Set dst (LoadN mem));
5878 ins_cost(MEMORY_REF_COST);
5879 size(4);
5880
5881 format %{ "LDUW $mem,$dst\t! compressed ptr" %}
5882 ins_encode %{
5883 __ lduw($mem$$Address, $dst$$Register);
5884 %}
5885 ins_pipe(iload_mem);
5886 %}
5887
5888 // Load Klass Pointer
// Same word-size split as loadP: LDUW on 32-bit, LDX on LP64.
5889 instruct loadKlass(iRegP dst, memory mem) %{
5890 match(Set dst (LoadKlass mem));
5891 ins_cost(MEMORY_REF_COST);
5892 size(4);
5893
5894 #ifndef _LP64
5895 format %{ "LDUW $mem,$dst\t! klass ptr" %}
5896 ins_encode %{
5897 __ lduw($mem$$Address, $dst$$Register);
5898 %}
5899 #else
5900 format %{ "LDX $mem,$dst\t! klass ptr" %}
5901 ins_encode %{
5902 __ ldx($mem$$Address, $dst$$Register);
5903 %}
5904 #endif
5905 ins_pipe(iload_mem);
5906 %}
5907
5908 // Load narrow Klass Pointer
// Narrow klass pointers are 32 bits; LDUW zero-extends.
5909 instruct loadNKlass(iRegN dst, memory mem) %{
5910 match(Set dst (LoadNKlass mem));
5911 ins_cost(MEMORY_REF_COST);
5912 size(4);
5913
5914 format %{ "LDUW $mem,$dst\t! compressed klass ptr" %}
5915 ins_encode %{
5916 __ lduw($mem$$Address, $dst$$Register);
5917 %}
5918 ins_pipe(iload_mem);
5919 %}
5920
5921 // Load Double
5922 instruct loadD(regD dst, memory mem) %{
5923 match(Set dst (LoadD mem));
5924 ins_cost(MEMORY_REF_COST);
5952 %}
5953
5954 // Load Constant
// General 32-bit constant: sethi/or sequence via the Set32 enc_class.
5955 instruct loadConI( iRegI dst, immI src ) %{
5956 match(Set dst src);
5957 ins_cost(DEFAULT_COST * 3/2);
5958 format %{ "SET $src,$dst" %}
5959 ins_encode( Set32(src, dst) );
5960 ins_pipe(ialu_hi_lo_reg);
5961 %}
5962
// Small constant fitting simm13: a single MOV (OR with %g0) suffices.
5963 instruct loadConI13( iRegI dst, immI13 src ) %{
5964 match(Set dst src);
5965
5966 size(4);
5967 format %{ "MOV $src,$dst" %}
5968 ins_encode( Set13( src, dst ) );
5969 ins_pipe(ialu_imm);
5970 %}
5971
5972 #ifndef _LP64
// 32-bit pointer constant: dispatch on reloc type so oops and metadata
// get their relocation records; plain addresses use a bare set.
5973 instruct loadConP(iRegP dst, immP con) %{
5974 match(Set dst con);
5975 ins_cost(DEFAULT_COST * 3/2);
5976 format %{ "SET $con,$dst\t!ptr" %}
5977 ins_encode %{
5978 relocInfo::relocType constant_reloc = _opnds[1]->constant_reloc();
5979 intptr_t val = $con$$constant;
5980 if (constant_reloc == relocInfo::oop_type) {
5981 __ set_oop_constant((jobject) val, $dst$$Register);
5982 } else if (constant_reloc == relocInfo::metadata_type) {
5983 __ set_metadata_constant((Metadata*)val, $dst$$Register);
5984 } else { // non-oop pointers, e.g. card mark base, heap top
5985 assert(constant_reloc == relocInfo::none, "unexpected reloc type");
5986 __ set(val, $dst$$Register);
5987 }
5988 %}
5989 ins_pipe(loadConP);
5990 %}
5991 #else
// LP64 pointer constant, pre-Niagara2 (immP_set): materialize with a set
// sequence; same reloc-type dispatch as the 32-bit loadConP.
5992 instruct loadConP_set(iRegP dst, immP_set con) %{
5993 match(Set dst con);
5994 ins_cost(DEFAULT_COST * 3/2);
5995 format %{ "SET $con,$dst\t! ptr" %}
5996 ins_encode %{
5997 relocInfo::relocType constant_reloc = _opnds[1]->constant_reloc();
5998 intptr_t val = $con$$constant;
5999 if (constant_reloc == relocInfo::oop_type) {
6000 __ set_oop_constant((jobject) val, $dst$$Register);
6001 } else if (constant_reloc == relocInfo::metadata_type) {
6002 __ set_metadata_constant((Metadata*)val, $dst$$Register);
6003 } else { // non-oop pointers, e.g. card mark base, heap top
6004 assert(constant_reloc == relocInfo::none, "unexpected reloc type");
6005 __ set(val, $dst$$Register);
6006 }
6007 %}
6008 ins_pipe(loadConP);
6009 %}
6010
6011 instruct loadConP_load(iRegP dst, immP_load con) %{
6015 ins_encode %{
6016 RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset($con), $dst$$Register);
6017 __ ld_ptr($constanttablebase, con_offset, $dst$$Register);
6018 %}
6019 ins_pipe(loadConP);
6020 %}
6021
// LP64, Niagara2+, non-oop pointer that is cheap to materialize
// (immP_no_oop_cheap predicate guarantees <= 3 set instructions).
6022 instruct loadConP_no_oop_cheap(iRegP dst, immP_no_oop_cheap con) %{
6023 match(Set dst con);
6024 ins_cost(DEFAULT_COST * 3/2);
6025 format %{ "SET $con,$dst\t! non-oop ptr" %}
6026 ins_encode %{
6027 if (_opnds[1]->constant_reloc() == relocInfo::metadata_type) {
6028 __ set_metadata_constant((Metadata*)$con$$constant, $dst$$Register);
6029 } else {
6030 __ set($con$$constant, $dst$$Register);
6031 }
6032 %}
6033 ins_pipe(loadConP);
6034 %}
6035 #endif // _LP64
6036
// Null pointer constant: just clear the register (single instruction).
6037 instruct loadConP0(iRegP dst, immP0 src) %{
6038 match(Set dst src);
6039
6040 size(4);
6041 format %{ "CLR $dst\t!ptr" %}
6042 ins_encode %{
6043 __ clr($dst$$Register);
6044 %}
6045 ins_pipe(ialu_imm);
6046 %}
6047
6048 instruct loadConP_poll(iRegP dst, immP_poll src) %{
6049 match(Set dst src);
6050 ins_cost(DEFAULT_COST);
6051 format %{ "SET $src,$dst\t!ptr" %}
6052 ins_encode %{
6053 AddressLiteral polling_page(os::get_polling_page());
6054 __ sethi(polling_page, reg_to_register_object($dst$$reg));
6055 %}
6169 ins_encode( form3_mem_prefetch_write( mem ) );
6170 ins_pipe(iload_mem);
6171 %}
6172
6173 // Use BIS instruction to prefetch for allocation.
6174 // Could fault, need space at the end of TLAB.
6175 instruct prefetchAlloc_bis( iRegP dst ) %{
6176 predicate(AllocatePrefetchInstr == 1);
6177 match( PrefetchAllocation dst );
6178 ins_cost(MEMORY_REF_COST);
6179 size(4);
6180
6181 format %{ "STXA [$dst]\t! // Prefetch allocation using BIS" %}
6182 ins_encode %{
// Block-init store of %g0 through the BLKINIT ASI initializes the
// cache line without reading memory first.
6183 __ stxa(G0, $dst$$Register, G0, Assembler::ASI_ST_BLKINIT_PRIMARY);
6184 %}
6185 ins_pipe(istore_mem_reg);
6186 %}
6187
6188 // Next code is used for finding next cache line address to prefetch.
// 32-bit masks with an int immediate (AndI), LP64 with a long (AndL);
// both emit a single AND instruction.
6189 #ifndef _LP64
6190 instruct cacheLineAdr( iRegP dst, iRegP src, immI13 mask ) %{
6191 match(Set dst (CastX2P (AndI (CastP2X src) mask)));
6192 ins_cost(DEFAULT_COST);
6193 size(4);
6194
6195 format %{ "AND $src,$mask,$dst\t! next cache line address" %}
6196 ins_encode %{
6197 __ and3($src$$Register, $mask$$constant, $dst$$Register);
6198 %}
6199 ins_pipe(ialu_reg_imm);
6200 %}
6201 #else
6202 instruct cacheLineAdr( iRegP dst, iRegP src, immL13 mask ) %{
6203 match(Set dst (CastX2P (AndL (CastP2X src) mask)));
6204 ins_cost(DEFAULT_COST);
6205 size(4);
6206
6207 format %{ "AND $src,$mask,$dst\t! next cache line address" %}
6208 ins_encode %{
6209 __ and3($src$$Register, $mask$$constant, $dst$$Register);
6210 %}
6211 ins_pipe(ialu_reg_imm);
6212 %}
6213 #endif
6214
6215 //----------Store Instructions-------------------------------------------------
6216 // Store Byte
6217 instruct storeB(memory mem, iRegI src) %{
6218 match(Set mem (StoreB mem src));
6219 ins_cost(MEMORY_REF_COST);
6220
6221 format %{ "STB $src,$mem\t! byte" %}
6222 opcode(Assembler::stb_op3);
6223 ins_encode(simple_form3_mem_reg( mem, src ) );
6224 ins_pipe(istore_mem_reg);
6225 %}
6226
6227 instruct storeB0(memory mem, immI0 src) %{
6228 match(Set mem (StoreB mem src));
6229 ins_cost(MEMORY_REF_COST);
6230
6231 format %{ "STB $src,$mem\t! byte" %}
6232 opcode(Assembler::stb_op3);
6233 ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
6305 ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
6306 ins_pipe(istore_mem_zero);
6307 %}
6308
6309 // Store Integer from float register (used after fstoi)
6310 instruct storeI_Freg(memory mem, regF src) %{
6311 match(Set mem (StoreI mem src));
6312 ins_cost(MEMORY_REF_COST);
6313
6314 format %{ "STF $src,$mem\t! after fstoi/fdtoi" %}
6315 opcode(Assembler::stf_op3);
6316 ins_encode(simple_form3_mem_reg( mem, src ) );
6317 ins_pipe(fstoreF_mem_reg);
6318 %}
6319
6320 // Store Pointer
// Pointer-width store: STW on 32-bit, STX on LP64.
6321 instruct storeP(memory dst, sp_ptr_RegP src) %{
6322 match(Set dst (StoreP dst src));
6323 ins_cost(MEMORY_REF_COST);
6324
6325 #ifndef _LP64
6326 format %{ "STW $src,$dst\t! ptr" %}
6327 opcode(Assembler::stw_op3, 0, REGP_OP);
6328 #else
6329 format %{ "STX $src,$dst\t! ptr" %}
6330 opcode(Assembler::stx_op3, 0, REGP_OP);
6331 #endif
6332 ins_encode( form3_mem_reg( dst, src ) );
6333 ins_pipe(istore_mem_spORreg);
6334 %}
6335
// Store null pointer: reuse %g0 (hardwired zero) as the source register.
6336 instruct storeP0(memory dst, immP0 src) %{
6337 match(Set dst (StoreP dst src));
6338 ins_cost(MEMORY_REF_COST);
6339
6340 #ifndef _LP64
6341 format %{ "STW $src,$dst\t! ptr" %}
6342 opcode(Assembler::stw_op3, 0, REGP_OP);
6343 #else
6344 format %{ "STX $src,$dst\t! ptr" %}
6345 opcode(Assembler::stx_op3, 0, REGP_OP);
6346 #endif
6347 ins_encode( form3_mem_reg( dst, R_G0 ) );
6348 ins_pipe(istore_mem_zero);
6349 %}
6350
6351 // Store Compressed Pointer
6352 instruct storeN(memory dst, iRegN src) %{
6353 match(Set dst (StoreN dst src));
6354 ins_cost(MEMORY_REF_COST);
6355 size(4);
6356
6357 format %{ "STW $src,$dst\t! compressed ptr" %}
6358 ins_encode %{
6359 Register base = as_Register($dst$$base);
6360 Register index = as_Register($dst$$index);
6361 Register src = $src$$Register;
6362 if (index != G0) {
6363 __ stw(src, base, index);
6364 } else {
6365 __ stw(src, base, $dst$$disp);
6366 }
// Long add with a signed 13-bit immediate: single ADD instruction.
7077 instruct addL_reg_imm13(iRegL dst, iRegL src1, immL13 con) %{
7078 match(Set dst (AddL src1 con));
7079
7080 size(4);
7081 format %{ "ADD $src1,$con,$dst" %}
7082 opcode(Assembler::add_op3, Assembler::arith_op);
7083 ins_encode( form3_rs1_simm13_rd( src1, con, dst ) );
7084 ins_pipe(ialu_reg_imm);
7085 %}
7086
7087 //----------Conditional_store--------------------------------------------------
7088 // Conditional-store of the updated heap-top.
7089 // Used during allocation of the shared heap.
7090 // Sets flags (EQ) on success. Implemented with a CASA on Sparc.
7091
7092 // LoadP-locked. Same as a regular pointer load when used with a compare-swap
// LoadP-locked: same encoding as an ordinary pointer load; pairs with a
// conditional store (CASA) during shared-heap allocation.
7093 instruct loadPLocked(iRegP dst, memory mem) %{
7094 match(Set dst (LoadPLocked mem));
7095 ins_cost(MEMORY_REF_COST);
7096
7097 #ifndef _LP64
7098 format %{ "LDUW $mem,$dst\t! ptr" %}
7099 opcode(Assembler::lduw_op3, 0, REGP_OP);
7100 #else
7101 format %{ "LDX $mem,$dst\t! ptr" %}
7102 opcode(Assembler::ldx_op3, 0, REGP_OP);
7103 #endif
7104 ins_encode( form3_mem_reg( mem, dst ) );
7105 ins_pipe(iload_mem);
7106 %}
7107
// Conditional store of the updated heap top via CASA. newval is pinned to G3
// and killed because CASA overwrites it with the memory contents; success is
// reported by comparing G3 back against oldval (EQ in pcc on success).
7108 instruct storePConditional( iRegP heap_top_ptr, iRegP oldval, g3RegP newval, flagsRegP pcc ) %{
7109 match(Set pcc (StorePConditional heap_top_ptr (Binary oldval newval)));
7110 effect( KILL newval );
7111 format %{ "CASA [$heap_top_ptr],$oldval,R_G3\t! If $oldval==[$heap_top_ptr] Then store R_G3 into [$heap_top_ptr], set R_G3=[$heap_top_ptr] in any case\n\t"
7112 "CMP R_G3,$oldval\t\t! See if we made progress" %}
7113 ins_encode( enc_cas(heap_top_ptr,oldval,newval) );
7114 ins_pipe( long_memory_op );
7115 %}
7116
7117 // Conditional-store of an int value.
7118 instruct storeIConditional( iRegP mem_ptr, iRegI oldval, g3RegI newval, flagsReg icc ) %{
7119 match(Set icc (StoreIConditional mem_ptr (Binary oldval newval)));
7120 effect( KILL newval );
7121 format %{ "CASA [$mem_ptr],$oldval,$newval\t! If $oldval==[$mem_ptr] Then store $newval into [$mem_ptr], set $newval=[$mem_ptr] in any case\n\t"
7122 "CMP $oldval,$newval\t\t! See if we made progress" %}
7123 ins_encode( enc_cas(mem_ptr,oldval,newval) );
7154 %}
7155
7156
// CompareAndSwapI returning a boolean. Matches both strong and weak CAS.
// O7 (tmp1) holds a copy of newval because CASA destroys its data register;
// the boolean result is derived from icc after comparing oldval with O7.
7157 instruct compareAndSwapI_bool(iRegP mem_ptr, iRegI oldval, iRegI newval, iRegI res, o7RegI tmp1, flagsReg ccr ) %{
7158 match(Set res (CompareAndSwapI mem_ptr (Binary oldval newval)));
7159 match(Set res (WeakCompareAndSwapI mem_ptr (Binary oldval newval)));
7160 effect( USE mem_ptr, KILL ccr, KILL tmp1);
7161 format %{
7162 "MOV $newval,O7\n\t"
7163 "CASA [$mem_ptr],$oldval,O7\t! If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t"
7164 "CMP $oldval,O7\t\t! See if we made progress\n\t"
7165 "MOV 1,$res\n\t"
7166 "MOVne icc,R_G0,$res"
7167 %}
7168 ins_encode( enc_casi(mem_ptr, oldval, newval),
7169 enc_iflags_ne_to_boolean(res) );
7170 ins_pipe( long_memory_op );
7171 %}
7172
// CompareAndSwapP returning a boolean. On 64-bit this is CASX (gated on
// supports_cx8); on 32-bit pointers fit in 32 bits so the integer CASA
// encoding is reused. O7 is clobbered as the CASA data register.
7173 instruct compareAndSwapP_bool(iRegP mem_ptr, iRegP oldval, iRegP newval, iRegI res, o7RegI tmp1, flagsReg ccr ) %{
7174 #ifdef _LP64
7175 predicate(VM_Version::supports_cx8());
7176 #endif
7177 match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval)));
7178 match(Set res (WeakCompareAndSwapP mem_ptr (Binary oldval newval)));
7179 effect( USE mem_ptr, KILL ccr, KILL tmp1);
7180 format %{
7181 "MOV $newval,O7\n\t"
7182 "CASA_PTR [$mem_ptr],$oldval,O7\t! If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t"
7183 "CMP $oldval,O7\t\t! See if we made progress\n\t"
7184 "MOV 1,$res\n\t"
7185 "MOVne xcc,R_G0,$res"
7186 %}
7187 #ifdef _LP64
7188 ins_encode( enc_casx(mem_ptr, oldval, newval),
7189 enc_lflags_ne_to_boolean(res) );
7190 #else
7191 ins_encode( enc_casi(mem_ptr, oldval, newval),
7192 enc_iflags_ne_to_boolean(res) );
7193 #endif
7194 ins_pipe( long_memory_op );
7195 %}
7196
// CompareAndSwapN (compressed oop) returning a boolean. Narrow oops are
// 32-bit, so the 32-bit CASA/icc path is used unconditionally.
7197 instruct compareAndSwapN_bool(iRegP mem_ptr, iRegN oldval, iRegN newval, iRegI res, o7RegI tmp1, flagsReg ccr ) %{
7198 match(Set res (CompareAndSwapN mem_ptr (Binary oldval newval)));
7199 match(Set res (WeakCompareAndSwapN mem_ptr (Binary oldval newval)));
7200 effect( USE mem_ptr, KILL ccr, KILL tmp1);
7201 format %{
7202 "MOV $newval,O7\n\t"
7203 "CASA [$mem_ptr],$oldval,O7\t! If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t"
7204 "CMP $oldval,O7\t\t! See if we made progress\n\t"
7205 "MOV 1,$res\n\t"
7206 "MOVne icc,R_G0,$res"
7207 %}
7208 ins_encode( enc_casi(mem_ptr, oldval, newval),
7209 enc_iflags_ne_to_boolean(res) );
7210 ins_pipe( long_memory_op );
7211 %}
7212
7213 instruct compareAndExchangeI(iRegP mem_ptr, iRegI oldval, iRegI newval)
7251 match(Set newval (CompareAndExchangeN mem_ptr (Binary oldval newval)));
7252 effect( USE mem_ptr );
7253
7254 format %{
7255 "CASA [$mem_ptr],$oldval,$newval\t! If $oldval==[$mem_ptr] Then store $newval into [$mem_ptr] and set $newval=[$mem_ptr]\n\t"
7256 %}
7257 ins_encode( enc_casi_exch(mem_ptr, oldval, newval) );
7258 ins_pipe( long_memory_op );
7259 %}
7260
// Atomic exchange of an int with memory via the SPARC SWAP instruction.
7261 instruct xchgI( memory mem, iRegI newval) %{
7262 match(Set newval (GetAndSetI mem newval));
7263 format %{ "SWAP [$mem],$newval" %}
7264 size(4);
7265 ins_encode %{
7266 __ swap($mem$$Address, $newval$$Register);
7267 %}
7268 ins_pipe( long_memory_op );
7269 %}
7270
7271 #ifndef _LP64
// Atomic exchange of a pointer. 32-bit only (guarded by #ifndef _LP64 around
// this instruct): SWAP moves 32 bits, which covers a pointer on ILP32.
7272 instruct xchgP( memory mem, iRegP newval) %{
7273 match(Set newval (GetAndSetP mem newval));
7274 format %{ "SWAP [$mem],$newval" %}
7275 size(4);
7276 ins_encode %{
7277 __ swap($mem$$Address, $newval$$Register);
7278 %}
7279 ins_pipe( long_memory_op );
7280 %}
7281 #endif
7282
// Atomic exchange of a compressed oop (32-bit) via SWAP.
7283 instruct xchgN( memory mem, iRegN newval) %{
7284 match(Set newval (GetAndSetN mem newval));
7285 format %{ "SWAP [$mem],$newval" %}
7286 size(4);
7287 ins_encode %{
7288 __ swap($mem$$Address, $newval$$Register);
7289 %}
7290 ins_pipe( long_memory_op );
7291 %}
7292
7293 //---------------------
7294 // Subtraction Instructions
7295 // Register Subtraction
7296 instruct subI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
7297 match(Set dst (SubI src1 src2));
7298
7299 size(4);
7300 format %{ "SUB $src1,$src2,$dst" %}
7301 opcode(Assembler::sub_op3, Assembler::arith_op);
7723
7724 size(4);
7725 format %{ "SRLX $src1,$src2,$dst" %}
7726 opcode(Assembler::srlx_op3, Assembler::arith_op);
7727 ins_encode( form3_sd_rs1_rs2_rd( src1, src2, dst ) );
7728 ins_pipe(ialu_reg_reg);
7729 %}
7730
7731 // Register Shift Right Immediate
// Unsigned long shift right by a 6-bit immediate count (SRLX).
7732 instruct shrL_reg_imm6(iRegL dst, iRegL src1, immU6 src2) %{
7733 match(Set dst (URShiftL src1 src2));
7734
7735 size(4);
7736 format %{ "SRLX $src1,$src2,$dst" %}
7737 opcode(Assembler::srlx_op3, Assembler::arith_op);
7738 ins_encode( form3_sd_rs1_imm6_rd( src1, src2, dst ) );
7739 ins_pipe(ialu_reg_imm);
7740 %}
7741
7742 // Register Shift Right Immediate with a CastP2X
7743 #ifdef _LP64
// 64-bit: unsigned right shift of a pointer reinterpreted as a long
// (CastP2X); the cast is free, only the SRLX is emitted.
7744 instruct shrP_reg_imm6(iRegL dst, iRegP src1, immU6 src2) %{
7745 match(Set dst (URShiftL (CastP2X src1) src2));
7746 size(4);
7747 format %{ "SRLX $src1,$src2,$dst\t! Cast ptr $src1 to long and shift" %}
7748 opcode(Assembler::srlx_op3, Assembler::arith_op);
7749 ins_encode( form3_sd_rs1_imm6_rd( src1, src2, dst ) );
7750 ins_pipe(ialu_reg_imm);
7751 %}
7752 #else
// 32-bit counterpart: pointer reinterpreted as int, shifted with SRL
// (5-bit shift count on a 32-bit value).
7753 instruct shrP_reg_imm5(iRegI dst, iRegP src1, immU5 src2) %{
7754 match(Set dst (URShiftI (CastP2X src1) src2));
7755 size(4);
7756 format %{ "SRL $src1,$src2,$dst\t! Cast ptr $src1 to int and shift" %}
7757 opcode(Assembler::srl_op3, Assembler::arith_op);
7758 ins_encode( form3_rs1_imm5_rd( src1, src2, dst ) );
7759 ins_pipe(ialu_reg_imm);
7760 %}
7761 #endif
7762
7763
7764 //----------Floating Point Arithmetic Instructions-----------------------------
7765
7766 // Add float single precision
// Single-precision float add (FADDS).
7767 instruct addF_reg_reg(regF dst, regF src1, regF src2) %{
7768 match(Set dst (AddF src1 src2));
7769
7770 size(4);
7771 format %{ "FADDS $src1,$src2,$dst" %}
7772 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fadds_opf);
7773 ins_encode(form3_opf_rs1F_rs2F_rdF(src1, src2, dst));
7774 ins_pipe(faddF_reg_reg);
7775 %}
7776
7777 // Add float double precision
7778 instruct addD_reg_reg(regD dst, regD src1, regD src2) %{
7779 match(Set dst (AddD src1 src2));
7780
7781 size(4);
7984 ins_cost(DEFAULT_COST);
7985 size(4);
7986 format %{ "OR $src1,$src2,$dst\t! long" %}
7987 opcode(Assembler::or_op3, Assembler::arith_op);
7988 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7989 ins_pipe(ialu_reg_reg);
7990 %}
7991
// Or long register with a 13-bit signed immediate (single OR instruction).
// NOTE(review): a stale duplicate ins_cost(DEFAULT_COST*2) was removed; the
// ADLC keeps the last ins_cost declared, so the effective cost below
// (DEFAULT_COST, matching the single 4-byte instruction) is unchanged.
7992 instruct orL_reg_imm13(iRegL dst, iRegL src1, immL13 con) %{
7993 match(Set dst (OrL src1 con));
7994
7996 ins_cost(DEFAULT_COST);
7997 size(4);
7998 format %{ "OR $src1,$con,$dst\t! long" %}
7999 opcode(Assembler::or_op3, Assembler::arith_op);
8000 ins_encode( form3_rs1_simm13_rd( src1, con, dst ) );
8001 ins_pipe(ialu_reg_imm);
8002 %}
8003
8004 #ifndef _LP64
8005
8006 // Use sp_ptr_RegP to match G2 (TLS register) without spilling.
// 32-bit: OR an int with a pointer bit-pattern (CastP2X). sp_ptr_RegP lets
// G2 (the TLS register) match directly without spilling.
8007 instruct orI_reg_castP2X(iRegI dst, iRegI src1, sp_ptr_RegP src2) %{
8008 match(Set dst (OrI src1 (CastP2X src2)));
8009
8010 size(4);
8011 format %{ "OR $src1,$src2,$dst" %}
8012 opcode(Assembler::or_op3, Assembler::arith_op);
8013 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
8014 ins_pipe(ialu_reg_reg);
8015 %}
8016
8017 #else
8018
// 64-bit twin of orI_reg_castP2X: OR a long with a pointer bit-pattern.
8019 instruct orL_reg_castP2X(iRegL dst, iRegL src1, sp_ptr_RegP src2) %{
8020 match(Set dst (OrL src1 (CastP2X src2)));
8021
8022 ins_cost(DEFAULT_COST);
8023 size(4);
8024 format %{ "OR $src1,$src2,$dst\t! long" %}
8025 opcode(Assembler::or_op3, Assembler::arith_op);
8026 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
8027 ins_pipe(ialu_reg_reg);
8028 %}
8029
8030 #endif
8031
8032 // Xor Instructions
8033 // Register Xor
// Int register-register XOR.
8034 instruct xorI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
8035 match(Set dst (XorI src1 src2));
8036
8037 size(4);
8038 format %{ "XOR $src1,$src2,$dst" %}
8039 opcode(Assembler::xor_op3, Assembler::arith_op);
8040 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
8041 ins_pipe(ialu_reg_reg);
8042 %}
8043
8044 // Immediate Xor
8045 instruct xorI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{
8046 match(Set dst (XorI src1 src2));
8047
8048 size(4);
8049 format %{ "XOR $src1,$src2,$dst" %}
8050 opcode(Assembler::xor_op3, Assembler::arith_op);
8051 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
8071 size(4);
8072 format %{ "XOR $src1,$con,$dst\t! long" %}
8073 opcode(Assembler::xor_op3, Assembler::arith_op);
8074 ins_encode( form3_rs1_simm13_rd( src1, con, dst ) );
8075 ins_pipe(ialu_reg_imm);
8076 %}
8077
8078 //----------Convert to Boolean-------------------------------------------------
8079 // Nice hack for 32-bit tests but doesn't work for
8080 // 64-bit pointers.
// Int -> boolean (0 or 1) via CMP against zero and add-with-carry; clobbers
// the condition codes (KILL ccr).
8081 instruct convI2B( iRegI dst, iRegI src, flagsReg ccr ) %{
8082 match(Set dst (Conv2B src));
8083 effect( KILL ccr );
8084 ins_cost(DEFAULT_COST*2);
8085 format %{ "CMP R_G0,$src\n\t"
8086 "ADDX R_G0,0,$dst" %}
8087 ins_encode( enc_to_bool( src, dst ) );
8088 ins_pipe(ialu_reg_ialu);
8089 %}
8090
8091 #ifndef _LP64
// 32-bit pointer -> boolean: same carry trick as convI2B (pointers fit in
// 32 bits here, so the int encoding is reused).
8092 instruct convP2B( iRegI dst, iRegP src, flagsReg ccr ) %{
8093 match(Set dst (Conv2B src));
8094 effect( KILL ccr );
8095 ins_cost(DEFAULT_COST*2);
8096 format %{ "CMP R_G0,$src\n\t"
8097 "ADDX R_G0,0,$dst" %}
8098 ins_encode( enc_to_bool( src, dst ) );
8099 ins_pipe(ialu_reg_ialu);
8100 %}
8101 #else
// 64-bit pointer -> boolean: flag-free version using the V9 conditional
// move-on-register-nonzero (MOVRNZ), so no condition codes are killed.
8102 instruct convP2B( iRegI dst, iRegP src ) %{
8103 match(Set dst (Conv2B src));
8104 ins_cost(DEFAULT_COST*2);
8105 format %{ "MOV $src,$dst\n\t"
8106 "MOVRNZ $src,1,$dst" %}
8107 ins_encode( form3_g0_rs2_rd_move( src, dst ), enc_convP2B( dst, src ) );
8108 ins_pipe(ialu_clr_and_mover);
8109 %}
8110 #endif
8111
// CmpLTMask against zero: arithmetic shift by 31 smears the sign bit,
// yielding -1 if src < 0 else 0.
8112 instruct cmpLTMask0( iRegI dst, iRegI src, immI0 zero, flagsReg ccr ) %{
8113 match(Set dst (CmpLTMask src zero));
8114 effect(KILL ccr);
8115 size(4);
8116 format %{ "SRA $src,#31,$dst\t# cmpLTMask0" %}
8117 ins_encode %{
8118 __ sra($src$$Register, 31, $dst$$Register);
8119 %}
8120 ins_pipe(ialu_reg_imm);
8121 %}
8122
8123 instruct cmpLTMask_reg_reg( iRegI dst, iRegI p, iRegI q, flagsReg ccr ) %{
8124 match(Set dst (CmpLTMask p q));
8125 effect( KILL ccr );
8126 ins_cost(DEFAULT_COST*4);
8127 format %{ "CMP $p,$q\n\t"
8128 "MOV #0,$dst\n\t"
8129 "BLT,a .+8\n\t"
8130 "MOV #-1,$dst" %}
8733 stkL_to_regD(tmp, src);
8734 convL2F_helper(dst, tmp);
8735 %}
8736 %}
8737
// Long -> float on VIS3-capable hardware: move the long directly into an FP
// double register (MoveL2D) then convert, avoiding a trip through the stack.
8738 instruct convL2F_reg(regF dst, iRegL src) %{
8739 predicate(UseVIS >= 3);
8740 match(Set dst (ConvL2F src));
8741 ins_cost(DEFAULT_COST);
8742 expand %{
8743 regD tmp;
8744 MoveL2D_reg_reg(tmp, src);
8745 convL2F_helper(dst, tmp);
8746 %}
8747 %}
8748
8749 //-----------
8750
// Long -> int truncation. 32-bit: move the low word of the pair. 64-bit:
// SRA by zero sign-extends the low 32 bits in place (canonical int form).
8751 instruct convL2I_reg(iRegI dst, iRegL src) %{
8752 match(Set dst (ConvL2I src));
8753 #ifndef _LP64
8754 format %{ "MOV $src.lo,$dst\t! long->int" %}
8755 ins_encode( form3_g0_rs2_rd_move_lo2( src, dst ) );
8756 ins_pipe(ialu_move_reg_I_to_L);
8757 #else
8758 size(4);
8759 format %{ "SRA $src,R_G0,$dst\t! long->int" %}
8760 ins_encode( form3_rs1_rd_signextend_lo1( src, dst ) );
8761 ins_pipe(ialu_reg);
8762 #endif
8763 %}
8764
8765 // Register Shift Right Immediate
// Fused (long >> cnt) then truncate-to-int, for shift counts 32..63: a
// single SRAX leaves the desired bits sign-extended in the low word.
8766 instruct shrL_reg_imm6_L2I(iRegI dst, iRegL src, immI_32_63 cnt) %{
8767 match(Set dst (ConvL2I (RShiftL src cnt)));
8768
8769 size(4);
8770 format %{ "SRAX $src,$cnt,$dst" %}
8771 opcode(Assembler::srax_op3, Assembler::arith_op);
8772 ins_encode( form3_sd_rs1_imm6_rd( src, cnt, dst ) );
8773 ins_pipe(ialu_reg_imm);
8774 %}
8775
8776 //----------Control Flow Instructions------------------------------------------
8777 // Compare Instructions
8778 // Compare Integers
8779 instruct compI_iReg(flagsReg icc, iRegI op1, iRegI op2) %{
8780 match(Set icc (CmpI op1 op2));
8781 effect( DEF icc, USE op1, USE op2 );
8782
9511 ins_cost(BRANCH_COST);
9512 format %{ "CXB$cmp $op1,$op2,$labl\t! long" %}
9513 ins_encode %{
9514 Label* L = $labl$$label;
9515 assert(__ use_cbcond(*L), "back to back cbcond");
9516 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::xcc, $op1$$Register, $op2$$constant, *L);
9517 %}
9518 ins_short_branch(1);
9519 ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
9520 ins_pipe(cbcond_reg_imm);
9521 %}
9522
9523 // Compare Pointers and branch
// Compare two pointers and branch with the short compare-and-branch (CBCOND)
// form. Marked short-branch and avoid-back-to-back because hardware forbids
// adjacent cbcond instructions (see the assert in the encoding).
9524 instruct cmpP_reg_branch_short(cmpOpP cmp, iRegP op1, iRegP op2, label labl, flagsRegP pcc) %{
9525 match(If cmp (CmpP op1 op2));
9526 predicate(UseCBCond);
9527 effect(USE labl, KILL pcc);
9528
9529 size(4);
9530 ins_cost(BRANCH_COST);
9531 #ifdef _LP64
9532 format %{ "CXB$cmp $op1,$op2,$labl\t! ptr" %}
9533 #else
9534 format %{ "CWB$cmp $op1,$op2,$labl\t! ptr" %}
9535 #endif
9536 ins_encode %{
9537 Label* L = $labl$$label;
9538 assert(__ use_cbcond(*L), "back to back cbcond");
9539 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::ptr_cc, $op1$$Register, $op2$$Register, *L);
9540 %}
9541 ins_short_branch(1);
9542 ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
9543 ins_pipe(cbcond_reg_reg);
9544 %}
9545
// Compare a pointer against NULL and branch short (CBCOND); G0 supplies the
// zero operand so no register holds the null constant.
9546 instruct cmpP_null_branch_short(cmpOpP cmp, iRegP op1, immP0 null, label labl, flagsRegP pcc) %{
9547 match(If cmp (CmpP op1 null));
9548 predicate(UseCBCond);
9549 effect(USE labl, KILL pcc);
9550
9551 size(4);
9552 ins_cost(BRANCH_COST);
9553 #ifdef _LP64
9554 format %{ "CXB$cmp $op1,0,$labl\t! ptr" %}
9555 #else
9556 format %{ "CWB$cmp $op1,0,$labl\t! ptr" %}
9557 #endif
9558 ins_encode %{
9559 Label* L = $labl$$label;
9560 assert(__ use_cbcond(*L), "back to back cbcond");
9561 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::ptr_cc, $op1$$Register, G0, *L);
9562 %}
9563 ins_short_branch(1);
9564 ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
9565 ins_pipe(cbcond_reg_reg);
9566 %}
9567
9568 instruct cmpN_reg_branch_short(cmpOp cmp, iRegN op1, iRegN op2, label labl, flagsReg icc) %{
9569 match(If cmp (CmpN op1 op2));
9570 predicate(UseCBCond);
9571 effect(USE labl, KILL icc);
9572
9573 size(4);
9574 ins_cost(BRANCH_COST);
9575 format %{ "CWB$cmp $op1,$op2,$labl\t! compressed ptr" %}
9576 ins_encode %{
9577 Label* L = $labl$$label;
9805 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::xcc)) );
9806 ins_pipe(int_conditional_float_move);
9807 %}
9808
// Conditional move of a double based on long (xcc) condition codes (FMOVD).
9809 instruct cmovDL_reg(cmpOp cmp, flagsRegL xcc, regD dst, regD src) %{
9810 match(Set dst (CMoveD (Binary cmp xcc) (Binary dst src)));
9811 ins_cost(150);
9812 opcode(0x102);
9813 format %{ "FMOVD$cmp $xcc,$src,$dst" %}
9814 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::xcc)) );
9815 ins_pipe(int_conditional_float_move);
9816 %}
9817
9818 // ============================================================================
9819 // Safepoint Instruction
// Safepoint poll: load from the polling page into G0 (result discarded).
// The relocation marks the load so the VM can recognize the poll site;
// a protected polling page turns the load into a trap at a safepoint.
9820 instruct safePoint_poll(iRegP poll) %{
9821 match(SafePoint poll);
9822 effect(USE poll);
9823
9824 size(4);
9825 #ifdef _LP64
9826 format %{ "LDX [$poll],R_G0\t! Safepoint: poll for GC" %}
9827 #else
9828 format %{ "LDUW [$poll],R_G0\t! Safepoint: poll for GC" %}
9829 #endif
9830 ins_encode %{
9831 __ relocate(relocInfo::poll_type);
9832 __ ld_ptr($poll$$Register, 0, G0);
9833 %}
9834 ins_pipe(loadPollP);
9835 %}
9836
9837 // ============================================================================
9838 // Call Instructions
9839 // Call Java Static Instruction
9840 instruct CallStaticJavaDirect( method meth ) %{
9841 match(CallStaticJava);
9842 predicate(! ((CallStaticJavaNode*)n)->is_method_handle_invoke());
9843 effect(USE meth);
9844
9845 size(8);
9846 ins_cost(CALL_COST);
9847 format %{ "CALL,static ; NOP ==> " %}
9848 ins_encode( Java_Static_Call( meth ), call_epilog );
9849 ins_avoid_back_to_back(AVOID_BEFORE);
|
1 //
2 // Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
3 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 //
5 // This code is free software; you can redistribute it and/or modify it
6 // under the terms of the GNU General Public License version 2 only, as
7 // published by the Free Software Foundation.
8 //
9 // This code is distributed in the hope that it will be useful, but WITHOUT
10 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 // FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 // version 2 for more details (a copy is included in the LICENSE file that
13 // accompanied this code).
14 //
15 // You should have received a copy of the GNU General Public License version
16 // 2 along with this work; if not, write to the Free Software Foundation,
17 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 //
19 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 // or visit www.oracle.com if you need additional information or have any
21 // questions.
22 //
294 // R_G6: reserved by Solaris ABI to tools
295 // R_G7: reserved by Solaris ABI to libthread
296 // R_O7: Used as a temp in many encodings
297 reg_class int_reg(R_G1,R_G3,R_G4,R_G5,R_O0,R_O1,R_O2,R_O3,R_O4,R_O5,R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7,R_I0,R_I1,R_I2,R_I3,R_I4,R_I5);
298
299 // Class for all integer registers, except the G registers. This is used for
300 // encodings which use G registers as temps. The regular inputs to such
301 // instructions use a "notemp_" prefix, as a hack to ensure that the allocator
302 // will not put an input into a temp register.
303 reg_class notemp_int_reg(R_O0,R_O1,R_O2,R_O3,R_O4,R_O5,R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7,R_I0,R_I1,R_I2,R_I3,R_I4,R_I5);
304
305 reg_class g1_regI(R_G1);
306 reg_class g3_regI(R_G3);
307 reg_class g4_regI(R_G4);
308 reg_class o0_regI(R_O0);
309 reg_class o7_regI(R_O7);
310
311 // ----------------------------
312 // Pointer Register Classes
313 // ----------------------------
314 // 64-bit build means 64-bit pointers means hi/lo pairs
// Each pointer/long class lists aligned hi/lo register halves (R_xH,R_x).
315 reg_class ptr_reg( R_G1H,R_G1, R_G3H,R_G3, R_G4H,R_G4, R_G5H,R_G5,
316 R_O0H,R_O0, R_O1H,R_O1, R_O2H,R_O2, R_O3H,R_O3, R_O4H,R_O4, R_O5H,R_O5,
317 R_L0H,R_L0, R_L1H,R_L1, R_L2H,R_L2, R_L3H,R_L3, R_L4H,R_L4, R_L5H,R_L5, R_L6H,R_L6, R_L7H,R_L7,
318 R_I0H,R_I0, R_I1H,R_I1, R_I2H,R_I2, R_I3H,R_I3, R_I4H,R_I4, R_I5H,R_I5 );
319 // Lock encodings use G3 and G4 internally
// (hence lock_ptr_reg is ptr_reg minus G3/G4)
320 reg_class lock_ptr_reg( R_G1H,R_G1, R_G5H,R_G5,
321 R_O0H,R_O0, R_O1H,R_O1, R_O2H,R_O2, R_O3H,R_O3, R_O4H,R_O4, R_O5H,R_O5,
322 R_L0H,R_L0, R_L1H,R_L1, R_L2H,R_L2, R_L3H,R_L3, R_L4H,R_L4, R_L5H,R_L5, R_L6H,R_L6, R_L7H,R_L7,
323 R_I0H,R_I0, R_I1H,R_I1, R_I2H,R_I2, R_I3H,R_I3, R_I4H,R_I4, R_I5H,R_I5 );
324 // Special class for storeP instructions, which can store SP or RPC to TLS.
325 // It is also used for memory addressing, allowing direct TLS addressing.
// (adds G2, SP and FP to the usual pointer set)
326 reg_class sp_ptr_reg( R_G1H,R_G1, R_G2H,R_G2, R_G3H,R_G3, R_G4H,R_G4, R_G5H,R_G5,
327 R_O0H,R_O0, R_O1H,R_O1, R_O2H,R_O2, R_O3H,R_O3, R_O4H,R_O4, R_O5H,R_O5, R_SPH,R_SP,
328 R_L0H,R_L0, R_L1H,R_L1, R_L2H,R_L2, R_L3H,R_L3, R_L4H,R_L4, R_L5H,R_L5, R_L6H,R_L6, R_L7H,R_L7,
329 R_I0H,R_I0, R_I1H,R_I1, R_I2H,R_I2, R_I3H,R_I3, R_I4H,R_I4, R_I5H,R_I5, R_FPH,R_FP );
330 // R_L7 is the lowest-priority callee-save (i.e., NS) register
331 // We use it to save R_G2 across calls out of Java.
332 reg_class l7_regP(R_L7H,R_L7);
333
334 // Other special pointer regs
// Single-register classes used to pin operands to a fixed register.
335 reg_class g1_regP(R_G1H,R_G1);
336 reg_class g2_regP(R_G2H,R_G2);
337 reg_class g3_regP(R_G3H,R_G3);
338 reg_class g4_regP(R_G4H,R_G4);
339 reg_class g5_regP(R_G5H,R_G5);
340 reg_class i0_regP(R_I0H,R_I0);
341 reg_class o0_regP(R_O0H,R_O0);
342 reg_class o1_regP(R_O1H,R_O1);
343 reg_class o2_regP(R_O2H,R_O2);
344 reg_class o7_regP(R_O7H,R_O7);
345
346
347 // ----------------------------
348 // Long Register Classes
349 // ----------------------------
350 // Longs in 1 register. Aligned adjacent hi/lo pairs.
351 // Note: O7 is never in this class; it is sometimes used as an encoding temp.
352 reg_class long_reg( R_G1H,R_G1, R_G3H,R_G3, R_G4H,R_G4, R_G5H,R_G5
353 ,R_O0H,R_O0, R_O1H,R_O1, R_O2H,R_O2, R_O3H,R_O3, R_O4H,R_O4, R_O5H,R_O5
354 // 64-bit, longs in 1 register: use all 64-bit integer registers
355 // 32-bit, longs in 1 register: cannot use I's and L's. Restrict to O's and G's.
356 ,R_L0H,R_L0, R_L1H,R_L1, R_L2H,R_L2, R_L3H,R_L3, R_L4H,R_L4, R_L5H,R_L5, R_L6H,R_L6, R_L7H,R_L7
357 ,R_I0H,R_I0, R_I1H,R_I1, R_I2H,R_I2, R_I3H,R_I3, R_I4H,R_I4, R_I5H,R_I5
358 );
359
360 reg_class g1_regL(R_G1H,R_G1);
361 reg_class g3_regL(R_G3H,R_G3);
362 reg_class o2_regL(R_O2H,R_O2);
363 reg_class o7_regL(R_O7H,R_O7);
364
365 // ----------------------------
366 // Special Class for Condition Code Flags Register
367 reg_class int_flags(CCR);
368 reg_class float_flags(FCC0,FCC1,FCC2,FCC3);
369 reg_class float_flag0(FCC0);
370
371
372 // ----------------------------
373 // Float Point Register Classes
374 // ----------------------------
375 // Skip F30/F31, they are reserved for mem-mem copies
376 reg_class sflt_reg(R_F0,R_F1,R_F2,R_F3,R_F4,R_F5,R_F6,R_F7,R_F8,R_F9,R_F10,R_F11,R_F12,R_F13,R_F14,R_F15,R_F16,R_F17,R_F18,R_F19,R_F20,R_F21,R_F22,R_F23,R_F24,R_F25,R_F26,R_F27,R_F28,R_F29);
377
479
480 // tertiary op of a LoadP or StoreP encoding
481 #define REGP_OP true
482
483 static FloatRegister reg_to_SingleFloatRegister_object(int register_encoding);
484 static FloatRegister reg_to_DoubleFloatRegister_object(int register_encoding);
485 static Register reg_to_register_object(int register_encoding);
486
487 // Used by the DFA in dfa_sparc.cpp.
488 // Check for being able to use a V9 branch-on-register. Requires a
489 // compare-vs-zero, equal/not-equal, of a value which was zero- or sign-
490 // extended. Doesn't work following an integer ADD, for example, because of
491 // overflow (-1 incremented yields 0 plus a carry in the high-order word). On
492 // 32-bit V9 systems, interrupts currently blow away the high-order 32 bits and
493 // replace them with zero, which could become sign-extension in a different OS
494 // release. There's no obvious reason why an interrupt will ever fill these
495 // bits with non-zero junk (the registers are reloaded with standard LD
496 // instructions which either zero-fill or sign-fill).
497 bool can_branch_register( Node *bol, Node *cmp ) {
498 if( !BranchOnRegister ) return false;
499 if( cmp->Opcode() == Op_CmpP )
500 return true; // No problems with pointer compares
501 if( cmp->Opcode() == Op_CmpL )
502 return true; // No problems with long compares
503
504 if( !SparcV9RegsHiBitsZero ) return false;
505 if( bol->as_Bool()->_test._test != BoolTest::ne &&
506 bol->as_Bool()->_test._test != BoolTest::eq )
507 return false;
508
509 // Check for comparing against a 'safe' value. Any operation which
510 // clears out the high word is safe. Thus, loads and certain shifts
511 // are safe, as are non-negative constants. Any operation which
512 // preserves zero bits in the high word is safe as long as each of its
513 // inputs are safe. Thus, phis and bitwise booleans are safe if their
514 // inputs are safe. At present, the only important case to recognize
515 // seems to be loads. Constants should fold away, and shifts &
516 // logicals can use the 'cc' forms.
517 Node *x = cmp->in(1);
518 if( x->is_Load() ) return true;
519 if( x->is_Phi() ) {
520 for( uint i = 1; i < x->req(); i++ )
561 int klass_load_size;
562 if (UseCompressedClassPointers) {
563 assert(Universe::heap() != NULL, "java heap should be initialized");
564 klass_load_size = MacroAssembler::instr_size_for_decode_klass_not_null() + 1*BytesPerInstWord;
565 } else {
566 klass_load_size = 1*BytesPerInstWord;
567 }
568 if (Assembler::is_simm13(v_off)) {
569 return klass_load_size +
570 (2*BytesPerInstWord + // ld_ptr, ld_ptr
571 NativeCall::instruction_size); // call; delay slot
572 } else {
573 return klass_load_size +
574 (4*BytesPerInstWord + // set_hi, set, ld_ptr, ld_ptr
575 NativeCall::instruction_size); // call; delay slot
576 }
577 }
578 }
579
// Offset from the start of a runtime call to its return address: far
// targets need the longer NativeFarCall sequence, near ones a plain call.
580 int MachCallRuntimeNode::ret_addr_offset() {
581 if (MacroAssembler::is_far_target(entry_point())) {
582 return NativeFarCall::instruction_size;
583 } else {
584 return NativeCall::instruction_size;
585 }
586 }
587
588 // Indicate if the safepoint node needs the polling page as an input.
589 // Since Sparc does not have absolute addressing, it does.
590 bool SafePointNode::needs_polling_address_input() {
591 return true; // no absolute addressing on SPARC; poll address must be an input
592 }
593
594 // emit an interrupt that is caught by the debugger (for debugging compiler)
// Emit a breakpoint trap instruction into the code buffer (debugger hook).
595 void emit_break(CodeBuffer &cbuf) {
596 MacroAssembler _masm(&cbuf);
597 __ breakpoint_trap();
598 }
599
600 #ifndef PRODUCT
// Disassembly text for the breakpoint node ("TA" = trap always).
601 void MachBreakpointNode::format( PhaseRegAlloc *, outputStream *st ) const {
602 st->print("TA");
603 }
604 #endif
605
964 MacroAssembler _masm(&cbuf);
965 __ set_inst_mark();
966
967 // We flush the current window just so that there is a valid stack copy
968 // the fact that the current window becomes active again instantly is
969 // not a problem there is nothing live in it.
970
971 #ifdef ASSERT
972 int startpos = __ offset();
973 #endif /* ASSERT */
974
975 __ call((address)entry_point, rspec);
976
977 if (preserve_g2) __ delayed()->mov(G2, L7);
978 else __ delayed()->nop();
979
980 if (preserve_g2) __ mov(L7, G2);
981
982 #ifdef ASSERT
983 if (preserve_g2 && (VerifyCompiledCode || VerifyOops)) {
984 // Trash argument dump slots.
985 __ set(0xb0b8ac0db0b8ac0d, G1);
986 __ mov(G1, G5);
987 __ stx(G1, SP, STACK_BIAS + 0x80);
988 __ stx(G1, SP, STACK_BIAS + 0x88);
989 __ stx(G1, SP, STACK_BIAS + 0x90);
990 __ stx(G1, SP, STACK_BIAS + 0x98);
991 __ stx(G1, SP, STACK_BIAS + 0xA0);
992 __ stx(G1, SP, STACK_BIAS + 0xA8);
993 }
994 #endif /*ASSERT*/
995 }
996
997 //=============================================================================
998 // REQUIRED FUNCTIONALITY for encoding
// Required by the ADLC interface; SPARC emits nothing for hi/lo halves here.
999 void emit_lo(CodeBuffer &cbuf, int val) { }
1000 void emit_hi(CodeBuffer &cbuf, int val) { }
1001
1002
1003 //=============================================================================
1004 const RegMask& MachConstantBaseNode::_out_RegMask = PTR_REG_mask();
1005
1006 int Compile::ConstantTable::calculate_table_base_offset() const {
1007 if (UseRDPCForConstantTableBase) {
1008 // The table base offset might be less but then it fits into
1009 // simm13 anyway and we are good (cf. MachConstantBaseNode::emit).
1010 return Assembler::min_simm13();
1011 } else {
1012 int offset = -(size() / 2);
1185 Compile::ConstantTable& constant_table = C->constant_table();
1186 constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
1187 }
1188 }
1189
// Prolog size is computed generically by scratch-emitting the node.
1190 uint MachPrologNode::size(PhaseRegAlloc *ra_) const {
1191 return MachNode::size(ra_);
1192 }
1193
// Upper bound on relocation entries the prolog may emit.
1194 int MachPrologNode::reloc() const {
1195 return 10; // a large enough number
1196 }
1197
1198 //=============================================================================
1199 #ifndef PRODUCT
// Pretty-print the epilog: optional return-site safepoint poll, optional
// NOP spacer for cbcond methods, then RET and register-window RESTORE.
1200 void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
1201 Compile* C = ra_->C;
1202
1203 if(do_polling() && ra_->C->is_method_compilation()) {
1204 st->print("SETHI #PollAddr,L0\t! Load Polling address\n\t");
1205 st->print("LDX [L0],G0\t!Poll for Safepointing\n\t");
1206 }
1207
1208 if(do_polling()) {
1209 if (UseCBCond && !ra_->C->is_method_compilation()) {
1210 st->print("NOP\n\t");
1211 }
1212 st->print("RET\n\t");
1213 }
1214
1215 st->print("RESTORE");
1216 }
1217 #endif
1218
1219 void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1220 MacroAssembler _masm(&cbuf);
1221 Compile* C = ra_->C;
1222
1223 __ verify_thread();
1224
1225 if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
1391 return;
1392 }
1393 impl_mov_helper(cbuf, src_first, dst_first, Assembler::mftoi_op3, Assembler::mstouw_opf, "MOVSTOUW", st);
1394 }
1395 // Check for int->float copy on T4
1396 if (src_first_rc == rc_int && dst_first_rc == rc_float && UseVIS >= 3) {
1397 // Further check for aligned-adjacent pair, so we can use a double move
1398 if ((src_first & 1) == 0 && src_first + 1 == src_second && (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
1399 impl_mov_helper(cbuf, src_first, dst_first, Assembler::mftoi_op3, Assembler::mxtod_opf, "MOVXTOD", st);
1400 return;
1401 }
1402 impl_mov_helper(cbuf, src_first, dst_first, Assembler::mftoi_op3, Assembler::mwtos_opf, "MOVWTOS", st);
1403 }
1404
1405 // --------------------------------------
1406 // In the 32-bit 1-reg-longs build ONLY, I see mis-aligned long destinations.
1407 // In such cases, I have to do the big-endian swap. For aligned targets, the
1408 // hardware does the flop for me. Doubles are always aligned, so no problem
1409 // there. Misaligned sources only come from native-long-returns (handled
1410 // special below).
1411
1412 // --------------------------------------
1413 // Check for integer reg-reg copy
1414 if (src_first_rc == rc_int && dst_first_rc == rc_int) {
1415 // Else normal reg-reg copy
1416 assert(src_second != dst_first, "smashed second before evacuating it");
1417 impl_mov_helper(cbuf, src_first, dst_first, Assembler::or_op3, 0, "MOV ", st);
1418 assert((src_first & 1) == 0 && (dst_first & 1) == 0, "never move second-halves of int registers");
1419 // This moves an aligned adjacent pair.
1420 // See if we are done.
1421 if (src_first + 1 == src_second && dst_first + 1 == dst_second) {
1422 return;
1423 }
1424 }
1425
1426 // Check for integer store
1427 if (src_first_rc == rc_int && dst_first_rc == rc_stack) {
1428 int offset = ra_->reg2offset(dst_first);
1429 // Further check for aligned-adjacent pair, so we can use a double store
1430 if ((src_first & 1) == 0 && src_first + 1 == src_second && (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
1431 impl_helper(mach, cbuf, ra_, false, offset, src_first, Assembler::stx_op3, "STX ", st);
1432 return;
1433 }
1434 impl_helper(mach, cbuf, ra_, false, offset, src_first, Assembler::stw_op3, "STW ", st);
1468
1469 // Check for float load
1470 if (dst_first_rc == rc_float && src_first_rc == rc_stack) {
1471 int offset = ra_->reg2offset(src_first);
1472 // Further check for aligned-adjacent pair, so we can use a double load
1473 if ((src_first & 1) == 0 && src_first + 1 == src_second && (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
1474 impl_helper(mach, cbuf, ra_, true, offset, dst_first, Assembler::lddf_op3, "LDDF", st);
1475 return;
1476 }
1477 impl_helper(mach, cbuf, ra_, true, offset, dst_first, Assembler::ldf_op3, "LDF ", st);
1478 }
1479
1480 // --------------------------------------------------------------------
1481 // Check for hi bits still needing moving. Only happens for misaligned
1482 // arguments to native calls.
1483 if (src_second == dst_second) {
1484 return; // Self copy; no move
1485 }
1486 assert(src_second_rc != rc_bad && dst_second_rc != rc_bad, "src_second & dst_second cannot be Bad");
1487
1488 Unimplemented();
1489 }
1490
// Emit (or format) a spill copy; sizing mode is unsupported on SPARC, so
// the helper does all the work and the returned size is unused (0).
1491 uint MachSpillCopyNode::implementation(CodeBuffer *cbuf,
1492 PhaseRegAlloc *ra_,
1493 bool do_size,
1494 outputStream* st) const {
1495 assert(!do_size, "not supported");
1496 mach_spill_copy_implementation_helper(this, cbuf, ra_, st);
1497 return 0;
1498 }
1499
#ifndef PRODUCT
// Debug-only pretty-printer: reuse the emit path with a NULL CodeBuffer so
// only the textual form is written to 'st'.
void MachSpillCopyNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
  implementation( NULL, ra_, false, st );
}
#endif
1505
1506 void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1507 implementation( &cbuf, ra_, false, NULL );
1545 int reg = ra_->get_encode(this);
1546
1547 if (Assembler::is_simm13(offset)) {
1548 __ add(SP, offset, reg_to_register_object(reg));
1549 } else {
1550 __ set(offset, O7);
1551 __ add(SP, O7, reg_to_register_object(reg));
1552 }
1553 }
1554
// Code size of a BoxLock, measured by emitting it into the scratch buffer.
uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
  assert(ra_ == ra_->C->regalloc(), "sanity");
  return ra_->C->scratch_emit_size(this);
}
1560
//=============================================================================
#ifndef PRODUCT
// Debug-only disassembly of the Unverified Entry Point (inline-cache check).
// Mirrors MachUEPNode::emit below: load the receiver's klass (decompressing
// it when compressed class pointers are in use), compare against the
// expected klass in G5, and trap on mismatch.
void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
  st->print_cr("\nUEP:");
  if (UseCompressedClassPointers) {
    assert(Universe::heap() != NULL, "java heap should be initialized");
    st->print_cr("\tLDUW [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check - compressed klass");
    if (Universe::narrow_klass_base() != 0) {
      // Decompression needs the klass base (and possibly a shift).
      st->print_cr("\tSET Universe::narrow_klass_base,R_G6_heap_base");
      if (Universe::narrow_klass_shift() != 0) {
        st->print_cr("\tSLL R_G5,Universe::narrow_klass_shift,R_G5");
      }
      st->print_cr("\tADD R_G5,R_G6_heap_base,R_G5");
      // Restore G6 to the narrow-oop base afterwards.
      st->print_cr("\tSET Universe::narrow_ptrs_base,R_G6_heap_base");
    } else {
      st->print_cr("\tSLL R_G5,Universe::narrow_klass_shift,R_G5");
    }
  } else {
    st->print_cr("\tLDX [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check");
  }
  st->print_cr("\tCMP R_G5,R_G3" );
  st->print ("\tTne xcc,R_G0+ST_RESERVED_FOR_USER_0+2");
}
#endif
1585
1586 void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1587 MacroAssembler _masm(&cbuf);
1588 Register G5_ic_reg = reg_to_register_object(Matcher::inline_cache_reg_encode());
1589 Register temp_reg = G3;
1590 assert( G5_ic_reg != temp_reg, "conflicting registers" );
1591
1592 // Load klass from receiver
1593 __ load_klass(O0, temp_reg);
1594 // Compare against expected klass
1595 __ cmp(temp_reg, G5_ic_reg);
1596 // Branch to miss code, checks xcc or icc depending
1597 __ trap(Assembler::notEqual, Assembler::ptr_cc, G0, ST_RESERVED_FOR_USER_0+2);
1598 }
1599
// UEP size varies with the compressed-klass configuration, so measure it
// by scratch emission via the generic MachNode::size.
uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
// Map an ADLC register encoding to a double-precision FloatRegister.
// The asserts spot-check that the ADLC encodings agree with the
// assembler's D-sized register encodings.
static FloatRegister reg_to_DoubleFloatRegister_object(int register_encoding) {
  assert(F4->encoding(FloatRegisterImpl::D) == R_F4_enc, "right coding");
  assert(F32->encoding(FloatRegisterImpl::D) == R_D32_enc, "right coding");
  return as_DoubleFloatRegister(register_encoding);
}
1675
1676 const bool Matcher::match_rule_supported(int opcode) {
1677 if (!has_match_rule(opcode))
1678 return false;
1679
1680 switch (opcode) {
1681 case Op_CountLeadingZerosI:
1682 case Op_CountLeadingZerosL:
1683 case Op_CountTrailingZerosI:
1684 case Op_CountTrailingZerosL:
1685 case Op_PopCountI:
1686 case Op_PopCountL:
1687 if (!UsePopCountInstruction)
1688 return false;
1689 case Op_CompareAndSwapL:
1690 case Op_CompareAndSwapP:
1691 if (!VM_Version::supports_cx8())
1692 return false;
1693 break;
1694 }
1695
1696 return true; // Per default match rules are supported.
1697 }
1698
1699 const bool Matcher::match_rule_supported_vector(int opcode, int vlen) {
1700
1701 // TODO
1702 // identify extra cases that we might want to provide match rules for
1703 // e.g. Op_ vector nodes and other intrinsics while guarding with vlen
1704 bool ret_value = match_rule_supported(opcode);
1705 // Add rules here.
1706
1707 return ret_value; // Per default match rules are supported.
1708 }
1709
1710 const bool Matcher::has_predicated_vectors(void) {
1821 return true;
1822 }
1823
// Should a klass constant be materialized as a full ConP rather than a
// ConNKlass + DecodeNKlass pair?  Currently always yes on this platform.
bool Matcher::const_klass_prefer_decode() {
  // TODO: Check if loading ConP from TOC in heap-based mode is better:
  // Prefer ConNKlass+DecodeNKlass over ConP in simple compressed klass mode.
  // return Universe::narrow_klass_base() == NULL;
  return true;
}
1830
// Is it better to copy float constants, or load them directly from memory?
// Intel can load a float constant from a direct address, requiring no
// extra registers. Most RISCs will have to materialize an address into a
// register first, so they would do better to copy the constant from stack.
const bool Matcher::rematerialize_float_constants = false;

// If CPU can load and store mis-aligned doubles directly then no fixup is
// needed. Else we split the double into 2 integer pieces and move it
// piece-by-piece. Only happens when passing doubles into C code as the
// Java calling convention forces doubles to be aligned.
const bool Matcher::misaligned_doubles_ok = true;

// No-op on SPARC: no platform-specific fixup is needed for implicit
// null checks.
void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
}

// Advertise here if the CPU requires explicit rounding operations
// to implement the UseStrictFP mode.
const bool Matcher::strict_fp_requires_explicit_rounding = false;

// Are floats converted to double when stored to stack during deoptimization?
// Sparc does not handle callee-save floats.
bool Matcher::float_in_double() { return false; }

// Do ints take an entire long register or just half?
// Note that we if-def off of _LP64.
// The relevant question is how the int is callee-saved. In _LP64
// the whole long is written but de-opt'ing will have to extract
// the relevant 32 bits, in not-_LP64 only the low 32 bits is written.
const bool Matcher::int_in_long = true;
1861
1862 // Return whether or not this register is ever used as an argument. This
1863 // function is used on startup to build the trampoline stubs in generateOptoStub.
1864 // Registers not mentioned will be killed by the VM call in the trampoline, and
1865 // arguments in those registers not be available to the callee.
1866 bool Matcher::can_be_java_arg( int reg ) {
1867 // Standard sparc 6 args in registers
1868 if( reg == R_I0_num ||
1869 reg == R_I1_num ||
1870 reg == R_I2_num ||
1871 reg == R_I3_num ||
1872 reg == R_I4_num ||
1873 reg == R_I5_num ) return true;
1874 // 64-bit builds can pass 64-bit pointers and longs in
1875 // the high I registers
1876 if( reg == R_I0H_num ||
1877 reg == R_I1H_num ||
1878 reg == R_I2H_num ||
1879 reg == R_I3H_num ||
1880 reg == R_I4H_num ||
1881 reg == R_I5H_num ) return true;
1882
1883 if ((UseCompressedOops) && (reg == R_G6_num || reg == R_G6H_num)) {
1884 return true;
1885 }
1886
1887 // A few float args in registers
1888 if( reg >= R_F0_num && reg <= R_F7_num ) return true;
1889
1890 return false;
1891 }
1892
// An argument register is spillable exactly when it can carry a Java arg.
bool Matcher::is_spillable_arg( int reg ) {
  return can_be_java_arg(reg);
}
1896
// Should long division by a constant use the hardware SDIVX instruction
// rather than a multiply-based sequence?  Decided purely by CPU capability
// on SPARC; the divisor value itself is not consulted here.
bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) {
  // Use hardware SDIVX instruction when it is
  // faster than a code which use multiply.
  return VM_Version::has_fast_idiv();
}
1902
1903 // Register for DIVI projection of divmodI
1904 RegMask Matcher::divI_proj_mask() {
1905 ShouldNotReachHere();
1906 return RegMask();
1929 }
1930
1931
// The matcher must preserve the type input of ConvI2L nodes on this
// platform (see shared matcher code for how this flag is consumed).
const bool Matcher::convi2l_type_required = true;

// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?  SPARC only clones simple base+offset addresses.
bool Matcher::clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
  return clone_base_plus_offset_address(m, mstack, address_visited);
}

// No SPARC-specific AddP reshaping is performed.
void Compile::reshape_address(AddPNode* addp) {
}
1943
1944 %}
1945
1946
1947 // The intptr_t operand types, defined by textual substitution.
1948 // (Cf. opto/type.hpp. This lets us avoid many, many other ifdefs.)
1949 #define immX immL
1950 #define immX13 immL13
1951 #define immX13m7 immL13m7
1952 #define iRegX iRegL
1953 #define g1RegX g1RegL
1954
1955 //----------ENCODING BLOCK-----------------------------------------------------
1956 // This block specifies the encoding classes used by the compiler to output
1957 // byte streams. Encoding classes are parameterized macros used by
1958 // Machine Instruction Nodes in order to generate the bit encoding of the
1959 // instruction. Operands specify their base encoding interface with the
1960 // interface keyword. There are currently supported four interfaces,
1961 // REG_INTER, CONST_INTER, MEMORY_INTER, & COND_INTER. REG_INTER causes an
1962 // operand to generate a function which returns its register number when
1963 // queried. CONST_INTER causes an operand to generate a function which
1964 // returns the value of the constant when queried. MEMORY_INTER causes an
1965 // operand to generate four functions which return the Base Register, the
1966 // Index Register, the Scale Value, and the Offset Value of the operand when
1967 // queried. COND_INTER causes an operand to generate six functions which
1968 // return the encoding code (ie - encoding bits for the instruction)
1969 // associated with each basic boolean condition for a conditional instruction.
1970 //
1971 // Instructions specify two basic values for encoding. Again, a function
1972 // is available to check if the constant displacement is an oop. They use the
1973 // ins_encode keyword to specify their encoding classes (which must be
// Reg-op-imm form with a 5-bit unsigned immediate (masked to 0x1F,
// matching a 5-bit count field — used by 32-bit shift encodings).
enc_class form3_rs1_imm5_rd( iRegI rs1, immU5 imm5, iRegI rd ) %{
  emit3_simm13( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, $imm5$$constant & 0x1F );
%}

// 64-bit shift by a 6-bit immediate: bit 0x1000 is OR'ed in to select
// the extended (64-bit) form of the shift instruction.
enc_class form3_sd_rs1_imm6_rd( iRegL rs1, immU6 imm6, iRegL rd ) %{
  emit3_simm13( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, ($imm6$$constant & 0x3F) | 0x1000 );
%}

// 64-bit shift by register; the 0x80 opf field selects the extended form.
enc_class form3_sd_rs1_rs2_rd( iRegL rs1, iRegI rs2, iRegL rd ) %{
  emit3( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, 0x80, $rs2$$reg );
%}

// Generic reg-op-simm13 arithmetic form (opcodes come from the instruct's
// primary/secondary opcode() declaration).
enc_class form3_rs1_simm13_rd( iRegI rs1, immI13 simm13, iRegI rd ) %{
  emit3_simm13( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, $simm13$$constant );
%}

// O1 = O7 + frame::pc_return_offset: materialize the return PC in O1.
enc_class move_return_pc_to_o1() %{
  emit3_simm13( cbuf, Assembler::arith_op, R_O1_enc, Assembler::add_op3, R_O7_enc, frame::pc_return_offset );
%}
2114
/* %%% merge with enc_to_bool */
// Pointer -> boolean: conditionally write 1 into dst when src is non-zero
// (MOVR on register-condition "not zero"). NOTE(review): dst is presumably
// pre-cleared to 0 by the matching instruct — not visible here; confirm.
enc_class enc_convP2B( iRegI dst, iRegP src ) %{
  MacroAssembler _masm(&cbuf);

  Register src_reg = reg_to_register_object($src$$reg);
  Register dst_reg = reg_to_register_object($dst$$reg);
  __ movr(Assembler::rc_nz, src_reg, 1, dst_reg);
%}

// (Set p (AddI (AndI (CmpLTMask p q) y) (SubI p q)))
// Branch-free: p = p - q (sets icc); tmp = (p - q) + y; then if the
// subtraction set "less" (original p < q), p = tmp.
enc_class enc_cadd_cmpLTMask( iRegI p, iRegI q, iRegI y, iRegI tmp ) %{
  MacroAssembler _masm(&cbuf);

  Register p_reg = reg_to_register_object($p$$reg);
  Register q_reg = reg_to_register_object($q$$reg);
  Register y_reg = reg_to_register_object($y$$reg);
  Register tmp_reg = reg_to_register_object($tmp$$reg);

  __ subcc( p_reg, q_reg, p_reg );   // p -= q, setting condition codes
  __ add ( p_reg, y_reg, tmp_reg );  // tmp = (p - q) + y
  __ movcc( Assembler::less, false, Assembler::icc, tmp_reg, p_reg ); // p = tmp if p < q
%}
2137
2138 enc_class form_d2i_helper(regD src, regF dst) %{
2139 // fcmp %fcc0,$src,$src
2140 emit3( cbuf, Assembler::arith_op , Assembler::fcc0, Assembler::fpop2_op3, $src$$reg, Assembler::fcmpd_opf, $src$$reg );
2141 // branch %fcc0 not-nan, predict taken
2142 emit2_19( cbuf, Assembler::branch_op, 0/*annul*/, Assembler::f_ordered, Assembler::fbp_op2, Assembler::fcc0, 1/*predict taken*/, 4 );
// Materialize an arbitrary 32-bit constant into rd (MacroAssembler::set
// chooses the shortest SETHI/OR sequence).
enc_class Set32( immI src, iRegI rd ) %{
  MacroAssembler _masm(&cbuf);
  __ set($src$$constant, reg_to_register_object($rd$$reg));
%}

// Optional post-call stack sanity check: verify SP + framesize == FP,
// trapping if the frame size recorded by the register allocator is wrong.
enc_class call_epilog %{
  if( VerifyStackAtCalls ) {
    MacroAssembler _masm(&cbuf);
    int framesize = ra_->C->frame_size_in_bytes();
    Register temp_reg = G3;
    __ add(SP, framesize, temp_reg);
    __ cmp(temp_reg, FP);
    __ breakpoint_trap(Assembler::notEqual, Assembler::ptr_cc);
  }
%}
2408
// Long values come back from native calls in O0:O1 in the 32-bit VM, copy the value
// to G1 so the register allocator will not have to deal with the misaligned register
// pair.
// Intentionally empty here: in the 64-bit VM a long comes back in a single
// register, so no adjustment is required.
enc_class adjust_long_from_native_call %{
%}

enc_class Java_To_Runtime (method meth) %{    // CALL Java_To_Runtime
  // CALL directly to the runtime
  // The user of this is responsible for ensuring that R_L7 is empty (killed).
  emit_call_reloc(cbuf, $meth$$method, runtime_call_Relocation::spec(), /*preserve_g2=*/true);
%}

// Save SP into L7_mh_SP_save (used around method-handle invokes, per the
// register's name — paired with restore_SP below).
enc_class preserve_SP %{
  MacroAssembler _masm(&cbuf);
  __ mov(SP, L7_mh_SP_save);
%}

// Restore SP from L7_mh_SP_save.
enc_class restore_SP %{
  MacroAssembler _masm(&cbuf);
  __ mov(L7_mh_SP_save, SP);
%}
2430
2431 enc_class Java_Static_Call (method meth) %{ // JAVA STATIC CALL
2432 // CALL to fixup routine. Fixup routine uses ScopeDesc info to determine
2859 // varargs C calling conventions.
2860 // Note 3: Region 0-3 is even aligned, with pad2 as needed. Region 3-5 is
2861 // even aligned with pad0 as needed.
2862 // Region 6 is even aligned. Region 6-7 is NOT even aligned;
2863 // region 6-11 is even aligned; it may be padded out more so that
2864 // the region from SP to FP meets the minimum stack alignment.
2865
frame %{
  // What direction does stack grow in (assumed to be same for native & Java)
  stack_direction(TOWARDS_LOW);

  // These two registers define part of the calling convention
  // between compiled code and the interpreter.
  inline_cache_reg(R_G5);                // Inline Cache Register or Method* for I2C
  interpreter_method_oop_reg(R_G5);      // Method Oop Register when calling interpreter

  // Optional: name the operand used by cisc-spilling to access [stack_pointer + offset]
  cisc_spilling_operand_name(indOffset);

  // Number of stack slots consumed by a Monitor enter
  sync_stack_slots(2);

  // Compiled code's Frame Pointer
  frame_pointer(R_SP);

  // Stack alignment requirement
  stack_alignment(StackAlignmentInBytes);
  //  LP64: Alignment size in bytes (128-bit -> 16 bytes)
  // !LP64: Alignment size in bytes (64-bit -> 8 bytes)

  // Number of stack slots between incoming argument block and the start of
  // a new frame.  The PROLOG must add this many slots to the stack.  The
  // EPILOG must remove this many slots.
  in_preserve_stack_slots(0);

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  // ADLC doesn't support parsing expressions, so I folded the math by hand.
  // (callee_register_argument_save_area_words (6) + callee_aggregate_return_pointer_words (0)) * 2-stack-slots-per-word
  varargs_C_out_slots_killed(12);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  return_addr(REG R_I7);          // Ret Addr is in register I7

  // Body of function which returns an OptoRegs array locating
  // arguments either in registers or in stack slots for calling
  // java
  calling_convention %{
    (void) SharedRuntime::java_calling_convention(sig_bt, regs, length, is_outgoing);

  %}

  // Body of function which returns an OptoRegs array locating
  // arguments either in registers or in stack slots for calling
  // C.
  c_calling_convention %{
    // This is obviously always outgoing
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, /*regs2=*/NULL, length);
  %}

  // Location of native (C/C++) and interpreter return values.  This is specified to
  // be the same as Java.  In the 32-bit VM, long values are actually returned from
  // native calls in O0:O1 and returned to the interpreter in I0:I1.  The copying
  // to and from the register pairs is done by the appropriate call and epilog
  // opcodes.  This simplifies the register allocator.
  c_return_value %{
    assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
    // Tables indexed by ideal register type; lo/hi halves for in/out views.
    static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_O0_num };
    static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_O0H_num, OptoReg::Bad, R_F1_num, R_O0H_num};
    static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_I0_num };
    static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_I0H_num, OptoReg::Bad, R_F1_num, R_I0H_num};
    return OptoRegPair( (is_outgoing?hi_out:hi_in)[ideal_reg],
                        (is_outgoing?lo_out:lo_in)[ideal_reg] );
  %}

  // Location of compiled Java return values.  Same as C
  return_value %{
    assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
    static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_O0_num };
    static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_O0H_num, OptoReg::Bad, R_F1_num, R_O0H_num};
    static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_I0_num };
    static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_I0H_num, OptoReg::Bad, R_F1_num, R_I0H_num};
    return OptoRegPair( (is_outgoing?hi_out:hi_in)[ideal_reg],
                        (is_outgoing?lo_out:lo_in)[ideal_reg] );
  %}

%}
2949
2950
//----------ATTRIBUTES---------------------------------------------------------
//----------Operand Attributes-------------------------------------------------
op_attrib  op_cost(1);               // Required cost attribute

//----------Instruction Attributes---------------------------------------------
ins_attrib ins_cost(DEFAULT_COST);   // Required cost attribute
ins_attrib ins_size(32);             // Required size attribute (in bits)
2958
2959 // avoid_back_to_back attribute is an expression that must return
2960 // one of the following values defined in MachNode:
2961 // AVOID_NONE - instruction can be placed anywhere
2962 // AVOID_BEFORE - instruction cannot be placed after an
2963 // instruction with MachNode::AVOID_AFTER
// Long Immediate: the value FFFF (low 16 bits set)
operand immL_FFFF() %{
  predicate( n->get_long() == 0xFFFFL );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate: 32 or 64-bit
operand immP() %{
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}
3197
// Pointer Immediate: 64-bit, materialized via a SET sequence.
// Only used on pre-Niagara-plus CPUs (complement of immP_load below).
operand immP_set() %{
  predicate(!VM_Version::is_niagara_plus());
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate: 64-bit
// From Niagara2 processors on a load should be better than materializing.
// Applies to oops, and to other pointers whose SET sequence would exceed
// 3 instructions.
operand immP_load() %{
  predicate(VM_Version::is_niagara_plus() && (n->bottom_type()->isa_oop_ptr() || (MacroAssembler::insts_for_set(n->get_ptr()) > 3)));
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate: 64-bit non-oop pointer cheap to materialize
// (SET sequence of at most 3 instructions on Niagara-plus).
operand immP_no_oop_cheap() %{
  predicate(VM_Version::is_niagara_plus() && !n->bottom_type()->isa_oop_ptr() && (MacroAssembler::insts_for_set(n->get_ptr()) <= 3));
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}
3231
// Pointer Immediate fitting a signed 13-bit field.
// NOTE(review): the lower bound is strict, so -4096 itself is excluded
// even though simm13 spans [-4096, 4095]; conservative but correct.
operand immP13() %{
  predicate((-4096 < n->get_ptr()) && (n->get_ptr() <= 4095));
  match(ConP);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Null Pointer Immediate.
operand immP0() %{
  predicate(n->get_ptr() == 0);
  match(ConP);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}
3249
3250 operand immP_poll() %{
3651 match(RegFlags);
3652
3653 format %{ "ccr" %} // both ICC and XCC
3654 interface(REG_INTER);
3655 %}
3656
// Condition Code Register, unsigned comparisons.
operand flagsRegU() %{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  format %{ "icc_U" %}
  interface(REG_INTER);
%}

// Condition Code Register, pointer comparisons (64-bit, hence xcc).
operand flagsRegP() %{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  format %{ "xcc_P" %}
  interface(REG_INTER);
%}

// Condition Code Register, long comparisons (xcc).
operand flagsRegL() %{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  format %{ "xcc_L" %}
  interface(REG_INTER);
%}
3683
3684 // Condition Code Register, floating comparisons, unordered same as "less".
3685 operand flagsRegF() %{
3686 constraint(ALLOC_IN_RC(float_flags));
3687 match(RegFlags);
3688 match(flagsRegF0);
3689
3690 format %{ %}
3691 interface(REG_INTER);
4228 src : R(read);
4229 IALU : R;
4230 %}
4231
// Integer ALU reg conditional operation
// This instruction has a 1 cycle stall, and cannot execute
// in the same cycle as the instruction setting the condition
// code. We kludge this by pretending to read the condition code
// 1 cycle earlier, and by marking the functional units as busy
// for 2 cycles with the result available 1 cycle later than
// is really the case.
pipe_class ialu_reg_flags( iRegI op2_out, iRegI op2_in, iRegI op1, flagsReg cr ) %{
  single_instruction;
  op2_out : C(write);
  op1     : R(read);
  cr      : R(read);  // This is really E, with a 1 cycle stall
  BR      : R(2);
  MS      : R(2);
%}

// Clear-then-conditional-move pair (see matching instructs).
pipe_class ialu_clr_and_mover( iRegI dst, iRegP src ) %{
  instruction_count(1); multiple_bundles;
  dst     : C(write)+1;
  src     : R(read)+1;
  IALU    : R(1);
  BR      : E(2);
  MS      : E(2);
%}

// Integer ALU reg operation: long-to-int register move.
pipe_class ialu_move_reg_L_to_I(iRegI dst, iRegL src) %{
  single_instruction; may_have_no_code;
  dst     : E(write);
  src     : R(read);
  IALU    : R;
%}

// Integer ALU reg operation: int-to-long register move.
pipe_class ialu_move_reg_I_to_L(iRegL dst, iRegI src) %{
  single_instruction; may_have_no_code;
  dst     : E(write);
  src     : R(read);
  IALU    : R;
%}
4270
4271 // Two integer ALU reg operations
4272 pipe_class ialu_reg_2(iRegL dst, iRegL src) %{
4273 instruction_count(2);
4274 dst : E(write);
4275 src : R(read);
4340 dst : E(write)+1;
4341 IALU : R(2);
4342 %}
4343
// Long Constant (general case: two-instruction materialization)
pipe_class loadConL( iRegL dst, immL src ) %{
  instruction_count(2); multiple_bundles;
  dst     : E(write)+1;
  IALU    : R(2);
  IALU    : R(2);
%}

// Pointer Constant
pipe_class loadConP( iRegP dst, immP src ) %{
  instruction_count(0); multiple_bundles;
  fixed_latency(6);
%}

// Polling Address
pipe_class loadConP_poll( iRegP dst, immP_poll src ) %{
  instruction_count(0); multiple_bundles;
  fixed_latency(6);
%}

// Long Constant small (fits a short two-op sequence, no bundle split)
pipe_class loadConLlo( iRegL dst, immL src ) %{
  instruction_count(2);
  dst     : E(write);
  IALU    : R;
  IALU    : R;
%}

// [PHH] This is wrong for 64-bit. See LdImmF/D.
// Float/double constant load through a temp pointer register.
pipe_class loadConFD(regF dst, immF src, g3RegP tmp) %{
  instruction_count(1); multiple_bundles;
  src     : R(read);
  dst     : M(write)+1;
  IALU    : R;
  MS      : E;
%}
4380
4381 // Integer ALU nop operation
5082 match(Set dst src);
5083
5084 ins_cost(MEMORY_REF_COST);
5085 format %{ "LDX $src,$dst\t! long" %}
5086 opcode(Assembler::ldx_op3);
5087 ins_encode(simple_form3_mem_reg( src, dst ) );
5088 ins_pipe(iload_mem);
5089 %}
5090
// Store long to stack slot
instruct regL_to_stkL(stackSlotL dst, iRegL src) %{
  match(Set dst src);

  ins_cost(MEMORY_REF_COST);
  format %{ "STX    $src,$dst\t! long" %}
  opcode(Assembler::stx_op3);
  ins_encode(simple_form3_mem_reg( dst, src ) );
  ins_pipe(istore_mem_reg);
%}

// Load pointer from stack slot, 64-bit encoding
instruct stkP_to_regP( iRegP dst, stackSlotP src ) %{
  match(Set dst src);
  ins_cost(MEMORY_REF_COST);
  format %{ "LDX    $src,$dst\t!ptr" %}
  opcode(Assembler::ldx_op3);
  ins_encode(simple_form3_mem_reg( src, dst ) );
  ins_pipe(iload_mem);
%}

// Store pointer to stack slot
instruct regP_to_stkP(stackSlotP dst, iRegP src) %{
  match(Set dst src);
  ins_cost(MEMORY_REF_COST);
  format %{ "STX    $src,$dst\t!ptr" %}
  opcode(Assembler::stx_op3);
  ins_encode(simple_form3_mem_reg( dst, src ) );
  ins_pipe(istore_mem_reg);
%}
5121
//------------Special Nop instructions for bundling - no match rules-----------
// Nop using the A0 functional unit
instruct Nop_A0() %{
  ins_cost(0);

  format %{ "NOP    ! Alu Pipeline" %}
  opcode(Assembler::or_op3, Assembler::arith_op);
  ins_encode( form2_nop() );
  ins_pipe(ialu_nop_A0);
%}
5132
5133 // Nop using the A1 functional unit
5134 instruct Nop_A1( ) %{
5135 ins_cost(0);
5136
5137 format %{ "NOP ! Alu Pipeline" %}
5138 opcode(Assembler::or_op3, Assembler::arith_op);
5139 ins_encode( form2_nop() );
5140 ins_pipe(ialu_nop_A1);
5557 ins_pipe(iload_mem);
5558 %}
5559
// Load Integer into %f register (for fitos/fitod)
instruct loadI_freg(regF dst, memory mem) %{
  match(Set dst (LoadI mem));
  ins_cost(MEMORY_REF_COST);

  format %{ "LDF    $mem,$dst\t! for fitos/fitod" %}
  opcode(Assembler::ldf_op3);
  ins_encode(simple_form3_mem_reg( mem, dst ) );
  ins_pipe(floadF_mem);
%}

// Load Pointer (64-bit load)
instruct loadP(iRegP dst, memory mem) %{
  match(Set dst (LoadP mem));
  ins_cost(MEMORY_REF_COST);
  size(4);

  format %{ "LDX    $mem,$dst\t! ptr" %}
  ins_encode %{
    __ ldx($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}

// Load Compressed Pointer (32-bit zero-extending load)
instruct loadN(iRegN dst, memory mem) %{
  match(Set dst (LoadN mem));
  ins_cost(MEMORY_REF_COST);
  size(4);

  format %{ "LDUW   $mem,$dst\t! compressed ptr" %}
  ins_encode %{
    __ lduw($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}

// Load Klass Pointer
instruct loadKlass(iRegP dst, memory mem) %{
  match(Set dst (LoadKlass mem));
  ins_cost(MEMORY_REF_COST);
  size(4);

  format %{ "LDX    $mem,$dst\t! klass ptr" %}
  ins_encode %{
    __ ldx($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}

// Load narrow Klass Pointer
instruct loadNKlass(iRegN dst, memory mem) %{
  match(Set dst (LoadNKlass mem));
  ins_cost(MEMORY_REF_COST);
  size(4);

  format %{ "LDUW   $mem,$dst\t! compressed klass ptr" %}
  ins_encode %{
    __ lduw($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}
5622
5623 // Load Double
5624 instruct loadD(regD dst, memory mem) %{
5625 match(Set dst (LoadD mem));
5626 ins_cost(MEMORY_REF_COST);
5654 %}
5655
// Load Constant (general 32-bit integer; SETHI/OR pair)
instruct loadConI( iRegI dst, immI src ) %{
  match(Set dst src);
  ins_cost(DEFAULT_COST * 3/2);
  format %{ "SET    $src,$dst" %}
  ins_encode( Set32(src, dst) );
  ins_pipe(ialu_hi_lo_reg);
%}

// Load Constant fitting simm13: single OR-with-G0 (MOV).
instruct loadConI13( iRegI dst, immI13 src ) %{
  match(Set dst src);

  size(4);
  format %{ "MOV    $src,$dst" %}
  ins_encode( Set13( src, dst ) );
  ins_pipe(ialu_imm);
%}

// Materialize a 64-bit pointer constant with a SET sequence, applying
// oop/metadata relocation when the constant requires it.
instruct loadConP_set(iRegP dst, immP_set con) %{
  match(Set dst con);
  ins_cost(DEFAULT_COST * 3/2);
  format %{ "SET    $con,$dst\t! ptr" %}
  ins_encode %{
    relocInfo::relocType constant_reloc = _opnds[1]->constant_reloc();
    intptr_t val = $con$$constant;
    if (constant_reloc == relocInfo::oop_type) {
      __ set_oop_constant((jobject) val, $dst$$Register);
    } else if (constant_reloc == relocInfo::metadata_type) {
      __ set_metadata_constant((Metadata*)val, $dst$$Register);
    } else {          // non-oop pointers, e.g. card mark base, heap top
      assert(constant_reloc == relocInfo::none, "unexpected reloc type");
      __ set(val, $dst$$Register);
    }
  %}
  ins_pipe(loadConP);
%}
5692
5693 instruct loadConP_load(iRegP dst, immP_load con) %{
5697 ins_encode %{
5698 RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset($con), $dst$$Register);
5699 __ ld_ptr($constanttablebase, con_offset, $dst$$Register);
5700 %}
5701 ins_pipe(loadConP);
5702 %}
5703
// Cheap non-oop pointer constant (SET sequence of <= 3 instructions);
// metadata still needs its relocation recorded.
instruct loadConP_no_oop_cheap(iRegP dst, immP_no_oop_cheap con) %{
  match(Set dst con);
  ins_cost(DEFAULT_COST * 3/2);
  format %{ "SET    $con,$dst\t! non-oop ptr" %}
  ins_encode %{
    if (_opnds[1]->constant_reloc() == relocInfo::metadata_type) {
      __ set_metadata_constant((Metadata*)$con$$constant, $dst$$Register);
    } else {
      __ set($con$$constant, $dst$$Register);
    }
  %}
  ins_pipe(loadConP);
%}

// Null pointer constant: a single clear.
instruct loadConP0(iRegP dst, immP0 src) %{
  match(Set dst src);

  size(4);
  format %{ "CLR    $dst\t!ptr" %}
  ins_encode %{
    __ clr($dst$$Register);
  %}
  ins_pipe(ialu_imm);
%}
5728
5729 instruct loadConP_poll(iRegP dst, immP_poll src) %{
5730 match(Set dst src);
5731 ins_cost(DEFAULT_COST);
5732 format %{ "SET $src,$dst\t!ptr" %}
5733 ins_encode %{
5734 AddressLiteral polling_page(os::get_polling_page());
5735 __ sethi(polling_page, reg_to_register_object($dst$$reg));
5736 %}
5850 ins_encode( form3_mem_prefetch_write( mem ) );
5851 ins_pipe(iload_mem);
5852 %}
5853
// Use BIS instruction to prefetch for allocation.
// Could fault, need space at the end of TLAB.
instruct prefetchAlloc_bis( iRegP dst ) %{
  predicate(AllocatePrefetchInstr == 1);
  match( PrefetchAllocation dst );
  ins_cost(MEMORY_REF_COST);
  size(4);

  format %{ "STXA   [$dst]\t! // Prefetch allocation using BIS" %}
  ins_encode %{
    // Block-init store of G0: allocates the cache line without fetching it.
    __ stxa(G0, $dst$$Register, G0, Assembler::ASI_ST_BLKINIT_PRIMARY);
  %}
  ins_pipe(istore_mem_reg);
%}

// Next code is used for finding next cache line address to prefetch.
instruct cacheLineAdr( iRegP dst, iRegP src, immL13 mask ) %{
  match(Set dst (CastX2P (AndL (CastP2X src) mask)));
  ins_cost(DEFAULT_COST);
  size(4);

  format %{ "AND    $src,$mask,$dst\t! next cache line address" %}
  ins_encode %{
    __ and3($src$$Register, $mask$$constant, $dst$$Register);
  %}
  ins_pipe(ialu_reg_imm);
%}
5881
//----------Store Instructions-------------------------------------------------
// Store Byte
instruct storeB(memory mem, iRegI src) %{
  match(Set mem (StoreB mem src));
  ins_cost(MEMORY_REF_COST);

  format %{ "STB    $src,$mem\t! byte" %}
  opcode(Assembler::stb_op3);
  ins_encode(simple_form3_mem_reg( mem, src ) );
  ins_pipe(istore_mem_reg);
%}
5893
5894 instruct storeB0(memory mem, immI0 src) %{
5895 match(Set mem (StoreB mem src));
5896 ins_cost(MEMORY_REF_COST);
5897
5898 format %{ "STB $src,$mem\t! byte" %}
5899 opcode(Assembler::stb_op3);
5900 ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
5972 ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
5973 ins_pipe(istore_mem_zero);
5974 %}
5975
5976 // Store Integer from float register (used after fstoi)
// Store integer bits directly from a float register (STF), used for the
// result of fstoi/fdtoi so the value need not move to an integer register.
5977 instruct storeI_Freg(memory mem, regF src) %{
5978 match(Set mem (StoreI mem src));
5979 ins_cost(MEMORY_REF_COST);
5980
5981 format %{ "STF $src,$mem\t! after fstoi/fdtoi" %}
5982 opcode(Assembler::stf_op3);
5983 ins_encode(simple_form3_mem_reg( mem, src ) );
5984 ins_pipe(fstoreF_mem_reg);
5985 %}
5986
5987 // Store Pointer
// Store pointer: 64-bit STX; src may be SP/FP as well as a normal
// pointer register (sp_ptr_RegP operand class).
5988 instruct storeP(memory dst, sp_ptr_RegP src) %{
5989 match(Set dst (StoreP dst src));
5990 ins_cost(MEMORY_REF_COST);
5991
5992 format %{ "STX $src,$dst\t! ptr" %}
5993 opcode(Assembler::stx_op3, 0, REGP_OP);
5994 ins_encode( form3_mem_reg( dst, src ) );
5995 ins_pipe(istore_mem_spORreg);
5996 %}
5997
// Store null pointer: STX of the hard-wired zero register G0, avoiding
// materializing the constant 0 in a register.
5998 instruct storeP0(memory dst, immP0 src) %{
5999 match(Set dst (StoreP dst src));
6000 ins_cost(MEMORY_REF_COST);
6001
6002 format %{ "STX $src,$dst\t! ptr" %}
6003 opcode(Assembler::stx_op3, 0, REGP_OP);
6004 ins_encode( form3_mem_reg( dst, R_G0 ) );
6005 ins_pipe(istore_mem_zero);
6006 %}
6007
6008 // Store Compressed Pointer
6009 instruct storeN(memory dst, iRegN src) %{
6010 match(Set dst (StoreN dst src));
6011 ins_cost(MEMORY_REF_COST);
6012 size(4);
6013
6014 format %{ "STW $src,$dst\t! compressed ptr" %}
6015 ins_encode %{
6016 Register base = as_Register($dst$$base);
6017 Register index = as_Register($dst$$index);
6018 Register src = $src$$Register;
6019 if (index != G0) {
6020 __ stw(src, base, index);
6021 } else {
6022 __ stw(src, base, $dst$$disp);
6023 }
// Add long with a 13-bit signed immediate: single ADD instruction.
6734 instruct addL_reg_imm13(iRegL dst, iRegL src1, immL13 con) %{
6735 match(Set dst (AddL src1 con));
6736
6737 size(4);
6738 format %{ "ADD $src1,$con,$dst" %}
6739 opcode(Assembler::add_op3, Assembler::arith_op);
6740 ins_encode( form3_rs1_simm13_rd( src1, con, dst ) );
6741 ins_pipe(ialu_reg_imm);
6742 %}
6743
6744 //----------Conditional_store--------------------------------------------------
6745 // Conditional-store of the updated heap-top.
6746 // Used during allocation of the shared heap.
6747 // Sets flags (EQ) on success. Implemented with a CASA on Sparc.
6748
6749 // LoadP-locked. Same as a regular pointer load when used with a compare-swap
// LoadPLocked: the "locked" pointer load paired with a later conditional
// store; on SPARC this is just a plain LDX (the CASA does the atomicity).
6750 instruct loadPLocked(iRegP dst, memory mem) %{
6751 match(Set dst (LoadPLocked mem));
6752 ins_cost(MEMORY_REF_COST);
6753
6754 format %{ "LDX $mem,$dst\t! ptr" %}
6755 opcode(Assembler::ldx_op3, 0, REGP_OP);
6756 ins_encode( form3_mem_reg( mem, dst ) );
6757 ins_pipe(iload_mem);
6758 %}
6759
// Conditional store of the updated heap top, via CASA plus CMP so the
// flags (EQ on success) are set for the following branch. newval is
// pinned to G3 (g3RegP) and clobbered by the CAS (KILL newval).
6760 instruct storePConditional( iRegP heap_top_ptr, iRegP oldval, g3RegP newval, flagsRegP pcc ) %{
6761 match(Set pcc (StorePConditional heap_top_ptr (Binary oldval newval)));
6762 effect( KILL newval );
6763 format %{ "CASA [$heap_top_ptr],$oldval,R_G3\t! If $oldval==[$heap_top_ptr] Then store R_G3 into [$heap_top_ptr], set R_G3=[$heap_top_ptr] in any case\n\t"
6764 "CMP R_G3,$oldval\t\t! See if we made progress" %}
6765 ins_encode( enc_cas(heap_top_ptr,oldval,newval) );
6766 ins_pipe( long_memory_op );
6767 %}
6768
6769 // Conditional-store of an int value.
6770 instruct storeIConditional( iRegP mem_ptr, iRegI oldval, g3RegI newval, flagsReg icc ) %{
6771 match(Set icc (StoreIConditional mem_ptr (Binary oldval newval)));
6772 effect( KILL newval );
6773 format %{ "CASA [$mem_ptr],$oldval,$newval\t! If $oldval==[$mem_ptr] Then store $newval into [$mem_ptr], set $newval=[$mem_ptr] in any case\n\t"
6774 "CMP $oldval,$newval\t\t! See if we made progress" %}
6775 ins_encode( enc_cas(mem_ptr,oldval,newval) );
6806 %}
6807
6808
// Int compare-and-swap returning a boolean result (also matches the weak
// form). newval is copied into the O7 temp (tmp1, killed) for the CASA;
// the icc-based move then materializes 1/0 into res. Kills ccr.
6809 instruct compareAndSwapI_bool(iRegP mem_ptr, iRegI oldval, iRegI newval, iRegI res, o7RegI tmp1, flagsReg ccr ) %{
6810 match(Set res (CompareAndSwapI mem_ptr (Binary oldval newval)));
6811 match(Set res (WeakCompareAndSwapI mem_ptr (Binary oldval newval)));
6812 effect( USE mem_ptr, KILL ccr, KILL tmp1);
6813 format %{
6814 "MOV $newval,O7\n\t"
6815 "CASA [$mem_ptr],$oldval,O7\t! If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t"
6816 "CMP $oldval,O7\t\t! See if we made progress\n\t"
6817 "MOV 1,$res\n\t"
6818 "MOVne icc,R_G0,$res"
6819 %}
6820 ins_encode( enc_casi(mem_ptr, oldval, newval),
6821 enc_iflags_ne_to_boolean(res) );
6822 ins_pipe( long_memory_op );
6823 %}
6824
// Pointer (64-bit) compare-and-swap returning a boolean; guarded by
// supports_cx8(). Uses the 64-bit CASA form (enc_casx) and xcc-based
// boolean materialization. O7 temp and ccr are clobbered.
6825 instruct compareAndSwapP_bool(iRegP mem_ptr, iRegP oldval, iRegP newval, iRegI res, o7RegI tmp1, flagsReg ccr ) %{
6826 predicate(VM_Version::supports_cx8());
6827 match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval)));
6828 match(Set res (WeakCompareAndSwapP mem_ptr (Binary oldval newval)));
6829 effect( USE mem_ptr, KILL ccr, KILL tmp1);
6830 format %{
6831 "MOV $newval,O7\n\t"
6832 "CASA_PTR [$mem_ptr],$oldval,O7\t! If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t"
6833 "CMP $oldval,O7\t\t! See if we made progress\n\t"
6834 "MOV 1,$res\n\t"
6835 "MOVne xcc,R_G0,$res"
6836 %}
6837 ins_encode( enc_casx(mem_ptr, oldval, newval),
6838 enc_lflags_ne_to_boolean(res) );
6839 ins_pipe( long_memory_op );
6840 %}
6841
// Compressed-pointer (32-bit) compare-and-swap returning a boolean.
// Same shape as compareAndSwapI_bool: 32-bit CASA (enc_casi) with icc
// boolean materialization; O7 temp and ccr are clobbered.
6842 instruct compareAndSwapN_bool(iRegP mem_ptr, iRegN oldval, iRegN newval, iRegI res, o7RegI tmp1, flagsReg ccr ) %{
6843 match(Set res (CompareAndSwapN mem_ptr (Binary oldval newval)));
6844 match(Set res (WeakCompareAndSwapN mem_ptr (Binary oldval newval)));
6845 effect( USE mem_ptr, KILL ccr, KILL tmp1);
6846 format %{
6847 "MOV $newval,O7\n\t"
6848 "CASA [$mem_ptr],$oldval,O7\t! If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t"
6849 "CMP $oldval,O7\t\t! See if we made progress\n\t"
6850 "MOV 1,$res\n\t"
6851 "MOVne icc,R_G0,$res"
6852 %}
6853 ins_encode( enc_casi(mem_ptr, oldval, newval),
6854 enc_iflags_ne_to_boolean(res) );
6855 ins_pipe( long_memory_op );
6856 %}
6857
6858 instruct compareAndExchangeI(iRegP mem_ptr, iRegI oldval, iRegI newval)
6896 match(Set newval (CompareAndExchangeN mem_ptr (Binary oldval newval)));
6897 effect( USE mem_ptr );
6898
6899 format %{
6900 "CASA [$mem_ptr],$oldval,$newval\t! If $oldval==[$mem_ptr] Then store $newval into [$mem_ptr] and set $newval=[$mem_ptr]\n\t"
6901 %}
6902 ins_encode( enc_casi_exch(mem_ptr, oldval, newval) );
6903 ins_pipe( long_memory_op );
6904 %}
6905
// Atomic exchange of an int with memory using the SWAP instruction.
6906 instruct xchgI( memory mem, iRegI newval) %{
6907 match(Set newval (GetAndSetI mem newval));
6908 format %{ "SWAP [$mem],$newval" %}
6909 size(4);
6910 ins_encode %{
6911 __ swap($mem$$Address, $newval$$Register);
6912 %}
6913 ins_pipe( long_memory_op );
6914 %}
6915
6916
// Atomic exchange of a compressed pointer (32-bit) with memory; same
// SWAP instruction as xchgI since narrow oops are 32 bits wide.
6917 instruct xchgN( memory mem, iRegN newval) %{
6918 match(Set newval (GetAndSetN mem newval));
6919 format %{ "SWAP [$mem],$newval" %}
6920 size(4);
6921 ins_encode %{
6922 __ swap($mem$$Address, $newval$$Register);
6923 %}
6924 ins_pipe( long_memory_op );
6925 %}
6926
6927 //---------------------
6928 // Subtraction Instructions
6929 // Register Subtraction
6930 instruct subI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
6931 match(Set dst (SubI src1 src2));
6932
6933 size(4);
6934 format %{ "SUB $src1,$src2,$dst" %}
6935 opcode(Assembler::sub_op3, Assembler::arith_op);
7357
7358 size(4);
7359 format %{ "SRLX $src1,$src2,$dst" %}
7360 opcode(Assembler::srlx_op3, Assembler::arith_op);
7361 ins_encode( form3_sd_rs1_rs2_rd( src1, src2, dst ) );
7362 ins_pipe(ialu_reg_reg);
7363 %}
7364
7365 // Register Shift Right Immediate
// Unsigned right shift of a long by a 6-bit immediate (0..63): SRLX.
7366 instruct shrL_reg_imm6(iRegL dst, iRegL src1, immU6 src2) %{
7367 match(Set dst (URShiftL src1 src2));
7368
7369 size(4);
7370 format %{ "SRLX $src1,$src2,$dst" %}
7371 opcode(Assembler::srlx_op3, Assembler::arith_op);
7372 ins_encode( form3_sd_rs1_imm6_rd( src1, src2, dst ) );
7373 ins_pipe(ialu_reg_imm);
7374 %}
7375
7376 // Register Shift Right Immediate with a CastP2X
// Unsigned right shift of a pointer cast to long (CastP2X folded into the
// shift): same SRLX encoding, reading the pointer register directly.
7377 instruct shrP_reg_imm6(iRegL dst, iRegP src1, immU6 src2) %{
7378 match(Set dst (URShiftL (CastP2X src1) src2));
7379 size(4);
7380 format %{ "SRLX $src1,$src2,$dst\t! Cast ptr $src1 to long and shift" %}
7381 opcode(Assembler::srlx_op3, Assembler::arith_op);
7382 ins_encode( form3_sd_rs1_imm6_rd( src1, src2, dst ) );
7383 ins_pipe(ialu_reg_imm);
7384 %}
7385
7386
7387 //----------Floating Point Arithmetic Instructions-----------------------------
7388
7389 // Add float single precision
// Single-precision float add: FADDS.
7390 instruct addF_reg_reg(regF dst, regF src1, regF src2) %{
7391 match(Set dst (AddF src1 src2));
7392
7393 size(4);
7394 format %{ "FADDS $src1,$src2,$dst" %}
7395 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fadds_opf);
7396 ins_encode(form3_opf_rs1F_rs2F_rdF(src1, src2, dst));
7397 ins_pipe(faddF_reg_reg);
7398 %}
7399
7400 // Add float double precision
7401 instruct addD_reg_reg(regD dst, regD src1, regD src2) %{
7402 match(Set dst (AddD src1 src2));
7403
7404 size(4);
7607 ins_cost(DEFAULT_COST);
7608 size(4);
7609 format %{ "OR $src1,$src2,$dst\t! long" %}
7610 opcode(Assembler::or_op3, Assembler::arith_op);
7611 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7612 ins_pipe(ialu_reg_reg);
7613 %}
7614
// Or long with a 13-bit signed immediate: a single OR instruction.
// Cost is DEFAULT_COST, consistent with the sibling reg-reg OR rules.
// (The original carried two conflicting ins_cost declarations,
// DEFAULT_COST*2 then DEFAULT_COST; the stale first one is dropped so the
// declared cost is unambiguous.)
7615 instruct orL_reg_imm13(iRegL dst, iRegL src1, immL13 con) %{
7616 match(Set dst (OrL src1 con));
7619 ins_cost(DEFAULT_COST);
7620 size(4);
7621 format %{ "OR $src1,$con,$dst\t! long" %}
7622 opcode(Assembler::or_op3, Assembler::arith_op);
7623 ins_encode( form3_rs1_simm13_rd( src1, con, dst ) );
7624 ins_pipe(ialu_reg_imm);
7625 %}
7626
// Or long with a pointer cast to long (CastP2X folded away): plain
// reg-reg OR reading the pointer register (may be SP/FP via sp_ptr_RegP).
7627 instruct orL_reg_castP2X(iRegL dst, iRegL src1, sp_ptr_RegP src2) %{
7628 match(Set dst (OrL src1 (CastP2X src2)));
7629
7630 ins_cost(DEFAULT_COST);
7631 size(4);
7632 format %{ "OR $src1,$src2,$dst\t! long" %}
7633 opcode(Assembler::or_op3, Assembler::arith_op);
7634 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7635 ins_pipe(ialu_reg_reg);
7636 %}
7637
7638 // Xor Instructions
7639 // Register Xor
// Register-register int XOR.
7640 instruct xorI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
7641 match(Set dst (XorI src1 src2));
7642
7643 size(4);
7644 format %{ "XOR $src1,$src2,$dst" %}
7645 opcode(Assembler::xor_op3, Assembler::arith_op);
7646 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7647 ins_pipe(ialu_reg_reg);
7648 %}
7649
7650 // Immediate Xor
7651 instruct xorI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{
7652 match(Set dst (XorI src1 src2));
7653
7654 size(4);
7655 format %{ "XOR $src1,$src2,$dst" %}
7656 opcode(Assembler::xor_op3, Assembler::arith_op);
7657 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
7677 size(4);
7678 format %{ "XOR $src1,$con,$dst\t! long" %}
7679 opcode(Assembler::xor_op3, Assembler::arith_op);
7680 ins_encode( form3_rs1_simm13_rd( src1, con, dst ) );
7681 ins_pipe(ialu_reg_imm);
7682 %}
7683
7684 //----------Convert to Boolean-------------------------------------------------
7685 // Nice hack for 32-bit tests but doesn't work for
7686 // 64-bit pointers.
// Convert int to boolean (0 -> 0, nonzero -> 1) via the carry trick:
// CMP G0,src sets carry iff src != 0, then ADDX G0,0,dst captures it.
// Per the preceding comment this works only for 32-bit values, not
// 64-bit pointers. Kills ccr (the CMP writes condition codes).
7687 instruct convI2B( iRegI dst, iRegI src, flagsReg ccr ) %{
7688 match(Set dst (Conv2B src));
7689 effect( KILL ccr );
7690 ins_cost(DEFAULT_COST*2);
7691 format %{ "CMP R_G0,$src\n\t"
7692 "ADDX R_G0,0,$dst" %}
7693 ins_encode( enc_to_bool( src, dst ) );
7694 ins_pipe(ialu_reg_ialu);
7695 %}
7696
// Convert pointer to boolean without touching condition codes:
// copy src to dst, then MOVRNZ (move-on-register-nonzero) writes 1
// into dst when src != 0. Handles full 64-bit pointers.
7697 instruct convP2B( iRegI dst, iRegP src ) %{
7698 match(Set dst (Conv2B src));
7699 ins_cost(DEFAULT_COST*2);
7700 format %{ "MOV $src,$dst\n\t"
7701 "MOVRNZ $src,1,$dst" %}
7702 ins_encode( form3_g0_rs2_rd_move( src, dst ), enc_convP2B( dst, src ) );
7703 ins_pipe(ialu_clr_and_mover);
7704 %}
7705
// CmpLTMask against zero: SRA src,31 smears the sign bit, producing
// all-ones (-1) when src < 0 and 0 otherwise.
// NOTE(review): SRA does not modify condition codes, yet ccr is declared
// KILL here — presumably conservative/for matcher symmetry with the
// general cmpLTMask rule; confirm before relying on ccr surviving.
7706 instruct cmpLTMask0( iRegI dst, iRegI src, immI0 zero, flagsReg ccr ) %{
7707 match(Set dst (CmpLTMask src zero));
7708 effect(KILL ccr);
7709 size(4);
7710 format %{ "SRA $src,#31,$dst\t# cmpLTMask0" %}
7711 ins_encode %{
7712 __ sra($src$$Register, 31, $dst$$Register);
7713 %}
7714 ins_pipe(ialu_reg_imm);
7715 %}
7716
7717 instruct cmpLTMask_reg_reg( iRegI dst, iRegI p, iRegI q, flagsReg ccr ) %{
7718 match(Set dst (CmpLTMask p q));
7719 effect( KILL ccr );
7720 ins_cost(DEFAULT_COST*4);
7721 format %{ "CMP $p,$q\n\t"
7722 "MOV #0,$dst\n\t"
7723 "BLT,a .+8\n\t"
7724 "MOV #-1,$dst" %}
8327 stkL_to_regD(tmp, src);
8328 convL2F_helper(dst, tmp);
8329 %}
8330 %}
8331
// Convert long to float on VIS3-capable hardware: move the long directly
// into a double FP register (MoveL2D_reg_reg, no stack round-trip), then
// convert via convL2F_helper. Guarded by UseVIS >= 3.
8332 instruct convL2F_reg(regF dst, iRegL src) %{
8333 predicate(UseVIS >= 3);
8334 match(Set dst (ConvL2F src));
8335 ins_cost(DEFAULT_COST);
8336 expand %{
8337 regD tmp;
8338 MoveL2D_reg_reg(tmp, src);
8339 convL2F_helper(dst, tmp);
8340 %}
8341 %}
8342
8343 //-----------
8344
// Convert long to int: SRA with shift count G0 (i.e. shift by 0), which
// writes the sign-extended low 32 bits of src into dst.
8345 instruct convL2I_reg(iRegI dst, iRegL src) %{
8346 match(Set dst (ConvL2I src));
8347 size(4);
8348 format %{ "SRA $src,R_G0,$dst\t! long->int" %}
8349 ins_encode( form3_rs1_rd_signextend_lo1( src, dst ) );
8350 ins_pipe(ialu_reg);
8351 %}
8352
8353 // Register Shift Right Immediate
// Fused (ConvL2I (RShiftL src cnt)) for shift counts 32..63: a single
// SRAX suffices because the result already fits the low 32 bits.
8354 instruct shrL_reg_imm6_L2I(iRegI dst, iRegL src, immI_32_63 cnt) %{
8355 match(Set dst (ConvL2I (RShiftL src cnt)));
8356
8357 size(4);
8358 format %{ "SRAX $src,$cnt,$dst" %}
8359 opcode(Assembler::srax_op3, Assembler::arith_op);
8360 ins_encode( form3_sd_rs1_imm6_rd( src, cnt, dst ) );
8361 ins_pipe(ialu_reg_imm);
8362 %}
8363
8364 //----------Control Flow Instructions------------------------------------------
8365 // Compare Instructions
8366 // Compare Integers
8367 instruct compI_iReg(flagsReg icc, iRegI op1, iRegI op2) %{
8368 match(Set icc (CmpI op1 op2));
8369 effect( DEF icc, USE op1, USE op2 );
8370
9099 ins_cost(BRANCH_COST);
9100 format %{ "CXB$cmp $op1,$op2,$labl\t! long" %}
9101 ins_encode %{
9102 Label* L = $labl$$label;
9103 assert(__ use_cbcond(*L), "back to back cbcond");
9104 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::xcc, $op1$$Register, $op2$$constant, *L);
9105 %}
9106 ins_short_branch(1);
9107 ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
9108 ins_pipe(cbcond_reg_imm);
9109 %}
9110
9111 // Compare Pointers and branch
// Short-form compare-pointers-and-branch using the fused CBCOND
// instruction (SPARC-T4+, gated by UseCBCond). Single 4-byte
// instruction; kills pcc. Marked short-branch and must not be placed
// back-to-back with another cbcond (AVOID_BEFORE_AND_AFTER).
9112 instruct cmpP_reg_branch_short(cmpOpP cmp, iRegP op1, iRegP op2, label labl, flagsRegP pcc) %{
9113 match(If cmp (CmpP op1 op2));
9114 predicate(UseCBCond);
9115 effect(USE labl, KILL pcc);
9116
9117 size(4);
9118 ins_cost(BRANCH_COST);
9119 format %{ "CXB$cmp $op1,$op2,$labl\t! ptr" %}
9120 ins_encode %{
9121 Label* L = $labl$$label;
9122 assert(__ use_cbcond(*L), "back to back cbcond");
9123 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::ptr_cc, $op1$$Register, $op2$$Register, *L);
9124 %}
9125 ins_short_branch(1);
9126 ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
9127 ins_pipe(cbcond_reg_reg);
9128 %}
9129
// Short-form compare-pointer-against-null-and-branch via CBCOND,
// comparing op1 against the hard-wired zero register G0 instead of
// materializing null. Same constraints as cmpP_reg_branch_short.
9130 instruct cmpP_null_branch_short(cmpOpP cmp, iRegP op1, immP0 null, label labl, flagsRegP pcc) %{
9131 match(If cmp (CmpP op1 null));
9132 predicate(UseCBCond);
9133 effect(USE labl, KILL pcc);
9134
9135 size(4);
9136 ins_cost(BRANCH_COST);
9137 format %{ "CXB$cmp $op1,0,$labl\t! ptr" %}
9138 ins_encode %{
9139 Label* L = $labl$$label;
9140 assert(__ use_cbcond(*L), "back to back cbcond");
9141 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::ptr_cc, $op1$$Register, G0, *L);
9142 %}
9143 ins_short_branch(1);
9144 ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
9145 ins_pipe(cbcond_reg_reg);
9146 %}
9147
9148 instruct cmpN_reg_branch_short(cmpOp cmp, iRegN op1, iRegN op2, label labl, flagsReg icc) %{
9149 match(If cmp (CmpN op1 op2));
9150 predicate(UseCBCond);
9151 effect(USE labl, KILL icc);
9152
9153 size(4);
9154 ins_cost(BRANCH_COST);
9155 format %{ "CWB$cmp $op1,$op2,$labl\t! compressed ptr" %}
9156 ins_encode %{
9157 Label* L = $labl$$label;
9385 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::xcc)) );
9386 ins_pipe(int_conditional_float_move);
9387 %}
9388
// Conditional move of a double FP register based on the long condition
// codes: FMOVD predicated on cmp over xcc.
9389 instruct cmovDL_reg(cmpOp cmp, flagsRegL xcc, regD dst, regD src) %{
9390 match(Set dst (CMoveD (Binary cmp xcc) (Binary dst src)));
9391 ins_cost(150);
9392 opcode(0x102);
9393 format %{ "FMOVD$cmp $xcc,$src,$dst" %}
9394 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::xcc)) );
9395 ins_pipe(int_conditional_float_move);
9396 %}
9397
9398 // ============================================================================
9399 // Safepoint Instruction
// Safepoint poll: a load from the polling page into G0 (result
// discarded). The relocate(poll_type) record lets the VM identify this
// PC as a poll site; protecting the page then traps threads here.
9400 instruct safePoint_poll(iRegP poll) %{
9401 match(SafePoint poll);
9402 effect(USE poll);
9403
9404 size(4);
9405 format %{ "LDX [$poll],R_G0\t! Safepoint: poll for GC" %}
9406 ins_encode %{
9407 __ relocate(relocInfo::poll_type);
9408 __ ld_ptr($poll$$Register, 0, G0);
9409 %}
9410 ins_pipe(loadPollP);
9411 %}
9412
9413 // ============================================================================
9414 // Call Instructions
9415 // Call Java Static Instruction
9416 instruct CallStaticJavaDirect( method meth ) %{
9417 match(CallStaticJava);
9418 predicate(! ((CallStaticJavaNode*)n)->is_method_handle_invoke());
9419 effect(USE meth);
9420
9421 size(8);
9422 ins_cost(CALL_COST);
9423 format %{ "CALL,static ; NOP ==> " %}
9424 ins_encode( Java_Static_Call( meth ), call_epilog );
9425 ins_avoid_back_to_back(AVOID_BEFORE);
|