src/cpu/x86/vm/x86_32.ad
Index Unified diffs Context diffs Sdiffs Wdiffs Patch New Old Previous File Next File 7145346 Sdiff src/cpu/x86/vm

src/cpu/x86/vm/x86_32.ad

Print this page


   1 //
   2 // Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
   3 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4 //
   5 // This code is free software; you can redistribute it and/or modify it
   6 // under the terms of the GNU General Public License version 2 only, as
   7 // published by the Free Software Foundation.
   8 //
   9 // This code is distributed in the hope that it will be useful, but WITHOUT
  10 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12 // version 2 for more details (a copy is included in the LICENSE file that
  13 // accompanied this code).
  14 //
  15 // You should have received a copy of the GNU General Public License version
  16 // 2 along with this work; if not, write to the Free Software Foundation,
  17 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18 //
  19 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20 // or visit www.oracle.com if you need additional information or have any
  21 // questions.
  22 //


 324 }
 325 
 326 // The address of the call instruction needs to be 4-byte aligned to
 327 // ensure that it does not span a cache line so that it can be patched.
 328 int CallStaticJavaHandleNode::compute_padding(int current_offset) const {
 329   current_offset += pre_call_FPU_size();  // skip fldcw, if any
 330   current_offset += preserve_SP_size();   // skip mov rbp, rsp
 331   current_offset += 1;      // skip call opcode byte
 332   return round_to(current_offset, alignment_required()) - current_offset;
 333 }
 334 
 335 // The address of the call instruction needs to be 4-byte aligned to
 336 // ensure that it does not span a cache line so that it can be patched.
 337 int CallDynamicJavaDirectNode::compute_padding(int current_offset) const {
 338   current_offset += pre_call_FPU_size();  // skip fldcw, if any
 339   current_offset += 5;      // skip MOV instruction
 340   current_offset += 1;      // skip call opcode byte
 341   return round_to(current_offset, alignment_required()) - current_offset;
 342 }
 343 
#ifndef PRODUCT
// Debug-only listing: a breakpoint node prints as the x86 INT3 instruction.
void MachBreakpointNode::format( PhaseRegAlloc *, outputStream* st ) const {
  st->print("INT3");
}
#endif
 349 
 350 // EMIT_RM()
 351 void emit_rm(CodeBuffer &cbuf, int f1, int f2, int f3) {
 352   unsigned char c = (unsigned char)((f1 << 6) | (f2 << 3) | f3);
 353   cbuf.insts()->emit_int8(c);
 354 }
 355 
 356 // EMIT_CC()
 357 void emit_cc(CodeBuffer &cbuf, int f1, int f2) {
 358   unsigned char c = (unsigned char)( f1 | f2 );
 359   cbuf.insts()->emit_int8(c);
 360 }
 361 
// EMIT_OPCODE()
// Append a single opcode byte to the instruction stream.
void emit_opcode(CodeBuffer &cbuf, int code) {
  cbuf.insts()->emit_int8((unsigned char) code);
}
 366 
 367 // EMIT_OPCODE() w/ relocation information
 368 void emit_opcode(CodeBuffer &cbuf, int code, relocInfo::relocType reloc, int offset = 0) {
 369   cbuf.relocate(cbuf.insts_mark() + offset, reloc);


1100     return size;               // Self copy; no move
1101   assert( src_second_rc != rc_bad && dst_second_rc != rc_bad, "src_second & dst_second cannot be Bad" );
1102 
1103   // Check for second word int-int move
1104   if( src_second_rc == rc_int && dst_second_rc == rc_int )
1105     return impl_mov_helper(cbuf,do_size,src_second,dst_second,size, st);
1106 
1107   // Check for second word integer store
1108   if( src_second_rc == rc_int && dst_second_rc == rc_stack )
1109     return impl_helper(cbuf,do_size,false,ra_->reg2offset(dst_second),src_second,0x89,"MOV ",size, st);
1110 
1111   // Check for second word integer load
1112   if( dst_second_rc == rc_int && src_second_rc == rc_stack )
1113     return impl_helper(cbuf,do_size,true ,ra_->reg2offset(src_second),dst_second,0x8B,"MOV ",size, st);
1114 
1115 
1116   Unimplemented();
1117 }
1118 
#ifndef PRODUCT
// Debug-only listing of a spill copy: run implementation() with a NULL
// code buffer and do_size == false so it only formats to the stream.
void MachSpillCopyNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  implementation( NULL, ra_, false, st );
}
#endif
1124 
// Emit the spill copy into the code buffer (do_size == false, no stream).
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation( &cbuf, ra_, false, NULL );
}
1128 
// Size in bytes of the spill copy: run implementation() in size-only mode
// (NULL code buffer, do_size == true) and return its byte count.
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return implementation( NULL, ra_, true, NULL );
}
1132 
1133 //=============================================================================
#ifndef PRODUCT
// Debug-only listing: show how many pad bytes this nop covers.
void MachNopNode::format( PhaseRegAlloc *, outputStream* st ) const {
  st->print("NOP \t# %d bytes pad for loops and calls", _count);
}
#endif
1139 
// Emit _count bytes of nop padding via the macro assembler.
void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc * ) const {
  MacroAssembler _masm(&cbuf);
  __ nop(_count);
}
1144 
// A nop node occupies exactly _count bytes.
uint MachNopNode::size(PhaseRegAlloc *) const {
  return _count;
}
1148 
1149 
1150 //=============================================================================
#ifndef PRODUCT
// Debug-only listing of a BoxLock: an LEA of the lock's stack-slot address
// into the allocated register.
void BoxLockNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_reg_first(this);
  st->print("LEA    %s,[ESP + #%d]",Matcher::regName[reg],offset);
}
#endif
1158 
1159 void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1160   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
1161   int reg = ra_->get_encode(this);
1162   if( offset >= 128 ) {
1163     emit_opcode(cbuf, 0x8D);      // LEA  reg,[SP+offset]
1164     emit_rm(cbuf, 0x2, reg, 0x04);
1165     emit_rm(cbuf, 0x0, 0x04, ESP_enc);
1166     emit_d32(cbuf, offset);
1167   }
1168   else {
1169     emit_opcode(cbuf, 0x8D);      // LEA  reg,[SP+offset]


1814 
  enc_class pre_call_FPU %{
    // If method sets FPU control word restore it here
    // (reload the standard control word before the call when the method
    // runs in 24-bit FP mode).
    debug_only(int off0 = cbuf.insts_size());
    if( Compile::current()->in_24_bit_fp_mode() ) {
      MacroAssembler masm(&cbuf);
      masm.fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
    }
    debug_only(int off1 = cbuf.insts_size());
    // pre_call_FPU_size() must predict exactly the bytes emitted above;
    // compute_padding() relies on that prediction.
    assert(off1 - off0 == pre_call_FPU_size(), "correct size prediction");
  %}
1825 
  enc_class post_call_FPU %{
    // If method sets FPU control word do it here also
    // (re-install the 24-bit control word after the call, undoing the
    // std word loaded by pre_call_FPU).
    if( Compile::current()->in_24_bit_fp_mode() ) {
      MacroAssembler masm(&cbuf);
      masm.fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_24()));
    }
  %}
1833 
  enc_class preserve_SP %{
    debug_only(int off0 = cbuf.insts_size());
    MacroAssembler _masm(&cbuf);
    // RBP is preserved across all calls, even compiled calls.
    // Use it to preserve RSP in places where the callee might change the SP.
    __ movptr(rbp_mh_SP_save, rsp);
    debug_only(int off1 = cbuf.insts_size());
    // preserve_SP_size() must match the emitted byte count exactly;
    // compute_padding() depends on it.
    assert(off1 - off0 == preserve_SP_size(), "correct size prediction");
  %}
1843 
  enc_class restore_SP %{
    // Restore RSP from the copy stashed in rbp_mh_SP_save by preserve_SP
    // (the callee may have changed the SP).
    MacroAssembler _masm(&cbuf);
    __ movptr(rsp, rbp_mh_SP_save);
  %}
1848 
  enc_class Java_Static_Call (method meth) %{    // JAVA STATIC CALL
    // CALL to fixup routine.  Fixup routine uses ScopeDesc info to determine
    // who we intended to call.
    cbuf.set_insts_mark();
    $$$emit8$primary;  // the CALL opcode byte
    // The 32-bit displacement is pc-relative: target minus the address just
    // past the 4-byte displacement field.  The relocation type depends on
    // what kind of callee this is.
    if ( !_method ) {
      // No Java method: a call into the runtime.
      emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.insts_end()) - 4),
                     runtime_call_Relocation::spec(), RELOC_IMM32 );
    } else if(_optimized_virtual) {
      emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.insts_end()) - 4),
                     opt_virtual_call_Relocation::spec(), RELOC_IMM32 );
    } else {
      emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.insts_end()) - 4),
                     static_call_Relocation::spec(), RELOC_IMM32 );
    }
    if( _method ) {  // Emit stub for static call
      emit_java_to_interp(cbuf);
    }
  %}
1868 


3777   // Alignment size in bytes (128-bit -> 16 bytes)
3778   stack_alignment(StackAlignmentInBytes);
3779 
3780   // Number of stack slots between incoming argument block and the start of
3781   // a new frame.  The PROLOG must add this many slots to the stack.  The
3782   // EPILOG must remove this many slots.  Intel needs one slot for
3783   // return address and one for rbp, (must save rbp)
3784   in_preserve_stack_slots(2+VerifyStackAtCalls);
3785 
3786   // Number of outgoing stack slots killed above the out_preserve_stack_slots
3787   // for calls to C.  Supports the var-args backing area for register parms.
3788   varargs_C_out_slots_killed(0);
3789 
3790   // The after-PROLOG location of the return address.  Location of
3791   // return address specifies a type (REG or STACK) and a number
3792   // representing the register number (i.e. - use a register name) or
3793   // stack slot.
3794   // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
3795   // Otherwise, it is above the locks and verification slot and alignment word
3796   return_addr(STACK - 1 +
3797               round_to(1+VerifyStackAtCalls+
3798               Compile::current()->fixed_slots(),
3799               (StackAlignmentInBytes/wordSize)));
3800 
3801   // Body of function which returns an integer array locating
3802   // arguments either in registers or in stack slots.  Passed an array
3803   // of ideal registers called "sig" and a "length" count.  Stack-slot
3804   // offsets are based on outgoing arguments, i.e. a CALLER setting up
3805   // arguments for a CALLEE.  Incoming stack arguments are
3806   // automatically biased by the preserve_stack_slots field above.
3807   calling_convention %{
3808     // No difference between ingoing/outgoing just pass false
3809     SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
3810   %}
3811 
3812 
3813   // Body of function which returns an integer array locating
3814   // arguments either in registers or in stack slots.  Passed an array
3815   // of ideal registers called "sig" and a "length" count.  Stack-slot
3816   // offsets are based on outgoing arguments, i.e. a CALLER setting up
3817   // arguments for a CALLEE.  Incoming stack arguments are
3818   // automatically biased by the preserve_stack_slots field above.
3819   c_calling_convention %{


// Safepoint Instruction
// Polls the safepoint page; the load faults when the VM arms the page,
// diverting the thread into the safepoint handler.  Kills EFLAGS (cr).
instruct safePoint_poll(eFlagsReg cr) %{
  match(SafePoint);
  effect(KILL cr);

  // TODO-FIXME: we currently poll at offset 0 of the safepoint polling page.
  // On SPARC that might be acceptable as we can generate the address with
  // just a sethi, saving an or.  By polling at offset 0 we can end up
  // putting additional pressure on the index-0 in the D$.  Because of
  // alignment (just like the situation at hand) the lower indices tend
  // to see more traffic.  It'd be better to change the polling address
  // to offset 0 of the last $line in the polling page.

  format %{ "TSTL   #polladdr,EAX\t! Safepoint: poll for GC" %}
  ins_cost(125);
  // Fixed 6-byte encoding, asserted by the matcher.
  size(6) ;
  ins_encode( Safepoint_Poll() );
  ins_pipe( ialu_reg_mem );
%}
13426 



















13427 //----------PEEPHOLE RULES-----------------------------------------------------
13428 // These must follow all instruction definitions as they use the names
13429 // defined in the instructions definitions.
13430 //
13431 // peepmatch ( root_instr_name [preceding_instruction]* );
13432 //
13433 // peepconstraint %{
13434 // (instruction_number.operand_name relational_op instruction_number.operand_name
13435 //  [, ...] );
13436 // // instruction numbers are zero-based using left to right order in peepmatch
13437 //
13438 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
13439 // // provide an instruction_number.operand_name for each operand that appears
13440 // // in the replacement instruction's match rule
13441 //
13442 // ---------VM FLAGS---------------------------------------------------------
13443 //
13444 // All peephole optimizations can be turned off using -XX:-OptoPeephole
13445 //
13446 // Each peephole rule is given an identifying number starting with zero and


   1 //
   2 // Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
   3 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4 //
   5 // This code is free software; you can redistribute it and/or modify it
   6 // under the terms of the GNU General Public License version 2 only, as
   7 // published by the Free Software Foundation.
   8 //
   9 // This code is distributed in the hope that it will be useful, but WITHOUT
  10 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12 // version 2 for more details (a copy is included in the LICENSE file that
  13 // accompanied this code).
  14 //
  15 // You should have received a copy of the GNU General Public License version
  16 // 2 along with this work; if not, write to the Free Software Foundation,
  17 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18 //
  19 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20 // or visit www.oracle.com if you need additional information or have any
  21 // questions.
  22 //


 324 }
 325 
 326 // The address of the call instruction needs to be 4-byte aligned to
 327 // ensure that it does not span a cache line so that it can be patched.
 328 int CallStaticJavaHandleNode::compute_padding(int current_offset) const {
 329   current_offset += pre_call_FPU_size();  // skip fldcw, if any
 330   current_offset += preserve_SP_size();   // skip mov rbp, rsp
 331   current_offset += 1;      // skip call opcode byte
 332   return round_to(current_offset, alignment_required()) - current_offset;
 333 }
 334 
 335 // The address of the call instruction needs to be 4-byte aligned to
 336 // ensure that it does not span a cache line so that it can be patched.
 337 int CallDynamicJavaDirectNode::compute_padding(int current_offset) const {
 338   current_offset += pre_call_FPU_size();  // skip fldcw, if any
 339   current_offset += 5;      // skip MOV instruction
 340   current_offset += 1;      // skip call opcode byte
 341   return round_to(current_offset, alignment_required()) - current_offset;
 342 }
 343 






 344 // EMIT_RM()
 345 void emit_rm(CodeBuffer &cbuf, int f1, int f2, int f3) {
 346   unsigned char c = (unsigned char)((f1 << 6) | (f2 << 3) | f3);
 347   cbuf.insts()->emit_int8(c);
 348 }
 349 
 350 // EMIT_CC()
 351 void emit_cc(CodeBuffer &cbuf, int f1, int f2) {
 352   unsigned char c = (unsigned char)( f1 | f2 );
 353   cbuf.insts()->emit_int8(c);
 354 }
 355 
// EMIT_OPCODE()
// Append a single opcode byte to the instruction stream.
void emit_opcode(CodeBuffer &cbuf, int code) {
  cbuf.insts()->emit_int8((unsigned char) code);
}
 360 
 361 // EMIT_OPCODE() w/ relocation information
 362 void emit_opcode(CodeBuffer &cbuf, int code, relocInfo::relocType reloc, int offset = 0) {
 363   cbuf.relocate(cbuf.insts_mark() + offset, reloc);


1094     return size;               // Self copy; no move
1095   assert( src_second_rc != rc_bad && dst_second_rc != rc_bad, "src_second & dst_second cannot be Bad" );
1096 
1097   // Check for second word int-int move
1098   if( src_second_rc == rc_int && dst_second_rc == rc_int )
1099     return impl_mov_helper(cbuf,do_size,src_second,dst_second,size, st);
1100 
1101   // Check for second word integer store
1102   if( src_second_rc == rc_int && dst_second_rc == rc_stack )
1103     return impl_helper(cbuf,do_size,false,ra_->reg2offset(dst_second),src_second,0x89,"MOV ",size, st);
1104 
1105   // Check for second word integer load
1106   if( dst_second_rc == rc_int && src_second_rc == rc_stack )
1107     return impl_helper(cbuf,do_size,true ,ra_->reg2offset(src_second),dst_second,0x8B,"MOV ",size, st);
1108 
1109 
1110   Unimplemented();
1111 }
1112 
#ifndef PRODUCT
// Debug-only listing of a spill copy: run implementation() with a NULL
// code buffer and do_size == false so it only formats to the stream.
void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream* st) const {
  implementation( NULL, ra_, false, st );
}
#endif
1118 
// Emit the spill copy into the code buffer (do_size == false, no stream).
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation( &cbuf, ra_, false, NULL );
}
1122 
// Size in bytes of the spill copy: run implementation() in size-only mode
// (NULL code buffer, do_size == true) and return its byte count.
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return implementation( NULL, ra_, true, NULL );
}
1126 






1127 










1128 //=============================================================================
#ifndef PRODUCT
// Debug-only listing of a BoxLock: an LEA of the lock's stack-slot address
// into the allocated register.
void BoxLockNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_reg_first(this);
  st->print("LEA    %s,[ESP + #%d]",Matcher::regName[reg],offset);
}
#endif
1136 
1137 void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1138   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
1139   int reg = ra_->get_encode(this);
1140   if( offset >= 128 ) {
1141     emit_opcode(cbuf, 0x8D);      // LEA  reg,[SP+offset]
1142     emit_rm(cbuf, 0x2, reg, 0x04);
1143     emit_rm(cbuf, 0x0, 0x04, ESP_enc);
1144     emit_d32(cbuf, offset);
1145   }
1146   else {
1147     emit_opcode(cbuf, 0x8D);      // LEA  reg,[SP+offset]


1792 
  enc_class pre_call_FPU %{
    // If method sets FPU control word restore it here
    // (reload the standard control word before the call when the method
    // runs in 24-bit FP mode).
    debug_only(int off0 = cbuf.insts_size());
    if( Compile::current()->in_24_bit_fp_mode() ) {
      MacroAssembler masm(&cbuf);
      masm.fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
    }
    debug_only(int off1 = cbuf.insts_size());
    // pre_call_FPU_size() must predict exactly the bytes emitted above;
    // compute_padding() relies on that prediction.
    assert(off1 - off0 == pre_call_FPU_size(), "correct size prediction");
  %}
1803 
  enc_class post_call_FPU %{
    // If method sets FPU control word do it here also
    // (re-install the 24-bit control word after the call, undoing the
    // std word loaded by pre_call_FPU).
    if( Compile::current()->in_24_bit_fp_mode() ) {
      MacroAssembler masm(&cbuf);
      masm.fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_24()));
    }
  %}
1811 















  enc_class Java_Static_Call (method meth) %{    // JAVA STATIC CALL
    // CALL to fixup routine.  Fixup routine uses ScopeDesc info to determine
    // who we intended to call.
    cbuf.set_insts_mark();
    $$$emit8$primary;  // the CALL opcode byte
    // The 32-bit displacement is pc-relative: target minus the address just
    // past the 4-byte displacement field.  The relocation type depends on
    // what kind of callee this is.
    if ( !_method ) {
      // No Java method: a call into the runtime.
      emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.insts_end()) - 4),
                     runtime_call_Relocation::spec(), RELOC_IMM32 );
    } else if(_optimized_virtual) {
      emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.insts_end()) - 4),
                     opt_virtual_call_Relocation::spec(), RELOC_IMM32 );
    } else {
      emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.insts_end()) - 4),
                     static_call_Relocation::spec(), RELOC_IMM32 );
    }
    if( _method ) {  // Emit stub for static call
      emit_java_to_interp(cbuf);
    }
  %}
1831 


3740   // Alignment size in bytes (128-bit -> 16 bytes)
3741   stack_alignment(StackAlignmentInBytes);
3742 
3743   // Number of stack slots between incoming argument block and the start of
3744   // a new frame.  The PROLOG must add this many slots to the stack.  The
3745   // EPILOG must remove this many slots.  Intel needs one slot for
3746   // return address and one for rbp, (must save rbp)
3747   in_preserve_stack_slots(2+VerifyStackAtCalls);
3748 
3749   // Number of outgoing stack slots killed above the out_preserve_stack_slots
3750   // for calls to C.  Supports the var-args backing area for register parms.
3751   varargs_C_out_slots_killed(0);
3752 
3753   // The after-PROLOG location of the return address.  Location of
3754   // return address specifies a type (REG or STACK) and a number
3755   // representing the register number (i.e. - use a register name) or
3756   // stack slot.
3757   // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
3758   // Otherwise, it is above the locks and verification slot and alignment word
3759   return_addr(STACK - 1 +
3760               round_to((Compile::current()->in_preserve_stack_slots() +
3761                         Compile::current()->fixed_slots()),
3762                        stack_alignment_in_slots()));
3763 
3764   // Body of function which returns an integer array locating
3765   // arguments either in registers or in stack slots.  Passed an array
3766   // of ideal registers called "sig" and a "length" count.  Stack-slot
3767   // offsets are based on outgoing arguments, i.e. a CALLER setting up
3768   // arguments for a CALLEE.  Incoming stack arguments are
3769   // automatically biased by the preserve_stack_slots field above.
3770   calling_convention %{
3771     // No difference between ingoing/outgoing just pass false
3772     SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
3773   %}
3774 
3775 
3776   // Body of function which returns an integer array locating
3777   // arguments either in registers or in stack slots.  Passed an array
3778   // of ideal registers called "sig" and a "length" count.  Stack-slot
3779   // offsets are based on outgoing arguments, i.e. a CALLER setting up
3780   // arguments for a CALLEE.  Incoming stack arguments are
3781   // automatically biased by the preserve_stack_slots field above.
3782   c_calling_convention %{


// Safepoint Instruction
// Polls the safepoint page; the load faults when the VM arms the page,
// diverting the thread into the safepoint handler.  Kills EFLAGS (cr).
instruct safePoint_poll(eFlagsReg cr) %{
  match(SafePoint);
  effect(KILL cr);

  // TODO-FIXME: we currently poll at offset 0 of the safepoint polling page.
  // On SPARC that might be acceptable as we can generate the address with
  // just a sethi, saving an or.  By polling at offset 0 we can end up
  // putting additional pressure on the index-0 in the D$.  Because of
  // alignment (just like the situation at hand) the lower indices tend
  // to see more traffic.  It'd be better to change the polling address
  // to offset 0 of the last $line in the polling page.

  format %{ "TSTL   #polladdr,EAX\t! Safepoint: poll for GC" %}
  ins_cost(125);
  // Fixed 6-byte encoding, asserted by the matcher.
  size(6) ;
  ins_encode( Safepoint_Poll() );
  ins_pipe( ialu_reg_mem );
%}
13389 
13390 
// ============================================================================
// This name is KNOWN by the ADLC and cannot be changed.
// The ADLC forces a 'TypeRawPtr::BOTTOM' output type
// for this guy.
// Loads the current thread pointer into dst; get_thread() is the
// platform-specific lookup, and cr is killed because it may clobber flags.
instruct tlsLoadP(eRegP dst, eFlagsReg cr) %{
  match(Set dst (ThreadLocal));
  effect(DEF dst, KILL cr);

  format %{ "MOV    $dst, Thread::current()" %}
  ins_encode %{
    Register dstReg = as_Register($dst$$reg);
    __ get_thread(dstReg);
  %}
  ins_pipe( ialu_reg_fat );
%}
13406 
13407 
13408 
13409 //----------PEEPHOLE RULES-----------------------------------------------------
13410 // These must follow all instruction definitions as they use the names
13411 // defined in the instructions definitions.
13412 //
13413 // peepmatch ( root_instr_name [preceding_instruction]* );
13414 //
13415 // peepconstraint %{
13416 // (instruction_number.operand_name relational_op instruction_number.operand_name
13417 //  [, ...] );
13418 // // instruction numbers are zero-based using left to right order in peepmatch
13419 //
13420 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
13421 // // provide an instruction_number.operand_name for each operand that appears
13422 // // in the replacement instruction's match rule
13423 //
13424 // ---------VM FLAGS---------------------------------------------------------
13425 //
13426 // All peephole optimizations can be turned off using -XX:-OptoPeephole
13427 //
13428 // Each peephole rule is given an identifying number starting with zero and


src/cpu/x86/vm/x86_32.ad
Index Unified diffs Context diffs Sdiffs Wdiffs Patch New Old Previous File Next File