
src/cpu/sparc/vm/macroAssembler_sparc.inline.hpp

@@ -1,7 +1,7 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
  * published by the Free Software Foundation.

@@ -43,150 +43,86 @@
   stub_inst = patched_branch(target - branch, stub_inst, 0);
 }
 
 // Use the right loads/stores for the platform
 inline void MacroAssembler::ld_ptr( Register s1, Register s2, Register d ) {
-#ifdef _LP64
   Assembler::ldx(s1, s2, d);
-#else
-             ld( s1, s2, d);
-#endif
 }
 
 inline void MacroAssembler::ld_ptr( Register s1, int simm13a, Register d ) {
-#ifdef _LP64
   Assembler::ldx(s1, simm13a, d);
-#else
-             ld( s1, simm13a, d);
-#endif
 }
 
 #ifdef ASSERT
 // ByteSize is only a class when ASSERT is defined, otherwise it's an int.
 inline void MacroAssembler::ld_ptr( Register s1, ByteSize simm13a, Register d ) {
   ld_ptr(s1, in_bytes(simm13a), d);
 }
 #endif
 
 inline void MacroAssembler::ld_ptr( Register s1, RegisterOrConstant s2, Register d ) {
-#ifdef _LP64
   ldx(s1, s2, d);
-#else
-  ld( s1, s2, d);
-#endif
 }
 
 inline void MacroAssembler::ld_ptr(const Address& a, Register d, int offset) {
-#ifdef _LP64
   ldx(a, d, offset);
-#else
-  ld( a, d, offset);
-#endif
 }
 
 inline void MacroAssembler::st_ptr( Register d, Register s1, Register s2 ) {
-#ifdef _LP64
   Assembler::stx(d, s1, s2);
-#else
-             st( d, s1, s2);
-#endif
 }
 
 inline void MacroAssembler::st_ptr( Register d, Register s1, int simm13a ) {
-#ifdef _LP64
   Assembler::stx(d, s1, simm13a);
-#else
-             st( d, s1, simm13a);
-#endif
 }
 
 #ifdef ASSERT
 // ByteSize is only a class when ASSERT is defined, otherwise it's an int.
 inline void MacroAssembler::st_ptr( Register d, Register s1, ByteSize simm13a ) {
   st_ptr(d, s1, in_bytes(simm13a));
 }
 #endif
 
 inline void MacroAssembler::st_ptr( Register d, Register s1, RegisterOrConstant s2 ) {
-#ifdef _LP64
   stx(d, s1, s2);
-#else
-  st( d, s1, s2);
-#endif
 }
 
 inline void MacroAssembler::st_ptr(Register d, const Address& a, int offset) {
-#ifdef _LP64
   stx(d, a, offset);
-#else
-  st( d, a, offset);
-#endif
 }
 
 // Use the right loads/stores for the platform
 inline void MacroAssembler::ld_long( Register s1, Register s2, Register d ) {
-#ifdef _LP64
   Assembler::ldx(s1, s2, d);
-#else
-  Assembler::ldd(s1, s2, d);
-#endif
 }
 
 inline void MacroAssembler::ld_long( Register s1, int simm13a, Register d ) {
-#ifdef _LP64
   Assembler::ldx(s1, simm13a, d);
-#else
-  Assembler::ldd(s1, simm13a, d);
-#endif
 }
 
 inline void MacroAssembler::ld_long( Register s1, RegisterOrConstant s2, Register d ) {
-#ifdef _LP64
   ldx(s1, s2, d);
-#else
-  ldd(s1, s2, d);
-#endif
 }
 
 inline void MacroAssembler::ld_long(const Address& a, Register d, int offset) {
-#ifdef _LP64
   ldx(a, d, offset);
-#else
-  ldd(a, d, offset);
-#endif
 }
 
 inline void MacroAssembler::st_long( Register d, Register s1, Register s2 ) {
-#ifdef _LP64
   Assembler::stx(d, s1, s2);
-#else
-  Assembler::std(d, s1, s2);
-#endif
 }
 
 inline void MacroAssembler::st_long( Register d, Register s1, int simm13a ) {
-#ifdef _LP64
   Assembler::stx(d, s1, simm13a);
-#else
-  Assembler::std(d, s1, simm13a);
-#endif
 }
 
 inline void MacroAssembler::st_long( Register d, Register s1, RegisterOrConstant s2 ) {
-#ifdef _LP64
   stx(d, s1, s2);
-#else
-  std(d, s1, s2);
-#endif
 }
 
 inline void MacroAssembler::st_long( Register d, const Address& a, int offset ) {
-#ifdef _LP64
   stx(d, a, offset);
-#else
-  std(d, a, offset);
-#endif
 }
 
 inline void MacroAssembler::stbool(Register d, const Address& a) { stb(d, a); }
 inline void MacroAssembler::ldbool(const Address& a, Register d) { ldub(a, d); }
 inline void MacroAssembler::movbool( bool boolconst, Register d) { mov( (int) boolconst, d); }

@@ -205,49 +141,29 @@
 inline void MacroAssembler::casx( Register s1, Register s2, Register d) { casxa(s1, s2, d, ASI_PRIMARY); }
 
 // Functions for isolating 64 bit atomic swaps for LP64
 // cas_ptr will perform cas for 32 bit VM's and casx for 64 bit VM's
 inline void MacroAssembler::cas_ptr(  Register s1, Register s2, Register d) {
-#ifdef _LP64
   casx( s1, s2, d );
-#else
-  cas( s1, s2, d );
-#endif
 }
 
 // Functions for isolating 64 bit shifts for LP64
 
 inline void MacroAssembler::sll_ptr( Register s1, Register s2, Register d ) {
-#ifdef _LP64
   Assembler::sllx(s1, s2, d);
-#else
-  Assembler::sll( s1, s2, d);
-#endif
 }
 
 inline void MacroAssembler::sll_ptr( Register s1, int imm6a,   Register d ) {
-#ifdef _LP64
   Assembler::sllx(s1, imm6a, d);
-#else
-  Assembler::sll( s1, imm6a, d);
-#endif
 }
 
 inline void MacroAssembler::srl_ptr( Register s1, Register s2, Register d ) {
-#ifdef _LP64
   Assembler::srlx(s1, s2, d);
-#else
-  Assembler::srl( s1, s2, d);
-#endif
 }
 
 inline void MacroAssembler::srl_ptr( Register s1, int imm6a,   Register d ) {
-#ifdef _LP64
   Assembler::srlx(s1, imm6a, d);
-#else
-  Assembler::srl( s1, imm6a, d);
-#endif
 }
 
 inline void MacroAssembler::sll_ptr( Register s1, RegisterOrConstant s2, Register d ) {
   if (s2.is_register())  sll_ptr(s1, s2.as_register(), d);
   else                   sll_ptr(s1, s2.as_constant(), d);

@@ -275,15 +191,11 @@
 
 
 // Branch that tests either xcc or icc depending on the
 // architecture compiled (LP64 or not)
 inline void MacroAssembler::brx( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
-#ifdef _LP64
     Assembler::bp(c, a, xcc, p, d, rt);
-#else
-    MacroAssembler::br(c, a, p, d, rt);
-#endif
 }
 
 inline void MacroAssembler::brx( Condition c, bool a, Predict p, Label& L ) {
   insert_nop_after_cbcond();
   brx(c, a, p, target(L));

@@ -336,11 +248,10 @@
 inline void MacroAssembler::call( address d, relocInfo::relocType rt ) {
   MacroAssembler::call(d, Relocation::spec_simple(rt));
 }
 
 inline void MacroAssembler::call( address d, RelocationHolder const& rspec ) {
-#ifdef _LP64
   intptr_t disp;
   // NULL is ok because it will be relocated later.
   // Must change NULL to a reachable address in order to
   // pass asserts here and in wdisp.
   if ( d == NULL )

@@ -353,13 +264,10 @@
     AddressLiteral dest(d);
     jumpl_to(dest, O7, O7);
   } else {
     Assembler::call(d, rspec);
   }
-#else
-  Assembler::call( d, rspec );
-#endif
 }
 
 inline void MacroAssembler::call( Label& L,   relocInfo::relocType rt ) {
   insert_nop_after_cbcond();
   MacroAssembler::call( target(L), rt);

@@ -412,16 +320,11 @@
 
 // Loads the current PC of the following instruction as an immediate value in
 // 2 instructions.  All PCs in the CodeCache are within 2 Gig of each other.
 inline intptr_t MacroAssembler::load_pc_address( Register reg, int bytes_to_skip ) {
   intptr_t thepc = (intptr_t)pc() + 2*BytesPerInstWord + bytes_to_skip;
-#ifdef _LP64
   Unimplemented();
-#else
-  Assembler::sethi(   thepc & ~0x3ff, reg, internal_word_Relocation::spec((address)thepc));
-             add(reg, thepc &  0x3ff, reg, internal_word_Relocation::spec((address)thepc));
-#endif
   return thepc;
 }
 
 
 inline void MacroAssembler::load_contents(const AddressLiteral& addrlit, Register d, int offset) {

@@ -552,11 +455,10 @@
   else
     st_ptr (s, a.as_address());
 }
 
 
-#ifdef _LP64
 inline void MacroAssembler::store_float_argument( FloatRegister s, Argument& a ) {
   if (a.is_float_register())
 // V9 ABI has F1, F3, F5 are used to pass instead of O0, O1, O2
     fmov(FloatRegisterImpl::S, s, a.as_float_register() );
   else

@@ -577,11 +479,10 @@
   if (a.is_register())
     mov(s, a.as_register());
   else
     stx(s, a.as_address());
 }
-#endif
 
 inline void MacroAssembler::round_to( Register r, int modulus ) {
   assert_not_delayed();
   inc( r, modulus - 1 );
   and3( r, -modulus, r );

@@ -638,26 +539,17 @@
 inline void MacroAssembler::clrx( Register s1, int simm13a) { stx( G0, s1, simm13a); }
 
 inline void MacroAssembler::clruw( Register s, Register d ) { srl( s, G0, d); }
 inline void MacroAssembler::clruwu( Register d ) { srl( d, G0, d); }
 
-#ifdef _LP64
 // Make all 32 bit loads signed so 64 bit registers maintain proper sign
 inline void MacroAssembler::ld(  Register s1, Register s2, Register d)      { ldsw( s1, s2, d); }
 inline void MacroAssembler::ld(  Register s1, int simm13a, Register d)      { ldsw( s1, simm13a, d); }
-#else
-inline void MacroAssembler::ld(  Register s1, Register s2, Register d)      { lduw( s1, s2, d); }
-inline void MacroAssembler::ld(  Register s1, int simm13a, Register d)      { lduw( s1, simm13a, d); }
-#endif
 
 #ifdef ASSERT
   // ByteSize is only a class when ASSERT is defined, otherwise it's an int.
-# ifdef _LP64
 inline void MacroAssembler::ld(Register s1, ByteSize simm13a, Register d) { ldsw( s1, in_bytes(simm13a), d); }
-# else
-inline void MacroAssembler::ld(Register s1, ByteSize simm13a, Register d) { lduw( s1, in_bytes(simm13a), d); }
-# endif
 #endif
 
 inline void MacroAssembler::ld(  const Address& a, Register d, int offset) {
   if (a.has_index()) { assert(offset == 0, ""); ld(  a.base(), a.index(),         d); }
   else               {                          ld(  a.base(), a.disp() + offset, d); }