
src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp

rev 54790 : 8223645: AArch64 build broken by fix for 8223136
Reviewed-by: stefank


--- old/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp

  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #ifndef CPU_AARCH64_MACROASSEMBLER_AARCH64_HPP
  27 #define CPU_AARCH64_MACROASSEMBLER_AARCH64_HPP
  28 
  29 #include "asm/assembler.hpp"

  30 
  31 // MacroAssembler extends Assembler by frequently used macros.
  32 //
  33 // Instructions for which a 'better' code sequence exists depending
  34 // on arguments should also go in here.
  35 
  36 class MacroAssembler: public Assembler {
  37   friend class LIR_Assembler;
  38 
  39  public:
  40   using Assembler::mov;
  41   using Assembler::movi;
  42 
  43  protected:
  44 
  45   // Support for VM calls
  46   //
  47   // This is the base routine called by the different versions of call_VM_leaf. The interpreter
  48   // may customize this version by overriding it for its purposes (e.g., to save/restore
  49   // additional registers when doing a VM call).


  68   // returns the register which contains the thread upon return. If a thread register has been
  69   // specified, the return value will correspond to that register. If no last_java_sp is specified
  70  // (noreg) then rsp will be used instead.
  71   virtual void call_VM_base(           // returns the register containing the thread upon return
  72     Register oop_result,               // where an oop-result ends up if any; use noreg otherwise
  73     Register java_thread,              // the thread if computed before     ; use noreg otherwise
  74     Register last_java_sp,             // to set up last_Java_frame in stubs; use noreg otherwise
  75     address  entry_point,              // the entry point
  76     int      number_of_arguments,      // the number of arguments (w/o thread) to pop after the call
  77     bool     check_exceptions          // whether to check for pending exceptions after return
  78   );
  79 
  80   void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true);
  81 
  82   // True if an XOR can be used to expand narrow klass references.
  83   bool use_XOR_for_compressed_class_base;
  84 
  85  public:
  86   MacroAssembler(CodeBuffer* code) : Assembler(code) {
  87     use_XOR_for_compressed_class_base
  88       = (operand_valid_for_logical_immediate(false /*is32*/,
  89                                              (uint64_t)Universe::narrow_klass_base())
  90          && ((uint64_t)Universe::narrow_klass_base()
  91              > (1UL << log2_intptr(Universe::narrow_klass_range()))));
  92   }
  93 
  94  // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
  95  // The implementation is only non-empty for the InterpreterMacroAssembler,
  96  // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
  97  virtual void check_and_handle_popframe(Register java_thread);
  98  virtual void check_and_handle_earlyret(Register java_thread);
  99 
 100   void safepoint_poll(Label& slow_path);
 101   void safepoint_poll_acquire(Label& slow_path);
 102 
 103   // Biased locking support
 104   // lock_reg and obj_reg must be loaded up with the appropriate values.
 105   // swap_reg is killed.
 106   // tmp_reg must be supplied and must not be rscratch1 or rscratch2
 107   // Optional slow case is for implementations (interpreter and C1) which branch to
 108   // slow case directly. Leaves condition codes set for C2's Fast_Lock node.
 109   // Returns offset of first potentially-faulting instruction for null
 110   // check info (currently consumed only by C1). If
 111   // swap_reg_contains_mark is true then returns -1 as it is assumed




+++ new/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp

  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #ifndef CPU_AARCH64_MACROASSEMBLER_AARCH64_HPP
  27 #define CPU_AARCH64_MACROASSEMBLER_AARCH64_HPP
  28 
  29 #include "asm/assembler.hpp"
  30 #include "oops/compressedOops.hpp"
  31 
  32 // MacroAssembler extends Assembler by frequently used macros.
  33 //
  34 // Instructions for which a 'better' code sequence exists depending
  35 // on arguments should also go in here.
  36 
  37 class MacroAssembler: public Assembler {
  38   friend class LIR_Assembler;
  39 
  40  public:
  41   using Assembler::mov;
  42   using Assembler::movi;
  43 
  44  protected:
  45 
  46   // Support for VM calls
  47   //
  48   // This is the base routine called by the different versions of call_VM_leaf. The interpreter
  49   // may customize this version by overriding it for its purposes (e.g., to save/restore
  50   // additional registers when doing a VM call).


  69   // returns the register which contains the thread upon return. If a thread register has been
  70   // specified, the return value will correspond to that register. If no last_java_sp is specified
  71  // (noreg) then rsp will be used instead.
  72   virtual void call_VM_base(           // returns the register containing the thread upon return
  73     Register oop_result,               // where an oop-result ends up if any; use noreg otherwise
  74     Register java_thread,              // the thread if computed before     ; use noreg otherwise
  75     Register last_java_sp,             // to set up last_Java_frame in stubs; use noreg otherwise
  76     address  entry_point,              // the entry point
  77     int      number_of_arguments,      // the number of arguments (w/o thread) to pop after the call
  78     bool     check_exceptions          // whether to check for pending exceptions after return
  79   );
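
A minimal sketch of how a public call_VM overload can funnel into this routine under the contract above; illustrative only, not part of this patch (the actual overloads in macroAssembler_aarch64.cpp route through call_VM_helper):

    void MacroAssembler::call_VM(Register oop_result,
                                 address entry_point,
                                 bool check_exceptions) {
      // Pass noreg for java_thread: call_VM_base then computes the current
      // thread itself (rthread). Pass noreg for last_java_sp: the current
      // stack pointer is used instead, as documented above.
      call_VM_base(oop_result, noreg, noreg, entry_point,
                   /*number_of_arguments=*/0, check_exceptions);
    }
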
  80 
  81   void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true);
  82 
  83   // True if an XOR can be used to expand narrow klass references.
  84   bool use_XOR_for_compressed_class_base;
  85 
  86  public:
  87   MacroAssembler(CodeBuffer* code) : Assembler(code) {
  88     use_XOR_for_compressed_class_base
  89       = operand_valid_for_logical_immediate
  90            (/*is32*/false, (uint64_t)CompressedKlassPointers::base())
  91          && ((uint64_t)CompressedKlassPointers::base()
  92              > (1UL << log2_intptr(CompressedKlassPointers::range())));
  93   }
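
The intent of this predicate: when the klass base is encodable as an AArch64 logical immediate and its set bits all lie above the bits spanned by a shifted narrow-klass offset, base + offset equals base ^ offset, so a compressed klass pointer can be decoded with a single EOR-with-immediate instead of materializing the base and adding. A self-contained sketch of that arithmetic, with illustrative values not taken from any real heap layout:

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint64_t base  = UINT64_C(0x800000000);  // 2^35: one contiguous run
                                                     // of set bits, so encodable
                                                     // as a logical immediate
      const uint64_t range = UINT64_C(0x100000000);  // offsets stay below 2^32,
                                                     // so base > range holds
      for (uint64_t off = 0; off < range; off += UINT64_C(0x12345678)) {
        // base and off share no set bits, so XOR behaves like ADD; this is
        // what lets the decoder use one EOR instruction for base + offset.
        assert((base ^ off) == base + off);
      }
      return 0;
    }
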
  94 
  95  // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
  96  // The implementation is only non-empty for the InterpreterMacroAssembler,
  97  // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
  98  virtual void check_and_handle_popframe(Register java_thread);
  99  virtual void check_and_handle_earlyret(Register java_thread);
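
As the comment says, the base-class versions are expected to emit nothing; only InterpreterMacroAssembler overrides them. A sketch of the implied default definitions (assumed shape, not quoted from this patch):

    void MacroAssembler::check_and_handle_popframe(Register java_thread) { }
    void MacroAssembler::check_and_handle_earlyret(Register java_thread) { }
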
 100 
 101   void safepoint_poll(Label& slow_path);
 102   void safepoint_poll_acquire(Label& slow_path);
 103 
 104   // Biased locking support
 105   // lock_reg and obj_reg must be loaded up with the appropriate values.
 106   // swap_reg is killed.
 107   // tmp_reg must be supplied and must not be rscratch1 or rscratch2
 108   // Optional slow case is for implementations (interpreter and C1) which branch to
 109   // slow case directly. Leaves condition codes set for C2's Fast_Lock node.
 110   // Returns offset of first potentially-faulting instruction for null
 111   // check info (currently consumed only by C1). If
 112   // swap_reg_contains_mark is true then returns -1 as it is assumed

