< prev index next >

src/cpu/ppc/vm/macroAssembler_ppc.hpp

Print this page
rev 8107 : 8077838: Recent developments for ppc.
   1 /*
   2  * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright 2012, 2014 SAP AG. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #ifndef CPU_PPC_VM_MACROASSEMBLER_PPC_HPP
  27 #define CPU_PPC_VM_MACROASSEMBLER_PPC_HPP
  28 
  29 #include "asm/assembler.hpp"

  30 #include "utilities/macros.hpp"
  31 
  32 // MacroAssembler extends Assembler by a few frequently used macros.
  33 
  34 class ciTypeArray;
  35 
  36 class MacroAssembler: public Assembler {
  37  public:
  38   MacroAssembler(CodeBuffer* code) : Assembler(code) {}
  39 
  40   //
  41   // Optimized instruction emitters
  42   //
  43 
        // Split a 31-bit signed offset into two si16 halves for a two-instruction
        // (addis/addi style) sequence: (hi << 16) + lo == si31 always holds.
        // The (1<<15) rounding term compensates for the sign extension of the
        // low 16-bit immediate.
   44   inline static int largeoffset_si16_si16_hi(int si31) { return (si31 + (1<<15)) >> 16; }
   45   inline static int largeoffset_si16_si16_lo(int si31) { return si31 - (((si31 + (1<<15)) >> 16) << 16); }
  46 
  47   // load d = *[a+si31]
  48   // Emits several instructions if the offset is not encodable in one instruction.
  49   void ld_largeoffset_unchecked(Register d, int si31, Register a, int emit_filler_nop);


 415   // CmpxchgX sets condition register to cmpX(current, compare).
 416   // (flag == ne) => (dest_current_value != compare_value), (!swapped)
 417   // (flag == eq) => (dest_current_value == compare_value), ( swapped)
        // Hints passed as 'cmpxchgx_hint' to cmpxchgw/cmpxchgd below.
        // NOTE(review): presumably these select the lwarx/ldarx EH ("exclusive
        // access") hint bit -- confirm against the cmpxchg implementations.
 418   static inline bool cmpxchgx_hint_acquire_lock()  { return true; }
 419   // The stxcx will probably not be succeeded by a releasing store.
 420   static inline bool cmpxchgx_hint_release_lock()  { return false; }
 421   static inline bool cmpxchgx_hint_atomic_update() { return false; }
 422 
 423   // Cmpxchg semantics
 424   enum {
 425     MemBarNone = 0,
 426     MemBarRel  = 1,
 427     MemBarAcq  = 2,
 428     MemBarFenceAfter = 4 // use powers of 2
 429   };
 430   void cmpxchgw(ConditionRegister flag,
 431                 Register dest_current_value, Register compare_value, Register exchange_value, Register addr_base,
 432                 int semantics, bool cmpxchgx_hint = false,
 433                 Register int_flag_success = noreg, bool contention_hint = false);
 434   void cmpxchgd(ConditionRegister flag,
 435                 Register dest_current_value, Register compare_value, Register exchange_value, Register addr_base,
 436                 int semantics, bool cmpxchgx_hint = false,
 437                 Register int_flag_success = noreg, Label* failed = NULL, bool contention_hint = false);
 438 
 439   // interface method calling
 440   void lookup_interface_method(Register recv_klass,
 441                                Register intf_klass,
 442                                RegisterOrConstant itable_index,
 443                                Register method_result,
 444                                Register temp_reg, Register temp2_reg,
 445                                Label& no_such_interface);
 446 
 447   // virtual method calling
 448   void lookup_virtual_method(Register recv_klass,
 449                              RegisterOrConstant vtable_index,
 450                              Register method_result);
 451 
 452   // Test sub_klass against super_klass, with fast and slow paths.
 453 
 454   // The fast path produces a tri-state answer: yes / no / maybe-slow.
 455   // One of the three labels can be NULL, meaning take the fall-through.
 456   // If super_check_offset is -1, the value is loaded up from super_klass.


 489   RegisterOrConstant argument_offset(RegisterOrConstant arg_slot, Register temp_reg, int extra_slot_offset = 0);
 490 
 491   // Biased locking support
 492   // Upon entry, obj_reg must contain the target object, and mark_reg
 493   // must contain the target object's header.
 494   // Destroys mark_reg if an attempt is made to bias an anonymously
 495   // biased lock. In this case a failure will go either to the slow
 496   // case or fall through with the notEqual condition code set with
 497   // the expectation that the slow case in the runtime will be called.
 498   // In the fall-through case where the CAS-based lock is done,
 499   // mark_reg is not destroyed.
 500   void biased_locking_enter(ConditionRegister cr_reg, Register obj_reg, Register mark_reg, Register temp_reg,
 501                             Register temp2_reg, Label& done, Label* slow_case = NULL);
 502   // Upon entry, the base register of mark_addr must contain the oop.
 503   // Destroys temp_reg.
 504   // NOTE(review): the text here mentions 'allow_delay_slot_filling' and
 505   // annulled delay slots, but the declaration below takes no such parameter
 506   // and PPC has no branch delay slots; this looks copied from the SPARC port.
 507   void biased_locking_exit(ConditionRegister cr_reg, Register mark_addr, Register temp_reg, Label& done);
 508 
 509   void compiler_fast_lock_object(  ConditionRegister flag, Register oop, Register box, Register tmp1, Register tmp2, Register tmp3);
 510   void compiler_fast_unlock_object(ConditionRegister flag, Register oop, Register box, Register tmp1, Register tmp2, Register tmp3);


































 511 
 512   // Support for serializing memory accesses between threads
 513   void serialize_memory(Register thread, Register tmp1, Register tmp2);
 514 
 515   // GC barrier support.
 516   void card_write_barrier_post(Register Rstore_addr, Register Rnew_val, Register Rtmp);
 517   void card_table_write(jbyte* byte_map_base, Register Rtmp, Register Robj);
 518 
 519 #if INCLUDE_ALL_GCS
 520   // General G1 pre-barrier generator.
 521   void g1_write_barrier_pre(Register Robj, RegisterOrConstant offset, Register Rpre_val,
 522                             Register Rtmp1, Register Rtmp2, bool needs_frame = false);
 523   // General G1 post-barrier generator
 524   void g1_write_barrier_post(Register Rstore_addr, Register Rnew_val, Register Rtmp1,
 525                              Register Rtmp2, Register Rtmp3, Label *filtered_ext = NULL);
 526 #endif
 527 
 528   // Support for managing the JavaThread pointer (i.e., the reference to
 529   // thread-local information).
 530 


        // Returns true if instruction word x is a 'td' (trap doubleword) with
        // TO bits gt|lt unsigned (i.e. trap-if-unequal) on any register pair;
        // used to recognize the inline-cache miss trap check in generated code.
 559   static bool is_trap_ic_miss_check(int x) {
 560     return is_td(x, traptoGreaterThanUnsigned | traptoLessThanUnsigned, -1/*any reg*/, -1/*any reg*/);
 561   }
 562 
 563   // Implicit or explicit null check, jumps to static address exception_entry.
 564   inline void null_check_throw(Register a, int offset, Register temp_reg, address exception_entry);
 565 
 566   // Check accessed object for null. Use SIGTRAP-based null checks on AIX.
 567   inline void load_with_trap_null_check(Register d, int si16, Register s1);
 568 
 569   // Load heap oop and decompress. Loaded oop may not be null.
 570   // Specify tmp to save one cycle.
 571   inline void load_heap_oop_not_null(Register d, RegisterOrConstant offs, Register s1 = noreg,
 572                                      Register tmp = noreg);
 573   // Compress and store heap oop.  Oop to be stored may not be null.
 574   // Specify tmp register if d should not be changed.
 575   inline void store_heap_oop_not_null(Register d, RegisterOrConstant offs, Register s1,
 576                                       Register tmp = noreg);
 577 
 578   // Null allowed.
 579   inline void load_heap_oop(Register d, RegisterOrConstant offs, Register s1 = noreg);
 580 
 581   // Encode/decode heap oop. Oop may not be null, else en/decoding goes wrong.
 582   // src == d allowed.
 583   inline Register encode_heap_oop_not_null(Register d, Register src = noreg);
 584   inline Register decode_heap_oop_not_null(Register d, Register src = noreg);
 585 
 586   // Null allowed.
 587   inline void decode_heap_oop(Register d);
 588 
 589   // Load/Store klass oop from klass field. Compress.
 590   void load_klass(Register dst, Register src);
 591   void load_klass_with_trap_null_check(Register dst, Register src);
 592   void store_klass(Register dst_oop, Register klass, Register tmp = R0);
 593   void store_klass_gap(Register dst_oop, Register val = noreg); // Will store 0 if val not specified.
 594   static int instr_size_for_decode_klass_not_null();
 595   void decode_klass_not_null(Register dst, Register src = noreg);
 596   void encode_klass_not_null(Register dst, Register src = noreg);
 597 
 598   // Load common heap base into register.
 599   void reinit_heapbase(Register d, Register tmp = noreg);
 600 
 601   // SIGTRAP-based range checks for arrays.
 602   inline void trap_range_check_l(Register a, Register b);
 603   inline void trap_range_check_l(Register a, int si16);
        // Matches a tw/twi with TO bit 'less-than unsigned' -- the encodings
        // emitted by trap_range_check_l above (register and immediate forms).
 604   static bool is_trap_range_check_l(int x) {
 605     return (is_tw (x, traptoLessThanUnsigned, -1/*any reg*/, -1/*any reg*/) ||
 606             is_twi(x, traptoLessThanUnsigned, -1/*any reg*/)                  );
 607   }
 608   inline void trap_range_check_le(Register a, int si16);
        // Matches a twi with TO bits 'equal | less-than unsigned' (<= unsigned).
 609   static bool is_trap_range_check_le(int x) {
 610     return is_twi(x, traptoEqual | traptoLessThanUnsigned, -1/*any reg*/);
 611   }
 612   inline void trap_range_check_g(Register a, int si16);
        // Matches a twi with TO bit 'greater-than unsigned'.
 613   static bool is_trap_range_check_g(int x) {
 614     return is_twi(x, traptoGreaterThanUnsigned, -1/*any reg*/);
 615   }
 616   inline void trap_range_check_ge(Register a, Register b);


   1 /*
   2  * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright 2012, 2015 SAP AG. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #ifndef CPU_PPC_VM_MACROASSEMBLER_PPC_HPP
  27 #define CPU_PPC_VM_MACROASSEMBLER_PPC_HPP
  28 
  29 #include "asm/assembler.hpp"
  30 #include "runtime/rtmLocking.hpp"
  31 #include "utilities/macros.hpp"
  32 
  33 // MacroAssembler extends Assembler by a few frequently used macros.
  34 
  35 class ciTypeArray;
  36 
  37 class MacroAssembler: public Assembler {
  38  public:
  39   MacroAssembler(CodeBuffer* code) : Assembler(code) {}
  40 
  41   //
  42   // Optimized instruction emitters
  43   //
  44 
        // Split a 31-bit signed offset into two si16 halves for a two-instruction
        // (addis/addi style) sequence: (hi << 16) + lo == si31 always holds.
        // The (1<<15) rounding term compensates for the sign extension of the
        // low 16-bit immediate.
   45   inline static int largeoffset_si16_si16_hi(int si31) { return (si31 + (1<<15)) >> 16; }
   46   inline static int largeoffset_si16_si16_lo(int si31) { return si31 - (((si31 + (1<<15)) >> 16) << 16); }
  47 
  48   // load d = *[a+si31]
  49   // Emits several instructions if the offset is not encodable in one instruction.
  50   void ld_largeoffset_unchecked(Register d, int si31, Register a, int emit_filler_nop);


 416   // CmpxchgX sets condition register to cmpX(current, compare).
 417   // (flag == ne) => (dest_current_value != compare_value), (!swapped)
 418   // (flag == eq) => (dest_current_value == compare_value), ( swapped)
        // Hints passed as 'cmpxchgx_hint' to cmpxchgw/cmpxchgd below.
        // NOTE(review): presumably these select the lwarx/ldarx EH ("exclusive
        // access") hint bit -- confirm against the cmpxchg implementations.
 419   static inline bool cmpxchgx_hint_acquire_lock()  { return true; }
 420   // The stxcx will probably not be succeeded by a releasing store.
 421   static inline bool cmpxchgx_hint_release_lock()  { return false; }
 422   static inline bool cmpxchgx_hint_atomic_update() { return false; }
 423 
 424   // Cmpxchg semantics
 425   enum {
 426     MemBarNone = 0,
 427     MemBarRel  = 1,
 428     MemBarAcq  = 2,
 429     MemBarFenceAfter = 4 // use powers of 2
 430   };
 431   void cmpxchgw(ConditionRegister flag,
 432                 Register dest_current_value, Register compare_value, Register exchange_value, Register addr_base,
 433                 int semantics, bool cmpxchgx_hint = false,
 434                 Register int_flag_success = noreg, bool contention_hint = false);
 435   void cmpxchgd(ConditionRegister flag,
 436                 Register dest_current_value, RegisterOrConstant compare_value, Register exchange_value,
 437                 Register addr_base, int semantics, bool cmpxchgx_hint = false,
 438                 Register int_flag_success = noreg, Label* failed = NULL, bool contention_hint = false);
 439 
 440   // interface method calling
 441   void lookup_interface_method(Register recv_klass,
 442                                Register intf_klass,
 443                                RegisterOrConstant itable_index,
 444                                Register method_result,
 445                                Register temp_reg, Register temp2_reg,
 446                                Label& no_such_interface);
 447 
 448   // virtual method calling
 449   void lookup_virtual_method(Register recv_klass,
 450                              RegisterOrConstant vtable_index,
 451                              Register method_result);
 452 
 453   // Test sub_klass against super_klass, with fast and slow paths.
 454 
 455   // The fast path produces a tri-state answer: yes / no / maybe-slow.
 456   // One of the three labels can be NULL, meaning take the fall-through.
 457   // If super_check_offset is -1, the value is loaded up from super_klass.


 490   RegisterOrConstant argument_offset(RegisterOrConstant arg_slot, Register temp_reg, int extra_slot_offset = 0);
 491 
 492   // Biased locking support
 493   // Upon entry, obj_reg must contain the target object, and mark_reg
 494   // must contain the target object's header.
 495   // Destroys mark_reg if an attempt is made to bias an anonymously
 496   // biased lock. In this case a failure will go either to the slow
 497   // case or fall through with the notEqual condition code set with
 498   // the expectation that the slow case in the runtime will be called.
 499   // In the fall-through case where the CAS-based lock is done,
 500   // mark_reg is not destroyed.
 501   void biased_locking_enter(ConditionRegister cr_reg, Register obj_reg, Register mark_reg, Register temp_reg,
 502                             Register temp2_reg, Label& done, Label* slow_case = NULL);
 503   // Upon entry, the base register of mark_addr must contain the oop.
 504   // Destroys temp_reg.
 505   // NOTE(review): the text here mentions 'allow_delay_slot_filling' and
 506   // annulled delay slots, but the declaration below takes no such parameter
 507   // and PPC has no branch delay slots; this looks copied from the SPARC port.
 508   void biased_locking_exit(ConditionRegister cr_reg, Register mark_addr, Register temp_reg, Label& done);
 509 
 510   void atomic_inc_ptr(Register addr, Register result, int simm16 = 1);
 511   void atomic_ori_int(Register addr, Register result, int uimm16);
 512 
 513 #if INCLUDE_RTM_OPT
 514   void rtm_counters_update(Register abort_status, Register rtm_counters);
 515   void branch_on_random_using_tb(Register tmp, int count, Label& brLabel);
 516   void rtm_abort_ratio_calculation(Register rtm_counters_reg, RTMLockingCounters* rtm_counters,
 517                                    Metadata* method_data);
 518   void rtm_profiling(Register abort_status_Reg, Register temp_Reg,
 519                      RTMLockingCounters* rtm_counters, Metadata* method_data, bool profile_rtm);
 520   void rtm_retry_lock_on_abort(Register retry_count, Register abort_status,
 521                                Label& retryLabel, Label* checkRetry = NULL);
 522   void rtm_retry_lock_on_busy(Register retry_count, Register owner_addr, Label& retryLabel);
 523   void rtm_stack_locking(ConditionRegister flag, Register obj, Register mark_word, Register tmp,
 524                          Register retry_on_abort_count,
 525                          RTMLockingCounters* stack_rtm_counters,
 526                          Metadata* method_data, bool profile_rtm,
 527                          Label& DONE_LABEL, Label& IsInflated);
 528   void rtm_inflated_locking(ConditionRegister flag, Register obj, Register mark_word, Register box,
 529                             Register retry_on_busy_count, Register retry_on_abort_count,
 530                             RTMLockingCounters* rtm_counters,
 531                             Metadata* method_data, bool profile_rtm,
 532                             Label& DONE_LABEL);
 533 #endif
 534 
 535   void compiler_fast_lock_object(ConditionRegister flag, Register oop, Register box,
 536                                  Register tmp1, Register tmp2, Register tmp3,
 537                                  bool try_bias = UseBiasedLocking,
 538                                  RTMLockingCounters* rtm_counters = NULL,
 539                                  RTMLockingCounters* stack_rtm_counters = NULL,
 540                                  Metadata* method_data = NULL,
 541                                  bool use_rtm = false, bool profile_rtm = false);
 542 
 543   void compiler_fast_unlock_object(ConditionRegister flag, Register oop, Register box,
 544                                    Register tmp1, Register tmp2, Register tmp3,
 545                                    bool try_bias = UseBiasedLocking, bool use_rtm = false);
 546 
 547   // Support for serializing memory accesses between threads
 548   void serialize_memory(Register thread, Register tmp1, Register tmp2);
 549 
 550   // GC barrier support.
 551   void card_write_barrier_post(Register Rstore_addr, Register Rnew_val, Register Rtmp);
 552   void card_table_write(jbyte* byte_map_base, Register Rtmp, Register Robj);
 553 
 554 #if INCLUDE_ALL_GCS
 555   // General G1 pre-barrier generator.
 556   void g1_write_barrier_pre(Register Robj, RegisterOrConstant offset, Register Rpre_val,
 557                             Register Rtmp1, Register Rtmp2, bool needs_frame = false);
 558   // General G1 post-barrier generator
 559   void g1_write_barrier_post(Register Rstore_addr, Register Rnew_val, Register Rtmp1,
 560                              Register Rtmp2, Register Rtmp3, Label *filtered_ext = NULL);
 561 #endif
 562 
 563   // Support for managing the JavaThread pointer (i.e., the reference to
 564   // thread-local information).
 565 


        // Returns true if instruction word x is a 'td' (trap doubleword) with
        // TO bits gt|lt unsigned (i.e. trap-if-unequal) on any register pair;
        // used to recognize the inline-cache miss trap check in generated code.
 594   static bool is_trap_ic_miss_check(int x) {
 595     return is_td(x, traptoGreaterThanUnsigned | traptoLessThanUnsigned, -1/*any reg*/, -1/*any reg*/);
 596   }
 597 
 598   // Implicit or explicit null check, jumps to static address exception_entry.
 599   inline void null_check_throw(Register a, int offset, Register temp_reg, address exception_entry);
 600 
 601   // Check accessed object for null. Use SIGTRAP-based null checks on AIX.
 602   inline void load_with_trap_null_check(Register d, int si16, Register s1);
 603 
 604   // Load heap oop and decompress. Loaded oop may not be null.
 605   // Specify tmp to save one cycle.
 606   inline void load_heap_oop_not_null(Register d, RegisterOrConstant offs, Register s1 = noreg,
 607                                      Register tmp = noreg);
 608   // Compress and store heap oop.  Oop to be stored may not be null.
 609   // Specify tmp register if d should not be changed.
 610   inline void store_heap_oop_not_null(Register d, RegisterOrConstant offs, Register s1,
 611                                       Register tmp = noreg);
 612 
 613   // Null allowed.
 614   inline void load_heap_oop(Register d, RegisterOrConstant offs, Register s1 = noreg, Label *is_null = NULL);
 615 
 616   // Encode/decode heap oop. Oop may not be null, else en/decoding goes wrong.
 617   // src == d allowed.
 618   inline Register encode_heap_oop_not_null(Register d, Register src = noreg);
 619   inline Register decode_heap_oop_not_null(Register d, Register src = noreg);
 620 
 621   // Null allowed.
 622   inline void decode_heap_oop(Register d);
 623 
 624   // Load/Store klass oop from klass field. Compress.
 625   void load_klass(Register dst, Register src);
 626   void load_klass_with_trap_null_check(Register dst, Register src);
 627   void store_klass(Register dst_oop, Register klass, Register tmp = R0);
 628   void store_klass_gap(Register dst_oop, Register val = noreg); // Will store 0 if val not specified.
 629   static int instr_size_for_decode_klass_not_null();
 630   void decode_klass_not_null(Register dst, Register src = noreg);
 631   Register encode_klass_not_null(Register dst, Register src = noreg);
 632 
 633   // Load common heap base into register.
 634   void reinit_heapbase(Register d, Register tmp = noreg);
 635 
 636   // SIGTRAP-based range checks for arrays.
 637   inline void trap_range_check_l(Register a, Register b);
 638   inline void trap_range_check_l(Register a, int si16);
        // Matches a tw/twi with TO bit 'less-than unsigned' -- the encodings
        // emitted by trap_range_check_l above (register and immediate forms).
 639   static bool is_trap_range_check_l(int x) {
 640     return (is_tw (x, traptoLessThanUnsigned, -1/*any reg*/, -1/*any reg*/) ||
 641             is_twi(x, traptoLessThanUnsigned, -1/*any reg*/)                  );
 642   }
 643   inline void trap_range_check_le(Register a, int si16);
        // Matches a twi with TO bits 'equal | less-than unsigned' (<= unsigned).
 644   static bool is_trap_range_check_le(int x) {
 645     return is_twi(x, traptoEqual | traptoLessThanUnsigned, -1/*any reg*/);
 646   }
 647   inline void trap_range_check_g(Register a, int si16);
        // Matches a twi with TO bit 'greater-than unsigned'.
 648   static bool is_trap_range_check_g(int x) {
 649     return is_twi(x, traptoGreaterThanUnsigned, -1/*any reg*/);
 650   }
 651   inline void trap_range_check_ge(Register a, Register b);


< prev index next >