src/cpu/arm/vm/macroAssembler_arm.hpp

rev 12652 : [mq]: kimpatch
   1 /*
   2  * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *


 384   // codes set (for C2's Fast_Lock node) and jumps to done label.
 385   // Falls through for the fast locking attempt.
 386   // Returns the offset of the first potentially-faulting instruction,
 387   // for use as null check info (currently consumed only by C1). If
 388   // swap_reg_contains_mark is true, returns -1, since the calling code
 389   // is assumed to have already executed past any potential faults.
 390   // Notes:
 391   // - swap_reg and tmp_reg are scratched
 392   // - Rtemp was (implicitly) scratched and can now be passed as the tmp2 argument
 393   int biased_locking_enter(Register obj_reg, Register swap_reg, Register tmp_reg,
 394                            bool swap_reg_contains_mark,
 395                            Register tmp2,
 396                            Label& done, Label& slow_case,
 397                            BiasedLockingCounters* counters = NULL);
 398   void biased_locking_exit(Register obj_reg, Register temp_reg, Label& done);
 399 
 400   // Building block for the CAS cases of biased locking: performs the CAS and records statistics.
 401   // The optional slow_case label receives control if the CAS fails; otherwise condition codes are left set.
 402   void biased_locking_enter_with_cas(Register obj_reg, Register old_mark_reg, Register new_mark_reg,
 403                                      Register tmp, Label& slow_case, int* counter_addr);
 404 
 405 #ifndef AARCH64
 406   void nop() {
 407     mov(R0, R0);
 408   }
 409 
 410   void push(Register rd, AsmCondition cond = al) {
 411     assert(rd != SP, "unpredictable instruction");
 412     str(rd, Address(SP, -wordSize, pre_indexed), cond);
 413   }
 414 
 415   void push(RegisterSet reg_set, AsmCondition cond = al) {
 416     assert(!reg_set.contains(SP), "unpredictable instruction");
 417     stmdb(SP, reg_set, writeback, cond);
 418   }
 419 
 420   void pop(Register rd, AsmCondition cond = al) {
 421     assert(rd != SP, "unpredictable instruction");
 422     ldr(rd, Address(SP, wordSize, post_indexed), cond);
 423   }


   1 /*
   2  * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *


 384   // codes set (for C2's Fast_Lock node) and jumps to done label.
 385   // Falls through for the fast locking attempt.
 386   // Returns the offset of the first potentially-faulting instruction,
 387   // for use as null check info (currently consumed only by C1). If
 388   // swap_reg_contains_mark is true, returns -1, since the calling code
 389   // is assumed to have already executed past any potential faults.
 390   // Notes:
 391   // - swap_reg and tmp_reg are scratched
 392   // - Rtemp was (implicitly) scratched and can now be passed as the tmp2 argument
 393   int biased_locking_enter(Register obj_reg, Register swap_reg, Register tmp_reg,
 394                            bool swap_reg_contains_mark,
 395                            Register tmp2,
 396                            Label& done, Label& slow_case,
 397                            BiasedLockingCounters* counters = NULL);
 398   void biased_locking_exit(Register obj_reg, Register temp_reg, Label& done);
 399 
 400   // Building block for the CAS cases of biased locking: performs the CAS and records statistics.
 401   // The optional slow_case label receives control if the CAS fails; otherwise condition codes are left set.
 402   void biased_locking_enter_with_cas(Register obj_reg, Register old_mark_reg, Register new_mark_reg,
 403                                      Register tmp, Label& slow_case, int* counter_addr);
 404 
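A rough sketch of how a caller is expected to drive biased_locking_enter,
reconstructed only from the comments above; the register names (Robj,
Rswap, Rtmp, Rtmp2) and the __ shorthand are illustrative assumptions,
not code from this patch:

    Label done, slow_case;
    int null_check_offset =
        __ biased_locking_enter(Robj, Rswap, Rtmp,
                                false /* mark word not yet in swap_reg */,
                                Rtmp2, done, slow_case);
    // Falls through here for the fast locking attempt. The returned
    // offset is -1 when swap_reg_contains_mark is true, because the
    // caller has already executed past any potentially-faulting access.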
 405   void resolve_jobject(Register value, Register tmp1, Register tmp2);
 406 
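The new resolve_jobject hook decodes a JNI handle into an oop before the
value is used. A conceptual C++ rendering of the decoding it must perform
(the low-bit weak tag and the helper name are assumptions about the shared
JNIHandles scheme, not taken from this patch):

    // Illustrative only; the assembler version emits this logic inline.
    oop resolve_jobject_sketch(jobject handle) {
      if (handle == NULL) return NULL;      // a null handle yields a null oop
      if ((uintptr_t)handle & 1) {          // low tag bit set: weak handle
        // Strip the tag and load through the weak handle; a GC such as G1
        // may additionally require a read barrier on this weak load.
        return *(oop*)((uintptr_t)handle & ~(uintptr_t)1);
      }
      return *(oop*)handle;                 // strong local/global handle
    }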
 407 #if INCLUDE_ALL_GCS
 408   // G1 pre-barrier.
 409   // Blows all volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR).
 410   // If store_addr != noreg, then previous value is loaded from [store_addr];
 411   // in such case store_addr and new_val registers are preserved;
 412   // otherwise pre_val register is preserved.
 413   void g1_write_barrier_pre(Register store_addr,
 414                             Register new_val,
 415                             Register pre_val,
 416                             Register tmp1,
 417                             Register tmp2);
 418 
 419   // G1 post-barrier.
 420   // Blows all volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR).
 421   void g1_write_barrier_post(Register store_addr,
 422                              Register new_val,
 423                              Register tmp1,
 424                              Register tmp2,
 425                              Register tmp3);
 426 #endif // INCLUDE_ALL_GCS
 427 
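Taken together, the two hooks bracket an oop store under G1's SATB
(pre-barrier) plus card-marking (post-barrier) design. A sketch of the
sequence a caller would emit, with placeholder registers and an
illustrative str form:

    // Illustrative oop store with both barriers (not code from this patch):
    __ g1_write_barrier_pre(Rstore_addr, Rnew_val,
                            noreg /* pre_val is loaded from [store_addr] */,
                            Rtmp1, Rtmp2);         // SATB: log the old value
    __ str(Rnew_val, Address(Rstore_addr));        // the actual oop store
    __ g1_write_barrier_post(Rstore_addr, Rnew_val,
                             Rtmp1, Rtmp2, Rtmp3); // dirty the card if needed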
 428 #ifndef AARCH64
 429   void nop() {
 430     mov(R0, R0);
 431   }
 432 
 433   void push(Register rd, AsmCondition cond = al) {
 434     assert(rd != SP, "unpredictable instruction");
 435     str(rd, Address(SP, -wordSize, pre_indexed), cond);
 436   }
 437 
 438   void push(RegisterSet reg_set, AsmCondition cond = al) {
 439     assert(!reg_set.contains(SP), "unpredictable instruction");
 440     stmdb(SP, reg_set, writeback, cond);
 441   }
 442 
 443   void pop(Register rd, AsmCondition cond = al) {
 444     assert(rd != SP, "unpredictable instruction");
 445     ldr(rd, Address(SP, wordSize, post_indexed), cond);
 446   }
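For reviewers less familiar with these addressing modes, the 32-bit ARM
instructions the helpers above reduce to (with wordSize == 4) are roughly:

    push(rd):   str   rd, [sp, #-4]!   ; pre-indexed store, SP decremented first
    push(set):  stmdb sp!, {set}       ; store multiple, decrement before
    pop(rd):    ldr   rd, [sp], #4     ; post-indexed load, SP incremented after
    nop():      mov   r0, r0           ; the traditional ARM no-op encoding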

