< prev index next >

src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.cpp

Print this page
rev 58823 : [mq]: aarch64-jdk-nmethod-barriers-3.patch
   1 /*
   2  * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"

  26 #include "gc/shared/barrierSetAssembler.hpp"

  27 #include "gc/shared/collectedHeap.hpp"

  28 #include "memory/universe.hpp"
  29 #include "runtime/jniHandles.hpp"

  30 #include "runtime/thread.hpp"
  31 

  32 #define __ masm->
  33 
  34 void BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
  35                                   Register dst, Address src, Register tmp1, Register tmp_thread) {
  36 
  37   // LR is live.  It must be saved around calls.
  38 
  39   bool in_heap = (decorators & IN_HEAP) != 0;
  40   bool in_native = (decorators & IN_NATIVE) != 0;
  41   bool is_not_null = (decorators & IS_NOT_NULL) != 0;
  42   switch (type) {
  43   case T_OBJECT:
  44   case T_ARRAY: {
  45     if (in_heap) {
  46       if (UseCompressedOops) {
  47         __ ldrw(dst, src);
  48         if (is_not_null) {
  49           __ decode_heap_oop_not_null(dst);
  50         } else {
  51           __ decode_heap_oop(dst);


 212     __ cbnzw(rscratch2, retry);
 213 
 214     incr_allocated_bytes(masm, var_size_in_bytes, con_size_in_bytes, t1);
 215   }
 216 }
 217 
 218 void BarrierSetAssembler::incr_allocated_bytes(MacroAssembler* masm,
 219                                                Register var_size_in_bytes,
 220                                                int con_size_in_bytes,
 221                                                Register t1) {
 222   assert(t1->is_valid(), "need temp reg");
 223 
 224   __ ldr(t1, Address(rthread, in_bytes(JavaThread::allocated_bytes_offset())));
 225   if (var_size_in_bytes->is_valid()) {
 226     __ add(t1, t1, var_size_in_bytes);
 227   } else {
 228     __ add(t1, t1, con_size_in_bytes);
 229   }
 230   __ str(t1, Address(rthread, in_bytes(JavaThread::allocated_bytes_offset())));
 231 }
































































   1 /*
   2  * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/shared/barrierSet.hpp"
  27 #include "gc/shared/barrierSetAssembler.hpp"
  28 #include "gc/shared/barrierSetNMethod.hpp"
  29 #include "gc/shared/collectedHeap.hpp"
  30 #include "interpreter/interp_masm.hpp"
  31 #include "memory/universe.hpp"
  32 #include "runtime/jniHandles.hpp"
  33 #include "runtime/sharedRuntime.hpp"
  34 #include "runtime/thread.hpp"
  35 
  36 
  37 #define __ masm->
  38 
  39 void BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
  40                                   Register dst, Address src, Register tmp1, Register tmp_thread) {
  41 
  42   // LR is live.  It must be saved around calls.
  43 
  44   bool in_heap = (decorators & IN_HEAP) != 0;
  45   bool in_native = (decorators & IN_NATIVE) != 0;
  46   bool is_not_null = (decorators & IS_NOT_NULL) != 0;
  47   switch (type) {
  48   case T_OBJECT:
  49   case T_ARRAY: {
  50     if (in_heap) {
  51       if (UseCompressedOops) {
  52         __ ldrw(dst, src);
  53         if (is_not_null) {
  54           __ decode_heap_oop_not_null(dst);
  55         } else {
  56           __ decode_heap_oop(dst);


 217     __ cbnzw(rscratch2, retry);
 218 
 219     incr_allocated_bytes(masm, var_size_in_bytes, con_size_in_bytes, t1);
 220   }
 221 }
 222 
 223 void BarrierSetAssembler::incr_allocated_bytes(MacroAssembler* masm,
 224                                                Register var_size_in_bytes,
 225                                                int con_size_in_bytes,
 226                                                Register t1) {
 227   assert(t1->is_valid(), "need temp reg");
 228 
 229   __ ldr(t1, Address(rthread, in_bytes(JavaThread::allocated_bytes_offset())));
 230   if (var_size_in_bytes->is_valid()) {
 231     __ add(t1, t1, var_size_in_bytes);
 232   } else {
 233     __ add(t1, t1, con_size_in_bytes);
 234   }
 235   __ str(t1, Address(rthread, in_bytes(JavaThread::allocated_bytes_offset())));
 236 }
 237 
// Emit the nmethod entry barrier: a fast armed/disarmed check at the top of
// compiled code.  A 32-bit guard value is embedded in the instruction stream
// (at the `guard` label below) and compared against the thread-local disarmed
// value; when they match the barrier is disarmed and execution falls through.
// Otherwise the slow-path stub is called.  Emits nothing when no
// BarrierSetNMethod is in use.
void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm) {
  BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();

  if (bs_nm == NULL) {
    return;
  }

  Label skip, guard;
  Address thread_disarmed_addr(rthread, in_bytes(bs_nm->thread_disarmed_offset()));

  // PC-relative literal load of the guard word embedded at `guard` below.
  __ ldrw(rscratch1, guard);

  // Subsequent loads of oops must occur after load of guard value.
  // BarrierSetNMethod::disarm sets guard with release semantics.
  __ membar(__ LoadLoad);
  __ ldrw(rscratch2, thread_disarmed_addr);
  __ cmpw(rscratch1, rscratch2);
  // Guard matches the thread's disarmed value: barrier is disarmed, fast path.
  __ br(Assembler::EQ, skip);

  // Armed: call the slow-path stub, then jump over the embedded guard word.
  __ mov(rscratch1, StubRoutines::aarch64::method_entry_barrier());
  __ blr(rscratch1);
  __ b(skip);

  __ bind(guard);

  __ emit_int32(0);   // nmethod guard value. Skipped over in common case.

  __ bind(skip);
}
 267 
// Emit the c2i adapter entry barrier: before entering the interpreter via a
// c2i adapter, verify that rmethod refers to a method whose holder class
// loader is still alive.  A null method, or a method whose CLD is neither
// strong nor weakly-still-alive, is routed to the wrong-method stub.  Emits
// nothing when no BarrierSetNMethod is in use.
void BarrierSetAssembler::c2i_entry_barrier(MacroAssembler* masm) {
  BarrierSetNMethod* bs = BarrierSet::barrier_set()->barrier_set_nmethod();
  if (bs == NULL) {
    return;
  }

  // Null rmethod means the caller handed us no valid target.
  Label bad_call;
  __ cbz(rmethod, bad_call);

  // Pointer chase to the method holder to find out if the method is concurrently unloading.
  Label method_live;
  __ load_method_holder_cld(rscratch1, rmethod);

  // Is it a strong CLD?
  __ ldr(rscratch2, Address(rscratch1, ClassLoaderData::keep_alive_offset()));
  __ cbnz(rscratch2, method_live);

  // Is it a weak but alive CLD?
  // Save r10/r11 on the stack; resolve_weak_handle clobbers them below.
  __ stp(r10, r11, Address(__ pre(sp, -2 * wordSize)));
  __ ldr(r10, Address(rscratch1, ClassLoaderData::holder_offset()));

  // Uses rscratch1 & rscratch2, so we must pass new temporaries.
  __ resolve_weak_handle(r10, r11);
  // Move the result out of r10 before restoring the saved registers.
  __ mov(rscratch1, r10);
  __ ldp(r10, r11, Address(__ post(sp, 2 * wordSize)));
  // Non-null resolved holder: the weak CLD is still alive.
  __ cbnz(rscratch1, method_live);

  __ bind(bad_call);

  // Dead or missing method: dispatch to the shared wrong-method handler.
  __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
  __ bind(method_live);
}
 300 
< prev index next >