/*
 * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderData.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/interp_masm.hpp"
#include "memory/universe.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/thread.hpp"

#define __ masm->

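// Loads a value of the given type from src into dst. Oops are decoded from
// their compressed form when UseCompressedOops is set. For T_FLOAT, T_DOUBLE
// and T_LONG the result is produced in the interpreter's tos state (ftos,
// dtos, ltos) rather than in dst, which must be noreg for those types.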
void BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                  Register dst, Address src, Register tmp1, Register tmp_thread) {
  bool in_heap = (decorators & IN_HEAP) != 0;
  bool in_native = (decorators & IN_NATIVE) != 0;
  bool is_not_null = (decorators & IS_NOT_NULL) != 0;
  bool atomic = (decorators & MO_RELAXED) != 0;

  assert(type != T_VALUETYPE, "Not supported yet");
  switch (type) {
  case T_OBJECT:
  case T_ARRAY: {
    if (in_heap) {
#ifdef _LP64
      if (UseCompressedOops) {
        __ movl(dst, src);
        if (is_not_null) {
          __ decode_heap_oop_not_null(dst);
        } else {
          __ decode_heap_oop(dst);
        }
      } else
#endif
      {
        __ movptr(dst, src);
      }
    } else {
      assert(in_native, "why else?");
      __ movptr(dst, src);
    }
    break;
  }
  case T_BOOLEAN: __ load_unsigned_byte(dst, src);  break;
  case T_BYTE:    __ load_signed_byte(dst, src);    break;
  case T_CHAR:    __ load_unsigned_short(dst, src); break;
  case T_SHORT:   __ load_signed_short(dst, src);   break;
  case T_INT:     __ movl  (dst, src);              break;
  case T_ADDRESS: __ movptr(dst, src);              break;
  case T_FLOAT:
    assert(dst == noreg, "only to ftos");
    __ load_float(src);
    break;
  case T_DOUBLE:
    assert(dst == noreg, "only to dtos");
    __ load_double(src);
    break;
  case T_LONG:
    assert(dst == noreg, "only to ltos");
#ifdef _LP64
    __ movq(rax, src);
#else
    if (atomic) {
      __ fild_d(src);               // Must load atomically
      __ subptr(rsp,2*wordSize);    // Make space for store
      __ fistp_d(Address(rsp,0));
      __ pop(rax);
      __ pop(rdx);
    } else {
      __ movl(rax, src);
      __ movl(rdx, src.plus_disp(wordSize));
    }
#endif
    break;
  default: Unimplemented();
  }
}

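// Stores val of the given type to dst. A noreg val stores a NULL oop; other
// oops are encoded to their compressed form first when UseCompressedOops is
// set. For T_FLOAT, T_DOUBLE and T_LONG the value is taken from the
// interpreter's tos state, so val must be noreg for those types.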
void BarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                   Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) {
  bool in_heap = (decorators & IN_HEAP) != 0;
  bool in_native = (decorators & IN_NATIVE) != 0;
  bool is_not_null = (decorators & IS_NOT_NULL) != 0;
  bool atomic = (decorators & MO_RELAXED) != 0;

  assert(type != T_VALUETYPE, "Not supported yet");
  switch (type) {
  case T_OBJECT:
  case T_ARRAY: {
    if (in_heap) {
      if (val == noreg) {
        assert(!is_not_null, "inconsistent access");
#ifdef _LP64
        if (UseCompressedOops) {
          __ movl(dst, (int32_t)NULL_WORD);
        } else {
          __ movslq(dst, (int32_t)NULL_WORD);
        }
#else
        __ movl(dst, (int32_t)NULL_WORD);
#endif
      } else {
#ifdef _LP64
        if (UseCompressedOops) {
          assert(!dst.uses(val), "not enough registers");
          if (is_not_null) {
            __ encode_heap_oop_not_null(val);
          } else {
            __ encode_heap_oop(val);
          }
          __ movl(dst, val);
        } else
#endif
        {
          __ movptr(dst, val);
        }
      }
    } else {
      assert(in_native, "why else?");
      assert(val != noreg, "not supported");
      __ movptr(dst, val);
    }
    break;
  }
  case T_BOOLEAN:
    __ andl(val, 0x1);  // boolean is true if LSB is 1
    __ movb(dst, val);
    break;
  case T_BYTE:
    __ movb(dst, val);
    break;
  case T_SHORT:
    __ movw(dst, val);
    break;
  case T_CHAR:
    __ movw(dst, val);
    break;
  case T_INT:
    __ movl(dst, val);
    break;
  case T_LONG:
    assert(val == noreg, "only tos");
#ifdef _LP64
    __ movq(dst, rax);
#else
    if (atomic) {
      __ push(rdx);
      __ push(rax);                 // Must update atomically with FIST
      __ fild_d(Address(rsp,0));    // So load into FPU register
      __ fistp_d(dst);              // and put into memory atomically
      __ addptr(rsp, 2*wordSize);
    } else {
      __ movptr(dst, rax);
      __ movptr(dst.plus_disp(wordSize), rdx);
    }
#endif
    break;
  case T_FLOAT:
    assert(val == noreg, "only tos");
    __ store_float(dst);
    break;
  case T_DOUBLE:
    assert(val == noreg, "only tos");
    __ store_double(dst);
    break;
  case T_ADDRESS:
    __ movptr(dst, val);
    break;
  default: Unimplemented();
  }
}

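// Oop equality checks. These are virtual so that barrier sets for which a raw
// pointer comparison is not sufficient to decide object identity can override
// them; the default versions compare the raw pointer bits.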
#ifndef _LP64
void BarrierSetAssembler::obj_equals(MacroAssembler* masm,
                                     Address obj1, jobject obj2) {
  __ cmpoop_raw(obj1, obj2);
}

void BarrierSetAssembler::obj_equals(MacroAssembler* masm,
                                     Register obj1, jobject obj2) {
  __ cmpoop_raw(obj1, obj2);
}
#endif

void BarrierSetAssembler::obj_equals(MacroAssembler* masm,
                                     Register obj1, Address obj2) {
  __ cmpptr(obj1, obj2);
}

void BarrierSetAssembler::obj_equals(MacroAssembler* masm,
                                     Register obj1, Register obj2) {
  __ cmpptr(obj1, obj2);
}

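// Resolves a jobject in native code: clears the jweak tag bit and loads the
// oop through the handle. This default version never takes slowpath; barrier
// sets that need extra work for weak handles override it and branch to
// slowpath as required.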
void BarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
                                                        Register obj, Register tmp, Label& slowpath) {
  __ clear_jweak_tag(obj);
  __ movptr(obj, Address(obj, 0));
}

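// Bump-pointer allocation from the current thread's TLAB: allocates
// con_size_in_bytes (or var_size_in_bytes, if that register is valid) and
// returns the new object in obj, or jumps to slow_case if the TLAB does not
// have enough room left.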
void BarrierSetAssembler::tlab_allocate(MacroAssembler* masm,
                                        Register thread, Register obj,
                                        Register var_size_in_bytes,
                                        int con_size_in_bytes,
                                        Register t1,
                                        Register t2,
                                        Label& slow_case) {
  assert_different_registers(obj, t1, t2);
  assert_different_registers(obj, var_size_in_bytes, t1);
  Register end = t2;
  if (!thread->is_valid()) {
#ifdef _LP64
    thread = r15_thread;
#else
    assert(t1->is_valid(), "need temp reg");
    thread = t1;
    __ get_thread(thread);
#endif
  }

  __ verify_tlab();

  __ movptr(obj, Address(thread, JavaThread::tlab_top_offset()));
  if (var_size_in_bytes == noreg) {
    __ lea(end, Address(obj, con_size_in_bytes));
  } else {
    __ lea(end, Address(obj, var_size_in_bytes, Address::times_1));
  }
  __ cmpptr(end, Address(thread, JavaThread::tlab_end_offset()));
  __ jcc(Assembler::above, slow_case);

  // update the tlab top pointer
  __ movptr(Address(thread, JavaThread::tlab_top_offset()), end);

  // recover var_size_in_bytes if necessary
  if (var_size_in_bytes == end) {
    __ subptr(var_size_in_bytes, obj);
  }
  __ verify_tlab();
}

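// CAS-based bump-pointer allocation directly from eden, used when the heap
// supports inline contiguous allocation; otherwise it jumps straight to
// slow_case.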
// Defines obj, preserves var_size_in_bytes
void BarrierSetAssembler::eden_allocate(MacroAssembler* masm,
                                        Register thread, Register obj,
                                        Register var_size_in_bytes,
                                        int con_size_in_bytes,
                                        Register t1,
                                        Label& slow_case) {
  assert(obj == rax, "obj must be in rax, for cmpxchg");
  assert_different_registers(obj, var_size_in_bytes, t1);
  if (!Universe::heap()->supports_inline_contig_alloc()) {
    __ jmp(slow_case);
  } else {
    Register end = t1;
    Label retry;
    __ bind(retry);
    ExternalAddress heap_top((address) Universe::heap()->top_addr());
    __ movptr(obj, heap_top);
    if (var_size_in_bytes == noreg) {
      __ lea(end, Address(obj, con_size_in_bytes));
    } else {
      __ lea(end, Address(obj, var_size_in_bytes, Address::times_1));
    }
    // if end < obj then we wrapped around => object too long => slow case
    __ cmpptr(end, obj);
    __ jcc(Assembler::below, slow_case);
    __ cmpptr(end, ExternalAddress((address) Universe::heap()->end_addr()));
    __ jcc(Assembler::above, slow_case);
    // Compare obj against the current top addr; if they are still equal, store
    // end (the new top addr) at the top addr pointer. Sets ZF if the compare
    // succeeded, and clears it otherwise. Use lock prefix for atomicity on MPs.
    __ locked_cmpxchgptr(end, heap_top);
    __ jcc(Assembler::notEqual, retry);
    incr_allocated_bytes(masm, thread, var_size_in_bytes, con_size_in_bytes, thread->is_valid() ? noreg : t1);
  }
}

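// Bumps the per-thread allocated-bytes counter by the constant or variable
// allocation size. On 32-bit the 64-bit counter is updated with an
// addl/adcl pair.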
void BarrierSetAssembler::incr_allocated_bytes(MacroAssembler* masm, Register thread,
                                               Register var_size_in_bytes,
                                               int con_size_in_bytes,
                                               Register t1) {
  if (!thread->is_valid()) {
#ifdef _LP64
    thread = r15_thread;
#else
    assert(t1->is_valid(), "need temp reg");
    thread = t1;
    __ get_thread(thread);
#endif
  }

#ifdef _LP64
  if (var_size_in_bytes->is_valid()) {
    __ addq(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), var_size_in_bytes);
  } else {
    __ addq(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), con_size_in_bytes);
  }
#else
  if (var_size_in_bytes->is_valid()) {
    __ addl(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), var_size_in_bytes);
  } else {
    __ addl(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), con_size_in_bytes);
  }
  __ adcl(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())+4), 0);
#endif
}

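// Emits the nmethod entry barrier: compares the thread-local disarmed value
// against an immediate in the instruction stream and calls the barrier stub
// on a mismatch. The runtime arms nmethods by patching that immediate, and
// the align(8) keeps the compare patchable atomically. 64-bit only.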
void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm) {
  BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
  if (bs_nm == NULL) {
    return;
  }
#ifndef _LP64
  ShouldNotReachHere();
#else
  Label continuation;
  Register thread = LP64_ONLY(r15_thread);
  Address disarmed_addr(thread, in_bytes(bs_nm->thread_disarmed_offset()));
  __ align(8);
  __ cmpl(disarmed_addr, 0);
  __ jcc(Assembler::equal, continuation);
  __ call(RuntimeAddress(StubRoutines::x86::method_entry_barrier()));
  __ bind(continuation);
#endif
}

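// c2i entry barrier: before a method is entered through a c2i adapter, check
// that its holder's ClassLoaderData is alive (either strongly kept alive or
// reachable through a live weak handle). If not, the method may be
// concurrently unloading, so divert to the handle_wrong_method stub.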
void BarrierSetAssembler::c2i_entry_barrier(MacroAssembler* masm) {
  BarrierSetNMethod* bs = BarrierSet::barrier_set()->barrier_set_nmethod();
  if (bs == NULL) {
    return;
  }

  Label bad_call;
  __ cmpptr(rbx, 0); // rbx contains the incoming method for c2i adapters.
  __ jcc(Assembler::equal, bad_call);

  // Pointer chase to the method holder to find out if the method is concurrently unloading.
  Label method_live;
  __ load_method_holder_cld(rscratch1, rbx);

  // Is it a strong CLD?
  __ movl(rscratch2, Address(rscratch1, ClassLoaderData::keep_alive_offset()));
  __ cmpptr(rscratch2, 0);
  __ jcc(Assembler::greater, method_live);

  // Is it a weak but alive CLD?
  __ movptr(rscratch1, Address(rscratch1, ClassLoaderData::holder_offset()));
  __ resolve_weak_handle(rscratch1, rscratch2);
  __ cmpptr(rscratch1, 0);
  __ jcc(Assembler::notEqual, method_live);

  __ bind(bad_call);
  __ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
  __ bind(method_live);
}