
src/hotspot/share/gc/shared/c1/barrierSetC1.cpp

c1_root_access
   LIR_Opr resolved = resolve_address(access, false);
   access.set_resolved_addr(resolved);
   load_at_resolved(access, result);
 }
 
+void BarrierSetC1::load(LIRAccess& access, LIR_Opr result) {
+  DecoratorSet decorators = access.decorators();
+  bool in_heap = (decorators & IN_HEAP) != 0;
+  assert(!in_heap, "consider using load_at");
+  load_at_resolved(access, result);
+}
+
 LIR_Opr BarrierSetC1::atomic_cmpxchg_at(LIRAccess& access, LIRItem& cmp_value, LIRItem& new_value) {
   DecoratorSet decorators = access.decorators();
   bool in_heap = (decorators & IN_HEAP) != 0;
   assert(in_heap, "not supported yet");
 

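The hunk above introduces a separate `load` entry point alongside the existing `load_at`: `load_at` handles IN_HEAP accesses and resolves the address itself via `resolve_address`, while the new `load` asserts the access is not IN_HEAP and goes straight to `load_at_resolved`, so the caller is expected to have set the resolved address already. A minimal standalone model of that contract follows; the `Access` struct, the decorator bit values, and the `main` driver are illustrative stand-ins, not HotSpot's real `LIRAccess`/`DecoratorSet` definitions.

// Standalone model (not HotSpot code) of the load_at vs. load contract.
#include <cassert>
#include <cstdint>
#include <cstdio>

using DecoratorSet = uint64_t;
const DecoratorSet IN_HEAP   = 1 << 0;  // illustrative bit values
const DecoratorSet IN_NATIVE = 1 << 1;

struct Access {
  DecoratorSet decorators;
  const void*  resolved_addr = nullptr;  // stands in for LIRAccess::resolved_addr()
};

void load_at_resolved(Access& access, long& result) {
  // Model: read through the already-resolved address.
  result = *static_cast<const long*>(access.resolved_addr);
}

// load_at: heap variant; resolves the address itself first.
void load_at(Access& access, long& result, const long* base) {
  assert((access.decorators & IN_HEAP) != 0 && "not supported yet");
  access.resolved_addr = base;           // models resolve_address(access, false)
  load_at_resolved(access, result);
}

// load: native variant; asserts !IN_HEAP and expects a pre-resolved address.
void load(Access& access, long& result) {
  assert((access.decorators & IN_HEAP) == 0 && "consider using load_at");
  load_at_resolved(access, result);
}

int main() {
  long heap_slot = 42, native_slot = 7, result = 0;

  Access heap_access{IN_HEAP};
  load_at(heap_access, result, &heap_slot);
  printf("heap load: %ld\n", result);    // 42

  Access native_access{IN_NATIVE};
  native_access.resolved_addr = &native_slot;  // caller resolved it already
  load(native_access, result);
  printf("native load: %ld\n", result);  // 7
  return 0;
}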
@@ -157,17 +164,20 @@
   LIRGenerator* gen = access.gen();
   DecoratorSet decorators = access.decorators();
   bool is_volatile = (((decorators & MO_SEQ_CST) != 0) || AlwaysAtomicAccesses) && os::is_MP();
   bool needs_patching = (decorators & C1_NEEDS_PATCHING) != 0;
   bool mask_boolean = (decorators & C1_MASK_BOOLEAN) != 0;
+  bool in_native = (decorators & IN_NATIVE) != 0;
 
   if (support_IRIW_for_not_multiple_copy_atomic_cpu && is_volatile) {
     __ membar();
   }
 
   LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
-  if (is_volatile && !needs_patching) {
+  if (in_native) {
+    __ move_wide(access.resolved_addr()->as_address_ptr(), result);
+  } else if (is_volatile && !needs_patching) {
     gen->volatile_field_load(access.resolved_addr()->as_address_ptr(), result, access.access_emit_info());
   } else {
     __ load(access.resolved_addr()->as_address_ptr(), result, access.access_emit_info(), patch_code);
   }
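The dispatch order this hunk introduces in `load_at_resolved` can be modeled in isolation: IN_NATIVE is checked first, so native loads are emitted as a plain wide move and never take the volatile-field or patching paths, which remain heap-only. A runnable sketch follows; the decorator bit values and the helper name `select_load_path` are made up for illustration, and the real volatile check also consults `AlwaysAtomicAccesses` and `os::is_MP()`.

// Standalone model (not HotSpot code) of the new dispatch in load_at_resolved.
#include <cstdint>
#include <cstdio>

using DecoratorSet = uint64_t;
const DecoratorSet IN_NATIVE         = 1 << 1;  // illustrative bit values
const DecoratorSet MO_SEQ_CST        = 1 << 2;
const DecoratorSet C1_NEEDS_PATCHING = 1 << 3;

// Returns which code path a load with these decorators would take.
const char* select_load_path(DecoratorSet decorators) {
  bool in_native      = (decorators & IN_NATIVE) != 0;
  bool is_volatile    = (decorators & MO_SEQ_CST) != 0;  // simplified
  bool needs_patching = (decorators & C1_NEEDS_PATCHING) != 0;

  if (in_native) {
    return "move_wide";            // __ move_wide(...)
  } else if (is_volatile && !needs_patching) {
    return "volatile_field_load";  // gen->volatile_field_load(...)
  } else {
    return "load";                 // __ load(..., patch_code)
  }
}

int main() {
  printf("%s\n", select_load_path(IN_NATIVE));                      // move_wide
  printf("%s\n", select_load_path(MO_SEQ_CST));                     // volatile_field_load
  printf("%s\n", select_load_path(MO_SEQ_CST | C1_NEEDS_PATCHING)); // load
  printf("%s\n", select_load_path(0));                              // load
  return 0;
}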