
src/hotspot/share/gc/shared/c2/barrierSetC2.cpp

Old version:

  24 
  25 #include "precompiled.hpp"
  26 #include "gc/shared/c2/barrierSetC2.hpp"
  27 #include "opto/arraycopynode.hpp"
  28 #include "opto/convertnode.hpp"
  29 #include "opto/graphKit.hpp"
  30 #include "opto/idealKit.hpp"
  31 #include "opto/macro.hpp"
  32 #include "opto/narrowptrnode.hpp"
  33 #include "utilities/macros.hpp"
  34 
  35 // By default this is a no-op.
  36 void BarrierSetC2::resolve_address(C2Access& access) const { }
  37 
  38 void* C2ParseAccess::barrier_set_state() const {
  39   return _kit->barrier_set_state();
  40 }
  41 
  42 PhaseGVN& C2ParseAccess::gvn() const { return _kit->gvn(); }
  43 
  44 bool C2Access::needs_cpu_membar() const {
  45   bool mismatched = (_decorators & C2_MISMATCHED) != 0;
  46   bool is_unordered = (_decorators & MO_UNORDERED) != 0;
  47   bool anonymous = (_decorators & C2_UNSAFE_ACCESS) != 0;
  48   bool in_heap = (_decorators & IN_HEAP) != 0;
  49 
  50   bool is_write = (_decorators & C2_WRITE_ACCESS) != 0;
  51   bool is_read = (_decorators & C2_READ_ACCESS) != 0;
  52   bool is_atomic = is_read && is_write;
  53 
  54   if (is_atomic) {
  55     // Atomics always need to be wrapped in CPU membars
  56     return true;
  57   }
  58 
  59   if (anonymous) {
  60     // We will need memory barriers unless we can determine a unique
  61     // alias category for this reference.  (Note:  If for some reason
  62     // the barriers get omitted and the unsafe reference begins to "pollute"
  63     // the alias analysis of the rest of the graph, either Compile::can_alias


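Note: the hunk above classifies the access by testing decorator bits; an access whose decorators carry both the read and the write bit is a read-modify-write (a CAS- or getAndSet-style atomic) and always gets CPU membars. Below is a minimal standalone sketch of that test; the flag values are hypothetical stand-ins, not HotSpot's real DecoratorSet constants.

    #include <cstdint>

    // Hypothetical bit assignments, for illustration only; the real values
    // live in HotSpot's DecoratorSet definitions.
    using DecoratorSet = std::uint64_t;
    const DecoratorSet C2_READ_ACCESS  = 1ull << 0;
    const DecoratorSet C2_WRITE_ACCESS = 1ull << 1;

    // Mirrors the is_atomic computation in C2Access::needs_cpu_membar():
    // read + write on one access means a read-modify-write operation.
    bool is_atomic(DecoratorSet decorators) {
      bool is_read  = (decorators & C2_READ_ACCESS)  != 0;
      bool is_write = (decorators & C2_WRITE_ACCESS) != 0;
      return is_read && is_write;
    }

    int main() {
      // A CAS-style access carries both bits and so needs CPU membars.
      return is_atomic(C2_READ_ACCESS | C2_WRITE_ACCESS) ? 0 : 1;
    }
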
 127 
 128   Node* adr = access.addr().node();
 129   const TypePtr* adr_type = access.addr().type();
 130 
 131   bool mismatched = (decorators & C2_MISMATCHED) != 0;
 132   bool requires_atomic_access = (decorators & MO_UNORDERED) == 0;
 133   bool unaligned = (decorators & C2_UNALIGNED) != 0;
 134   bool control_dependent = (decorators & C2_CONTROL_DEPENDENT_LOAD) != 0;
 135   bool pinned = (decorators & C2_PINNED_LOAD) != 0;
 136   bool unsafe = (decorators & C2_UNSAFE_ACCESS) != 0;
 137 
 138   bool in_native = (decorators & IN_NATIVE) != 0;
 139 
 140   MemNode::MemOrd mo = access.mem_node_mo();
 141   LoadNode::ControlDependency dep = pinned ? LoadNode::Pinned : LoadNode::DependsOnlyOnTest;
 142 
 143   Node* load;
 144   if (access.is_parse_access()) {
 145     C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
 146     GraphKit* kit = parse_access.kit();
 147     Node* control = control_dependent ? kit->control() : NULL;
 148 
 149     if (in_native) {
 150       load = kit->make_load(control, adr, val_type, access.type(), mo);
 151     } else {
 152       load = kit->make_load(control, adr, val_type, access.type(), adr_type, mo,
 153                             dep, requires_atomic_access, unaligned, mismatched, unsafe);
 154     }
 155     access.set_raw_access(load);
 156   } else {
 157     assert(!requires_atomic_access, "not yet supported");
 158     assert(access.is_opt_access(), "either parse or opt access");
 159     C2OptAccess& opt_access = static_cast<C2OptAccess&>(access);
 160     Node* control = control_dependent ? opt_access.ctl() : NULL;
 161     MergeMemNode* mm = opt_access.mem();
 162     PhaseGVN& gvn = opt_access.gvn();
 163     Node* mem = mm->memory_at(gvn.C->get_alias_index(adr_type));
 164     load = LoadNode::make(gvn, control, mem, adr, adr_type, val_type, access.type(), mo, dep, unaligned, mismatched);
 165     load = gvn.transform(load);
 166   }
 167 
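Note: the else branch above runs outside parsing, where no GraphKit tracks the memory state. The caller holds a MergeMemNode, picks the memory slice for the access's alias category via Compile::get_alias_index, builds the LoadNode directly, and transforms it through GVN at once. A toy model of the slice lookup, with stand-in types and a made-up alias index:

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    // Stand-in types: a MergeMem holds one memory slice per alias index,
    // like MergeMemNode::memory_at() keyed by Compile::get_alias_index().
    struct MemSlice { const char* category; };

    struct MergeMem {
      std::vector<MemSlice> slices;
      MemSlice& memory_at(std::size_t alias_idx) { return slices.at(alias_idx); }
    };

    int main() {
      // Index 0 is the "bottom" slice (aliases with everything); higher
      // indices are sharper categories. The indices here are made up.
      MergeMem mm{{{"bottom"}, {"oop-field"}, {"raw"}}};
      std::size_t alias_idx = 1;  // pretend get_alias_index(adr_type) == 1
      std::printf("load reads slice: %s\n", mm.memory_at(alias_idx).category);
      return 0;
    }
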

New version:

  24 
  25 #include "precompiled.hpp"
  26 #include "gc/shared/c2/barrierSetC2.hpp"
  27 #include "opto/arraycopynode.hpp"
  28 #include "opto/convertnode.hpp"
  29 #include "opto/graphKit.hpp"
  30 #include "opto/idealKit.hpp"
  31 #include "opto/macro.hpp"
  32 #include "opto/narrowptrnode.hpp"
  33 #include "utilities/macros.hpp"
  34 
  35 // By default this is a no-op.
  36 void BarrierSetC2::resolve_address(C2Access& access) const { }
  37 
  38 void* C2ParseAccess::barrier_set_state() const {
  39   return _kit->barrier_set_state();
  40 }
  41 
  42 PhaseGVN& C2ParseAccess::gvn() const { return _kit->gvn(); }
  43 
  44 Node* C2ParseAccess::control() const {
  45   return _ctl == NULL ? _kit->control() : _ctl;
  46 }
  47 
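Note: this new accessor is the heart of the change. C2ParseAccess now carries an optional _ctl node, and control() falls back to the kit's current control only when no explicit control was supplied, which lets callers pin an access below a specific control node. A standalone sketch of that fallback pattern, with illustrative stand-in types (not HotSpot's):

    #include <cstdio>

    // Stand-in types; names are illustrative, not HotSpot's.
    struct Node { const char* name; };

    struct Kit {
      Node* _current;
      Node* control() { return _current; }
    };

    struct ParseAccess {
      Kit*  _kit;
      Node* _ctl;  // optional explicit control, may be null
      // Same shape as C2ParseAccess::control() in the new version above.
      Node* control() const { return _ctl == nullptr ? _kit->control() : _ctl; }
    };

    int main() {
      Node parser_ctl{"parser-control"};
      Node pinned_ctl{"explicit-control"};
      Kit kit{&parser_ctl};

      ParseAccess by_default{&kit, nullptr};    // falls back to the kit
      ParseAccess with_ctl{&kit, &pinned_ctl};  // honors the supplied node

      std::printf("%s\n", by_default.control()->name);  // parser-control
      std::printf("%s\n", with_ctl.control()->name);    // explicit-control
      return 0;
    }
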
  48 bool C2Access::needs_cpu_membar() const {
  49   bool mismatched = (_decorators & C2_MISMATCHED) != 0;
  50   bool is_unordered = (_decorators & MO_UNORDERED) != 0;
  51   bool anonymous = (_decorators & C2_UNSAFE_ACCESS) != 0;
  52   bool in_heap = (_decorators & IN_HEAP) != 0;
  53 
  54   bool is_write = (_decorators & C2_WRITE_ACCESS) != 0;
  55   bool is_read = (_decorators & C2_READ_ACCESS) != 0;
  56   bool is_atomic = is_read && is_write;
  57 
  58   if (is_atomic) {
  59     // Atomics always need to be wrapped in CPU membars
  60     return true;
  61   }
  62 
  63   if (anonymous) {
  64     // We will need memory barriers unless we can determine a unique
  65     // alias category for this reference.  (Note:  If for some reason
  66     // the barriers get omitted and the unsafe reference begins to "pollute"
  67     // the alias analysis of the rest of the graph, either Compile::can_alias


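Note: when needs_cpu_membar() answers true, the caller is expected to bracket the access with CPU-order membars so that it cannot be reordered with neighboring memory operations. As a loose analogy only, not the HotSpot mechanism itself, the same bracketing shape expressed with portable C++ fences:

    #include <atomic>
    #include <cstdio>

    // Loose analogy in portable C++: bracket an operation with full fences,
    // the way C2 brackets an access with membar nodes when
    // needs_cpu_membar() reports true. This is a model, not HotSpot code.
    template <typename F>
    void with_cpu_membars(F op) {
      std::atomic_thread_fence(std::memory_order_seq_cst);  // leading barrier
      op();
      std::atomic_thread_fence(std::memory_order_seq_cst);  // trailing barrier
    }

    int main() {
      with_cpu_membars([] { std::puts("atomic access goes here"); });
      return 0;
    }
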
 131 
 132   Node* adr = access.addr().node();
 133   const TypePtr* adr_type = access.addr().type();
 134 
 135   bool mismatched = (decorators & C2_MISMATCHED) != 0;
 136   bool requires_atomic_access = (decorators & MO_UNORDERED) == 0;
 137   bool unaligned = (decorators & C2_UNALIGNED) != 0;
 138   bool control_dependent = (decorators & C2_CONTROL_DEPENDENT_LOAD) != 0;
 139   bool pinned = (decorators & C2_PINNED_LOAD) != 0;
 140   bool unsafe = (decorators & C2_UNSAFE_ACCESS) != 0;
 141 
 142   bool in_native = (decorators & IN_NATIVE) != 0;
 143 
 144   MemNode::MemOrd mo = access.mem_node_mo();
 145   LoadNode::ControlDependency dep = pinned ? LoadNode::Pinned : LoadNode::DependsOnlyOnTest;
 146 
 147   Node* load;
 148   if (access.is_parse_access()) {
 149     C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
 150     GraphKit* kit = parse_access.kit();
 151     Node* control = control_dependent ? parse_access.control() : NULL;
 152 
 153     if (in_native) {
 154       load = kit->make_load(control, adr, val_type, access.type(), mo);
 155     } else {
 156       load = kit->make_load(control, adr, val_type, access.type(), adr_type, mo,
 157                             dep, requires_atomic_access, unaligned, mismatched, unsafe);
 158     }
 159     access.set_raw_access(load);
 160   } else {
 161     assert(!requires_atomic_access, "not yet supported");
 162     assert(access.is_opt_access(), "either parse or opt access");
 163     C2OptAccess& opt_access = static_cast<C2OptAccess&>(access);
 164     Node* control = control_dependent ? opt_access.ctl() : NULL;
 165     MergeMemNode* mm = opt_access.mem();
 166     PhaseGVN& gvn = opt_access.gvn();
 167     Node* mem = mm->memory_at(gvn.C->get_alias_index(adr_type));
 168     load = LoadNode::make(gvn, control, mem, adr, adr_type, val_type, access.type(), mo, dep, unaligned, mismatched);
 169     load = gvn.transform(load);
 170   }
 171 


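Note: the ControlDependency selected in both versions governs how freely the load may later move. Pinned keeps the load attached to its control input, while DependsOnlyOnTest lets the optimizer float it as long as the dominating test survives. A minimal model of that selection, mirroring the pinned ? : expression above:

    // Minimal model of the dependency choice: Pinned loads stay attached to
    // their control input; DependsOnlyOnTest loads may float as long as the
    // guarding test still dominates them.
    enum class ControlDependency { Pinned, DependsOnlyOnTest };

    // Mirrors: dep = pinned ? LoadNode::Pinned : LoadNode::DependsOnlyOnTest
    ControlDependency pick_dependency(bool pinned) {
      return pinned ? ControlDependency::Pinned
                    : ControlDependency::DependsOnlyOnTest;
    }

    int main() {
      return pick_dependency(true) == ControlDependency::Pinned ? 0 : 1;
    }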