
src/hotspot/share/opto/graphKit.hpp

BarrierSetC2

*** 25,34 ****
--- 25,35 ----
  #ifndef SHARE_VM_OPTO_GRAPHKIT_HPP
  #define SHARE_VM_OPTO_GRAPHKIT_HPP

  #include "ci/ciEnv.hpp"
  #include "ci/ciMethodData.hpp"
+ #include "gc/shared/c2/barrierSetC2.hpp"
  #include "opto/addnode.hpp"
  #include "opto/callnode.hpp"
  #include "opto/cfgnode.hpp"
  #include "opto/compile.hpp"
  #include "opto/divnode.hpp"
***************
*** 36,45 ****
--- 37,47 ----
  #include "opto/phaseX.hpp"
  #include "opto/subnode.hpp"
  #include "opto/type.hpp"
  #include "runtime/deoptimization.hpp"

+ class BarrierSetC2;
  class FastLockNode;
  class FastUnlockNode;
  class IdealKit;
  class LibraryCallKit;
  class Parse;
***************
*** 61,70 ****
--- 63,73 ----
    PhaseGVN         &_gvn;       // Some optimizations while parsing
    SafePointNode*    _map;       // Parser map from JVM to Nodes
    SafePointNode*    _exceptions;// Parser map(s) for exception state(s)
    int               _bci;       // JVM Bytecode Pointer
    ciMethod*         _method;    // JVM Current Method
+   BarrierSetC2*     _barrier_set;

   private:
    int               _sp;        // JVM Expression Stack Pointer; don't modify directly!

   private:
***************
*** 84,95 ****
  #endif

    virtual Parse*          is_Parse()          const { return NULL; }
    virtual LibraryCallKit* is_LibraryCallKit() const { return NULL; }

!   ciEnv*        env()           const { return _env; }
!   PhaseGVN&     gvn()           const { return _gvn; }

    void record_for_igvn(Node* n) const { C->record_for_igvn(n); }  // delegate to Compile

    // Handy well-known nodes:
    Node*         null()          const { return zerocon(T_OBJECT); }
--- 87,99 ----
  #endif

    virtual Parse*          is_Parse()          const { return NULL; }
    virtual LibraryCallKit* is_LibraryCallKit() const { return NULL; }

!   ciEnv*        env()               const { return _env; }
!   PhaseGVN&     gvn()               const { return _gvn; }
!   void*         barrier_set_state() const { return C->barrier_set_state(); }

    void record_for_igvn(Node* n) const { C->record_for_igvn(n); }  // delegate to Compile

    // Handy well-known nodes:
    Node*         null()          const { return zerocon(T_OBJECT); }
***************
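The new barrier_set_state() accessor deliberately returns an opaque void*: GraphKit stays GC-agnostic, and only the active GC's BarrierSetC2 implementation knows the concrete type it registered with the Compile object. A minimal retrieval sketch, where MyGCBarrierSetC2State is a hypothetical per-compilation state class and not part of this change:

    // Hypothetical GC-specific helper. Only the owning BarrierSetC2 knows the
    // real type behind the void* handed out by Compile/GraphKit, so it casts
    // the opaque pointer back to its own state class.
    static MyGCBarrierSetC2State* state_of(GraphKit* kit) {
      return reinterpret_cast<MyGCBarrierSetC2State*>(kit->barrier_set_state());
    }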
*** 101,113 ****
    Node* longcon(jlong con)      const { return _gvn.longcon(con); }
    Node* makecon(const Type *t)  const { return _gvn.makecon(t); }
    Node* zerocon(BasicType bt)   const { return _gvn.zerocon(bt); }
    // (See also macro MakeConX in type.hpp, which uses intcon or longcon.)

-   // Helper for byte_map_base
-   Node* byte_map_base_node();
- 
    jint  find_int_con(Node* n, jint value_if_unknown) {
      return _gvn.find_int_con(n, value_if_unknown);
    }
    jlong find_long_con(Node* n, jlong value_if_unknown) {
      return _gvn.find_long_con(n, value_if_unknown);
--- 105,114 ----
***************
*** 567,640 ****
                          MemNode::MemOrd,
                          bool require_atomic_access = false,
                          bool unaligned = false,
                          bool mismatched = false);

!   // All in one pre-barrier, store, post_barrier
!   // Insert a write-barrier'd store.  This is to let generational GC
!   // work; we have to flag all oop-stores before the next GC point.
!   //
!   // It comes in 3 flavors of store to an object, array, or unknown.
!   // We use precise card marks for arrays to avoid scanning the entire
!   // array.  We use imprecise for object.  We use precise for unknown
!   // since we don't know if we have an array or and object or even
!   // where the object starts.
!   //
!   // If val==NULL, it is taken to be a completely unknown value. QQQ
! 
!   Node* store_oop(Node* ctl,
!                   Node* obj,   // containing obj
!                   Node* adr,   // actual adress to store val at
!                   const TypePtr* adr_type,
!                   Node* val,
!                   const TypeOopPtr* val_type,
!                   BasicType bt,
!                   bool use_precise,
!                   MemNode::MemOrd mo,
!                   bool mismatched = false);

!   Node* store_oop_to_object(Node* ctl,
!                             Node* obj,   // containing obj
!                             Node* adr,   // actual adress to store val at
!                             const TypePtr* adr_type,
!                             Node* val,
!                             const TypeOopPtr* val_type,
!                             BasicType bt,
!                             MemNode::MemOrd mo) {
!     return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, false, mo);
!   }
! 
!   Node* store_oop_to_array(Node* ctl,
!                            Node* obj,   // containing obj
!                            Node* adr,   // actual adress to store val at
!                            const TypePtr* adr_type,
!                            Node* val,
!                            const TypeOopPtr* val_type,
!                            BasicType bt,
!                            MemNode::MemOrd mo) {
!     return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, true, mo);
!   }
! 
!   // Could be an array or object we don't know at compile time (unsafe ref.)
!   Node* store_oop_to_unknown(Node* ctl,
!                              Node* obj,   // containing obj
!                              Node* adr,   // actual adress to store val at
                               const TypePtr* adr_type,
!                              Node* val,
                               BasicType bt,
!                              MemNode::MemOrd mo,
!                              bool mismatched = false);
! 
!   // For the few case where the barriers need special help
!   void pre_barrier(bool do_load, Node* ctl,
!                    Node* obj, Node* adr, uint adr_idx, Node* val, const TypeOopPtr* val_type,
!                    Node* pre_val,
!                    BasicType bt);

!   void post_barrier(Node* ctl, Node* store, Node* obj, Node* adr, uint adr_idx,
!                     Node* val, BasicType bt, bool use_precise);

    // Return addressing for an array element.
    Node* array_element_address(Node* ary, Node* idx, BasicType elembt,
                                // Optional constraint on the array size:
                                const TypeInt* sizetype = NULL,
--- 568,638 ----
                          MemNode::MemOrd,
                          bool require_atomic_access = false,
                          bool unaligned = false,
                          bool mismatched = false);

+   // Perform decorated accesses
+ 
!   Node* access_store_at(Node* ctl,
!                         Node* obj,   // containing obj
!                         Node* adr,   // actual adress to store val at
!                         const TypePtr* adr_type,
!                         Node* val,
!                         const Type* val_type,
!                         BasicType bt,
!                         DecoratorSet decorators);
! 
!   Node* access_load_at(Node* obj,   // containing obj
!                        Node* adr,   // actual adress to store val at
!                        const TypePtr* adr_type,
!                        const Type* val_type,
!                        BasicType bt,
!                        DecoratorSet decorators);
! 
!   Node* access_atomic_cmpxchg_val_at(Node* ctl,
!                                      Node* obj,
!                                      Node* adr,
!                                      const TypePtr* adr_type,
!                                      int alias_idx,
!                                      Node* expected_val,
!                                      Node* new_val,
!                                      const Type* value_type,
!                                      BasicType bt,
!                                      DecoratorSet decorators);
! 
!   Node* access_atomic_cmpxchg_bool_at(Node* ctl,
!                                       Node* obj,
!                                       Node* adr,
!                                       const TypePtr* adr_type,
!                                       int alias_idx,
!                                       Node* expected_val,
!                                       Node* new_val,
!                                       const Type* value_type,
!                                       BasicType bt,
!                                       DecoratorSet decorators);
! 
!   Node* access_atomic_xchg_at(Node* ctl,
!                               Node* obj,
!                               Node* adr,
!                               const TypePtr* adr_type,
!                               int alias_idx,
!                               Node* new_val,
!                               const Type* value_type,
!                               BasicType bt,
!                               DecoratorSet decorators);

!   Node* access_atomic_add_at(Node* ctl,
!                              Node* obj,
!                              Node* adr,
                               const TypePtr* adr_type,
!                              int alias_idx,
!                              Node* new_val,
!                              const Type* value_type,
                               BasicType bt,
!                              DecoratorSet decorators);

!   void access_clone(Node* ctl, Node* src, Node* dst, Node* size, bool is_array);

    // Return addressing for an array element.
    Node* array_element_address(Node* ary, Node* idx, BasicType elembt,
                                // Optional constraint on the array size:
                                const TypeInt* sizetype = NULL,
***************
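The decorated accessors replace the store_oop family: instead of the caller choosing a barrier flavor (object vs. array vs. unknown, precise vs. imprecise card marks), the caller states its intent with decorators and the active BarrierSetC2 expands whatever pre/post barriers its GC needs. A caller-side sketch, illustrative only and not lines from this webrev, assuming the decorator names IN_HEAP, IN_HEAP_ARRAY and MO_UNORDERED from the shared Access API of this era:

    // Before: the caller picks the barrier flavor and card-mark precision by
    // selecting a specific store_oop_* entry point.
    Node* store_array_element_old(GraphKit* kit, Node* ary, Node* adr,
                                  const TypePtr* adr_type, Node* val,
                                  const TypeOopPtr* val_type) {
      return kit->store_oop_to_array(kit->control(), ary, adr, adr_type,
                                     val, val_type, T_OBJECT, MemNode::unordered);
    }

    // After: the caller only describes the access; the current BarrierSetC2
    // decides which barriers (if any) to emit around the store.
    Node* store_array_element_new(GraphKit* kit, Node* ary, Node* adr,
                                  const TypePtr* adr_type, Node* val,
                                  const TypeOopPtr* val_type) {
      return kit->access_store_at(kit->control(), ary, adr, adr_type,
                                  val, val_type, T_OBJECT,
                                  IN_HEAP | IN_HEAP_ARRAY | MO_UNORDERED);
    }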
*** 752,802 ****
    }

    // Returns the object (if any) which was created the moment before.
    Node* just_allocated_object(Node* current_control);

-   static bool use_ReduceInitialCardMarks();
- 
    // Sync Ideal and Graph kits.
    void sync_kit(IdealKit& ideal);
    void final_sync(IdealKit& ideal);

-   // vanilla/CMS post barrier
-   void write_barrier_post(Node *store, Node* obj,
-                           Node* adr,  uint adr_idx, Node* val, bool use_precise);
- 
-   // Allow reordering of pre-barrier with oop store and/or post-barrier.
-   // Used for load_store operations which loads old value.
-   bool can_move_pre_barrier() const;
- 
-   // G1 pre/post barriers
-   void g1_write_barrier_pre(bool do_load,
-                             Node* obj,
-                             Node* adr,
-                             uint alias_idx,
-                             Node* val,
-                             const TypeOopPtr* val_type,
-                             Node* pre_val,
-                             BasicType bt);
- 
-   void g1_write_barrier_post(Node* store,
-                              Node* obj,
-                              Node* adr,
-                              uint alias_idx,
-                              Node* val,
-                              BasicType bt,
-                              bool use_precise);
-   // Helper function for g1
-   private:
-   void g1_mark_card(IdealKit& ideal, Node* card_adr, Node* store, uint oop_alias_idx,
-                     Node* index, Node* index_adr,
-                     Node* buffer, const TypeFunc* tf);
- 
-   bool g1_can_remove_pre_barrier(PhaseTransform* phase, Node* adr, BasicType bt, uint adr_idx);
- 
-   bool g1_can_remove_post_barrier(PhaseTransform* phase, Node* store, Node* adr);
- 
   public:

    // Helper function to round double arguments before a call
    void round_double_arguments(ciMethod* dest_method);
    void round_double_result(ciMethod* dest_method);
--- 750,763 ----
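This deleted block is the point of the change: the vanilla/CMS card-mark helpers and the G1 SATB/dirty-card logic no longer live in the GC-agnostic GraphKit; each collector now supplies them through its own BarrierSetC2 implementation, which the new _barrier_set field caches per GraphKit. A lookup sketch, assuming the BarrierSet::barrier_set() and barrier_set_c2() accessors from the shared barrier-set headers:

    #include "gc/shared/barrierSet.hpp"
    #include "gc/shared/c2/barrierSetC2.hpp"

    // How a GC-agnostic component reaches the active GC's C2 barrier expander;
    // GraphKit caches this same pointer in its _barrier_set field so the
    // decorated access_* calls above can dispatch to it.
    static BarrierSetC2* active_c2_barriers() {
      return BarrierSet::barrier_set()->barrier_set_c2();
    }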