
src/share/vm/opto/graphKit.hpp

rev 12906 : [mq]: gc_interface

Old version:
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_OPTO_GRAPHKIT_HPP
  26 #define SHARE_VM_OPTO_GRAPHKIT_HPP
  27 
  28 #include "ci/ciEnv.hpp"
  29 #include "ci/ciMethodData.hpp"
  30 #include "opto/addnode.hpp"
  31 #include "opto/callnode.hpp"
  32 #include "opto/cfgnode.hpp"
  33 #include "opto/compile.hpp"
  34 #include "opto/divnode.hpp"
  35 #include "opto/mulnode.hpp"
  36 #include "opto/phaseX.hpp"
  37 #include "opto/subnode.hpp"
  38 #include "opto/type.hpp"
  39 #include "runtime/deoptimization.hpp"
  40 
  41 class FastLockNode;
  42 class FastUnlockNode;
  43 class IdealKit;
  44 class LibraryCallKit;
  45 class Parse;
  46 class RootNode;
  47 
  48 //-----------------------------------------------------------------------------
  49 //----------------------------GraphKit-----------------------------------------


  86   virtual Parse*          is_Parse()          const { return NULL; }
  87   virtual LibraryCallKit* is_LibraryCallKit() const { return NULL; }
  88 
  89   ciEnv*        env()           const { return _env; }
  90   PhaseGVN&     gvn()           const { return _gvn; }
  91 
  92   void record_for_igvn(Node* n) const { C->record_for_igvn(n); }  // delegate to Compile
  93 
  94   // Handy well-known nodes:
  95   Node*         null()          const { return zerocon(T_OBJECT); }
  96   Node*         top()           const { return C->top(); }
  97   RootNode*     root()          const { return C->root(); }
  98 
  99   // Create or find a constant node
 100   Node* intcon(jint con)        const { return _gvn.intcon(con); }
 101   Node* longcon(jlong con)      const { return _gvn.longcon(con); }
 102   Node* makecon(const Type *t)  const { return _gvn.makecon(t); }
 103   Node* zerocon(BasicType bt)   const { return _gvn.zerocon(bt); }
 104   // (See also macro MakeConX in type.hpp, which uses intcon or longcon.)
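
For illustration (a sketch, not part of this file), assuming we are inside a GraphKit member during parsing or intrinsic expansion, these factories hand back shared, GVN-interned constant nodes:

    Node* zero = intcon(0);                  // shared ConI for 0
    Node* big  = longcon(CONST64(1) << 40);  // shared ConL
    Node* nil  = null();                     // same node as zerocon(T_OBJECT)
    Node* fone = makecon(TypeF::ONE);        // constant built from an arbitrary Type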
 105 
 106   // Helper for byte_map_base
 107   Node* byte_map_base_node();
 108 
 109   jint  find_int_con(Node* n, jint value_if_unknown) {
 110     return _gvn.find_int_con(n, value_if_unknown);
 111   }
 112   jlong find_long_con(Node* n, jlong value_if_unknown) {
 113     return _gvn.find_long_con(n, value_if_unknown);
 114   }
 115   // (See also macro find_intptr_t_con in type.hpp, which uses one of these.)
 116 
 117   // JVM State accessors:
 118   // Parser mapping from JVM indices into Nodes.
 119   // Low slots are accessed by the StartNode::enum.
 120   // Then come the locals at StartNode::Parms to StartNode::Parms+max_locals();
 121   // Then come JVM stack slots.
 122   // Finally come the monitors, if any.
 123   // See layout accessors in class JVMState.
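
To make the layout above concrete: for a method with max_locals() == 3 and two values currently on the expression stack, the map's inputs look roughly like this (an illustration derived from the comment, not code in this file):

    // [0 .. StartNode::Parms-1]  fixed slots: control, i_o, memory, frame pointer, return address
    // [Parms+0 .. Parms+2]       locals 0..2
    // [Parms+3 .. Parms+4]       JVM expression stack (so sp() == 2)
    // [Parms+5 .. ]              monitors, if any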
 124 
 125   SafePointNode*     map()      const { return _map; }
 126   bool               has_exceptions() const { return _exceptions != NULL; }
 127   JVMState*          jvms()     const { return map_not_null()->_jvms; }
 128   int                sp()       const { return _sp; }


 549                         MemNode::MemOrd mo,
 550                         bool require_atomic_access = false,
 551                         bool unaligned = false,
 552                         bool mismatched = false) {
 553     // This version computes alias_index from an address type
 554     assert(adr_type != NULL, "use other store_to_memory factory");
 555     return store_to_memory(ctl, adr, val, bt,
 556                            C->get_alias_index(adr_type),
 557                            mo, require_atomic_access,
 558                            unaligned, mismatched);
 559   }
 560   // This is the base version, which is given the alias index
 561   // Return the new StoreXNode
 562   Node* store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt,
 563                         int adr_idx,
 564                         MemNode::MemOrd,
 565                         bool require_atomic_access = false,
 566                         bool unaligned = false,
 567                         bool mismatched = false);
 568 
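
A minimal usage sketch of the alias-typed overload above; `obj` is assumed to be a non-null oop already in hand, and the 16-byte int field is purely illustrative:

    Node* adr = basic_plus_adr(obj, 16);                 // obj + illustrative offset
    const TypePtr* adr_type = _gvn.type(adr)->isa_ptr();
    store_to_memory(control(), adr, intcon(42), T_INT,
                    adr_type, MemNode::unordered);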

 569 
 570   // All in one pre-barrier, store, post_barrier
 571   // Insert a write-barrier'd store.  This is to let generational GC
 572   // work; we have to flag all oop-stores before the next GC point.
 573   //
 574   // It comes in 3 flavors of store to an object, array, or unknown.
 575   // We use precise card marks for arrays to avoid scanning the entire
 576  // array. We use imprecise for objects. We use precise for unknown
 577  // since we don't know if we have an array or an object, or even
 578   // where the object starts.
 579   //
 580   // If val==NULL, it is taken to be a completely unknown value. QQQ
 581 
 582   Node* store_oop(Node* ctl,
 583                   Node* obj,   // containing obj
 584                   Node* adr,   // actual address to store val at
 585                   const TypePtr* adr_type,
 586                   Node* val,
 587                   const TypeOopPtr* val_type,
 588                   BasicType bt,
 589                   bool use_precise,
 590                   MemNode::MemOrd mo,
 591                   bool mismatched = false);
 592 
 593   Node* store_oop_to_object(Node* ctl,
 594                             Node* obj,   // containing obj
 595                             Node* adr,   // actual address to store val at
 596                             const TypePtr* adr_type,
 597                             Node* val,
 598                             const TypeOopPtr* val_type,
 599                             BasicType bt,
 600                             MemNode::MemOrd mo) {
 601     return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, false, mo);
 602   }
 603 
 604   Node* store_oop_to_array(Node* ctl,
 605                            Node* obj,   // containing obj
 606                            Node* adr,   // actual address to store val at
 607                            const TypePtr* adr_type,
 608                            Node* val,
 609                            const TypeOopPtr* val_type,
 610                            BasicType bt,
 611                            MemNode::MemOrd mo) {
 612     return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, true, mo);
 613   }
 614 
 615   // Could be an array or object we don't know at compile time (unsafe ref.)
 616   Node* store_oop_to_unknown(Node* ctl,
 617                              Node* obj,   // containing obj
 618                              Node* adr,   // actual address to store val at
 619                              const TypePtr* adr_type,
 620                              Node* val,
 621                              BasicType bt,
 622                              MemNode::MemOrd mo,
 623                              bool mismatched = false);
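
For illustration (a sketch, not code from this file), a plain oop store into a made-up reference field of `obj` would use the object flavor; `obj` and `val` are assumed to exist already, and the offset is an assumption:

    int field_offset = 16;                               // illustrative only
    Node* adr = basic_plus_adr(obj, field_offset);
    const TypeOopPtr* val_type = _gvn.type(val)->is_oopptr();
    store_oop_to_object(control(), obj, adr, _gvn.type(adr)->is_ptr(), val,
                        val_type, T_OBJECT, MemNode::unordered);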
 624 
 625   // For the few cases where the barriers need special help
 626   void pre_barrier(bool do_load, Node* ctl,
 627                    Node* obj, Node* adr, uint adr_idx, Node* val, const TypeOopPtr* val_type,
 628                    Node* pre_val,
 629                    BasicType bt);
 630 
 631   void post_barrier(Node* ctl, Node* store, Node* obj, Node* adr, uint adr_idx,
 632                     Node* val, BasicType bt, bool use_precise);
 633 
 634   // Return addressing for an array element.
 635   Node* array_element_address(Node* ary, Node* idx, BasicType elembt,
 636                               // Optional constraint on the array size:
 637                               const TypeInt* sizetype = NULL,
 638                               // Optional control dependency (for example, on range check)
 639                               Node* ctrl = NULL);
 640 
 641   // Return a load of array element at idx.
 642   Node* load_array_element(Node* ctl, Node* ary, Node* idx, const TypeAryPtr* arytype);
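
A sketch of an element load from an int[] whose index is assumed to have been range checked already; passing control() as the last argument pins the address computation (and hence the load) below that check:

    const TypeAryPtr* arytype = _gvn.type(ary)->is_aryptr();
    Node* adr   = array_element_address(ary, idx, T_INT, arytype->size(), control());
    // Or, for a simple element read, the one-stop helper:
    Node* value = load_array_element(control(), ary, idx, arytype);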
 643 
 644   //---------------- Dtrace support --------------------
 645   void make_dtrace_method_entry_exit(ciMethod* method, bool is_entry);
 646   void make_dtrace_method_entry(ciMethod* method) {
 647     make_dtrace_method_entry_exit(method, true);
 648   }
 649   void make_dtrace_method_exit(ciMethod* method) {
 650     make_dtrace_method_entry_exit(method, false);
 651   }
 652 


 734   }
 735 
 736   // SP when bytecode needs to be reexecuted.
 737   virtual int reexecute_sp() { return sp(); }
 738 
 739   // Report if there were too many traps at the current method and bci.
 740   // Report if a trap was recorded, and/or PerMethodTrapLimit was exceeded.
 741   // If there is no MDO at all, report no trap unless told to assume it.
 742   bool too_many_traps(Deoptimization::DeoptReason reason) {
 743     return C->too_many_traps(method(), bci(), reason);
 744   }
 745 
 746   // Report if there were too many recompiles at the current method and bci.
 747   bool too_many_recompiles(Deoptimization::DeoptReason reason) {
 748     return C->too_many_recompiles(method(), bci(), reason);
 749   }
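
These predicates are typically consulted before emitting an uncommon trap; a sketch only, with an illustrative deopt reason:

    if (!too_many_traps(Deoptimization::Reason_intrinsic)) {
      // Still worth deoptimizing on the slow path.
      uncommon_trap(Deoptimization::Reason_intrinsic,
                    Deoptimization::Action_make_not_entrant);
    } else {
      // This trap keeps firing here; fall back to an explicit runtime check.
    }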
 750 
 751   // Returns the object (if any) which was created the moment before.
 752   Node* just_allocated_object(Node* current_control);
 753 
 754   static bool use_ReduceInitialCardMarks() {
 755     return (ReduceInitialCardMarks
 756             && Universe::heap()->can_elide_tlab_store_barriers());
 757   }
 758 
 759   // Sync Ideal and Graph kits.
 760   void sync_kit(IdealKit& ideal);
 761   void final_sync(IdealKit& ideal);
 762 
 763   // vanilla/CMS post barrier
 764   void write_barrier_post(Node *store, Node* obj,
 765                           Node* adr,  uint adr_idx, Node* val, bool use_precise);
 766 
 767   // Allow reordering of pre-barrier with oop store and/or post-barrier.
 768   // Used for load_store operations which loads old value.
 769   bool can_move_pre_barrier() const;
 770 
 771   // G1 pre/post barriers
 772   void g1_write_barrier_pre(bool do_load,
 773                             Node* obj,
 774                             Node* adr,
 775                             uint alias_idx,
 776                             Node* val,
 777                             const TypeOopPtr* val_type,
 778                             Node* pre_val,
 779                             BasicType bt);
 780 
 781   void g1_write_barrier_post(Node* store,
 782                              Node* obj,
 783                              Node* adr,
 784                              uint alias_idx,
 785                              Node* val,
 786                              BasicType bt,
 787                              bool use_precise);
 788   // Helper function for g1
 789   private:
 790   void g1_mark_card(IdealKit& ideal, Node* card_adr, Node* store, uint oop_alias_idx,
 791                     Node* index, Node* index_adr,
 792                     Node* buffer, const TypeFunc* tf);
 793 
 794   bool g1_can_remove_pre_barrier(PhaseTransform* phase, Node* adr, BasicType bt, uint adr_idx);
 795 
 796   bool g1_can_remove_post_barrier(PhaseTransform* phase, Node* store, Node* adr);
 797 
 798   public:
 799   // Helper function to round double arguments before a call
 800   void round_double_arguments(ciMethod* dest_method);
 801   void round_double_result(ciMethod* dest_method);
 802 
 803   // rounding for strict float precision conformance
 804   Node* precision_rounding(Node* n);
 805 
 806   // rounding for strict double precision conformance
 807   Node* dprecision_rounding(Node* n);
 808 
 809   // rounding for non-strict double stores
 810   Node* dstore_rounding(Node* n);
 811 
 812   // Helper functions for fast/slow path codes
 813   Node* opt_iff(Node* region, Node* iff);
 814   Node* make_runtime_call(int flags,
 815                           const TypeFunc* call_type, address call_addr,
 816                           const char* call_name,


New version (rev 12906, gc_interface):

  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_OPTO_GRAPHKIT_HPP
  26 #define SHARE_VM_OPTO_GRAPHKIT_HPP
  27 
  28 #include "ci/ciEnv.hpp"
  29 #include "ci/ciMethodData.hpp"
  30 #include "gc/shared/c2BarrierSetCodeGen.hpp"
  31 #include "opto/addnode.hpp"
  32 #include "opto/callnode.hpp"
  33 #include "opto/cfgnode.hpp"
  34 #include "opto/compile.hpp"
  35 #include "opto/divnode.hpp"
  36 #include "opto/mulnode.hpp"
  37 #include "opto/phaseX.hpp"
  38 #include "opto/subnode.hpp"
  39 #include "opto/type.hpp"
  40 #include "runtime/deoptimization.hpp"
  41 
  42 class FastLockNode;
  43 class FastUnlockNode;
  44 class IdealKit;
  45 class LibraryCallKit;
  46 class Parse;
  47 class RootNode;
  48 
  49 //-----------------------------------------------------------------------------
  50 //----------------------------GraphKit-----------------------------------------


  87   virtual Parse*          is_Parse()          const { return NULL; }
  88   virtual LibraryCallKit* is_LibraryCallKit() const { return NULL; }
  89 
  90   ciEnv*        env()           const { return _env; }
  91   PhaseGVN&     gvn()           const { return _gvn; }
  92 
  93   void record_for_igvn(Node* n) const { C->record_for_igvn(n); }  // delegate to Compile
  94 
  95   // Handy well-known nodes:
  96   Node*         null()          const { return zerocon(T_OBJECT); }
  97   Node*         top()           const { return C->top(); }
  98   RootNode*     root()          const { return C->root(); }
  99 
 100   // Create or find a constant node
 101   Node* intcon(jint con)        const { return _gvn.intcon(con); }
 102   Node* longcon(jlong con)      const { return _gvn.longcon(con); }
 103   Node* makecon(const Type *t)  const { return _gvn.makecon(t); }
 104   Node* zerocon(BasicType bt)   const { return _gvn.zerocon(bt); }
 105   // (See also macro MakeConX in type.hpp, which uses intcon or longcon.)
 106 
 107   jint  find_int_con(Node* n, jint value_if_unknown) {
 108     return _gvn.find_int_con(n, value_if_unknown);
 109   }
 110   jlong find_long_con(Node* n, jlong value_if_unknown) {
 111     return _gvn.find_long_con(n, value_if_unknown);
 112   }
 113   // (See also macro find_intptr_t_con in type.hpp, which uses one of these.)
 114 
 115   // JVM State accessors:
 116   // Parser mapping from JVM indices into Nodes.
 117   // Low slots are accessed by the StartNode::enum.
 118   // Then come the locals at StartNode::Parms to StartNode::Parms+max_locals();
 119   // Then come JVM stack slots.
 120   // Finally come the monitors, if any.
 121   // See layout accessors in class JVMState.
 122 
 123   SafePointNode*     map()      const { return _map; }
 124   bool               has_exceptions() const { return _exceptions != NULL; }
 125   JVMState*          jvms()     const { return map_not_null()->_jvms; }
 126   int                sp()       const { return _sp; }


 547                         MemNode::MemOrd mo,
 548                         bool require_atomic_access = false,
 549                         bool unaligned = false,
 550                         bool mismatched = false) {
 551     // This version computes alias_index from an address type
 552     assert(adr_type != NULL, "use other store_to_memory factory");
 553     return store_to_memory(ctl, adr, val, bt,
 554                            C->get_alias_index(adr_type),
 555                            mo, require_atomic_access,
 556                            unaligned, mismatched);
 557   }
 558   // This is the base version, which is given the alias index
 559   // Return the new StoreXNode
 560   Node* store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt,
 561                         int adr_idx,
 562                         MemNode::MemOrd,
 563                         bool require_atomic_access = false,
 564                         bool unaligned = false,
 565                         bool mismatched = false);
 566 
 567   // Perform decorated accesses
 568 
 569   Node* access_store_at(Node* ctl,
 570                         Node* obj,   // containing obj
 571                         Node* adr,   // actual address to store val at
 572                         const TypePtr* adr_type,
 573                         Node* val,
 574                         const Type* val_type,
 575                         BasicType bt,
 576                         C2DecoratorSet decorators);
 577 
 578   Node* access_load_at(Node* obj,   // containing obj
 579                        Node* adr,   // actual address to load val from
 580                        const TypePtr* adr_type,
 581                        const Type* val_type,
 582                        BasicType bt,
 583                        C2DecoratorSet decorators);
 584 
 585   Node* access_cas_val_at(Node* ctl,
 586                           Node* obj,
 587                           Node* adr,
 588                           const TypePtr* adr_type,
 589                           int alias_idx,
 590                           Node* expected_val,
 591                           Node* new_val,
 592                           const Type* value_type,
 593                           BasicType bt,
 594                           C2DecoratorSet decorators);
 595 
 596   Node* access_cas_bool_at(Node* ctl,
 597                            Node* obj,
 598                            Node* adr,
 599                            const TypePtr* adr_type,
 600                            int alias_idx,
 601                            Node* expected_val,
 602                            Node* new_val,
 603                            const Type* value_type,
 604                            BasicType bt,
 605                            C2DecoratorSet decorators);
 606 
 607   Node* access_swap_at(Node* ctl,
 608                        Node* obj,
 609                        Node* adr,
 610                        const TypePtr* adr_type,
 611                        int alias_idx,
 612                        Node* new_val,
 613                        const Type* value_type,
 614                        BasicType bt,
 615                        C2DecoratorSet decorators);
 616 
 617   Node* access_fetch_and_add_at(Node* ctl,
 618                                 Node* obj,
 619                                 Node* adr,
 620                                 const TypePtr* adr_type,
 621                                 int alias_idx,
 622                                 Node* new_val,
 623                                 const Type* value_type,
 624                                 BasicType bt,
 625                                 C2DecoratorSet decorators);
 626 
 627   void access_clone(Node* ctl, Node* src, Node* dst, Node* size, bool is_array);

 628 
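
As a rough sketch of how these decorated entry points are meant to subsume the explicit pre/post barrier plumbing exposed by the old version, an oop field store could look like the following. The decorator constant is a placeholder: the real values come with the C2DecoratorSet type from gc/shared/c2BarrierSetCodeGen.hpp, and the name used here is only an assumption; the offset and node names are likewise illustrative.

    int field_offset = 16;                               // illustrative only
    Node* adr = basic_plus_adr(obj, field_offset);
    const TypePtr* adr_type = _gvn.type(adr)->isa_ptr();
    // C2_ACCESS_ON_HEAP is a hypothetical decorator name for a plain in-heap store.
    access_store_at(control(), obj, adr, adr_type, val,
                    _gvn.type(val), T_OBJECT, C2_ACCESS_ON_HEAP);

The cas/swap/fetch_and_add variants follow the same shape (additionally taking the alias index and the expected/new values), so GC-specific barrier selection is driven by the decorator set rather than by explicit g1_*/write_barrier_post calls at each use site.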
 629   // Return addressing for an array element.
 630   Node* array_element_address(Node* ary, Node* idx, BasicType elembt,
 631                               // Optional constraint on the array size:
 632                               const TypeInt* sizetype = NULL,
 633                               // Optional control dependency (for example, on range check)
 634                               Node* ctrl = NULL);
 635 
 636   // Return a load of array element at idx.
 637   Node* load_array_element(Node* ctl, Node* ary, Node* idx, const TypeAryPtr* arytype);
 638 
 639   //---------------- Dtrace support --------------------
 640   void make_dtrace_method_entry_exit(ciMethod* method, bool is_entry);
 641   void make_dtrace_method_entry(ciMethod* method) {
 642     make_dtrace_method_entry_exit(method, true);
 643   }
 644   void make_dtrace_method_exit(ciMethod* method) {
 645     make_dtrace_method_entry_exit(method, false);
 646   }
 647 


 729   }
 730 
 731   // SP when bytecode needs to be reexecuted.
 732   virtual int reexecute_sp() { return sp(); }
 733 
 734   // Report if there were too many traps at the current method and bci.
 735   // Report if a trap was recorded, and/or PerMethodTrapLimit was exceeded.
 736   // If there is no MDO at all, report no trap unless told to assume it.
 737   bool too_many_traps(Deoptimization::DeoptReason reason) {
 738     return C->too_many_traps(method(), bci(), reason);
 739   }
 740 
 741   // Report if there were too many recompiles at the current method and bci.
 742   bool too_many_recompiles(Deoptimization::DeoptReason reason) {
 743     return C->too_many_recompiles(method(), bci(), reason);
 744   }
 745 
 746   // Returns the object (if any) which was created the moment before.
 747   Node* just_allocated_object(Node* current_control);
 748 
 749   // Sync Ideal and Graph kits.
 750   void sync_kit(IdealKit& ideal);
 751   void final_sync(IdealKit& ideal);
 752 
 753   public:
 754   // Helper function to round double arguments before a call
 755   void round_double_arguments(ciMethod* dest_method);
 756   void round_double_result(ciMethod* dest_method);
 757 
 758   // rounding for strict float precision conformance
 759   Node* precision_rounding(Node* n);
 760 
 761   // rounding for strict double precision conformance
 762   Node* dprecision_rounding(Node* n);
 763 
 764   // rounding for non-strict double stores
 765   Node* dstore_rounding(Node* n);
 766 
 767   // Helper functions for fast/slow path codes
 768   Node* opt_iff(Node* region, Node* iff);
 769   Node* make_runtime_call(int flags,
 770                           const TypeFunc* call_type, address call_addr,
 771                           const char* call_name,

