
src/hotspot/share/opto/graphKit.hpp

@@ -64,10 +64,13 @@
   SafePointNode*    _map;       // Parser map from JVM to Nodes
   SafePointNode*    _exceptions;// Parser map(s) for exception state(s)
   int               _bci;       // JVM Bytecode Pointer
   ciMethod*         _method;    // JVM Current Method
   BarrierSetC2*     _barrier_set;
+#ifdef ASSERT
+  uint              _worklist_size;
+#endif
 
  private:
   int               _sp;        // JVM Expression Stack Pointer; don't modify directly!
 
  private:

@@ -76,26 +79,31 @@
     return _map;
   }
 
  public:
   GraphKit();                   // empty constructor
-  GraphKit(JVMState* jvms);     // the JVM state on which to operate
+  GraphKit(JVMState* jvms, PhaseGVN* gvn = NULL);     // the JVM state on which to operate
 
 #ifdef ASSERT
   ~GraphKit() {
     assert(!has_exceptions(), "user must call transfer_exceptions_into_jvms");
+    // During incremental inlining, the Node_Array of the C->for_igvn() worklist and the IGVN
+    // worklist are shared but the _in_worklist VectorSet is not. To avoid inconsistencies,
+    // we should not add nodes to the _for_igvn worklist when using IGVN for the GraphKit.
+    assert((_gvn.is_IterGVN() == NULL) || (_gvn.C->for_igvn()->size() == _worklist_size),
+           "GraphKit should not modify _for_igvn worklist after parsing");
   }
 #endif
 
   virtual Parse*          is_Parse()          const { return NULL; }
   virtual LibraryCallKit* is_LibraryCallKit() const { return NULL; }
 
   ciEnv*        env()               const { return _env; }
   PhaseGVN&     gvn()               const { return _gvn; }
   void*         barrier_set_state() const { return C->barrier_set_state(); }
 
-  void record_for_igvn(Node* n) const { C->record_for_igvn(n); }  // delegate to Compile
+  void record_for_igvn(Node* n) const { _gvn.record_for_igvn(n); }
 
   // Handy well-known nodes:
   Node*         null()          const { return zerocon(T_OBJECT); }
   Node*         top()           const { return C->top(); }
   RootNode*     root()          const { return C->root(); }

@@ -367,10 +375,12 @@
   // Return the value cast to null, and be clever about dominating checks.
   Node* null_assert(Node* value, BasicType type = T_OBJECT) {
     return null_check_common(value, type, true, NULL, _gvn.type(value)->speculative_always_null());
   }
 
+  Node* null2default(Node* value, ciValueKlass* vk = NULL);
+
   // Check if value is null and abort if it is
   Node* must_be_not_null(Node* value, bool do_replace_in_map);
 
   // Null check oop.  Return null-path control into (*null_control).
   // Return a cast-not-null node which depends on the not-null control.

@@ -578,11 +588,12 @@
                         Node* adr,   // actual address to store val at
                         const TypePtr* adr_type,
                         Node* val,
                         const Type* val_type,
                         BasicType bt,
-                        DecoratorSet decorators);
+                        DecoratorSet decorators,
+                        bool deoptimize_on_exception = false);
 
   Node* access_load_at(Node* obj,   // containing obj
                        Node* adr,   // actual address to load val at
                        const TypePtr* adr_type,
                        const Type* val_type,

@@ -630,11 +641,11 @@
                              Node* new_val,
                              const Type* value_type,
                              BasicType bt,
                              DecoratorSet decorators);
 
-  void access_clone(Node* src, Node* dst, Node* size, bool is_array);
+  void access_clone(Node* src_base, Node* dst_base, Node* countx, bool is_array);
 
   Node* access_resolve(Node* n, DecoratorSet decorators);
 
   // Return addressing for an array element.
   Node* array_element_address(Node* ary, Node* idx, BasicType elembt,

@@ -667,10 +678,13 @@
 
   // Do a null check on the receiver as it would happen before the call to
   // callee (with all arguments still on the stack).
   Node* null_check_receiver_before_call(ciMethod* callee) {
     assert(!callee->is_static(), "must be a virtual method");
+    if (argument(0)->is_ValueType()) {
+      return argument(0);
+    }
     // Callsite signature can be different from actual method being called (i.e. _linkTo* sites).
     // Use callsite signature always.
     ciMethod* declared_method = method()->get_method_at_bci(bci());
     const int nargs = declared_method->arg_size();
     inc_sp(nargs);

@@ -679,11 +693,11 @@
     return n;
   }
 
   // Fill in argument edges for the call from argument(0), argument(1), ...
   // (The next step is to call set_edges_for_java_call.)
-  void  set_arguments_for_java_call(CallJavaNode* call);
+  void  set_arguments_for_java_call(CallJavaNode* call, bool incremental_inlining = false);
 
   // Fill in non-argument edges for the call.
   // Transform the call, and update the basics: control, i_o, memory.
   // (The next step is usually to call set_results_for_java_call.)
   void set_edges_for_java_call(CallJavaNode* call,

@@ -817,12 +831,18 @@
   // and the reflective instance-of call.
   Node* gen_instanceof(Node *subobj, Node* superkls, bool safe_for_replace = false);
 
   // Generate a check-cast idiom.  Used by both the check-cast bytecode
   // and the array-store bytecode
-  Node* gen_checkcast( Node *subobj, Node* superkls,
-                       Node* *failure_control = NULL );
+  Node* gen_checkcast(Node *subobj, Node* superkls, Node* *failure_control = NULL, bool never_null = false);
+
+  Node* is_always_locked(Node* obj);
+  Node* gen_value_type_test(Node* kls);
+  void gen_value_type_guard(Node* obj, int nargs = 0);
+  void gen_value_type_array_guard(Node* ary, Node* obj, int nargs);
+  Node* load_lh_array_tag(Node* kls);
+  Node* gen_lh_array_test(Node* kls, unsigned int lh_value);
 
   Node* gen_subtype_check(Node* subklass, Node* superklass) {
     MergeMemNode* mem = merged_memory();
     Node* ctrl = control();
     Node* n = Phase::gen_subtype_check(subklass, superklass, &ctrl, mem, &_gvn);

@@ -833,10 +853,11 @@
   // Exact type check used for predicted calls and casts.
   // Rewrites (*casted_receiver) to be casted to the stronger type.
   // (Caller is responsible for doing replace_in_map.)
   Node* type_check_receiver(Node* receiver, ciKlass* klass, float prob,
                             Node* *casted_receiver);
+  Node* type_check(Node* recv_klass, const TypeKlassPtr* tklass, float prob);
 
   // Inexact type check used for predicted calls.
   Node* subtype_check_receiver(Node* receiver, ciKlass* klass,
                                Node** casted_receiver);
 

@@ -846,11 +867,12 @@
                                   bool deoptimize_on_exception=false);
   Node* get_layout_helper(Node* klass_node, jint& constant_value);
   Node* new_instance(Node* klass_node,
                      Node* slow_test = NULL,
                      Node* *return_size_val = NULL,
-                     bool deoptimize_on_exception = false);
+                     bool deoptimize_on_exception = false,
+                     ValueTypeBaseNode* value_node = NULL);
   Node* new_array(Node* klass_node, Node* count_val, int nargs,
                   Node* *return_size_val = NULL,
                   bool deoptimize_on_exception = false);
 
   // java.lang.String helpers

@@ -884,10 +906,12 @@
   // Insert a loop predicate into the graph
   void add_predicate(int nargs = 0);
   void add_predicate_impl(Deoptimization::DeoptReason reason, int nargs);
 
   Node* make_constant_from_field(ciField* field, Node* obj);
+
+  Node* load_mirror_from_klass(Node* klass);
 };
 
 // Helper class to support building of control flow branches. Upon
 // creation the map and sp at bci are cloned and restored upon de-
 // struction. Typical use: