--- old/src/hotspot/share/opto/graphKit.hpp	2019-03-11 14:26:43.698354636 +0100
+++ new/src/hotspot/share/opto/graphKit.hpp	2019-03-11 14:26:43.466354639 +0100
@@ -66,6 +66,9 @@
   int               _bci;          // JVM Bytecode Pointer
   ciMethod*         _method;       // JVM Current Method
   BarrierSetC2*     _barrier_set;
+#ifdef ASSERT
+  uint              _worklist_size;
+#endif
 
  private:
   int               _sp;           // JVM Expression Stack Pointer; don't modify directly!
@@ -78,11 +81,16 @@
 
  public:
   GraphKit();                   // empty constructor
-  GraphKit(JVMState* jvms);     // the JVM state on which to operate
+  GraphKit(JVMState* jvms, PhaseGVN* gvn = NULL);     // the JVM state on which to operate
 
 #ifdef ASSERT
   ~GraphKit() {
     assert(!has_exceptions(), "user must call transfer_exceptions_into_jvms");
+    // During incremental inlining, the Node_Array of the C->for_igvn() worklist and the IGVN
+    // worklist are shared but the _in_worklist VectorSet is not. To avoid inconsistencies,
+    // we should not add nodes to the _for_igvn worklist when using IGVN for the GraphKit.
+    assert((_gvn.is_IterGVN() == NULL) || (_gvn.C->for_igvn()->size() == _worklist_size),
+           "GraphKit should not modify _for_igvn worklist after parsing");
   }
 #endif
 
@@ -93,7 +101,7 @@
   PhaseGVN&         gvn() const   { return _gvn; }
   void*             barrier_set_state() const { return C->barrier_set_state(); }
 
-  void record_for_igvn(Node* n) const { C->record_for_igvn(n); }  // delegate to Compile
+  void record_for_igvn(Node* n) const { _gvn.record_for_igvn(n); }
 
   // Handy well-known nodes:
   Node*         null()          const { return zerocon(T_OBJECT); }
@@ -369,6 +377,8 @@
     return null_check_common(value, type, true, NULL,
                              _gvn.type(value)->speculative_always_null());
   }
 
+  Node* null2default(Node* value, ciValueKlass* vk = NULL);
+
   // Check if value is null and abort if it is
   Node* must_be_not_null(Node* value, bool do_replace_in_map);
 
@@ -580,7 +590,8 @@
                         Node* val,
                         const Type* val_type,
                         BasicType bt,
-                        DecoratorSet decorators);
+                        DecoratorSet decorators,
+                        bool deoptimize_on_exception = false);
 
   Node* access_load_at(Node* obj,   // containing obj
                        Node* adr,   // actual adress to load val at
@@ -632,7 +643,7 @@
                                 BasicType bt,
                                 DecoratorSet decorators);
 
-  void access_clone(Node* src, Node* dst, Node* size, bool is_array);
+  void access_clone(Node* src_base, Node* dst_base, Node* countx, bool is_array);
 
   Node* access_resolve(Node* n, DecoratorSet decorators);
 
@@ -669,6 +680,9 @@
   // callee (with all arguments still on the stack).
   Node* null_check_receiver_before_call(ciMethod* callee) {
     assert(!callee->is_static(), "must be a virtual method");
+    if (argument(0)->is_ValueType()) {
+      return argument(0);
+    }
     // Callsite signature can be different from actual method being called (i.e _linkTo* sites).
     // Use callsite signature always.
     ciMethod* declared_method = method()->get_method_at_bci(bci());
@@ -681,7 +695,7 @@
 
   // Fill in argument edges for the call from argument(0), argument(1), ...
   // (The next step is to call set_edges_for_java_call.)
-  void set_arguments_for_java_call(CallJavaNode* call);
+  void set_arguments_for_java_call(CallJavaNode* call, bool incremental_inlining = false);
 
   // Fill in non-argument edges for the call.
   // Transform the call, and update the basics: control, i_o, memory.
@@ -819,8 +833,14 @@
 
   // Generate a check-cast idiom.  Used by both the check-cast bytecode
   // and the array-store bytecode
-  Node* gen_checkcast( Node *subobj, Node* superkls,
-                       Node* *failure_control = NULL );
+  Node* gen_checkcast(Node *subobj, Node* superkls, Node* *failure_control = NULL, bool never_null = false);
+
+  Node* is_always_locked(Node* obj);
+  Node* gen_value_type_test(Node* kls);
+  void gen_value_type_guard(Node* obj, int nargs = 0);
+  void gen_value_type_array_guard(Node* ary, Node* obj, int nargs);
+  Node* load_lh_array_tag(Node* kls);
+  Node* gen_lh_array_test(Node* kls, unsigned int lh_value);
 
   Node* gen_subtype_check(Node* subklass, Node* superklass) {
     MergeMemNode* mem = merged_memory();
@@ -835,6 +855,7 @@
   // (Caller is responsible for doing replace_in_map.)
   Node* type_check_receiver(Node* receiver, ciKlass* klass, float prob,
                             Node* *casted_receiver);
+  Node* type_check(Node* recv_klass, const TypeKlassPtr* tklass, float prob);
 
   // Inexact type check used for predicted calls.
   Node* subtype_check_receiver(Node* receiver, ciKlass* klass,
@@ -848,7 +869,8 @@
   Node* new_instance(Node* klass_node,
                      Node* slow_test = NULL,
                      Node* *return_size_val = NULL,
-                     bool deoptimize_on_exception = false);
+                     bool deoptimize_on_exception = false,
+                     ValueTypeBaseNode* value_node = NULL);
   Node* new_array(Node* klass_node, Node* count_val, int nargs,
                   Node* *return_size_val = NULL,
                   bool deoptimize_on_exception = false);
@@ -886,6 +908,8 @@
   void add_predicate_impl(Deoptimization::DeoptReason reason, int nargs);
 
   Node* make_constant_from_field(ciField* field, Node* obj);
+
+  Node* load_mirror_from_klass(Node* klass);
 };
 
 // Helper class to support building of control flow branches. Upon
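
For context on the record_for_igvn and _worklist_size changes above: the destructor
assertion only works together with companion changes in phaseX.hpp and graphKit.cpp
that are not part of this file. A minimal sketch of the intended mechanism, assuming
record_for_igvn becomes virtual on PhaseGVN so that an active IGVN pushes onto its
own worklist (names and placement are illustrative, not the exact patch):

  // phaseX.hpp (sketch): route worklist registration through the GVN phase.
  class PhaseGVN : public PhaseValues {
  public:
    // During parsing there is no IGVN yet; record on Compile::_for_igvn.
    virtual void record_for_igvn(Node* n) { C->record_for_igvn(n); }
  };

  class PhaseIterGVN : public PhaseGVN {
  public:
    // Once IGVN runs (e.g. during incremental inlining), push directly onto the
    // IGVN worklist. The _for_igvn Node_Array is shared with this worklist, but
    // the _in_worklist VectorSet is not, so growing _for_igvn here would leave
    // the two views inconsistent.
    virtual void record_for_igvn(Node* n) { _worklist.push(n); }
  };

  // graphKit.cpp (sketch): in the GraphKit constructor, snapshot the _for_igvn
  // size that the destructor assertion compares against.
  #ifdef ASSERT
    _worklist_size = C->for_igvn()->size();
  #endif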
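The load_lh_array_tag/gen_lh_array_test helpers build on the existing
Klass::_layout_helper encoding, where the topmost bits of an array klass' layout
helper hold the array tag. A sketch of the expected shape (the real implementation
lives in graphKit.cpp and may differ in detail):

  // graphKit.cpp (sketch): expose the array tag stored in the topmost bits of
  // the klass' layout helper so that callers can branch on the array kind.
  Node* GraphKit::load_lh_array_tag(Node* kls) {
    Node* lhp = basic_plus_adr(kls, in_bytes(Klass::layout_helper_offset()));
    Node* lh  = make_load(NULL, lhp, TypeInt::INT, T_INT, MemNode::unordered);
    // Shift the tag bits down (Klass::_lh_array_tag_shift is BitsPerInt - 2).
    return _gvn.transform(new RShiftINode(lh, intcon(Klass::_lh_array_tag_shift)));
  }

  Node* GraphKit::gen_lh_array_test(Node* kls, unsigned int lh_value) {
    // Compare the extracted tag against the expected value, e.g. to distinguish
    // object arrays from flattened value type arrays.
    return _gvn.transform(new CmpINode(load_lh_array_tag(kls), intcon(lh_value)));
  }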
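null2default pairs with the new never_null parameter of gen_checkcast: where a value
type is statically null-free, a null oop must be replaced by the klass' pre-allocated
default value. A sketch of the expected control flow, assuming a default_oop helper
that yields the default instance of a ciValueKlass (an assumed helper, not confirmed
by this file):

  // graphKit.cpp (sketch): substitute the default value oop on the null path
  // and merge it with the non-null path.
  Node* GraphKit::null2default(Node* value, ciValueKlass* vk) {
    Node* null_ctl = top();
    value = null_check_oop(value, &null_ctl);
    if (!null_ctl->is_top()) {
      // The null path is reachable: merge in the default instance.
      Node* region = new RegionNode(3);
      region->init_req(1, control());
      region->init_req(2, null_ctl);
      value = PhiNode::make(region, value, TypeInstPtr::make(TypePtr::BotPTR, vk));
      value->set_req(2, ValueTypeNode::default_oop(gvn(), vk)); // assumed helper
      set_control(gvn().transform(region));
      value = gvn().transform(value);
    }
    return value;
  }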