src/share/vm/opto/graphKit.cpp
*** 3283,3305 ****
  // and return (Node*)NULL.  Otherwise, load the non-constant
  // layout helper value, and return the node which represents it.
  // This two-faced routine is useful because allocation sites
  // almost always feature constant types.
  Node* GraphKit::get_layout_helper(Node* klass_node, jint& constant_value) {
    const TypeKlassPtr* inst_klass = _gvn.type(klass_node)->isa_klassptr();
    if (!StressReflectiveCode && inst_klass != NULL) {
      ciKlass* klass = inst_klass->klass();
      bool     xklass = inst_klass->klass_is_exact();
      if (xklass || klass->is_array_klass()) {
        jint lhelper = klass->layout_helper();
        if (lhelper != Klass::_lh_neutral_value) {
          constant_value = lhelper;
-         return (Node*) NULL;
        }
      }
    }
-   constant_value = Klass::_lh_neutral_value;  // put in a known value
    Node* lhp = basic_plus_adr(klass_node, klass_node, in_bytes(Klass::layout_helper_offset()));
    return make_load(NULL, lhp, TypeInt::INT, T_INT, MemNode::unordered);
  }

  // We just put in an allocate/initialize with a big raw-memory effect.
--- 3283,3304 ----
  // and return (Node*)NULL.  Otherwise, load the non-constant
  // layout helper value, and return the node which represents it.
  // This two-faced routine is useful because allocation sites
  // almost always feature constant types.
  Node* GraphKit::get_layout_helper(Node* klass_node, jint& constant_value) {
+   constant_value = Klass::_lh_neutral_value;  // put in a known value
    const TypeKlassPtr* inst_klass = _gvn.type(klass_node)->isa_klassptr();
    if (!StressReflectiveCode && inst_klass != NULL) {
      ciKlass* klass = inst_klass->klass();
      bool     xklass = inst_klass->klass_is_exact();
      if (xklass || klass->is_array_klass()) {
        jint lhelper = klass->layout_helper();
        if (lhelper != Klass::_lh_neutral_value) {
          constant_value = lhelper;
        }
      }
    }
    Node* lhp = basic_plus_adr(klass_node, klass_node, in_bytes(Klass::layout_helper_offset()));
    return make_load(NULL, lhp, TypeInt::INT, T_INT, MemNode::unordered);
  }

  // We just put in an allocate/initialize with a big raw-memory effect.
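Reviewer note: the contract change in this hunk is that get_layout_helper() no longer returns (Node*)NULL for the constant case. It now pre-sets constant_value to _lh_neutral_value and always returns the loaded layout_helper node, so callers must test the out-parameter rather than the return value. A minimal standalone sketch of the revised contract, using plain C++ stand-ins rather than HotSpot types (all names and values below are hypothetical):

// Standalone sketch of the revised get_layout_helper() contract, with
// plain C++ stand-ins for HotSpot types. Names and values are made up.
#include <cstdio>

static const int LH_NEUTRAL_VALUE = 0;   // stands in for Klass::_lh_neutral_value

struct Node { int id; };                 // stands in for an ideal-graph node

// Old contract: return NULL when the helper is a compile-time constant.
// New contract: always return the load node; signal constancy only through
// the out-parameter, which is pre-set to the neutral value.
Node* get_layout_helper_sketch(bool type_is_exact, int known_lh,
                               int& constant_value, Node* load_node) {
  constant_value = LH_NEUTRAL_VALUE;     // known value up front (the '+' line)
  if (type_is_exact && known_lh != LH_NEUTRAL_VALUE) {
    constant_value = known_lh;           // constant case: report it...
  }
  return load_node;                      // ...but still hand back a real node
}

int main() {
  Node load{42};
  int layout_con = -1;
  get_layout_helper_sketch(true, 24, layout_con, &load);
  // Callers in this patch now test the out-parameter, not the return value:
  bool layout_is_con = (layout_con != LH_NEUTRAL_VALUE);
  printf("layout_is_con=%d layout_con=%d\n", layout_is_con, layout_con);
  return 0;
}

This matters for new_instance() below: it now needs the loaded layout_val even when the type is a constant, because the non-instance size computation falls back to ConvI2X(layout_val).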
*** 3404,3449 ****
  // The optional arguments are for specialized use by intrinsics:
  //  - If 'extra_slow_test' if not null is an extra condition for the slow-path.
  //  - If 'return_size_val', report the the total object size to the caller.
  //  - deoptimize_on_exception controls how Java exceptions are handled (rethrow vs deoptimize)
  Node* GraphKit::new_instance(Node* klass_node,
-                              Node* extra_slow_test,
                               Node* *return_size_val,
                               bool deoptimize_on_exception) {
    // Compute size in doublewords
    // The size is always an integral number of doublewords, represented
    // as a positive bytewise size stored in the klass's layout_helper.
    // The layout_helper also encodes (in a low bit) the need for a slow path.
    jint  layout_con = Klass::_lh_neutral_value;
    Node* layout_val = get_layout_helper(klass_node, layout_con);
!   int   layout_is_con = (layout_val == NULL);
!
!   if (extra_slow_test == NULL)  extra_slow_test = intcon(0);
!   // Generate the initial go-slow test.  It's either ALWAYS (return a
!   // Node for 1) or NEVER (return a NULL) or perhaps (in the reflective
!   // case) a computed value derived from the layout_helper.
!   Node* initial_slow_test = NULL;
    if (layout_is_con) {
      assert(!StressReflectiveCode, "stress mode does not use these paths");
!     bool must_go_slow = Klass::layout_helper_needs_slow_path(layout_con);
!     initial_slow_test = must_go_slow ? intcon(1) : extra_slow_test;
    } else {   // reflective case
      // This reflective path is used by Unsafe.allocateInstance.
      // (It may be stress-tested by specifying StressReflectiveCode.)
!     // Basically, we want to get into the VM is there's an illegal argument.
!     Node* bit = intcon(Klass::_lh_instance_slow_path_bit);
!     initial_slow_test = _gvn.transform( new AndINode(layout_val, bit) );
!     if (extra_slow_test != intcon(0)) {
!       initial_slow_test = _gvn.transform( new OrINode(initial_slow_test, extra_slow_test) );
!     }
      // (Macro-expander will further convert this to a Bool, if necessary.)
    }

    // Find the size in bytes.  This is easy; it's the layout_helper.
    // The size value must be valid even if the slow path is taken.
    Node* size = NULL;
!   if (layout_is_con) {
      size = MakeConX(Klass::layout_helper_size_in_bytes(layout_con));
    } else {   // reflective case
      // This reflective path is used by clone and Unsafe.allocateInstance.
      size = ConvI2X(layout_val);
--- 3403,3461 ----
  // The optional arguments are for specialized use by intrinsics:
  //  - If 'extra_slow_test' if not null is an extra condition for the slow-path.
  //  - If 'return_size_val', report the the total object size to the caller.
  //  - deoptimize_on_exception controls how Java exceptions are handled (rethrow vs deoptimize)
  Node* GraphKit::new_instance(Node* klass_node,
                               Node* *return_size_val,
                               bool deoptimize_on_exception) {
    // Compute size in doublewords
    // The size is always an integral number of doublewords, represented
    // as a positive bytewise size stored in the klass's layout_helper.
    // The layout_helper also encodes (in a low bit) the need for a slow path.
    jint  layout_con = Klass::_lh_neutral_value;
    Node* layout_val = get_layout_helper(klass_node, layout_con);
!   bool  layout_is_con = (layout_con != Klass::_lh_neutral_value);
!
!   // Generate the go-slow test. It's either ALWAYS (ConI(1)), NEVER (ConI(0)),
!   // or (in the reflective case) a computed value derived from the Klass::_layout_helper and
!   // InstanceKlass::_init_state.
!   Node* slow_test = intcon(0);
    if (layout_is_con) {
      assert(!StressReflectiveCode, "stress mode does not use these paths");
!     if (!Klass::layout_helper_is_instance(layout_con) ||  // exception is thrown from runtime
!         Klass::layout_helper_needs_slow_path(layout_con)) {
!       slow_test = intcon(1);
!     }
    } else {   // reflective case
      // This reflective path is used by Unsafe.allocateInstance.
      // (It may be stress-tested by specifying StressReflectiveCode.)
!     // Basically, we want to get into the VM if there's an illegal argument.
!
!     // Note: The argument might still be an illegal value like
!     // Serializable.class or Object[].class.  The runtime will handle it.
!     // But we must make an explicit check for initialization.
!
!     // Check for instance klass & slow bit.
!     Node* bit = intcon(Klass::_lh_array_bit | Klass::_lh_instance_slow_path_bit);
!     Node* lh_test = _gvn.transform(new AndINode(layout_val, bit));
!
!     Node* insp = basic_plus_adr(klass_node, in_bytes(InstanceKlass::init_state_offset()));
!     // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler
!     // can generate code to load it as unsigned byte.
!     Node* init_state = make_load(NULL, insp, TypeInt::UBYTE, T_BOOLEAN, MemNode::unordered);
!     Node* init_bits = intcon(InstanceKlass::fully_initialized);
!     Node* init_test = _gvn.transform(new SubINode(init_state, init_bits));
!     // The 'init_test' is non-zero if we need to take a slow path.
!
!     slow_test = _gvn.transform(new OrINode(lh_test, init_test));
      // (Macro-expander will further convert this to a Bool, if necessary.)
    }

    // Find the size in bytes.  This is easy; it's the layout_helper.
    // The size value must be valid even if the slow path is taken.
    Node* size = NULL;
!   if (layout_is_con && Klass::layout_helper_is_instance(layout_con)) {
      size = MakeConX(Klass::layout_helper_size_in_bytes(layout_con));
    } else {   // reflective case
      // This reflective path is used by clone and Unsafe.allocateInstance.
      size = ConvI2X(layout_val);
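Reviewer note: the combined slow-path test reads naturally as arithmetic. lh_test is zero only when both the array bit and the instance-slow-path bit are clear, init_test is zero only when _init_state equals fully_initialized, and OR-ing them yields zero exactly on the fast path. A tiny self-contained demo of that shape (the constants below are made up for illustration, not the real HotSpot encodings):

// Demonstrates the shape of:
//   slow_test = (layout_val & bit) | (init_state - init_bits)
// Bit positions and state values below are illustrative only.
#include <cstdio>

static const int LH_ARRAY_BIT              = 0x2;  // hypothetical stand-in
static const int LH_INSTANCE_SLOW_PATH_BIT = 0x1;  // hypothetical stand-in
static const int FULLY_INITIALIZED         = 4;    // hypothetical _init_state value

int slow_test(int layout_helper, int init_state) {
  int lh_test   = layout_helper & (LH_ARRAY_BIT | LH_INSTANCE_SLOW_PATH_BIT);
  int init_test = init_state - FULLY_INITIALIZED;
  return lh_test | init_test;  // zero iff both sub-tests pass
}

int main() {
  printf("%d\n", slow_test(24, FULLY_INITIALIZED));      // 0: fast path
  printf("%d\n", slow_test(24, FULLY_INITIALIZED - 1));  // nonzero: class not yet initialized
  printf("%d\n", slow_test(24 | LH_INSTANCE_SLOW_PATH_BIT,
                           FULLY_INITIALIZED));          // nonzero: slow-path bit set
  return 0;
}

Because the OR of the two sub-tests can only be zero when both are zero, a single comparison of slow_test against zero suffices for the allocation guard.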
*** 3470,3481 ****
    Node *mem = reset_memory();
    set_all_memory(mem); // Create new memory state

    AllocateNode* alloc = new AllocateNode(C, AllocateNode::alloc_type(Type::TOP),
                                           control(), mem, i_o(),
!                                          size, klass_node,
!                                          initial_slow_test);

    return set_output_for_allocation(alloc, oop_type, deoptimize_on_exception);
  }

  //-------------------------------new_array-------------------------------------
--- 3482,3492 ----
    Node *mem = reset_memory();
    set_all_memory(mem); // Create new memory state

    AllocateNode* alloc = new AllocateNode(C, AllocateNode::alloc_type(Type::TOP),
                                           control(), mem, i_o(),
!                                          size, klass_node, slow_test);

    return set_output_for_allocation(alloc, oop_type, deoptimize_on_exception);
  }

  //-------------------------------new_array-------------------------------------
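Reviewer note: the AllocateNode now receives the single combined slow_test. A conceptual sketch of what "(Macro-expander will further convert this to a Bool, if necessary.)" amounts to at runtime, with hypothetical stand-in functions for the two allocation paths:

// Conceptual sketch only: the expanded allocation branches on slow_test != 0.
// Function names are hypothetical stand-ins, not HotSpot code.
#include <cstdio>
#include <cstdlib>

void* allocate_fast(size_t size) { printf("fast path\n"); return malloc(size); }
void* allocate_slow(size_t size) { printf("slow path (runtime call)\n"); return malloc(size); }

void* allocate(int slow_test, size_t size) {
  // slow_test == 0 (intcon(0)) lets the optimizer fold the branch away;
  // slow_test == 1 (intcon(1)) makes the allocation always take the runtime path.
  return (slow_test != 0) ? allocate_slow(size) : allocate_fast(size);
}

int main() {
  free(allocate(0, 24));  // e.g. constant, initialized instance klass
  free(allocate(1, 24));  // e.g. Unsafe.allocateInstance on an uninitialized class
  return 0;
}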
*** 3487,3497 ****
                            int   nargs,          // number of arguments to push back for uncommon trap
                            Node* *return_size_val,
                            bool deoptimize_on_exception) {
    jint  layout_con = Klass::_lh_neutral_value;
    Node* layout_val = get_layout_helper(klass_node, layout_con);
!   int   layout_is_con = (layout_val == NULL);

    if (!layout_is_con && !StressReflectiveCode &&
        !too_many_traps(Deoptimization::Reason_class_check)) {
      // This is a reflective array creation site.
      // Optimistically assume that it is a subtype of Object[],
--- 3498,3508 ----
                            int   nargs,          // number of arguments to push back for uncommon trap
                            Node* *return_size_val,
                            bool deoptimize_on_exception) {
    jint  layout_con = Klass::_lh_neutral_value;
    Node* layout_val = get_layout_helper(klass_node, layout_con);
!   bool  layout_is_con = (layout_con != Klass::_lh_neutral_value);

    if (!layout_is_con && !StressReflectiveCode &&
        !too_many_traps(Deoptimization::Reason_class_check)) {
      // This is a reflective array creation site.
      // Optimistically assume that it is a subtype of Object[],