src/share/vm/opto/parse1.cpp

*** 125,135 ****
    case T_OBJECT:
      l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInstPtr::BOTTOM, MemNode::unordered);
      break;
    case T_VALUETYPE: {
      // Load oop and create a new ValueTypeNode
      const TypeValueTypePtr* vtptr_type = TypeValueTypePtr::make(type->is_valuetype(), TypePtr::NotNull);
      l = _gvn.transform(new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, vtptr_type, MemNode::unordered));
!     l = ValueTypeNode::make(gvn(), mem, l);
      break;
    }
    case T_VALUETYPEPTR: {
      l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeValueTypePtr::NOTNULL, MemNode::unordered);
      break;
--- 125,135 ----
    case T_OBJECT:
      l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInstPtr::BOTTOM, MemNode::unordered);
      break;
    case T_VALUETYPE: {
      // Load oop and create a new ValueTypeNode
      const TypeValueTypePtr* vtptr_type = TypeValueTypePtr::make(type->is_valuetype(), TypePtr::NotNull);
      l = _gvn.transform(new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, vtptr_type, MemNode::unordered));
!     l = ValueTypeNode::make(this, l);
      break;
    }
    case T_VALUETYPEPTR: {
      l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeValueTypePtr::NOTNULL, MemNode::unordered);
      break;
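
The T_VALUETYPE case above loads the buffered value's oop out of the raw OSR buffer and then wraps it in a ValueTypeNode (now created via ValueTypeNode::make(this, l)). A minimal stand-alone sketch of that load-then-wrap pattern, with every type and function name below invented for illustration only:

  #include <cstdint>
  #include <cstdio>

  struct ToyValueOop { int x; };              // stands in for a buffered value type on the heap

  struct ToyValueTypeNode {                   // stands in for ValueTypeNode
    ToyValueOop* oop;
  };

  // Fetch word 'index' from the OSR buffer and reinterpret it as an oop,
  // mirroring the LoadPNode with TypeValueTypePtr in the hunk above.
  static ToyValueOop* fetch_value_oop(const intptr_t* osr_buf, int index) {
    return reinterpret_cast<ToyValueOop*>(osr_buf[index]);
  }

  int main() {
    ToyValueOop v = { 7 };
    intptr_t osr_buf[1] = { reinterpret_cast<intptr_t>(&v) };  // interpreter wrote the oop here

    ToyValueOop* l = fetch_value_oop(osr_buf, 0);              // the load step
    ToyValueTypeNode vt = { l };                               // the ValueTypeNode::make step
    printf("loaded value field x = %d\n", vt.oop->x);
    return 0;
  }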
*** 203,213 ****
  void Parse::load_interpreter_state(Node* osr_buf) {
    int index;
    int max_locals = jvms()->loc_size();
    int max_stack  = jvms()->stk_size();
  
- 
    // Mismatch between method and jvms can occur since map briefly held
    // an OSR entry state (which takes up one RawPtr word).
    assert(max_locals == method()->max_locals(), "sanity");
    assert(max_stack >= method()->max_stack(), "sanity");
    assert((int)jvms()->endoff() == TypeFunc::Parms + max_locals + max_stack, "sanity");
--- 203,212 ----
*** 241,259 ****
    Node *monitors_addr = basic_plus_adr(osr_buf, osr_buf, (max_locals+mcnt*2-1)*wordSize);
    for (index = 0; index < mcnt; index++) {
      // Make a BoxLockNode for the monitor.
      Node *box = _gvn.transform(new BoxLockNode(next_monitor()));
- 
      // Displaced headers and locked objects are interleaved in the
      // temp OSR buffer. We only copy the locked objects out here.
      // Fetch the locked object from the OSR temp buffer and copy to our fastlock node.
      Node* lock_object = fetch_interpreter_state(index*2, Type::get_const_basic_type(T_OBJECT), monitors_addr, osr_buf);
      // Try and copy the displaced header to the BoxNode
      Node* displaced_hdr = fetch_interpreter_state((index*2) + 1, Type::get_const_basic_type(T_ADDRESS), monitors_addr, osr_buf);
- 
      store_to_memory(control(), box, displaced_hdr, T_ADDRESS, Compile::AliasIdxRaw, MemNode::unordered);
  
      // Build a bogus FastLockNode (no code will be generated) and push the
      // monitor into our debug info.
      const FastLockNode *flock = _gvn.transform(new FastLockNode( 0, lock_object, box ))->as_FastLock();
--- 240,256 ----
*** 806,816 ****
    if (ret_oop_type && !ret_oop_type->klass()->is_loaded()) {
      ret_type = TypeOopPtr::BOTTOM;
    }
    if ((_caller->has_method() || tf()->returns_value_type_as_fields()) &&
        ret_type->isa_valuetypeptr() &&
!       ret_type->is_valuetypeptr()->klass() != C->env()->___Value_klass()) {
      // When inlining or with multiple return values: return value
      // type as ValueTypeNode not as oop
      ret_type = ret_type->is_valuetypeptr()->value_type();
    }
    int ret_size = type2size[ret_type->basic_type()];
--- 803,813 ----
    if (ret_oop_type && !ret_oop_type->klass()->is_loaded()) {
      ret_type = TypeOopPtr::BOTTOM;
    }
    if ((_caller->has_method() || tf()->returns_value_type_as_fields()) &&
        ret_type->isa_valuetypeptr() &&
!       !ret_type->is_valuetypeptr()->is__Value()) {
      // When inlining or with multiple return values: return value
      // type as ValueTypeNode not as oop
      ret_type = ret_type->is_valuetypeptr()->value_type();
    }
    int ret_size = type2size[ret_type->basic_type()];
*** 858,870 ****
    } else {
      // Value type arguments are not passed by reference: we get an
      // argument per field of the value type. Build ValueTypeNodes
      // from the value type arguments.
      const Type* t = tf->domain_sig()->field_at(i);
!     if (t->isa_valuetypeptr() && t->is_valuetypeptr()->klass() != C->env()->___Value_klass()) {
        ciValueKlass* vk = t->is_valuetypeptr()->value_type()->value_klass();
!       Node* vt = ValueTypeNode::make(gvn, start, vk, j, true);
        map->init_req(i, gvn.transform(vt));
        j += vk->value_arg_slots();
      } else {
        Node* parm = gvn.transform(new ParmNode(start, j));
        map->init_req(i, parm);
--- 855,869 ----
    } else {
      // Value type arguments are not passed by reference: we get an
      // argument per field of the value type. Build ValueTypeNodes
      // from the value type arguments.
      const Type* t = tf->domain_sig()->field_at(i);
!     if (t->isa_valuetypeptr() && !t->is_valuetypeptr()->is__Value()) {
        ciValueKlass* vk = t->is_valuetypeptr()->value_type()->value_klass();
!       Node* ctl = map->control();
!       Node* vt = ValueTypeNode::make(gvn, ctl, map->memory(), start, vk, j, true);
!       map->set_control(ctl);
        map->init_req(i, gvn.transform(vt));
        j += vk->value_arg_slots();
      } else {
        Node* parm = gvn.transform(new ParmNode(start, j));
        map->init_req(i, parm);
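
On the new side, a scalarized value type argument is rebuilt into a single ValueTypeNode from its per-field parameter slots, and the slot index j then advances by vk->value_arg_slots(). A minimal stand-alone sketch of that reassembly, assuming a toy two-field value; all names below are invented for the example:

  #include <cstdio>

  struct PointValue {                        // stands in for a ciValueKlass with two int fields
    int x;
    int y;
    static const int value_arg_slots = 2;    // one argument slot per field
  };

  // Reassemble a PointValue from consecutive "parameter slots" starting at j,
  // the way the parser builds a ValueTypeNode from scalarized arguments.
  static PointValue make_from_slots(const int* slots, int j) {
    PointValue vt;
    vt.x = slots[j];
    vt.y = slots[j + 1];
    return vt;
  }

  int main() {
    // Incoming argument area: one unrelated slot followed by the two
    // scalarized fields of a PointValue argument.
    int incoming[] = { /* other arg */ 42, /* Point.x */ 3, /* Point.y */ 4 };
    int j = 1;                               // first field slot of the value argument
    PointValue p = make_from_slots(incoming, j);
    j += PointValue::value_arg_slots;        // advance past all field slots
    printf("p = (%d, %d), next slot index = %d\n", p.x, p.y, j);
    return 0;
  }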
*** 876,886 ****
    } else {
      Node* parm = gvn.transform(new ParmNode(start, i));
      // Check if parameter is a value type pointer
      if (gvn.type(parm)->isa_valuetypeptr()) {
        // Create ValueTypeNode from the oop and replace the parameter
!       parm = ValueTypeNode::make(gvn, map->memory(), parm);
      }
      map->init_req(i, parm);
      // Record all these guys for later GVN.
      record_for_igvn(parm);
      j++;
--- 875,887 ----
    } else {
      Node* parm = gvn.transform(new ParmNode(start, i));
      // Check if parameter is a value type pointer
      if (gvn.type(parm)->isa_valuetypeptr()) {
        // Create ValueTypeNode from the oop and replace the parameter
!       Node* ctl = map->control();
!       parm = ValueTypeNode::make(gvn, ctl, map->memory(), parm);
!       map->set_control(ctl);
      }
      map->init_req(i, parm);
      // Record all these guys for later GVN.
      record_for_igvn(parm);
      j++;
*** 929,939 ****
      // to the Return node as returned values.
      assert(res->is_ValueType(), "what else supports multi value return");
      ValueTypeNode* vt = res->as_ValueType();
      ret->add_req_batch(NULL, tf()->range_cc()->cnt() - TypeFunc::Parms);
      vt->pass_klass(ret, TypeFunc::Parms, kit);
!     vt->pass_fields(ret, TypeFunc::Parms+1, kit);
    } else {
      ret->add_req(res);
      // Note:  The second dummy edge is not needed by a ReturnNode.
    }
  }
--- 930,940 ----
      // to the Return node as returned values.
      assert(res->is_ValueType(), "what else supports multi value return");
      ValueTypeNode* vt = res->as_ValueType();
      ret->add_req_batch(NULL, tf()->range_cc()->cnt() - TypeFunc::Parms);
      vt->pass_klass(ret, TypeFunc::Parms, kit);
!     vt->pass_fields(ret, TypeFunc::Parms+1, kit, /* assert_allocated */ true);
    } else {
      ret->add_req(res);
      // Note:  The second dummy edge is not needed by a ReturnNode.
    }
  }
*** 2270,2284 ****
  }
  
  //------------------------------return_current---------------------------------
  // Append current _map to _exit_return
  void Parse::return_current(Node* value) {
!   if (value != NULL && value->is_ValueType() && !_caller->has_method() &&
!       !tf()->returns_value_type_as_fields()) {
!     // Returning from root JVMState without multiple returned values,
!     // make sure value type is allocated
!     value = value->as_ValueType()->allocate(this);
    }
  
    if (RegisterFinalizersAtInit &&
        method()->intrinsic_id() == vmIntrinsics::_Object_init) {
      call_register_finalizer();
--- 2271,2289 ----
  }
  
  //------------------------------return_current---------------------------------
  // Append current _map to _exit_return
  void Parse::return_current(Node* value) {
!   if (value != NULL && value->is_ValueType() && !_caller->has_method()) {
!     // Returning a value type from root JVMState
!     if (tf()->returns_value_type_as_fields()) {
!       // Value type is returned as fields, make sure non-flattened value type fields are allocated
!       value = value->as_ValueType()->allocate_fields(this);
!     } else {
!       // Value type is returned as oop, make sure it's allocated
!       value = value->as_ValueType()->allocate(this)->get_oop();
!     }
    }
  
    if (RegisterFinalizersAtInit &&
        method()->intrinsic_id() == vmIntrinsics::_Object_init) {
      call_register_finalizer();
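
The reworked branch separates the two return conventions for a value type returned from the root JVMState: returned as fields (only non-flattened field values need backing allocations, via allocate_fields) or returned as a single oop (the whole value is buffered via allocate and its oop is returned). A minimal stand-alone sketch of that decision, with every type and function below made up for illustration:

  #include <cstdio>

  struct ToyValue {
    int  x;
    bool buffered;        // stands in for "has a heap-allocated backing object"
  };

  // Counterpart of allocate(): buffer the whole value so its oop can be returned.
  static ToyValue* allocate_as_oop(ToyValue* v)  { v->buffered = true; return v; }

  // Counterpart of allocate_fields(): the field values travel individually,
  // so only nested, non-flattened values would need buffering here.
  static ToyValue* allocate_fields(ToyValue* v)  { return v; }

  // Mirrors the shape of the new return_current() branch for the root JVMState.
  static ToyValue* return_value(ToyValue* value, bool returns_value_type_as_fields) {
    if (returns_value_type_as_fields) {
      return allocate_fields(value);   // returned as fields
    } else {
      return allocate_as_oop(value);   // returned as oop
    }
  }

  int main() {
    ToyValue v = { 42, false };
    printf("as fields: buffered = %d\n", return_value(&v, true)->buffered);
    printf("as oop:    buffered = %d\n", return_value(&v, false)->buffered);
    return 0;
  }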