src/share/vm/opto/parse1.cpp
*** old/src/share/vm/opto/parse1.cpp	Thu May 15 17:09:30 2014
--- new/src/share/vm/opto/parse1.cpp	Thu May 15 17:09:30 2014
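Every line flagged with '-' in the hunks below allocates an ideal-graph node through the Compile-arena placement form, new (C) SomeNode(...) (or new (this) ... inside Compile methods). The new file's lines are not reproduced here; the apparent change is to drop the explicit arena argument and allocate nodes with plain operator new. A minimal before/after sketch, assuming that is indeed what the new revision does:

    // old revision, as on the '-' lines below
    Node* chk = _gvn.transform( new (C) CmpPNode(l, null()) );
    // new revision (assumed): same node, without the explicit Compile* placement argument
    Node* chk = _gvn.transform( new CmpPNode(l, null()) );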

*** 107,133 **** --- 107,133 ----
    // Very similar to LoadNode::make, except we handle un-aligned longs and
    // doubles on Sparc.  Intel can handle them just fine directly.
    Node *l;
    switch (bt) {                // Signature is flattened
-   case T_INT:     l = new (C) LoadINode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInt::INT,        MemNode::unordered); break;
-   case T_FLOAT:   l = new (C) LoadFNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::FLOAT,         MemNode::unordered); break;
-   case T_ADDRESS: l = new (C) LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM,  MemNode::unordered); break;
-   case T_OBJECT:  l = new (C) LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInstPtr::BOTTOM, MemNode::unordered); break;
    case T_LONG:
    case T_DOUBLE: {
      // Since arguments are in reverse order, the argument address 'adr'
      // refers to the back half of the long/double.  Recompute adr.
      adr = basic_plus_adr(local_addrs_base, local_addrs, -(index+1)*wordSize);
      if (Matcher::misaligned_doubles_ok) {
        l = (bt == T_DOUBLE)
-         ? (Node*)new (C) LoadDNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::DOUBLE, MemNode::unordered)
-         : (Node*)new (C) LoadLNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeLong::LONG, MemNode::unordered);
      } else {
        l = (bt == T_DOUBLE)
-         ? (Node*)new (C) LoadD_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered)
-         : (Node*)new (C) LoadL_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered);
      }
      break;
    }
    default: ShouldNotReachHere();
    }
*** 147,161 **** --- 147,161 ----
    // TypeFlow may assert null-ness if a type appears unloaded.
    if (type == TypePtr::NULL_PTR ||
        (tp != NULL && !tp->klass()->is_loaded())) {
      // Value must be null, not a real oop.
-     Node* chk = _gvn.transform( new (C) CmpPNode(l, null()) );
-     Node* tst = _gvn.transform( new (C) BoolNode(chk, BoolTest::eq) );
      IfNode* iff = create_and_map_if(control(), tst, PROB_MAX, COUNT_UNKNOWN);
-     set_control(_gvn.transform( new (C) IfTrueNode(iff) ));
-     Node* bad_type = _gvn.transform( new (C) IfFalseNode(iff) );
      bad_type_exit->control()->add_req(bad_type);
      l = null();
    }

    // Typeflow can also cut off paths from the CFG, based on
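These '-' lines, like the later finalizer-registration, RTM-state, and nmethod-age hunks, use C2's standard idiom for materializing a branch in the ideal graph: build a Cmp node, wrap it in a Bool carrying the condition, feed that to an If, then project the two control edges with IfTrue/IfFalse. A condensed sketch of the idiom (variable names are illustrative, not taken from the file):

    Node*   cmp = _gvn.transform( new (C) CmpPNode(value, null()) );     // compare
    Node*   bol = _gvn.transform( new (C) BoolNode(cmp, BoolTest::eq) ); // condition on the compare
    IfNode* iff = create_and_map_if(control(), bol, PROB_MAX, COUNT_UNKNOWN);
    set_control(_gvn.transform( new (C) IfTrueNode(iff) ));              // continue on the expected path
    Node* off_path = _gvn.transform( new (C) IfFalseNode(iff) );         // unexpected path, routed elsewhere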
*** 218,228 **** --- 218,228 ----
    assert(jvms()->monitor_depth() == 0, "should be no active locks at beginning of osr");
    int mcnt = osr_block->flow()->monitor_count();
    Node *monitors_addr = basic_plus_adr(osr_buf, osr_buf, (max_locals+mcnt*2-1)*wordSize);
    for (index = 0; index < mcnt; index++) {
      // Make a BoxLockNode for the monitor.
-     Node *box = _gvn.transform(new (C) BoxLockNode(next_monitor()));

      // Displaced headers and locked objects are interleaved in the
      // temp OSR buffer.  We only copy the locked objects out here.
      // Fetch the locked object from the OSR temp buffer and copy to our fastlock node.
*** 233,243 **** --- 233,243 ----
      store_to_memory(control(), box, displaced_hdr, T_ADDRESS, Compile::AliasIdxRaw, MemNode::unordered);

      // Build a bogus FastLockNode (no code will be generated) and push the
      // monitor into our debug info.
-     const FastLockNode *flock = _gvn.transform(new (C) FastLockNode( 0, lock_object, box ))->as_FastLock();
      map()->push_monitor(flock);

      // If the lock is our method synchronization lock, tuck it away in
      // _sync_lock for return and rethrow exit paths.
      if (index == 0 && method()->is_synchronized()) {
*** 323,333 **** --- 323,333 ----
                           osr_buf);

    // Now that the interpreter state is loaded, make sure it will match
    // at execution time what the compiler is expecting now:
    SafePointNode* bad_type_exit = clone_map();
-   bad_type_exit->set_control(new (C) RegionNode(1));

    assert(osr_block->flow()->jsrs()->size() == 0, "should be no jsrs live at osr point");
    for (index = 0; index < max_locals; index++) {
      if (stopped())  break;
      Node* l = local(index);
*** 659,669 **** --- 659,669 ----
      // It is fine to set it here since do_one_block() will set it anyway.
      set_parse_bci(block->start());
      add_predicate();
      // Add new region for back branches.
      int edges = block->pred_count() - block->preds_parsed() + 1; // +1 for original region
-     RegionNode *r = new (C) RegionNode(edges+1);
      _gvn.set_type(r, Type::CONTROL);
      record_for_igvn(r);
      r->init_req(edges, control());
      set_control(r);
      // Add new phis.
*** 726,743 **** --- 726,743 ----
    // make a clone of caller to prevent sharing of side-effects
    _exits.set_map(_exits.clone_map());
    _exits.clean_stack(_exits.sp());
    _exits.sync_jvms();

-   RegionNode* region = new (C) RegionNode(1);
    record_for_igvn(region);
    gvn().set_type_bottom(region);
    _exits.set_control(region);

    // Note:  iophi and memphi are not transformed until do_exits.
-   Node* iophi  = new (C) PhiNode(region, Type::ABIO);
-   Node* memphi = new (C) PhiNode(region, Type::MEMORY, TypePtr::BOTTOM);
    gvn().set_type_bottom(iophi);
    gvn().set_type_bottom(memphi);
    _exits.set_i_o(iophi);
    _exits.set_all_memory(memphi);
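The exit bookkeeping in this hunk is the usual Region/Phi pairing: a RegionNode merges the control edges of all normal method exits, and one PhiNode per merged value (I/O state, memory, and the return value in the following hunk) collects the matching input in the same slot. A small sketch of how the pair is filled in later, not copied verbatim from the file:

    RegionNode* region = new (C) RegionNode(1);                 // one slot added per exit path
    Node*       iophi  = new (C) PhiNode(region, Type::ABIO);
    Node*       memphi = new (C) PhiNode(region, Type::MEMORY, TypePtr::BOTTOM);
    // each returning path then does, roughly:
    //   region->add_req(exit_control);
    //   iophi ->add_req(exit_i_o);
    //   memphi->add_req(exit_memory);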
*** 750,760 **** --- 750,760 ----
      const TypeOopPtr* ret_oop_type = ret_type->isa_oopptr();
      if (ret_oop_type && !ret_oop_type->klass()->is_loaded()) {
        ret_type = TypeOopPtr::BOTTOM;
      }
      int         ret_size = type2size[ret_type->basic_type()];
-     Node*       ret_phi  = new (C) PhiNode(region, ret_type);
      gvn().set_type_bottom(ret_phi);
      _exits.ensure_stack(ret_size);
      assert((int)(tf()->range()->cnt() - TypeFunc::Parms) == ret_size, "good tf range");
      assert(method()->return_type()->size() == ret_size, "tf agrees w/ method");
      _exits.set_argument(0, ret_phi);  // here is where the parser finds it
*** 768,778 **** --- 768,778 ----
  // unknown caller.  The method & bci will be NULL & InvocationEntryBci.
  JVMState* Compile::build_start_state(StartNode* start, const TypeFunc* tf) {
    int        arg_size = tf->domain()->cnt();
    int        max_size = MAX2(arg_size, (int)tf->range()->cnt());
    JVMState*  jvms     = new (this) JVMState(max_size - TypeFunc::Parms);
-   SafePointNode* map  = new (this) SafePointNode(max_size, NULL);
    record_for_igvn(map);
    assert(arg_size == TypeFunc::Parms + (is_osr_compilation() ? 1 : method()->arg_size()), "correct arg_size");
    Node_Notes* old_nn = default_node_notes();
    if (old_nn != NULL && has_method()) {
      Node_Notes* entry_nn = old_nn->clone(this);
*** 782,792 **** --- 782,792 ----
      entry_nn->set_jvms(entry_jvms);
      set_default_node_notes(entry_nn);
    }
    uint i;
    for (i = 0; i < (uint)arg_size; i++) {
-     Node* parm = initial_gvn()->transform(new (this) ParmNode(start, i));
      map->init_req(i, parm);
      // Record all these guys for later GVN.
      record_for_igvn(parm);
    }
    for (; i < map->req(); i++) {
*** 813,823 **** --- 813,823 ----
  //--------------------------return_values--------------------------------------
  void Compile::return_values(JVMState* jvms) {
    GraphKit kit(jvms);
-   Node* ret = new (this) ReturnNode(TypeFunc::Parms,
                                      kit.control(),
                                      kit.i_o(),
                                      kit.reset_memory(),
                                      kit.frameptr(),
                                      kit.returnadr());
*** 841,851 **** --- 841,851 ----
    GraphKit kit(jvms);
    if (!kit.has_exceptions())  return;  // nothing to generate
    // Load my combined exception state into the kit, with all phis transformed:
    SafePointNode* ex_map = kit.combine_and_pop_all_exception_states();
    Node* ex_oop = kit.use_exception_state(ex_map);
-   RethrowNode* exit = new (this) RethrowNode(kit.control(),
                                               kit.i_o(), kit.reset_memory(),
                                               kit.frameptr(), kit.returnadr(),
                                               // like a return but with exception input
                                               ex_oop);
    // bind to root
*** 1063,1073 **** --- 1063,1073 ----
    assert(method() != NULL, "parser must have a method");

    // Create an initial safepoint to hold JVM state during parsing
    JVMState* jvms = new (C) JVMState(method(), _caller->has_method() ? _caller : NULL);
-   set_map(new (C) SafePointNode(len, jvms));
    jvms->set_map(map());
    record_for_igvn(map());
    assert(jvms->endoff() == len, "correct jvms sizing");

    SafePointNode* inmap = _caller->map();
*** 1576,1586 **** --- 1576,1586 ----
    }

    // Add a Region to start the new basic block.  Phis will be added
    // later lazily.
    int edges = target->pred_count();
    if (edges < pnum)  edges = pnum;  // might be a new path!
-   RegionNode *r = new (C) RegionNode(edges+1);
    gvn().set_type(r, Type::CONTROL);
    record_for_igvn(r);
    // zap all inputs to NULL for debugging (done in Node(uint) constructor)
    // for (int j = 1; j < edges+1; j++) { r->init_req(j, NULL); }
    r->init_req(pnum, control());
*** 1971,1993 **** --- 1971,1993 ----
    Node* klass = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), klass_addr, TypeInstPtr::KLASS) );

    Node* access_flags_addr = basic_plus_adr(klass, klass, in_bytes(Klass::access_flags_offset()));
    Node* access_flags = make_load(NULL, access_flags_addr, TypeInt::INT, T_INT, MemNode::unordered);

-   Node* mask  = _gvn.transform(new (C) AndINode(access_flags, intcon(JVM_ACC_HAS_FINALIZER)));
-   Node* check = _gvn.transform(new (C) CmpINode(mask, intcon(0)));
-   Node* test  = _gvn.transform(new (C) BoolNode(check, BoolTest::ne));

    IfNode* iff = create_and_map_if(control(), test, PROB_MAX, COUNT_UNKNOWN);

-   RegionNode* result_rgn = new (C) RegionNode(3);
    record_for_igvn(result_rgn);

-   Node *skip_register = _gvn.transform(new (C) IfFalseNode(iff));
    result_rgn->init_req(1, skip_register);

-   Node *needs_register = _gvn.transform(new (C) IfTrueNode(iff));
    set_control(needs_register);
    if (stopped()) {
      // There is no slow path.
      result_rgn->init_req(2, top());
    } else {
*** 2037,2049 **** --- 2037,2049 ----
      // In expand_macro_nodes() it will be replaced either
      // with this load when there are locks in the code
      // or with ProfileRTM (cmp->in(2)) otherwise so that
      // the check will fold.
      Node* profile_state = makecon(TypeInt::make(ProfileRTM));
-     Node* opq = _gvn.transform( new (C) Opaque3Node(C, rtm_state, Opaque3Node::RTM_OPT) );
-     Node* chk = _gvn.transform( new (C) CmpINode(opq, profile_state) );
-     Node* tst = _gvn.transform( new (C) BoolNode(chk, BoolTest::eq) );
      // Branch to failure if state was changed
      { BuildCutout unless(this, tst, PROB_ALWAYS);
        uncommon_trap(Deoptimization::Reason_rtm_state_change,
                      Deoptimization::Action_make_not_entrant);
      }
*** 2064,2077 **** --- 2064,2077 ----
    const TypePtr* adr_type = TypeRawPtr::make((address)mc);
    Node* mc_adr = makecon(adr_type);
    Node* cnt_adr = basic_plus_adr(mc_adr, mc_adr, in_bytes(MethodCounters::nmethod_age_offset()));
    Node* cnt = make_load(control(), cnt_adr, TypeInt::INT, T_INT, adr_type, MemNode::unordered);
-   Node* decr = _gvn.transform(new (C) SubINode(cnt, makecon(TypeInt::ONE)));
    store_to_memory(control(), cnt_adr, decr, T_INT, adr_type, MemNode::unordered);
-   Node *chk   = _gvn.transform(new (C) CmpINode(decr, makecon(TypeInt::ZERO)));
-   Node* tst   = _gvn.transform(new (C) BoolNode(chk, BoolTest::gt));
    { BuildCutout unless(this, tst, PROB_ALWAYS);
      uncommon_trap(Deoptimization::Reason_tenured,
                    Deoptimization::Action_make_not_entrant);
    }
  }
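The BuildCutout scope used here (and in the RTM hunk above) creates an If from the supplied Bool, makes the unlikely branch the current control for the duration of the block so the uncommon_trap() is emitted on that branch, and restores the likely branch as control when the scope closes; the body is expected to end in a trap, throw, or return. A hedged sketch of the usage pattern:

    { BuildCutout unless(this, tst, PROB_ALWAYS);   // 'tst' being true is the expected case
      uncommon_trap(Deoptimization::Reason_tenured, // emitted only on the unlikely branch
                    Deoptimization::Action_make_not_entrant);
    }
    // parsing continues here on the expected (test-passed) branch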
*** 2122,2132 **** --- 2122,2132 ----
        if (tp && tp->klass()->is_loaded() &&
            !tp->klass()->is_interface()) {
          // sharpen the type eagerly; this eases certain assert checking
          if (tp->higher_equal(TypeInstPtr::NOTNULL))
            tr = tr->join_speculative(TypeInstPtr::NOTNULL)->is_instptr();
-         value = _gvn.transform(new (C) CheckCastPPNode(0,value,tr));
        }
      }
      phi->add_req(value);
    }
*** 2157,2167 **** --- 2157,2167 ----
    // Clear out dead values from the debug info.
    kill_dead_locals();

    // Clone the JVM State
-   SafePointNode *sfpnt = new (C) SafePointNode(parms, NULL);

    // Capture memory state BEFORE a SafePoint.  Since we can block at a
    // SafePoint we need our GC state to be safe; i.e. we need all our current
    // write barriers (card marks) to not float down after the SafePoint so we
    // must read raw memory.  Likewise we need all oop stores to match the card
