
src/hotspot/share/opto/callnode.cpp

rev 48500 : 8194988: 8 Null pointer dereference defect groups related to MultiNode::proj_out()
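The two listings that follow show the same regions of callnode.cpp before and after the change: calls of the form proj_out(x) whose result can legitimately be absent become proj_out_or_null(x), and the surrounding NULL checks guard the result before it is used. As a rough illustration of the pattern (a minimal standalone sketch, not HotSpot code; Proj, Multi and has_projection are simplified stand-ins, and it assumes proj_out_or_null() returns NULL when the requested projection does not exist):

#include <cstddef>

struct Proj {
  unsigned which;   // which projection of the multi-node this is
  Proj*    next;    // next projection in a hypothetical flat list
};

struct Multi {
  Proj* outs;       // hypothetical list of output projections

  // Analogue of proj_out_or_null(): returns NULL when no projection matches.
  Proj* proj_out_or_null(unsigned which_proj) const {
    for (Proj* p = outs; p != NULL; p = p->next) {
      if (p->which == which_proj) return p;
    }
    return NULL;    // projection is absent; the caller must handle this
  }
};

// The caller mirrors the checks kept in callnode.cpp: test the projection
// against NULL before using it.
bool has_projection(const Multi* m, unsigned which_proj) {
  Proj* proj = m->proj_out_or_null(which_proj);
  if (proj == NULL) {
    return false;   // bail out safely instead of dereferencing NULL
  }
  return true;      // proj is safe to inspect here
}

In the hunks below the NULL checks already exist (for example proj == NULL at line 776 and catchproj != NULL at line 1392); the rename to proj_out_or_null() makes the may-be-NULL contract explicit at each call site.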


 755         if (j == 2) {
 756           dest = in(i);
 757           break;
 758         }
 759       }
 760     }
 761     if (!dest->is_top() && may_modify_arraycopy_helper(phase->type(dest)->is_oopptr(), t_oop, phase)) {
 762       return true;
 763     }
 764     return false;
 765   }
 766   if (t_oop->is_known_instance()) {
 767     // The instance_id is set only for scalar-replaceable allocations which
 768     // are not passed as arguments according to Escape Analysis.
 769     return false;
 770   }
 771   if (t_oop->is_ptr_to_boxed_value()) {
 772     ciKlass* boxing_klass = t_oop->klass();
 773     if (is_CallStaticJava() && as_CallStaticJava()->is_boxing_method()) {
 774       // Skip unrelated boxing methods.
 775       Node* proj = proj_out(TypeFunc::Parms);
 776       if ((proj == NULL) || (phase->type(proj)->is_instptr()->klass() != boxing_klass)) {
 777         return false;
 778       }
 779     }
 780     if (is_CallJava() && as_CallJava()->method() != NULL) {
 781       ciMethod* meth = as_CallJava()->method();
 782       if (meth->is_getter()) {
 783         return false;
 784       }
 785       // May modify (by reflection) if a boxing object is passed
 786       // as argument or returned.
 787       Node* proj = returns_pointer() ? proj_out(TypeFunc::Parms) : NULL;
 788       if (proj != NULL) {
 789         const TypeInstPtr* inst_t = phase->type(proj)->isa_instptr();
 790         if ((inst_t != NULL) && (!inst_t->klass_is_exact() ||
 791                                  (inst_t->klass() == boxing_klass))) {
 792           return true;
 793         }
 794       }
 795       const TypeTuple* d = tf()->domain();
 796       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
 797         const TypeInstPtr* inst_t = d->field_at(i)->isa_instptr();
 798         if ((inst_t != NULL) && (!inst_t->klass_is_exact() ||
 799                                  (inst_t->klass() == boxing_klass))) {
 800           return true;
 801         }
 802       }
 803       return false;
 804     }
 805   }
 806   return true;
 807 }
 808 
 809 // Does this call have a direct reference to n other than debug information?
 810 bool CallNode::has_non_debug_use(Node *n) {
 811   const TypeTuple * d = tf()->domain();
 812   for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
 813     Node *arg = in(i);
 814     if (arg == n) {
 815       return true;
 816     }
 817   }
 818   return false;
 819 }
 820 
 821 // Returns the unique CheckCastPP of a call
 822 // or 'this' if there are several CheckCastPP nodes or unexpected uses,
 823 // or NULL if there is none.
 824 Node *CallNode::result_cast() {
 825   Node *cast = NULL;
 826 
 827   Node *p = proj_out(TypeFunc::Parms);
 828   if (p == NULL)
 829     return NULL;
 830 
 831   for (DUIterator_Fast imax, i = p->fast_outs(imax); i < imax; i++) {
 832     Node *use = p->fast_out(i);
 833     if (use->is_CheckCastPP()) {
 834       if (cast != NULL) {
 835         return this;  // more than 1 CheckCastPP
 836       }
 837       cast = use;
 838     } else if (!use->is_Initialize() &&
 839                !use->is_AddP() &&
 840                use->Opcode() != Op_MemBarStoreStore) {
 841       // Expected uses are restricted to a CheckCastPP, an Initialize
 842       // node, a MemBarStoreStore (clone) and AddP nodes. If we
 843       // encounter any other use (a Phi node can be seen in rare
 844       // cases) return this to prevent incorrect optimizations.
 845       return this;
 846     }
 847   }


1361   }
1362 
1363   // Allocation node is first parameter in its initializer
1364   if (analyzer->is_arg_stack(0) || analyzer->is_arg_local(0)) {
1365     _is_allocation_MemBar_redundant = true;
1366   }
1367 }
1368 
1369 //=============================================================================
1370 Node* AllocateArrayNode::Ideal(PhaseGVN *phase, bool can_reshape) {
1371   if (remove_dead_region(phase, can_reshape))  return this;
1372   // Don't bother trying to transform a dead node
1373   if (in(0) && in(0)->is_top())  return NULL;
1374 
1375   const Type* type = phase->type(Ideal_length());
1376   if (type->isa_int() && type->is_int()->_hi < 0) {
1377     if (can_reshape) {
1378       PhaseIterGVN *igvn = phase->is_IterGVN();
1379       // Unreachable fall through path (negative array length),
1380       // the allocation can only throw so disconnect it.
1381       Node* proj = proj_out(TypeFunc::Control);
1382       Node* catchproj = NULL;
1383       if (proj != NULL) {
1384         for (DUIterator_Fast imax, i = proj->fast_outs(imax); i < imax; i++) {
1385           Node *cn = proj->fast_out(i);
1386           if (cn->is_Catch()) {
1387             catchproj = cn->as_Multi()->proj_out(CatchProjNode::fall_through_index);
1388             break;
1389           }
1390         }
1391       }
1392       if (catchproj != NULL && catchproj->outcnt() > 0 &&
1393           (catchproj->outcnt() > 1 ||
1394            catchproj->unique_out()->Opcode() != Op_Halt)) {
1395         assert(catchproj->is_CatchProj(), "must be a CatchProjNode");
1396         Node* nproj = catchproj->clone();
1397         igvn->register_new_node_with_optimizer(nproj);
1398 
1399         Node *frame = new ParmNode( phase->C->start(), TypeFunc::FramePtr );
1400         frame = phase->transform(frame);
1401         // Halt & Catch Fire
1402         Node *halt = new HaltNode( nproj, frame );
1403         phase->C->root()->add_req(halt);
1404         phase->transform(halt);
1405 
1406         igvn->replace_node(catchproj, phase->C->top());
1407         return this;


1425   const TypeAryPtr* ary_type = oop_type->isa_aryptr();
1426 
1427   if (ary_type != NULL && length_type != NULL) {
1428     const TypeInt* narrow_length_type = ary_type->narrow_size_type(length_type);
1429     if (narrow_length_type != length_type) {
1430       // Assert one of:
1431       //   - the narrow_length is 0
1432       //   - the narrow_length is not wider than length
1433       assert(narrow_length_type == TypeInt::ZERO ||
1434              length_type->is_con() && narrow_length_type->is_con() &&
1435                 (narrow_length_type->_hi <= length_type->_lo) ||
1436              (narrow_length_type->_hi <= length_type->_hi &&
1437               narrow_length_type->_lo >= length_type->_lo),
1438              "narrow type must be narrower than length type");
1439 
1440       // Return NULL if new nodes are not allowed
1441       if (!allow_new_nodes) return NULL;
1442       // Create a cast which is control dependent on the initialization to
1443       // propagate the fact that the array length must be positive.
1444       length = new CastIINode(length, narrow_length_type);
1445       length->set_req(0, initialization()->proj_out(0));
1446     }
1447   }
1448 
1449   return length;
1450 }
1451 
1452 //=============================================================================
1453 uint LockNode::size_of() const { return sizeof(*this); }
1454 
1455 // Redundant lock elimination
1456 //
1457 // There are various patterns of locking where we release and
1458 // immediately reacquire a lock in a piece of code where no operations
1459 // occur in between that would be observable.  In those cases we can
1460 // skip releasing and reacquiring the lock without violating any
1461 // fairness requirements.  Doing this around a loop could cause a lock
1462 // to be held for a very long time so we concentrate on non-looping
1463 // control flow.  We also require that the operations are fully
1464 // redundant, meaning that we don't introduce new lock operations on
1465 // some paths just to be able to eliminate them on others, a la PRE.  This




 755         if (j == 2) {
 756           dest = in(i);
 757           break;
 758         }
 759       }
 760     }
 761     if (!dest->is_top() && may_modify_arraycopy_helper(phase->type(dest)->is_oopptr(), t_oop, phase)) {
 762       return true;
 763     }
 764     return false;
 765   }
 766   if (t_oop->is_known_instance()) {
 767     // The instance_id is set only for scalar-replaceable allocations which
 768     // are not passed as arguments according to Escape Analysis.
 769     return false;
 770   }
 771   if (t_oop->is_ptr_to_boxed_value()) {
 772     ciKlass* boxing_klass = t_oop->klass();
 773     if (is_CallStaticJava() && as_CallStaticJava()->is_boxing_method()) {
 774       // Skip unrelated boxing methods.
 775       Node* proj = proj_out_or_null(TypeFunc::Parms);
 776       if ((proj == NULL) || (phase->type(proj)->is_instptr()->klass() != boxing_klass)) {
 777         return false;
 778       }
 779     }
 780     if (is_CallJava() && as_CallJava()->method() != NULL) {
 781       ciMethod* meth = as_CallJava()->method();
 782       if (meth->is_getter()) {
 783         return false;
 784       }
 785       // May modify (by reflection) if a boxing object is passed
 786       // as argument or returned.
 787       Node* proj = returns_pointer() ? proj_out_or_null(TypeFunc::Parms) : NULL;
 788       if (proj != NULL) {
 789         const TypeInstPtr* inst_t = phase->type(proj)->isa_instptr();
 790         if ((inst_t != NULL) && (!inst_t->klass_is_exact() ||
 791                                  (inst_t->klass() == boxing_klass))) {
 792           return true;
 793         }
 794       }
 795       const TypeTuple* d = tf()->domain();
 796       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
 797         const TypeInstPtr* inst_t = d->field_at(i)->isa_instptr();
 798         if ((inst_t != NULL) && (!inst_t->klass_is_exact() ||
 799                                  (inst_t->klass() == boxing_klass))) {
 800           return true;
 801         }
 802       }
 803       return false;
 804     }
 805   }
 806   return true;
 807 }
 808 
 809 // Does this call have a direct reference to n other than debug information?
 810 bool CallNode::has_non_debug_use(Node *n) {
 811   const TypeTuple * d = tf()->domain();
 812   for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
 813     Node *arg = in(i);
 814     if (arg == n) {
 815       return true;
 816     }
 817   }
 818   return false;
 819 }
 820 
 821 // Returns the unique CheckCastPP of a call
 822 // or 'this' if there are several CheckCastPP nodes or unexpected uses,
 823 // or NULL if there is none.
 824 Node *CallNode::result_cast() {
 825   Node *cast = NULL;
 826 
 827   Node *p = proj_out_or_null(TypeFunc::Parms);
 828   if (p == NULL)
 829     return NULL;
 830 
 831   for (DUIterator_Fast imax, i = p->fast_outs(imax); i < imax; i++) {
 832     Node *use = p->fast_out(i);
 833     if (use->is_CheckCastPP()) {
 834       if (cast != NULL) {
 835         return this;  // more than 1 CheckCastPP
 836       }
 837       cast = use;
 838     } else if (!use->is_Initialize() &&
 839                !use->is_AddP() &&
 840                use->Opcode() != Op_MemBarStoreStore) {
 841       // Expected uses are restricted to a CheckCastPP, an Initialize
 842       // node, a MemBarStoreStore (clone) and AddP nodes. If we
 843       // encounter any other use (a Phi node can be seen in rare
 844       // cases) return this to prevent incorrect optimizations.
 845       return this;
 846     }
 847   }


1361   }
1362 
1363   // Allocation node is first parameter in its initializer
1364   if (analyzer->is_arg_stack(0) || analyzer->is_arg_local(0)) {
1365     _is_allocation_MemBar_redundant = true;
1366   }
1367 }
1368 
1369 //=============================================================================
1370 Node* AllocateArrayNode::Ideal(PhaseGVN *phase, bool can_reshape) {
1371   if (remove_dead_region(phase, can_reshape))  return this;
1372   // Don't bother trying to transform a dead node
1373   if (in(0) && in(0)->is_top())  return NULL;
1374 
1375   const Type* type = phase->type(Ideal_length());
1376   if (type->isa_int() && type->is_int()->_hi < 0) {
1377     if (can_reshape) {
1378       PhaseIterGVN *igvn = phase->is_IterGVN();
1379       // Unreachable fall through path (negative array length),
1380       // the allocation can only throw so disconnect it.
1381       Node* proj = proj_out_or_null(TypeFunc::Control);
1382       Node* catchproj = NULL;
1383       if (proj != NULL) {
1384         for (DUIterator_Fast imax, i = proj->fast_outs(imax); i < imax; i++) {
1385           Node *cn = proj->fast_out(i);
1386           if (cn->is_Catch()) {
1387             catchproj = cn->as_Multi()->proj_out_or_null(CatchProjNode::fall_through_index);
1388             break;
1389           }
1390         }
1391       }
1392       if (catchproj != NULL && catchproj->outcnt() > 0 &&
1393           (catchproj->outcnt() > 1 ||
1394            catchproj->unique_out()->Opcode() != Op_Halt)) {
1395         assert(catchproj->is_CatchProj(), "must be a CatchProjNode");
1396         Node* nproj = catchproj->clone();
1397         igvn->register_new_node_with_optimizer(nproj);
1398 
1399         Node *frame = new ParmNode( phase->C->start(), TypeFunc::FramePtr );
1400         frame = phase->transform(frame);
1401         // Halt & Catch Fire
1402         Node *halt = new HaltNode( nproj, frame );
1403         phase->C->root()->add_req(halt);
1404         phase->transform(halt);
1405 
1406         igvn->replace_node(catchproj, phase->C->top());
1407         return this;


1425   const TypeAryPtr* ary_type = oop_type->isa_aryptr();
1426 
1427   if (ary_type != NULL && length_type != NULL) {
1428     const TypeInt* narrow_length_type = ary_type->narrow_size_type(length_type);
1429     if (narrow_length_type != length_type) {
1430       // Assert one of:
1431       //   - the narrow_length is 0
1432       //   - the narrow_length is not wider than length
1433       assert(narrow_length_type == TypeInt::ZERO ||
1434              length_type->is_con() && narrow_length_type->is_con() &&
1435                 (narrow_length_type->_hi <= length_type->_lo) ||
1436              (narrow_length_type->_hi <= length_type->_hi &&
1437               narrow_length_type->_lo >= length_type->_lo),
1438              "narrow type must be narrower than length type");
1439 
1440       // Return NULL if new nodes are not allowed
1441       if (!allow_new_nodes) return NULL;
1442       // Create a cast which is control dependent on the initialization to
1443       // propagate the fact that the array length must be positive.
1444       length = new CastIINode(length, narrow_length_type);
1445       length->set_req(0, initialization()->proj_out_or_null(0));
1446     }
1447   }
1448 
1449   return length;
1450 }
1451 
1452 //=============================================================================
1453 uint LockNode::size_of() const { return sizeof(*this); }
1454 
1455 // Redundant lock elimination
1456 //
1457 // There are various patterns of locking where we release and
1458 // immediately reacquire a lock in a piece of code where no operations
1459 // occur in between that would be observable.  In those cases we can
1460 // skip releasing and reacquiring the lock without violating any
1461 // fairness requirements.  Doing this around a loop could cause a lock
1462 // to be held for a very long time so we concentrate on non-looping
1463 // control flow.  We also require that the operations are fully
1464 // redundant, meaning that we don't introduce new lock operations on
1465 // some paths just to be able to eliminate them on others, a la PRE.  This

