
src/share/vm/opto/memnode.cpp

Old version:

2725   Node *adr = in(3);
2726   if (adr == NULL)  return NULL; // node is dead
2727   return MemNode::calculate_adr_type(adr->bottom_type());
2728 }
2729 
2730 //------------------------------match_edge-------------------------------------
2731 // Do we Match on this edge index or not?  Do not match memory
2732 uint ClearArrayNode::match_edge(uint idx) const {
2733   return idx > 1;
2734 }
2735 
2736 //------------------------------Identity---------------------------------------
2737 // Clearing a zero length array does nothing
2738 Node* ClearArrayNode::Identity(PhaseGVN* phase) {
2739   return phase->type(in(2))->higher_equal(TypeX::ZERO)  ? in(1) : this;
2740 }
2741 
2742 //------------------------------Idealize---------------------------------------
2743 // Clearing a short array is faster with stores
2744 Node *ClearArrayNode::Ideal(PhaseGVN *phase, bool can_reshape){



2745   const int unit = BytesPerLong;
2746   const TypeX* t = phase->type(in(2))->isa_intptr_t();
2747   if (!t)  return NULL;
2748   if (!t->is_con())  return NULL;
2749   intptr_t raw_count = t->get_con();
2750   intptr_t size = raw_count;
2751   if (!Matcher::init_array_count_is_in_bytes) size *= unit;
2752   // Clearing nothing uses the Identity call.
2753   // Negative clears are possible on dead ClearArrays
2754   // (see jck test stmt114.stmt11402.val).
2755   if (size <= 0 || size % unit != 0)  return NULL;
2756   intptr_t count = size / unit;
2757   // Length too long; use fast hardware clear
2758   if (size > Matcher::init_array_short_size)  return NULL;



2759   Node *mem = in(1);
2760   if( phase->type(mem)==Type::TOP ) return NULL;
2761   Node *adr = in(3);
2762   const Type* at = phase->type(adr);
2763   if( at==Type::TOP ) return NULL;
2764   const TypePtr* atp = at->isa_ptr();
2765   // adjust atp to be the correct array element address type
2766   if (atp == NULL)  atp = TypePtr::BOTTOM;
2767   else              atp = atp->add_offset(Type::OffsetBot);
2768   // Get base for derived pointer purposes
2769   if( adr->Opcode() != Op_AddP ) Unimplemented();
2770   Node *base = adr->in(1);
2771 
2772   Node *zero = phase->makecon(TypeLong::ZERO);
2773   Node *off  = phase->MakeConX(BytesPerLong);
2774   mem = new StoreLNode(in(0),mem,adr,atp,zero,MemNode::unordered,false);
2775   count--;
2776   while( count-- ) {
2777     mem = phase->transform(mem);
2778     adr = phase->transform(new AddPNode(base,adr,off));
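
In the old version shown above, Ideal only expands a ClearArray whose constant
size is small: the count in in(2) is normalized to bytes, rejected if it is
non-positive or not a multiple of BytesPerLong, converted to a word count, and
abandoned entirely (return NULL) once it exceeds Matcher::init_array_short_size,
leaving long clears to the matcher. A minimal stand-alone sketch of that
normalization, with illustrative names and an assumed 8-byte unit (this is not
the C2 code itself):

    #include <cstdint>

    // Illustrative only: mirrors the normalization in ClearArrayNode::Ideal.
    // Returns the number of 8-byte words to clear, or -1 where Ideal would
    // return NULL (unknown, empty, negative, or misaligned size).
    static intptr_t clear_word_count(intptr_t raw_count, bool count_is_in_bytes) {
      const intptr_t unit = 8;               // BytesPerLong
      intptr_t size = raw_count;
      if (!count_is_in_bytes) size *= unit;  // count given in words -> bytes
      if (size <= 0 || size % unit != 0) return -1;
      return size / unit;
    }

A positive result corresponds to the number of 8-byte zero stores the expansion
emits: the first StoreL is created before the loop, and the loop body (truncated
here) produces the remaining count-1.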


3885         if (next_full_store < 0) {
3886           // Conservative tack:  Zero to end of current word.
3887           zeroes_needed = align_size_up(zeroes_needed, BytesPerInt);
3888         } else {
3889           // Zero to beginning of next fully initialized word.
3890           // Or, don't zero at all, if we are already in that word.
3891           assert(next_full_store >= zeroes_needed, "must go forward");
3892           assert((next_full_store & (BytesPerInt-1)) == 0, "even boundary");
3893           zeroes_needed = next_full_store;
3894         }
3895       }
3896 
3897       if (zeroes_needed > zeroes_done) {
3898         intptr_t zsize = zeroes_needed - zeroes_done;
3899         // Do some incremental zeroing on rawmem, in parallel with inits.
3900         zeroes_done = align_size_down(zeroes_done, BytesPerInt);
3901         rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr,
3902                                               zeroes_done, zeroes_needed,
3903                                               phase);
3904         zeroes_done = zeroes_needed;
3905         if (zsize > Matcher::init_array_short_size && ++big_init_gaps > 2)
3906           do_zeroing = false;   // leave the hole, next time
3907       }
3908     }
3909 
3910     // Collect the store and move on:
3911     st->set_req(MemNode::Memory, inits);
3912     inits = st;                 // put it on the linearized chain
3913     set_req(i, zmem);           // unhook from previous position
3914 
3915     if (zeroes_done == st_off)
3916       zeroes_done = next_init_off;
3917 
3918     assert(!do_zeroing || zeroes_done >= next_init_off, "don't miss any");
3919 
3920     #ifdef ASSERT
3921     // Various order invariants.  Weaker than stores_are_sane because
3922     // a large constant tile can be filled in by smaller non-constant stores.
3923     assert(st_off >= last_init_off, "inits do not reverse");
3924     last_init_off = st_off;
3925     const Type* val = NULL;
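
The gap-zeroing logic above keeps both ends of each incremental clear on
BytesPerInt boundaries: zeroes_needed is either aligned up or taken from an
already word-aligned next_full_store, and zeroes_done is aligned down before
clear_memory is called, so every incremental clear covers whole 4-byte words.
A stand-alone sketch of that alignment arithmetic, using align_up and
align_down as stand-ins for HotSpot's align_size_up and align_size_down and
made-up offsets:

    #include <cassert>
    #include <cstdint>

    // Both helpers assume the alignment is a power of two, as BytesPerInt (4) is.
    static intptr_t align_down(intptr_t x, intptr_t unit) { return x & -unit; }
    static intptr_t align_up  (intptr_t x, intptr_t unit) { return (x + unit - 1) & -unit; }

    int main() {
      const intptr_t BytesPerInt = 4;
      intptr_t zeroes_done   = 10;  // made-up offset: zeroed so far
      intptr_t zeroes_needed = 17;  // made-up offset: where the next store begins
      zeroes_done   = align_down(zeroes_done,   BytesPerInt);  // rounds 10 down to 8
      zeroes_needed = align_up  (zeroes_needed, BytesPerInt);  // rounds 17 up to 20
      assert(zeroes_done == 8 && zeroes_needed == 20);
      return 0;
    }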




New version:

2725   Node *adr = in(3);
2726   if (adr == NULL)  return NULL; // node is dead
2727   return MemNode::calculate_adr_type(adr->bottom_type());
2728 }
2729 
2730 //------------------------------match_edge-------------------------------------
2731 // Do we Match on this edge index or not?  Do not match memory
2732 uint ClearArrayNode::match_edge(uint idx) const {
2733   return idx > 1;
2734 }
2735 
2736 //------------------------------Identity---------------------------------------
2737 // Clearing a zero length array does nothing
2738 Node* ClearArrayNode::Identity(PhaseGVN* phase) {
2739   return phase->type(in(2))->higher_equal(TypeX::ZERO)  ? in(1) : this;
2740 }
2741 
2742 //------------------------------Idealize---------------------------------------
2743 // Clearing a short array is faster with stores
2744 Node *ClearArrayNode::Ideal(PhaseGVN *phase, bool can_reshape){
2745   // Already know this is a large node, do not try to ideal it
2746   if (_is_large) return NULL;
2747 
2748   const int unit = BytesPerLong;
2749   const TypeX* t = phase->type(in(2))->isa_intptr_t();
2750   if (!t)  return NULL;
2751   if (!t->is_con())  return NULL;
2752   intptr_t raw_count = t->get_con();
2753   intptr_t size = raw_count;
2754   if (!Matcher::init_array_count_is_in_bytes) size *= unit;
2755   // Clearing nothing uses the Identity call.
2756   // Negative clears are possible on dead ClearArrays
2757   // (see jck test stmt114.stmt11402.val).
2758   if (size <= 0 || size % unit != 0)  return NULL;
2759   intptr_t count = size / unit;
2760   // Length too long; communicate this to matchers and assemblers.
2761   // Assemblers are responsible to produce fast hardware clears for it.
2762   if (size > InitArrayShortSize) {
2763     return new ClearArrayNode(in(0), in(1), in(2), in(3), true);
2764   }
2765   Node *mem = in(1);
2766   if( phase->type(mem)==Type::TOP ) return NULL;
2767   Node *adr = in(3);
2768   const Type* at = phase->type(adr);
2769   if( at==Type::TOP ) return NULL;
2770   const TypePtr* atp = at->isa_ptr();
2771   // adjust atp to be the correct array element address type
2772   if (atp == NULL)  atp = TypePtr::BOTTOM;
2773   else              atp = atp->add_offset(Type::OffsetBot);
2774   // Get base for derived pointer purposes
2775   if( adr->Opcode() != Op_AddP ) Unimplemented();
2776   Node *base = adr->in(1);
2777 
2778   Node *zero = phase->makecon(TypeLong::ZERO);
2779   Node *off  = phase->MakeConX(BytesPerLong);
2780   mem = new StoreLNode(in(0),mem,adr,atp,zero,MemNode::unordered,false);
2781   count--;
2782   while( count-- ) {
2783     mem = phase->transform(mem);
2784     adr = phase->transform(new AddPNode(base,adr,off));
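
In the new version, Ideal no longer gives up on long clears. Below the
InitArrayShortSize threshold it still expands into the chain of 8-byte StoreL
nodes; above it, it returns a replacement ClearArrayNode built with a trailing
true argument, which evidently sets the _is_large flag tested at the top of the
method, so the node is not idealized again and the matcher and assembler are
left to emit a fast bulk clear. A conceptual sketch of that decision, with
illustrative names and an assumed 8-byte store unit (not the actual C2 code):

    #include <cstdint>

    enum ClearStrategy { kBailOut, kExpandToStores, kMarkLargeForMatcher };

    // Illustrative stand-in for the policy in the updated ClearArrayNode::Ideal.
    static ClearStrategy choose_clear_strategy(intptr_t size_in_bytes,
                                               intptr_t init_array_short_size,
                                               bool already_large) {
      if (already_large)
        return kBailOut;               // _is_large already set: leave the node alone
      if (size_in_bytes <= 0 || size_in_bytes % 8 != 0)
        return kBailOut;               // dead or misaligned clear
      if (size_in_bytes > init_array_short_size)
        return kMarkLargeForMatcher;   // new path: flag as large, bulk clear later
      return kExpandToStores;          // short clear: chain of 8-byte zero stores
    }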


3891         if (next_full_store < 0) {
3892           // Conservative tack:  Zero to end of current word.
3893           zeroes_needed = align_size_up(zeroes_needed, BytesPerInt);
3894         } else {
3895           // Zero to beginning of next fully initialized word.
3896           // Or, don't zero at all, if we are already in that word.
3897           assert(next_full_store >= zeroes_needed, "must go forward");
3898           assert((next_full_store & (BytesPerInt-1)) == 0, "even boundary");
3899           zeroes_needed = next_full_store;
3900         }
3901       }
3902 
3903       if (zeroes_needed > zeroes_done) {
3904         intptr_t zsize = zeroes_needed - zeroes_done;
3905         // Do some incremental zeroing on rawmem, in parallel with inits.
3906         zeroes_done = align_size_down(zeroes_done, BytesPerInt);
3907         rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr,
3908                                               zeroes_done, zeroes_needed,
3909                                               phase);
3910         zeroes_done = zeroes_needed;
3911         if (zsize > InitArrayShortSize && ++big_init_gaps > 2)
3912           do_zeroing = false;   // leave the hole, next time
3913       }
3914     }
3915 
3916     // Collect the store and move on:
3917     st->set_req(MemNode::Memory, inits);
3918     inits = st;                 // put it on the linearized chain
3919     set_req(i, zmem);           // unhook from previous position
3920 
3921     if (zeroes_done == st_off)
3922       zeroes_done = next_init_off;
3923 
3924     assert(!do_zeroing || zeroes_done >= next_init_off, "don't miss any");
3925 
3926     #ifdef ASSERT
3927     // Various order invariants.  Weaker than stores_are_sane because
3928     // a large constant tile can be filled in by smaller non-constant stores.
3929     assert(st_off >= last_init_off, "inits do not reverse");
3930     last_init_off = st_off;
3931     const Type* val = NULL;
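
In the second hunk (InitializeNode::complete_stores), the same InitArrayShortSize
threshold also limits incremental gap zeroing: once more than two gaps larger
than the threshold have been zeroed in line with the captured stores, do_zeroing
is switched off and the remaining holes are left for the final bulk clear. A
minimal sketch of that counter, using made-up gap sizes and a made-up threshold:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const intptr_t init_array_short_size = 64;        // hypothetical threshold (bytes)
      const intptr_t gaps[] = { 16, 128, 256, 512, 8 }; // hypothetical zeroing gaps (bytes)
      int  big_init_gaps = 0;
      bool do_zeroing    = true;
      for (intptr_t zsize : gaps) {
        if (!do_zeroing) break;        // remaining holes go to the trailing bulk clear
        // ... incremental zeroing of this gap would be emitted here ...
        if (zsize > init_array_short_size && ++big_init_gaps > 2)
          do_zeroing = false;          // third oversized gap: stop zeroing incrementally
      }
      std::printf("incremental zeroing stopped early: %s\n", do_zeroing ? "no" : "yes");
      return 0;
    }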

