src/share/vm/opto/memnode.cpp

2715   Node *adr = in(3);
2716   if (adr == NULL)  return NULL; // node is dead
2717   return MemNode::calculate_adr_type(adr->bottom_type());
2718 }
2719 
2720 //------------------------------match_edge-------------------------------------
2721 // Do we Match on this edge index or not?  Do not match memory
2722 uint ClearArrayNode::match_edge(uint idx) const {
2723   return idx > 1;
2724 }
2725 
2726 //------------------------------Identity---------------------------------------
2727 // Clearing a zero length array does nothing
2728 Node* ClearArrayNode::Identity(PhaseGVN* phase) {
2729   return phase->type(in(2))->higher_equal(TypeX::ZERO)  ? in(1) : this;
2730 }
2731 
2732 //------------------------------Idealize---------------------------------------
2733 // Clearing a short array is faster with stores
2734 Node *ClearArrayNode::Ideal(PhaseGVN *phase, bool can_reshape){



2735   const int unit = BytesPerLong;
2736   const TypeX* t = phase->type(in(2))->isa_intptr_t();
2737   if (!t)  return NULL;
2738   if (!t->is_con())  return NULL;
2739   intptr_t raw_count = t->get_con();
2740   intptr_t size = raw_count;
2741   if (!Matcher::init_array_count_is_in_bytes) size *= unit;
2742   // Clearing nothing uses the Identity call.
2743   // Negative clears are possible on dead ClearArrays
2744   // (see jck test stmt114.stmt11402.val).
2745   if (size <= 0 || size % unit != 0)  return NULL;
2746   intptr_t count = size / unit;
2747   // Length too long; use fast hardware clear
2748   if (size > Matcher::init_array_short_size)  return NULL;



2749   Node *mem = in(1);
2750   if( phase->type(mem)==Type::TOP ) return NULL;
2751   Node *adr = in(3);
2752   const Type* at = phase->type(adr);
2753   if( at==Type::TOP ) return NULL;
2754   const TypePtr* atp = at->isa_ptr();
2755   // adjust atp to be the correct array element address type
2756   if (atp == NULL)  atp = TypePtr::BOTTOM;
2757   else              atp = atp->add_offset(Type::OffsetBot);
2758   // Get base for derived pointer purposes
2759   if( adr->Opcode() != Op_AddP ) Unimplemented();
2760   Node *base = adr->in(1);
2761 
2762   Node *zero = phase->makecon(TypeLong::ZERO);
2763   Node *off  = phase->MakeConX(BytesPerLong);
2764   mem = new StoreLNode(in(0),mem,adr,atp,zero,MemNode::unordered,false);
2765   count--;
2766   while( count-- ) {
2767     mem = phase->transform(mem);
2768     adr = phase->transform(new AddPNode(base,adr,off));


3875         if (next_full_store < 0) {
3876           // Conservative tack:  Zero to end of current word.
3877           zeroes_needed = align_size_up(zeroes_needed, BytesPerInt);
3878         } else {
3879           // Zero to beginning of next fully initialized word.
3880           // Or, don't zero at all, if we are already in that word.
3881           assert(next_full_store >= zeroes_needed, "must go forward");
3882           assert((next_full_store & (BytesPerInt-1)) == 0, "even boundary");
3883           zeroes_needed = next_full_store;
3884         }
3885       }
3886 
3887       if (zeroes_needed > zeroes_done) {
3888         intptr_t zsize = zeroes_needed - zeroes_done;
3889         // Do some incremental zeroing on rawmem, in parallel with inits.
3890         zeroes_done = align_size_down(zeroes_done, BytesPerInt);
3891         rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr,
3892                                               zeroes_done, zeroes_needed,
3893                                               phase);
3894         zeroes_done = zeroes_needed;
3895         if (zsize > Matcher::init_array_short_size && ++big_init_gaps > 2)
3896           do_zeroing = false;   // leave the hole, next time
3897       }
3898     }
3899 
3900     // Collect the store and move on:
3901     st->set_req(MemNode::Memory, inits);
3902     inits = st;                 // put it on the linearized chain
3903     set_req(i, zmem);           // unhook from previous position
3904 
3905     if (zeroes_done == st_off)
3906       zeroes_done = next_init_off;
3907 
3908     assert(!do_zeroing || zeroes_done >= next_init_off, "don't miss any");
3909 
3910     #ifdef ASSERT
3911     // Various order invariants.  Weaker than stores_are_sane because
3912     // a large constant tile can be filled in by smaller non-constant stores.
3913     assert(st_off >= last_init_off, "inits do not reverse");
3914     last_init_off = st_off;
3915     const Type* val = NULL;




2715   Node *adr = in(3);
2716   if (adr == NULL)  return NULL; // node is dead
2717   return MemNode::calculate_adr_type(adr->bottom_type());
2718 }
2719 
2720 //------------------------------match_edge-------------------------------------
2721 // Do we Match on this edge index or not?  Do not match memory
2722 uint ClearArrayNode::match_edge(uint idx) const {
2723   return idx > 1;
2724 }
2725 
2726 //------------------------------Identity---------------------------------------
2727 // Clearing a zero length array does nothing
2728 Node* ClearArrayNode::Identity(PhaseGVN* phase) {
2729   return phase->type(in(2))->higher_equal(TypeX::ZERO)  ? in(1) : this;
2730 }
2731 
2732 //------------------------------Idealize---------------------------------------
2733 // Clearing a short array is faster with stores
2734 Node *ClearArrayNode::Ideal(PhaseGVN *phase, bool can_reshape){
2735   // Already know this is a large node, do not try to ideal it
2736   if (_is_large) return NULL;
2737 
2738   const int unit = BytesPerLong;
2739   const TypeX* t = phase->type(in(2))->isa_intptr_t();
2740   if (!t)  return NULL;
2741   if (!t->is_con())  return NULL;
2742   intptr_t raw_count = t->get_con();
2743   intptr_t size = raw_count;
2744   if (!Matcher::init_array_count_is_in_bytes) size *= unit;
2745   // Clearing nothing uses the Identity call.
2746   // Negative clears are possible on dead ClearArrays
2747   // (see jck test stmt114.stmt11402.val).
2748   if (size <= 0 || size % unit != 0)  return NULL;
2749   intptr_t count = size / unit;
2750   // Length too long; communicate this to matchers and assemblers.
2751   // Assemblers are responsible to produce fast hardware clears for it.
2752   if (size > InitArrayShortSize) {
2753     return new ClearArrayNode(in(0), in(1), in(2), in(3), true);
2754   }
2755   Node *mem = in(1);
2756   if( phase->type(mem)==Type::TOP ) return NULL;
2757   Node *adr = in(3);
2758   const Type* at = phase->type(adr);
2759   if( at==Type::TOP ) return NULL;
2760   const TypePtr* atp = at->isa_ptr();
2761   // adjust atp to be the correct array element address type
2762   if (atp == NULL)  atp = TypePtr::BOTTOM;
2763   else              atp = atp->add_offset(Type::OffsetBot);
2764   // Get base for derived pointer purposes
2765   if( adr->Opcode() != Op_AddP ) Unimplemented();
2766   Node *base = adr->in(1);
2767 
2768   Node *zero = phase->makecon(TypeLong::ZERO);
2769   Node *off  = phase->MakeConX(BytesPerLong);
2770   mem = new StoreLNode(in(0),mem,adr,atp,zero,MemNode::unordered,false);
2771   count--;
2772   while( count-- ) {
2773     mem = phase->transform(mem);
2774     adr = phase->transform(new AddPNode(base,adr,off));
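
For a short constant-size clear (at most InitArrayShortSize bytes), the Ideal
transform above replaces the ClearArray with a chain of StoreL nodes, one
8-byte zero store per long word; anything larger is now kept as a single
ClearArrayNode marked large and left to the matcher and assembler. As a rough
illustration of what that expanded store chain does at runtime, here is a
standalone C++ sketch (the helper name clear_short_words is invented for this
example and is not HotSpot code), assuming the same preconditions the
transform checks: a positive size that is a multiple of BytesPerLong.

  #include <cassert>
  #include <cstddef>
  #include <cstdint>

  // Illustrative stand-in for the expanded ClearArray: zero `size` bytes at
  // `base` with one 64-bit store per long word, mirroring the StoreL chain
  // built by ClearArrayNode::Ideal for short constant sizes.
  static void clear_short_words(void* base, size_t size) {
    const size_t unit = sizeof(int64_t);        // BytesPerLong (8)
    assert(size % unit == 0);                   // Ideal bails out otherwise
    int64_t* p = static_cast<int64_t*>(base);
    for (size_t count = size / unit; count > 0; count--) {
      *p++ = 0;                                 // one zero store per word
    }
  }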


3881         if (next_full_store < 0) {
3882           // Conservative tack:  Zero to end of current word.
3883           zeroes_needed = align_size_up(zeroes_needed, BytesPerInt);
3884         } else {
3885           // Zero to beginning of next fully initialized word.
3886           // Or, don't zero at all, if we are already in that word.
3887           assert(next_full_store >= zeroes_needed, "must go forward");
3888           assert((next_full_store & (BytesPerInt-1)) == 0, "even boundary");
3889           zeroes_needed = next_full_store;
3890         }
3891       }
3892 
3893       if (zeroes_needed > zeroes_done) {
3894         intptr_t zsize = zeroes_needed - zeroes_done;
3895         // Do some incremental zeroing on rawmem, in parallel with inits.
3896         zeroes_done = align_size_down(zeroes_done, BytesPerInt);
3897         rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr,
3898                                               zeroes_done, zeroes_needed,
3899                                               phase);
3900         zeroes_done = zeroes_needed;
3901         if (zsize > InitArrayShortSize && ++big_init_gaps > 2)
3902           do_zeroing = false;   // leave the hole, next time
3903       }
3904     }
3905 
3906     // Collect the store and move on:
3907     st->set_req(MemNode::Memory, inits);
3908     inits = st;                 // put it on the linearized chain
3909     set_req(i, zmem);           // unhook from previous position
3910 
3911     if (zeroes_done == st_off)
3912       zeroes_done = next_init_off;
3913 
3914     assert(!do_zeroing || zeroes_done >= next_init_off, "don't miss any");
3915 
3916     #ifdef ASSERT
3917     // Various order invariants.  Weaker than stores_are_sane because
3918     // a large constant tile can be filled in by smaller non-constant stores.
3919     assert(st_off >= last_init_off, "inits do not reverse");
3920     last_init_off = st_off;
3921     const Type* val = NULL;
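
The incremental zeroing above tracks two byte offsets, zeroes_done and
zeroes_needed, and only clears on int-word (BytesPerInt = 4) boundaries: the
start of the gap is rounded down and the end rounded up before
ClearArrayNode::clear_memory is called, and when the next fully initialized
word is known, zeroing stops at its start (or is skipped if the gap already
sits inside that word). A minimal standalone sketch of that rounding
arithmetic, with local align_down/align_up helpers standing in for HotSpot's
align_size_down/align_size_up and invented example offsets:

  #include <cstdint>
  #include <cstdio>

  static const intptr_t BytesPerIntWord = 4;   // stands in for BytesPerInt

  // Valid when `alignment` is a power of two.
  static intptr_t align_down(intptr_t x, intptr_t alignment) {
    return x & ~(alignment - 1);
  }
  static intptr_t align_up(intptr_t x, intptr_t alignment) {
    return align_down(x + alignment - 1, alignment);
  }

  int main() {
    // Invented example: bytes [0, 10) are already zeroed and the next
    // initializing store begins at offset 19.
    intptr_t zeroes_done   = 10;
    intptr_t zeroes_needed = 19;
    intptr_t lo = align_down(zeroes_done, BytesPerIntWord);   // 8
    intptr_t hi = align_up(zeroes_needed, BytesPerIntWord);   // 20
    std::printf("clear raw memory bytes [%ld, %ld)\n", (long)lo, (long)hi);
    return 0;
  }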

