  remove_extra_zeroes();

  if (ReduceFieldZeroing || ReduceBulkZeroing)
    // reduce instruction count for common initialization patterns
    coalesce_subword_stores(header_size, size_in_bytes, phase);
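  // (Illustrative, not from this file: four one-byte inits such as
  //  a[0]=1; a[1]=2; a[2]=3; a[3]=4;  can be coalesced into a single
  // 32-bit constant store, e.g. 0x04030201 on a little-endian machine,
  // cutting four initializing instructions down to one.)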

  Node* zmem = zero_memory();   // initially zero memory state
  Node* inits = zmem;           // accumulating a linearized chain of inits
#ifdef ASSERT
  intptr_t first_offset = allocation()->minimum_header_size();
  intptr_t last_init_off = first_offset;  // previous init offset
  intptr_t last_init_end = first_offset;  // previous init offset+size
  intptr_t last_tile_end = first_offset;  // previous tile offset+size
#endif
  intptr_t zeroes_done = header_size;

  bool do_zeroing = true;       // we might give up if inits are very sparse
  int  big_init_gaps = 0;       // how many large gaps have we seen?

  if (UseTLAB && ZeroTLAB)  do_zeroing = false;
  if (!ReduceFieldZeroing && !ReduceBulkZeroing)  do_zeroing = false;
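  // (When ZeroTLAB is on, each thread-local allocation buffer is zeroed
  // as it is handed to a thread, so freshly allocated memory is already
  // zero and the explicit zeroing below would be redundant.)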

  for (uint i = InitializeNode::RawStores, limit = req(); i < limit; i++) {
    Node* st = in(i);
    intptr_t st_off = get_store_offset(st, phase);
    if (st_off < 0)
      break;                    // unknown junk in the inits
    if (st->in(MemNode::Memory) != zmem)
      break;                    // complicated store chains somehow in list

    int st_size = st->as_Store()->memory_size();
    intptr_t next_init_off = st_off + st_size;

    if (do_zeroing && zeroes_done < next_init_off) {
      // See if this store needs a zero before it or under it.
      intptr_t zeroes_needed = st_off;

      if (st_size < BytesPerInt) {
        // Look for subword stores which only partially initialize words.
        // If we find some, we must lay down some word-level zeroes first,
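        // (Illustrative example, not from this file: a one-byte init at
        // offset 5 leaves bytes 4, 6, and 7 of its containing word
        // untouched, so the 4-byte word at offset 4 must be zeroed before
        // that byte store lands on top of it.)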
        // ... [original lines 3874-3933 elided; the code below resumes
        //      inside the loop's #ifdef ASSERT order-invariant checks] ...
    last_init_off = st_off;
    const Type* val = NULL;
    if (st_size >= BytesPerInt &&
        (val = phase->type(st->in(MemNode::ValueIn)))->singleton() &&
        (int)val->basic_type() < (int)T_OBJECT) {
      assert(st_off >= last_tile_end, "tiles do not overlap");
      assert(st_off >= last_init_end, "tiles do not overwrite inits");
      last_tile_end = MAX2(last_tile_end, next_init_off);
    } else {
      intptr_t st_tile_end = align_size_up(next_init_off, BytesPerLong);
      assert(st_tile_end >= last_tile_end, "inits stay with tiles");
      assert(st_off >= last_init_end, "inits do not overlap");
      last_init_end = next_init_off;  // it's a non-tile
    }
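    // (A "tile" here is a constant-valued, non-oop store at least an int
    // wide, e.g. one built by coalesce_subword_stores(). A tile may cover
    // zeroed gaps between fields, so the asserts only require that it
    // start at or after the previous tile and the previous init, while a
    // non-tile init must not overlap anything laid down before it.)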
#endif //ASSERT
  }

  remove_extra_zeroes();        // clear out all the zmems left over
  add_req(inits);

  if (!(UseTLAB && ZeroTLAB)) {
    // If anything remains to be zeroed, zero it all now.
    zeroes_done = align_size_down(zeroes_done, BytesPerInt);
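    // (For example: with BytesPerInt == 4, a zeroes_done of 14 rounds
    // down to 12, so zeroing restarts from the last int boundary.)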
    // if it is the last unused 4 bytes of an instance, forget about it
    intptr_t size_limit = phase->find_intptr_t_con(size_in_bytes, max_jint);
    if (zeroes_done + BytesPerLong >= size_limit) {
      assert(allocation() != NULL, "");
      if (allocation()->Opcode() == Op_Allocate) {
        Node* klass_node = allocation()->in(AllocateNode::KlassNode);
        ciKlass* k = phase->type(klass_node)->is_klassptr()->klass();
        if (zeroes_done == k->layout_helper())
          zeroes_done = size_limit;
      }
    }
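    // (The idea: when zeroing has already reached the instance size that
    // the klass reports via layout_helper(), only alignment padding is
    // left, and that padding need not be explicitly zeroed.)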
    if (zeroes_done < size_limit) {
      rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr,
                                            zeroes_done, size_in_bytes, phase);
    }
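    // (clear_memory() zeroes whatever is left of the range
    // [zeroes_done, size_in_bytes) in bulk, typically by planting a
    // ClearArray node rather than one store per word.)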
  }

  set_complete(phase);