// ---------------------------------------------------------------------------
// NOTE(review): The text below is a side-by-side diff artifact, not plain
// C++ source: each row holds the OLD revision to the left of the '|'
// separator and the NEW revision to the right, with the original file's line
// numbers (2165-2263) embedded in the text.  The code is the interior of a
// larger function (it begins mid-comment and ends inside a do-while), so it
// must not be restructured from this view.
//
// What the visible fragment does (in both revisions): it walks the ideal
// graph from root(), collecting (a) MergeMem nodes whose memory slice for
// TypeAryPtr::VALUES differs from their base memory, and (b) memory nodes
// whose address type resolves to TypeAryPtr::VALUES.  If any such memory
// nodes exist, it sets _flattened_accesses_share_alias = false, clears the
// alias-cache entries whose address type is an array of value types, and
// computes the [start_alias, stop_alias] range of alias indexes, in
// preparation for moving each flattened-array access to its own slice
// (per the leading comment: one slice per element field).
//
// What the NEW revision (right of '|') changes:
//  1. In the discovery loop, a node with Opcode() == Op_StoreCM derives its
//     address type from its MemNode::OopStore input
//     (n->in(MemNode::OopStore)->adr_type()) instead of from n->adr_type().
//  2. In the alias-range loop, such a StoreCM node is not mutated in place:
//     a new StoreCMNode is allocated with the same Control/Memory/Address/
//     ValueIn/OopStore inputs but with the alias index of the OopStore
//     input's address type, then swapped in via
//     igvn.register_new_node_with_optimizer(clone) / igvn.replace_node(m,
//     clone).  The OLD revision only called set_adr_type() and only under
//     #ifdef ASSERT.
// Presumably the card-mark store must follow the slice of the oop store it
// guards once flattened accesses get private slices — TODO confirm against
// the enclosing function and StoreCMNode's declaration in memnode.hpp.
// NOTE(review): 'igvn' is not declared anywhere in this fragment; it must
// come from the enclosing scope (verify it is the PhaseIterGVN of the
// surrounding pass).
// ---------------------------------------------------------------------------
2165 // keep dependencies with Object[] array accesses (that could be 2166 // to a flattened array) correct. We're done with parsing so we 2167 // now know all flattened array accesses in this compile 2168 // unit. Let's move flattened array accesses to their own slice, 2169 // one per element field. This should help memory access 2170 // optimizations. 2171 ResourceMark rm; 2172 Unique_Node_List wq; 2173 wq.push(root()); 2174 2175 Node_List mergememnodes; 2176 Node_List memnodes; 2177 2178 // Alias index currently shared by all flattened memory accesses 2179 int index = get_alias_index(TypeAryPtr::VALUES); 2180 2181 // Find MergeMem nodes and flattened array accesses 2182 for (uint i = 0; i < wq.size(); i++) { 2183 Node* n = wq.at(i); 2184 if (n->is_Mem()) { 2185 const TypePtr* adr_type = get_adr_type(get_alias_index(n->adr_type())); 2186 if (adr_type == TypeAryPtr::VALUES) { 2187 memnodes.push(n); 2188 } 2189 } else if (n->is_MergeMem()) { 2190 MergeMemNode* mm = n->as_MergeMem(); 2191 if (mm->memory_at(index) != mm->base_memory()) { 2192 mergememnodes.push(n); 2193 } 2194 } 2195 for (uint j = 0; j < n->req(); j++) { 2196 Node* m = n->in(j); 2197 if (m != NULL) { 2198 wq.push(m); 2199 } 2200 } 2201 } 2202 2203 if (memnodes.size() > 0) { 2204 _flattened_accesses_share_alias = false; 2205 2206 // We are going to change the slice for the flattened array 2207 // accesses so we need to clear the cache entries that refer to 2208 // them. 
2209 for (uint i = 0; i < AliasCacheSize; i++) { 2210 AliasCacheEntry* ace = &_alias_cache[i]; 2211 if (ace->_adr_type != NULL && 2212 ace->_adr_type->isa_aryptr() && 2213 ace->_adr_type->is_aryptr()->elem()->isa_valuetype()) { 2214 ace->_adr_type = NULL; 2215 ace->_index = 0; 2216 } 2217 } 2218 2219 // Find what aliases we are going to add 2220 int start_alias = num_alias_types()-1; 2221 int stop_alias = 0; 2222 2223 for (uint i = 0; i < memnodes.size(); i++) { 2224 Node* m = memnodes.at(i); 2225 const TypePtr* adr_type = m->adr_type(); 2226 #ifdef ASSERT 2227 m->as_Mem()->set_adr_type(adr_type); 2228 #endif 2229 int idx = get_alias_index(adr_type); 2230 start_alias = MIN2(start_alias, idx); 2231 stop_alias = MAX2(stop_alias, idx); 2232 } 2233 2234 assert(stop_alias >= start_alias, "should have expanded aliases"); 2235 2236 Node_Stack stack(0); 2237 #ifdef ASSERT 2238 VectorSet seen(Thread::current()->resource_area()); 2239 #endif 2240 // Now let's fix the memory graph so each flattened array access 2241 // is moved to the right slice. Start from the MergeMem nodes. 2242 uint last = unique(); 2243 for (uint i = 0; i < mergememnodes.size(); i++) { 2244 MergeMemNode* current = mergememnodes.at(i)->as_MergeMem(); 2245 Node* n = current->memory_at(index); 2246 MergeMemNode* mm = NULL; 2247 do { 2248 // Follow memory edges through memory accesses, phis and | 2165 // keep dependencies with Object[] array accesses (that could be 2166 // to a flattened array) correct. We're done with parsing so we 2167 // now know all flattened array accesses in this compile 2168 // unit. Let's move flattened array accesses to their own slice, 2169 // one per element field. This should help memory access 2170 // optimizations. 
2171 ResourceMark rm; 2172 Unique_Node_List wq; 2173 wq.push(root()); 2174 2175 Node_List mergememnodes; 2176 Node_List memnodes; 2177 2178 // Alias index currently shared by all flattened memory accesses 2179 int index = get_alias_index(TypeAryPtr::VALUES); 2180 2181 // Find MergeMem nodes and flattened array accesses 2182 for (uint i = 0; i < wq.size(); i++) { 2183 Node* n = wq.at(i); 2184 if (n->is_Mem()) { 2185 const TypePtr* adr_type = NULL; 2186 if (n->Opcode() == Op_StoreCM) { 2187 adr_type = get_adr_type(get_alias_index(n->in(MemNode::OopStore)->adr_type())); 2188 } else { 2189 adr_type = get_adr_type(get_alias_index(n->adr_type())); 2190 } 2191 if (adr_type == TypeAryPtr::VALUES) { 2192 memnodes.push(n); 2193 } 2194 } else if (n->is_MergeMem()) { 2195 MergeMemNode* mm = n->as_MergeMem(); 2196 if (mm->memory_at(index) != mm->base_memory()) { 2197 mergememnodes.push(n); 2198 } 2199 } 2200 for (uint j = 0; j < n->req(); j++) { 2201 Node* m = n->in(j); 2202 if (m != NULL) { 2203 wq.push(m); 2204 } 2205 } 2206 } 2207 2208 if (memnodes.size() > 0) { 2209 _flattened_accesses_share_alias = false; 2210 2211 // We are going to change the slice for the flattened array 2212 // accesses so we need to clear the cache entries that refer to 2213 // them. 
2214 for (uint i = 0; i < AliasCacheSize; i++) { 2215 AliasCacheEntry* ace = &_alias_cache[i]; 2216 if (ace->_adr_type != NULL && 2217 ace->_adr_type->isa_aryptr() && 2218 ace->_adr_type->is_aryptr()->elem()->isa_valuetype()) { 2219 ace->_adr_type = NULL; 2220 ace->_index = 0; 2221 } 2222 } 2223 2224 // Find what aliases we are going to add 2225 int start_alias = num_alias_types()-1; 2226 int stop_alias = 0; 2227 2228 for (uint i = 0; i < memnodes.size(); i++) { 2229 Node* m = memnodes.at(i); 2230 const TypePtr* adr_type = NULL; 2231 if (m->Opcode() == Op_StoreCM) { 2232 adr_type = m->in(MemNode::OopStore)->adr_type(); 2233 Node* clone = new StoreCMNode(m->in(MemNode::Control), m->in(MemNode::Memory), m->in(MemNode::Address), 2234 m->adr_type(), m->in(MemNode::ValueIn), m->in(MemNode::OopStore), 2235 get_alias_index(adr_type)); 2236 igvn.register_new_node_with_optimizer(clone); 2237 igvn.replace_node(m, clone); 2238 } else { 2239 adr_type = m->adr_type(); 2240 #ifdef ASSERT 2241 m->as_Mem()->set_adr_type(adr_type); 2242 #endif 2243 } 2244 int idx = get_alias_index(adr_type); 2245 start_alias = MIN2(start_alias, idx); 2246 stop_alias = MAX2(stop_alias, idx); 2247 } 2248 2249 assert(stop_alias >= start_alias, "should have expanded aliases"); 2250 2251 Node_Stack stack(0); 2252 #ifdef ASSERT 2253 VectorSet seen(Thread::current()->resource_area()); 2254 #endif 2255 // Now let's fix the memory graph so each flattened array access 2256 // is moved to the right slice. Start from the MergeMem nodes. 2257 uint last = unique(); 2258 for (uint i = 0; i < mergememnodes.size(); i++) { 2259 MergeMemNode* current = mergememnodes.at(i)->as_MergeMem(); 2260 Node* n = current->memory_at(index); 2261 MergeMemNode* mm = NULL; 2262 do { 2263 // Follow memory edges through memory accesses, phis and |