/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

// ... (intervening code elided) ...
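// (The fragment below appears to be the tail of MemNode::optimize_simple_memory_chain,
// which optimize_memory_chain calls first.)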
      // Otherwise skip it (the call updated 'result' value).
    } else if (result->is_MergeMem()) {
      result = step_through_mergemem(phase, result->as_MergeMem(), t_oop, NULL, tty);
    }
  }
  return result;
}

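// optimize_memory_chain walks the memory chain feeding a memory op whose
// address is a known-instance oop field; under IterGVN it may split a wide
// memory Phi (split_out_instance) so the chain stays on this instance's
// alias slice.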
Node *MemNode::optimize_memory_chain(Node *mchain, const TypePtr *t_adr, Node *load, PhaseGVN *phase) {
  const TypeOopPtr* t_oop = t_adr->isa_oopptr();
  if (t_oop == NULL)
    return mchain;  // don't try to optimize non-oop types
  Node* result = optimize_simple_memory_chain(mchain, t_oop, load, phase);
  bool is_instance = t_oop->is_known_instance_field();
  PhaseIterGVN *igvn = phase->is_IterGVN();
  if (is_instance && igvn != NULL && result->is_Phi()) {
    PhiNode *mphi = result->as_Phi();
    assert(mphi->bottom_type() == Type::MEMORY, "memory phi required");
    const TypePtr *t = mphi->adr_type();
    if (t == TypePtr::BOTTOM || t == TypeRawPtr::BOTTOM ||
        (t->isa_oopptr() && !t->is_oopptr()->is_known_instance() &&
         t->is_oopptr()->cast_to_exactness(true)
           ->is_oopptr()->cast_to_ptr_type(t_oop->ptr())
           ->is_oopptr()->cast_to_instance_id(t_oop->instance_id()) == t_oop)) {
      // clone the Phi with our address type
      result = mphi->split_out_instance(t_adr, igvn);
    } else {
      assert(phase->C->get_alias_index(t) == phase->C->get_alias_index(t_adr), "correct memory chain");
    }
  }
  return result;
}

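// step_through_mergemem advances a memory-chain search through a MergeMem by
// selecting the slice for tp's alias index; adr_check and st feed the
// consistency checks in the ASSERT block below.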
static Node *step_through_mergemem(PhaseGVN *phase, MergeMemNode *mmem, const TypePtr *tp, const TypePtr *adr_check, outputStream *st) {
  uint alias_idx = phase->C->get_alias_index(tp);
  Node *mem = mmem;
#ifdef ASSERT
  {
    // Check that current type is consistent with the alias index used during graph construction
    assert(alias_idx >= Compile::AliasIdxRaw, "must not be a bad alias_idx");
    bool consistent = adr_check == NULL || adr_check->empty() ||
                      phase->C->must_alias(adr_check, alias_idx);
    // Sometimes dead array references collapse to a[-1], a[-2], or a[-3]
    if( !consistent && adr_check != NULL && !adr_check->empty() &&
// ... (intervening code elided) ...
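// (The fragment below is from MemNode::Ideal_common, the shared early-out
// logic used by the MemNode::Ideal methods; returning NodeSentinel tells
// the caller to return NULL.)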
    }
  }
  // Ignore if memory is dead, or self-loop
  Node *mem = in(MemNode::Memory);
  if (phase->type( mem ) == Type::TOP) return NodeSentinel; // caller will return NULL
  assert(mem != this, "dead loop in MemNode::Ideal");

  if (can_reshape && igvn != NULL && igvn->_worklist.member(mem)) {
    // This memory slice may be dead.
    // Delay this mem node transformation until the memory is processed.
    phase->is_IterGVN()->_worklist.push(this);
    return NodeSentinel; // caller will return NULL
  }

  Node *address = in(MemNode::Address);
  const Type *t_adr = phase->type(address);
  if (t_adr == Type::TOP)              return NodeSentinel; // caller will return NULL

  if (can_reshape && igvn != NULL &&
      (igvn->_worklist.member(address) ||
       (igvn->_worklist.size() > 0 && t_adr != adr_type())) ) {
    // The address's base and type may change when the address is processed.
    // Delay this mem node transformation until the address is processed.
    phase->is_IterGVN()->_worklist.push(this);
    return NodeSentinel; // caller will return NULL
  }

  // Do NOT remove or optimize the next lines: ensure a new alias index
  // is allocated for an oop pointer type before Escape Analysis.
  // Note: C++ will not remove it since the call has a side effect.
  if (t_adr->isa_oopptr()) {
    int alias_idx = phase->C->get_alias_index(t_adr->is_ptr());
  }

  Node* base = NULL;
  if (address->is_AddP()) {
    base = address->in(AddPNode::Base);
  }
  if (base != NULL && phase->type(base)->higher_equal(TypePtr::NULL_PTR) &&
      !t_adr->isa_rawptr()) {
    // Note: raw address has TOP base and top->higher_equal(TypePtr::NULL_PTR) is true.
// ... (intervening code elided) ...
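// (The switch below is the body of the LoadNode::make factory: it picks the
// concrete load node class for the requested basic type bt.)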
  case T_BOOLEAN: load = new LoadUBNode(ctl, mem, adr, adr_type, rt->is_int(),  mo, control_dependency); break;
  case T_BYTE:    load = new LoadBNode (ctl, mem, adr, adr_type, rt->is_int(),  mo, control_dependency); break;
  case T_INT:     load = new LoadINode (ctl, mem, adr, adr_type, rt->is_int(),  mo, control_dependency); break;
  case T_CHAR:    load = new LoadUSNode(ctl, mem, adr, adr_type, rt->is_int(),  mo, control_dependency); break;
  case T_SHORT:   load = new LoadSNode (ctl, mem, adr, adr_type, rt->is_int(),  mo, control_dependency); break;
  case T_LONG:    load = new LoadLNode (ctl, mem, adr, adr_type, rt->is_long(), mo, control_dependency); break;
  case T_FLOAT:   load = new LoadFNode (ctl, mem, adr, adr_type, rt,            mo, control_dependency); break;
  case T_DOUBLE:  load = new LoadDNode (ctl, mem, adr, adr_type, rt,            mo, control_dependency); break;
  case T_ADDRESS: load = new LoadPNode (ctl, mem, adr, adr_type, rt->is_ptr(),  mo, control_dependency); break;
  case T_OBJECT:
#ifdef _LP64
    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
      load = new LoadNNode(ctl, mem, adr, adr_type, rt->make_narrowoop(), mo, control_dependency);
    } else
#endif
    {
      assert(!adr->bottom_type()->is_ptr_to_narrowoop() && !adr->bottom_type()->is_ptr_to_narrowklass(), "should have got back a narrow oop");
      load = new LoadPNode(ctl, mem, adr, adr_type, rt->is_ptr(), mo, control_dependency);
    }
    break;
  default:
    // Unhandled basic type: 'load' stays NULL and the assert below fires.
    break;
  }
  assert(load != NULL, "LoadNode should have been created");
  if (unaligned) {
    load->set_unaligned_access();
  }
  if (mismatched) {
    load->set_mismatched_access();
  }
  if (load->Opcode() == Op_LoadN) {
    Node* ld = gvn.transform(load);
    return new DecodeNNode(ld, ld->bottom_type()->make_ptr());
  }

  return load;
}

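// make_atomic builds a long load with require_atomic set: the 64-bit read
// must be performed as a single, indivisible access (needed, for example,
// for volatile jlong on 32-bit platforms where a plain long load may be
// split into two 32-bit halves).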
LoadLNode* LoadLNode::make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt, MemOrd mo,
                                  ControlDependency control_dependency, bool unaligned, bool mismatched) {
  bool require_atomic = true;
  LoadLNode* load = new LoadLNode(ctl, mem, adr, adr_type, rt->is_long(), mo, control_dependency, require_atomic);
// ... (intervening code elided) ...
  return LoadNode::make(gvn, in(MemNode::Control), in(MemNode::Memory), in(MemNode::Address),
                        raw_adr_type(), rt, bt, _mo, _control_dependency,
                        is_unaligned_access(), is_mismatched_access());
}

// We're loading from an object which has autobox behaviour.
// If this object is the result of a valueOf call we'll have a phi
// merging a newly allocated object and a load from the cache.
// We want to replace this load with the original incoming
// argument to the valueOf call.
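// (Illustration, not from the source: Integer.valueOf(x) for x in the cached
// range returns Integer.IntegerCache.cache[x - IntegerCache.low], so the
// original x can be recovered from the cache index encoded in the load's
// address, which is what the code below does.)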
Node* LoadNode::eliminate_autobox(PhaseGVN* phase) {
  assert(phase->C->eliminate_boxing(), "sanity");
  intptr_t ignore = 0;
  Node* base = AddPNode::Ideal_base_and_offset(in(Address), phase, ignore);
  if ((base == NULL) || base->is_Phi()) {
    // Push the loads from the phi that comes from valueOf up
    // through it to allow elimination of the loads and the recovery
    // of the original value. It is done in split_through_phi().
    return NULL;
  } else if (base->is_Load() ||
             (base->is_DecodeN() && base->in(1)->is_Load())) {
    // Eliminate the load of boxed value for integer types from the cache
    // array by deriving the value from the index into the array.
    // Capture the offset of the load and then reverse the computation.

    // Get LoadN node which loads a boxing object from 'cache' array.
    if (base->is_DecodeN()) {
      base = base->in(1);
    }
    if (!base->in(Address)->is_AddP()) {
      return NULL; // Complex address
    }
    AddPNode* address = base->in(Address)->as_AddP();
    Node* cache_base = address->in(AddPNode::Base);
    if ((cache_base != NULL) && cache_base->is_DecodeN()) {
      // Get ConP node which is static 'cache' field.
      cache_base = cache_base->in(1);
    }
    if ((cache_base != NULL) && cache_base->is_Con()) {
      const TypeAryPtr* base_type = cache_base->bottom_type()->isa_aryptr();
      if ((base_type != NULL) && base_type->is_autobox_cache()) {
        Node* elements[4];
        int shift = exact_log2(type2aelembytes(T_OBJECT));
        int count = address->unpack_offsets(elements, ARRAY_SIZE(elements));
        if (count > 0 && elements[0]->is_Con() &&
            (count == 1 ||
             (count == 2 && elements[1]->Opcode() == Op_LShiftX &&
              elements[1]->in(2) == phase->intcon(shift)))) {
          ciObjArray* array = base_type->const_oop()->as_obj_array();
          // Fetch the box object cache[0] at the base of the array and get its value
          ciInstance* box = array->obj_at(0)->as_instance();
          ciInstanceKlass* ik = box->klass()->as_instance_klass();
          assert(ik->is_box_klass(), "sanity");
          assert(ik->nof_nonstatic_fields() == 1, "change following code");
          if (ik->nof_nonstatic_fields() == 1) {
            // This should always be true; nonstatic_field_at() requires
            // calling nof_nonstatic_fields() first, so check it anyway.
            ciConstant c = box->field_value(ik->nonstatic_field_at(0));
            BasicType bt = c.basic_type();
            // Only integer types have boxing cache.
            assert(bt == T_BOOLEAN || bt == T_CHAR  ||
                   bt == T_BYTE    || bt == T_SHORT ||
                   bt == T_INT     || bt == T_LONG, "wrong type = %s", type2name(bt));
            jlong cache_low = (bt == T_LONG) ? c.as_long() : c.as_int();
            if (cache_low != (int)cache_low) {
              return NULL; // should not happen since cache is array indexed by value
            }
            jlong offset = arrayOopDesc::base_offset_in_bytes(T_OBJECT) - (cache_low << shift);
// ... (intervening code elided) ...
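// (The switch below is the store-side counterpart of LoadNode::make above,
// in the StoreNode::make factory: it selects the concrete store node for
// the value's basic type.)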
  case T_LONG:    return new StoreLNode(ctl, mem, adr, adr_type, val, mo);
  case T_FLOAT:   return new StoreFNode(ctl, mem, adr, adr_type, val, mo);
  case T_DOUBLE:  return new StoreDNode(ctl, mem, adr, adr_type, val, mo);
  case T_METADATA:
  case T_ADDRESS:
  case T_OBJECT:
#ifdef _LP64
    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
      val = gvn.transform(new EncodePNode(val, val->bottom_type()->make_narrowoop()));
      return new StoreNNode(ctl, mem, adr, adr_type, val, mo);
    } else if (adr->bottom_type()->is_ptr_to_narrowklass() ||
               (UseCompressedClassPointers && val->bottom_type()->isa_klassptr() &&
                adr->bottom_type()->isa_rawptr())) {
      val = gvn.transform(new EncodePKlassNode(val, val->bottom_type()->make_narrowklass()));
      return new StoreNKlassNode(ctl, mem, adr, adr_type, val, mo);
    }
#endif
    {
      return new StorePNode(ctl, mem, adr, adr_type, val, mo);
    }
  default:
    ShouldNotReachHere();
    return (StoreNode*)NULL;
  }
}

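// As with LoadLNode::make_atomic above, these factories set require_atomic:
// the 64-bit store must be a single indivisible access (for example, for
// volatile jlong/jdouble on 32-bit platforms).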
StoreLNode* StoreLNode::make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo) {
  bool require_atomic = true;
  return new StoreLNode(ctl, mem, adr, adr_type, val, mo, require_atomic);
}

StoreDNode* StoreDNode::make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo) {
  bool require_atomic = true;
  return new StoreDNode(ctl, mem, adr, adr_type, val, mo, require_atomic);
}


//--------------------------bottom_type----------------------------------------
const Type *StoreNode::bottom_type() const {
  return Type::MEMORY;
}

//------------------------------hash-------------------------------------------
uint StoreNode::hash() const {
// ... (intervening code elided) ...
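// (The fragment below is from MergeMemNode::Ideal: install the rewritten
// base memory, collapse dead self/external cycles, and record progress.)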
    }
  }

  if (new_base != old_base) {
    set_req(Compile::AliasIdxBot, new_base);
    // Don't use set_base_memory(new_base), because we need to update du.
    assert(base_memory() == new_base, "");
    progress = this;
  }

  if( base_memory() == this ) {
    // a self cycle indicates this memory path is dead
    set_req(Compile::AliasIdxBot, empty_mem);
  }

  // Resolve external cycles by calling Ideal on a MergeMem base_memory
  // Recursion must occur after the self cycle check above
  if( base_memory()->is_MergeMem() ) {
    MergeMemNode *new_mbase = base_memory()->as_MergeMem();
    Node *m = phase->transform(new_mbase);  // Rollup any cycles
    if( m != NULL &&
        (m->is_top() ||
         (m->is_MergeMem() && m->as_MergeMem()->base_memory() == empty_mem)) ) {
      // propagate rollup of dead cycle to self
      set_req(Compile::AliasIdxBot, empty_mem);
    }
  }

  if( base_memory() == empty_mem ) {
    progress = this;
    // Cut inputs during Parse phase only.
    // During Optimize phase a dead MergeMem node will be subsumed by Top.
    if( !can_reshape ) {
      for (uint i = Compile::AliasIdxRaw; i < req(); i++) {
        if( in(i) != empty_mem ) { set_req(i, empty_mem); }
      }
    }
  }

  if( !progress && base_memory()->is_Phi() && can_reshape ) {
    // Check if PhiNode::Ideal's "Split phis through memory merges"
    // transform should be attempted. Look for this->phi->this cycle.
    uint merge_width = req();