    if (klass->is_instance_klass() && tkls->klass_is_exact() &&
        tkls->offset() == in_bytes(Klass::super_offset())) {
      ciKlass* sup = klass->as_instance_klass()->super();
      // The field is Klass::_super.  Return its (constant) value.
      // (Folds up the 2nd indirection in aClassConstant.getSuperClass().)
      return sup ? TypeKlassPtr::make(sup) : TypePtr::NULL_PTR;
    }
  }

  // Bailout case
  return LoadNode::Value(phase);
}

//------------------------------Identity---------------------------------------
// To clean up reflective code, simplify k.java_mirror.as_klass to plain k.
// Also feed through the klass in Allocate(...klass...)._klass.
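// Schematically (operand names are illustrative, not actual node fields):
//   LoadKlass(LoadP(k, java_mirror_offset) + klass_offset)  ==>  k
//   LoadKlass(Allocate(..., klass, ...) + klass_offset)     ==>  klass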
Node* LoadKlassNode::Identity(PhaseGVN* phase) {
  return klass_identity_common(phase);
}

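//------------------------------Value-----------------------------------------
// Constant-fold the storage-properties query when the input array klass
// decides the array's null-free/flattened properties at compile time.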
const Type* GetNullFreePropertyNode::Value(PhaseGVN* phase) const {
  if (in(1) != NULL) {
    const Type* in1_t = phase->type(in(1));
    if (in1_t == Type::TOP) {
      return Type::TOP;
    }
    const TypeKlassPtr* tk = in1_t->make_ptr()->is_klassptr();
    ciArrayKlass* ak = tk->klass()->as_array_klass();
    ciKlass* elem = ak->element_klass();
    // The properties are statically known if the klass is exact, or if the
    // element type can never be a value type (an Object, interface or value
    // type element could still turn out to be a flattened/null-free array).
    if (tk->klass_is_exact() || (!elem->is_java_lang_Object() && !elem->is_interface() && !elem->is_valuetype())) {
      // Encode the properties at the bit position used by the input's
      // narrow or wide klass word.
      int props_shift = in1_t->isa_narrowklass() ? oopDesc::narrow_storage_props_shift : oopDesc::wide_storage_props_shift;
      ArrayStorageProperties props = ak->storage_properties();
      intptr_t storage_properties = props.encode<intptr_t>(props_shift);
      if (in1_t->isa_narrowklass()) {
        return TypeInt::make((int)storage_properties);
      }
      return TypeX::make(storage_properties);
    }
  }
  return bottom_type();
}

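//------------------------------Ideal------------------------------------------
// Push the property query through a Phi so that each input can be folded
// independently. Schematically:
//   GetNullFreeProperty(Phi(r, k1, k2))
//     ==>  Phi(r, GetNullFreeProperty(k1), GetNullFreeProperty(k2))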
Node* GetNullFreePropertyNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  if (!can_reshape) {
    return NULL;
  }
  if (in(1) != NULL && in(1)->is_Phi()) {
    Node* phi = in(1);
    Node* r = phi->in(0);
    // Replace this node with a Phi over per-input property queries; GVN will
    // fold the copies whose inputs are sufficiently constant.
    Node* new_phi = new PhiNode(r, bottom_type());
    for (uint i = 1; i < r->req(); i++) {
      Node* in = phi->in(i);
      if (in == NULL) continue;
      new_phi->init_req(i, phase->transform(new GetNullFreePropertyNode(in)));
    }
    return new_phi;
  }
  return NULL;
}

Node* LoadNode::klass_identity_common(PhaseGVN* phase) {
  Node* x = LoadNode::Identity(phase);
  if (x != this) return x;

  // Take apart the address into an oop and an offset.
  // Return 'this' if we cannot.
  Node* adr = in(MemNode::Address);
  intptr_t offset = 0;
  Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
  if (base == NULL) return this;
  const TypeOopPtr* toop = phase->type(adr)->isa_oopptr();
  if (toop == NULL) return this;

  // Step over potential GC barrier for OopHandle resolve
  BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
  if (bs->is_gc_barrier_node(base)) {
    base = bs->step_over_gc_barrier(base);
  }
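  // (Relevant e.g. under ZGC or Shenandoah, where the OopHandle resolve that
  // produced 'base' may be wrapped in a barrier node; stepping over it lets
  // the AllocateNode pattern match below see the underlying allocation.)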

  // We can fetch the klass directly through an AllocateNode.
  // ...

//------------------------------hash-------------------------------------------
uint StoreNode::hash() const {
  // unroll addition of interesting fields
  //return (uintptr_t)in(Control) + (uintptr_t)in(Memory) + (uintptr_t)in(Address) + (uintptr_t)in(ValueIn);

  // Since they are not commoned, do not hash them:
  return NO_HASH;
}

//------------------------------Ideal------------------------------------------
// Change back-to-back Store(Store(m, p, y), p, x) to Store(m, p, x).
// When a store immediately follows a relevant allocation/initialization,
// try to capture it into the initialization, or hoist it above.
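// A concrete instance of the fold (illustrative names; 'st1' must have
// exactly one memory user):
//   st1 = StoreI(m, p, y)
//   st2 = StoreI(st1, p, x)   ==>   st2 = StoreI(m, p, x)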
Node *StoreNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  Node* p = MemNode::Ideal_common(phase, can_reshape);
  if (p)  return (p == NodeSentinel) ? NULL : p;

  Node* mem     = in(MemNode::Memory);
  Node* address = in(MemNode::Address);
  // Back-to-back stores to same address?  Fold em up.  Generally
  // unsafe if I have intervening uses...  Also disallowed for StoreCM
  // since they must follow each StoreP operation.  Redundant StoreCMs
  // are eliminated just before matching in final_graph_reshape.
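  // Valhalla-specific guard: do not fold stores whose alias class is the
  // flattened value-type array slice (TypeAryPtr::VALUES); presumably the
  // field-wise stores that initialize a flattened element must stay intact.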
  if (phase->C->get_adr_type(phase->C->get_alias_index(adr_type())) != TypeAryPtr::VALUES) {
    Node* st = mem;
    // If Store 'st' has more than one use, we cannot fold 'st' away.
    // For example, 'st' might be the final state at a conditional
    // return.  Or, 'st' might be used by some node which is live at
    // the same time 'st' is live, which might be unschedulable.  So,
    // require exactly ONE user until such time as we clone 'mem' for
    // each of 'mem's uses (thus making the exactly-1-user-rule hold
    // true).
    while (st->is_Store() && st->outcnt() == 1 && st->Opcode() != Op_StoreCM) {
      // Looking at a dead closed cycle of memory?
      assert(st != st->in(MemNode::Memory), "dead loop in StoreNode::Ideal");
      assert(Opcode() == st->Opcode() ||
             st->Opcode() == Op_StoreVector ||
             Opcode() == Op_StoreVector ||
             phase->C->get_alias_index(adr_type()) == Compile::AliasIdxRaw ||
             (Opcode() == Op_StoreL && st->Opcode() == Op_StoreI) || // expanded ClearArrayNode
             (Opcode() == Op_StoreI && st->Opcode() == Op_StoreL) || // initialization by arraycopy
             (Opcode() == Op_StoreL && st->Opcode() == Op_StoreN) ||
             (is_mismatched_access() || st->as_Store()->is_mismatched_access()),
             "no mismatched stores, except on raw memory: %s %s", NodeClassNames[Opcode()], NodeClassNames[st->Opcode()]);