< prev index next >

src/share/vm/opto/memnode.cpp

Print this page




1122         int shift = exact_log2(type2aelembytes(T_OBJECT));
1123         int count = address->unpack_offsets(elements, ARRAY_SIZE(elements));
1124         if ((count >  0) && elements[0]->is_Con() &&
1125             ((count == 1) ||
1126              (count == 2) && elements[1]->Opcode() == Op_LShiftX &&
1127                              elements[1]->in(2) == phase->intcon(shift))) {
1128           ciObjArray* array = base_type->const_oop()->as_obj_array();
1129           // Fetch the box object cache[0] at the base of the array and get its value
1130           ciInstance* box = array->obj_at(0)->as_instance();
1131           ciInstanceKlass* ik = box->klass()->as_instance_klass();
1132           assert(ik->is_box_klass(), "sanity");
1133           assert(ik->nof_nonstatic_fields() == 1, "change following code");
1134           if (ik->nof_nonstatic_fields() == 1) {
1135             // This should be true; nonstatic_field_at requires calling
1136             // nof_nonstatic_fields, so check it anyway
1137             ciConstant c = box->field_value(ik->nonstatic_field_at(0));
1138             BasicType bt = c.basic_type();
1139             // Only integer types have boxing cache.
1140             assert(bt == T_BOOLEAN || bt == T_CHAR  ||
1141                    bt == T_BYTE    || bt == T_SHORT ||
1142                    bt == T_INT     || bt == T_LONG, err_msg_res("wrong type = %s", type2name(bt)));
1143             jlong cache_low = (bt == T_LONG) ? c.as_long() : c.as_int();
1144             if (cache_low != (int)cache_low) {
1145               return NULL; // should not happen since cache is array indexed by value
1146             }
1147             jlong offset = arrayOopDesc::base_offset_in_bytes(T_OBJECT) - (cache_low << shift);
1148             if (offset != (int)offset) {
1149               return NULL; // should not happen since cache is array indexed by value
1150             }
1151            // Add up all the offsets making up the address of the load
1152             Node* result = elements[0];
1153             for (int i = 1; i < count; i++) {
1154               result = phase->transform(new AddXNode(result, elements[i]));
1155             }
1156             // Remove the constant offset from the address and then
1157             result = phase->transform(new AddXNode(result, phase->MakeConX(-(int)offset)));
1158             // remove the scaling of the offset to recover the original index.
1159             if (result->Opcode() == Op_LShiftX && result->in(2) == phase->intcon(shift)) {
1160               // Peel the shift off directly but wrap it in a dummy node
1161               // since Ideal can't return existing nodes
1162               result = new RShiftXNode(result->in(1), phase->intcon(0));


2377   // unsafe if I have intervening uses...  Also disallowed for StoreCM
2378   // since they must follow each StoreP operation.  Redundant StoreCMs
2379   // are eliminated just before matching in final_graph_reshape.
2380   {
2381     Node* st = mem;
2382     // If Store 'st' has more than one use, we cannot fold 'st' away.
2383     // For example, 'st' might be the final state at a conditional
2384     // return.  Or, 'st' might be used by some node which is live at
2385     // the same time 'st' is live, which might be unschedulable.  So,
2386     // require exactly ONE user until such time as we clone 'mem' for
2387     // each of 'mem's uses (thus making the exactly-1-user-rule hold
2388     // true).
2389     while (st->is_Store() && st->outcnt() == 1 && st->Opcode() != Op_StoreCM) {
2390       // Looking at a dead closed cycle of memory?
2391       assert(st != st->in(MemNode::Memory), "dead loop in StoreNode::Ideal");
2392       assert(Opcode() == st->Opcode() ||
2393              st->Opcode() == Op_StoreVector ||
2394              Opcode() == Op_StoreVector ||
2395              phase->C->get_alias_index(adr_type()) == Compile::AliasIdxRaw ||
2396              (Opcode() == Op_StoreL && st->Opcode() == Op_StoreI), // expanded ClearArrayNode
2397              err_msg_res("no mismatched stores, except on raw memory: %s %s", NodeClassNames[Opcode()], NodeClassNames[st->Opcode()]));
2398 
2399       if (st->in(MemNode::Address)->eqv_uncast(address) &&
2400           st->as_Store()->memory_size() <= this->memory_size()) {
2401         Node* use = st->raw_out(0);
2402         phase->igvn_rehash_node_delayed(use);
2403         if (can_reshape) {
2404           use->set_req_X(MemNode::Memory, st->in(MemNode::Memory), phase->is_IterGVN());
2405         } else {
2406           // It's OK to do this in the parser, since DU info is always accurate,
2407           // and the parser always refers to nodes via SafePointNode maps.
2408           use->set_req(MemNode::Memory, st->in(MemNode::Memory));
2409         }
2410         return this;
2411       }
2412       st = st->in(MemNode::Memory);
2413     }
2414   }
2415 
2416 
2417   // Capture an unaliased, unconditional, simple store into an initializer.


3272             // store node that we'd like to capture. We need to check
3273             // the uses of the MergeMemNode.
3274             mems.push(n);
3275           }
3276         } else if (n->is_Mem()) {
3277           Node* other_adr = n->in(MemNode::Address);
3278           if (other_adr == adr) {
3279             failed = true;
3280             break;
3281           } else {
3282             const TypePtr* other_t_adr = phase->type(other_adr)->isa_ptr();
3283             if (other_t_adr != NULL) {
3284               int other_alias_idx = phase->C->get_alias_index(other_t_adr);
3285               if (other_alias_idx == alias_idx) {
3286                 // A load from the same memory slice as the store right
3287                 // after the InitializeNode. We check the control of the
3288                 // object/array that is loaded from. If it's the same as
3289                 // the store control then we cannot capture the store.
3290                 assert(!n->is_Store(), "2 stores to same slice on same control?");
3291                 Node* base = other_adr;
3292                 assert(base->is_AddP(), err_msg_res("should be addp but is %s", base->Name()));
3293                 base = base->in(AddPNode::Base);
3294                 if (base != NULL) {
3295                   base = base->uncast();
3296                   if (base->is_Proj() && base->in(0) == alloc) {
3297                     failed = true;
3298                     break;
3299                   }
3300                 }
3301               }
3302             }
3303           }
3304         } else {
3305           failed = true;
3306           break;
3307         }
3308       }
3309     }
3310   }
3311   if (failed) {
3312     if (!can_reshape) {




1122         int shift = exact_log2(type2aelembytes(T_OBJECT));
1123         int count = address->unpack_offsets(elements, ARRAY_SIZE(elements));
1124         if ((count >  0) && elements[0]->is_Con() &&
1125             ((count == 1) ||
1126              (count == 2) && elements[1]->Opcode() == Op_LShiftX &&
1127                              elements[1]->in(2) == phase->intcon(shift))) {
1128           ciObjArray* array = base_type->const_oop()->as_obj_array();
1129           // Fetch the box object cache[0] at the base of the array and get its value
1130           ciInstance* box = array->obj_at(0)->as_instance();
1131           ciInstanceKlass* ik = box->klass()->as_instance_klass();
1132           assert(ik->is_box_klass(), "sanity");
1133           assert(ik->nof_nonstatic_fields() == 1, "change following code");
1134           if (ik->nof_nonstatic_fields() == 1) {
1135             // This should be true; nonstatic_field_at requires calling
1136             // nof_nonstatic_fields, so check it anyway
1137             ciConstant c = box->field_value(ik->nonstatic_field_at(0));
1138             BasicType bt = c.basic_type();
1139             // Only integer types have boxing cache.
1140             assert(bt == T_BOOLEAN || bt == T_CHAR  ||
1141                    bt == T_BYTE    || bt == T_SHORT ||
1142                    bt == T_INT     || bt == T_LONG, "wrong type = %s", type2name(bt));
1143             jlong cache_low = (bt == T_LONG) ? c.as_long() : c.as_int();
1144             if (cache_low != (int)cache_low) {
1145               return NULL; // should not happen since cache is array indexed by value
1146             }
1147             jlong offset = arrayOopDesc::base_offset_in_bytes(T_OBJECT) - (cache_low << shift);
1148             if (offset != (int)offset) {
1149               return NULL; // should not happen since cache is array indexed by value
1150             }
1151            // Add up all the offsets making up the address of the load
1152             Node* result = elements[0];
1153             for (int i = 1; i < count; i++) {
1154               result = phase->transform(new AddXNode(result, elements[i]));
1155             }
1156             // Remove the constant offset from the address and then
1157             result = phase->transform(new AddXNode(result, phase->MakeConX(-(int)offset)));
1158             // remove the scaling of the offset to recover the original index.
1159             if (result->Opcode() == Op_LShiftX && result->in(2) == phase->intcon(shift)) {
1160               // Peel the shift off directly but wrap it in a dummy node
1161               // since Ideal can't return existing nodes
1162               result = new RShiftXNode(result->in(1), phase->intcon(0));


2377   // unsafe if I have intervening uses...  Also disallowed for StoreCM
2378   // since they must follow each StoreP operation.  Redundant StoreCMs
2379   // are eliminated just before matching in final_graph_reshape.
2380   {
2381     Node* st = mem;
2382     // If Store 'st' has more than one use, we cannot fold 'st' away.
2383     // For example, 'st' might be the final state at a conditional
2384     // return.  Or, 'st' might be used by some node which is live at
2385     // the same time 'st' is live, which might be unschedulable.  So,
2386     // require exactly ONE user until such time as we clone 'mem' for
2387     // each of 'mem's uses (thus making the exactly-1-user-rule hold
2388     // true).
2389     while (st->is_Store() && st->outcnt() == 1 && st->Opcode() != Op_StoreCM) {
2390       // Looking at a dead closed cycle of memory?
2391       assert(st != st->in(MemNode::Memory), "dead loop in StoreNode::Ideal");
2392       assert(Opcode() == st->Opcode() ||
2393              st->Opcode() == Op_StoreVector ||
2394              Opcode() == Op_StoreVector ||
2395              phase->C->get_alias_index(adr_type()) == Compile::AliasIdxRaw ||
2396              (Opcode() == Op_StoreL && st->Opcode() == Op_StoreI), // expanded ClearArrayNode
2397              "no mismatched stores, except on raw memory: %s %s", NodeClassNames[Opcode()], NodeClassNames[st->Opcode()]);
2398 
2399       if (st->in(MemNode::Address)->eqv_uncast(address) &&
2400           st->as_Store()->memory_size() <= this->memory_size()) {
2401         Node* use = st->raw_out(0);
2402         phase->igvn_rehash_node_delayed(use);
2403         if (can_reshape) {
2404           use->set_req_X(MemNode::Memory, st->in(MemNode::Memory), phase->is_IterGVN());
2405         } else {
2406           // It's OK to do this in the parser, since DU info is always accurate,
2407           // and the parser always refers to nodes via SafePointNode maps.
2408           use->set_req(MemNode::Memory, st->in(MemNode::Memory));
2409         }
2410         return this;
2411       }
2412       st = st->in(MemNode::Memory);
2413     }
2414   }
2415 
2416 
2417   // Capture an unaliased, unconditional, simple store into an initializer.


3272             // store node that we'd like to capture. We need to check
3273             // the uses of the MergeMemNode.
3274             mems.push(n);
3275           }
3276         } else if (n->is_Mem()) {
3277           Node* other_adr = n->in(MemNode::Address);
3278           if (other_adr == adr) {
3279             failed = true;
3280             break;
3281           } else {
3282             const TypePtr* other_t_adr = phase->type(other_adr)->isa_ptr();
3283             if (other_t_adr != NULL) {
3284               int other_alias_idx = phase->C->get_alias_index(other_t_adr);
3285               if (other_alias_idx == alias_idx) {
3286                 // A load from the same memory slice as the store right
3287                 // after the InitializeNode. We check the control of the
3288                 // object/array that is loaded from. If it's the same as
3289                 // the store control then we cannot capture the store.
3290                 assert(!n->is_Store(), "2 stores to same slice on same control?");
3291                 Node* base = other_adr;
3292                 assert(base->is_AddP(), "should be addp but is %s", base->Name());
3293                 base = base->in(AddPNode::Base);
3294                 if (base != NULL) {
3295                   base = base->uncast();
3296                   if (base->is_Proj() && base->in(0) == alloc) {
3297                     failed = true;
3298                     break;
3299                   }
3300                 }
3301               }
3302             }
3303           }
3304         } else {
3305           failed = true;
3306           break;
3307         }
3308       }
3309     }
3310   }
3311   if (failed) {
3312     if (!can_reshape) {


< prev index next >