
src/share/vm/opto/memnode.cpp

*** 1,7 ****
  /*
! * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
  * published by the Free Software Foundation.
--- 1,7 ----
  /*
! * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
  * published by the Free Software Foundation.
*** 201,214 ****
  if (is_instance && igvn != NULL && result->is_Phi()) {
    PhiNode *mphi = result->as_Phi();
    assert(mphi->bottom_type() == Type::MEMORY, "memory phi required");
    const TypePtr *t = mphi->adr_type();
    if (t == TypePtr::BOTTOM || t == TypeRawPtr::BOTTOM ||
!       t->isa_oopptr() && !t->is_oopptr()->is_known_instance() &&
        t->is_oopptr()->cast_to_exactness(true)
          ->is_oopptr()->cast_to_ptr_type(t_oop->ptr())
!         ->is_oopptr()->cast_to_instance_id(t_oop->instance_id()) == t_oop) {
      // clone the Phi with our address type
      result = mphi->split_out_instance(t_adr, igvn);
    } else {
      assert(phase->C->get_alias_index(t) == phase->C->get_alias_index(t_adr), "correct memory chain");
    }
--- 201,214 ----
  if (is_instance && igvn != NULL && result->is_Phi()) {
    PhiNode *mphi = result->as_Phi();
    assert(mphi->bottom_type() == Type::MEMORY, "memory phi required");
    const TypePtr *t = mphi->adr_type();
    if (t == TypePtr::BOTTOM || t == TypeRawPtr::BOTTOM ||
!       (t->isa_oopptr() && !t->is_oopptr()->is_known_instance() &&
        t->is_oopptr()->cast_to_exactness(true)
          ->is_oopptr()->cast_to_ptr_type(t_oop->ptr())
!         ->is_oopptr()->cast_to_instance_id(t_oop->instance_id()) == t_oop)) {
      // clone the Phi with our address type
      result = mphi->split_out_instance(t_adr, igvn);
    } else {
      assert(phase->C->get_alias_index(t) == phase->C->get_alias_index(t_adr), "correct memory chain");
    }
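This hunk, like most of the ones below, only adds parentheses around an && that sits inside an ||: in C++, && already binds tighter than ||, so the grouping is unchanged and the rewrite is purely to make the precedence explicit (and, presumably, to quiet GCC's -Wparentheses warning). A minimal sketch of the equivalence, with hypothetical variables not taken from memnode.cpp:

    // With -Wparentheses, GCC suggests parentheses around '&&' within '||';
    // '&&' binds tighter than '||', so both locals compute the same value.
    static bool pick(bool a, bool b, bool c) {
      bool loose  = a || b && c;     // parsed as: a || (b && c)
      bool strict = a || (b && c);   // identical value, grouping now explicit
      return loose == strict;        // always true
    }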
*** 317,327 ****
  const Type *t_adr = phase->type(address);
  if (t_adr == Type::TOP)              return NodeSentinel; // caller will return NULL

  if (can_reshape && igvn != NULL &&
      (igvn->_worklist.member(address) ||
!      igvn->_worklist.size() > 0 && (t_adr != adr_type())) ) {
    // The address's base and type may change when the address is processed.
    // Delay this mem node transformation until the address is processed.
    phase->is_IterGVN()->_worklist.push(this);
    return NodeSentinel; // caller will return NULL
  }
--- 317,327 ----
  const Type *t_adr = phase->type(address);
  if (t_adr == Type::TOP)              return NodeSentinel; // caller will return NULL

  if (can_reshape && igvn != NULL &&
      (igvn->_worklist.member(address) ||
!      (igvn->_worklist.size() > 0 && t_adr != adr_type())) ) {
    // The address's base and type may change when the address is processed.
    // Delay this mem node transformation until the address is processed.
    phase->is_IterGVN()->_worklist.push(this);
    return NodeSentinel; // caller will return NULL
  }
*** 817,826 ****
--- 817,829 ----
      {
        assert(!adr->bottom_type()->is_ptr_to_narrowoop() && !adr->bottom_type()->is_ptr_to_narrowklass(), "should have got back a narrow oop");
        load = new LoadPNode(ctl, mem, adr, adr_type, rt->is_ptr(), mo, control_dependency);
      }
      break;
+   default:
+     // ShouldNotReachHere(); ???
+     break;
    }
    assert(load != NULL, "LoadNode should have been created");
    if (unaligned) {
      load->set_unaligned_access();
    }
*** 1199,1209 ****
    // Push the loads from the phi that comes from valueOf up
    // through it to allow elimination of the loads and the recovery
    // of the original value. It is done in split_through_phi().
    return NULL;
  } else if (base->is_Load() ||
!            base->is_DecodeN() && base->in(1)->is_Load()) {
    // Eliminate the load of boxed value for integer types from the cache
    // array by deriving the value from the index into the array.
    // Capture the offset of the load and then reverse the computation.

    // Get LoadN node which loads a boxing object from 'cache' array.
--- 1202,1212 ----
    // Push the loads from the phi that comes from valueOf up
    // through it to allow elimination of the loads and the recovery
    // of the original value. It is done in split_through_phi().
    return NULL;
  } else if (base->is_Load() ||
!            (base->is_DecodeN() && base->in(1)->is_Load())) {
    // Eliminate the load of boxed value for integer types from the cache
    // array by deriving the value from the index into the array.
    // Capture the offset of the load and then reverse the computation.

    // Get LoadN node which loads a boxing object from 'cache' array.
*** 1223,1236 ****
    const TypeAryPtr* base_type = cache_base->bottom_type()->isa_aryptr();
    if ((base_type != NULL) && base_type->is_autobox_cache()) {
      Node* elements[4];
      int shift = exact_log2(type2aelembytes(T_OBJECT));
      int count = address->unpack_offsets(elements, ARRAY_SIZE(elements));
!     if ((count > 0) && elements[0]->is_Con() &&
!         ((count == 1) ||
!          (count == 2) && elements[1]->Opcode() == Op_LShiftX &&
!          elements[1]->in(2) == phase->intcon(shift))) {
        ciObjArray* array = base_type->const_oop()->as_obj_array();
        // Fetch the box object cache[0] at the base of the array and get its value
        ciInstance* box = array->obj_at(0)->as_instance();
        ciInstanceKlass* ik = box->klass()->as_instance_klass();
        assert(ik->is_box_klass(), "sanity");
--- 1226,1239 ----
    const TypeAryPtr* base_type = cache_base->bottom_type()->isa_aryptr();
    if ((base_type != NULL) && base_type->is_autobox_cache()) {
      Node* elements[4];
      int shift = exact_log2(type2aelembytes(T_OBJECT));
      int count = address->unpack_offsets(elements, ARRAY_SIZE(elements));
!     if (count > 0 && elements[0]->is_Con() &&
!         (count == 1 ||
!          (count == 2 && elements[1]->Opcode() == Op_LShiftX &&
!           elements[1]->in(2) == phase->intcon(shift)))) {
        ciObjArray* array = base_type->const_oop()->as_obj_array();
        // Fetch the box object cache[0] at the base of the array and get its value
        ciInstance* box = array->obj_at(0)->as_instance();
        ciInstanceKlass* ik = box->klass()->as_instance_klass();
        assert(ik->is_box_klass(), "sanity");
*** 2360,2372 ****
    }
  #endif
    {
      return new StorePNode(ctl, mem, adr, adr_type, val, mo);
    }
! }
  ShouldNotReachHere();
  return (StoreNode*)NULL;
  }

  StoreLNode* StoreLNode::make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo) {
    bool require_atomic = true;
    return new StoreLNode(ctl, mem, adr, adr_type, val, mo, require_atomic);
--- 2363,2376 ----
    }
  #endif
    {
      return new StorePNode(ctl, mem, adr, adr_type, val, mo);
    }
! default:
  ShouldNotReachHere();
  return (StoreNode*)NULL;
+ }
  }

  StoreLNode* StoreLNode::make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo) {
    bool require_atomic = true;
    return new StoreLNode(ctl, mem, adr, adr_type, val, mo, require_atomic);
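Here the unreachable tail of StoreNode::make moves under an explicit default: label and the switch gains its missing closing brace, so the fatal ShouldNotReachHere() becomes the switch's fall-through arm instead of dead code after it. The same shape in a self-contained sketch, with a hypothetical enum and names (assuming the goal is an exhaustive switch where every path either returns or aborts):

    #include <cstdlib>

    enum Kind { kInt, kLong, kPtr };   // hypothetical stand-in for BasicType

    static int slot_bytes(Kind k) {
      switch (k) {
      case kInt:  return 4;
      case kLong: return 8;
      case kPtr:  return (int)sizeof(void*);
      default:                         // analogous to ShouldNotReachHere()
        std::abort();                  // fail fast; no path falls off the end
      }
    }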
*** 4302,4313 ****
  // Resolve external cycles by calling Ideal on a MergeMem base_memory
  // Recursion must occur after the self cycle check above
  if( base_memory()->is_MergeMem() ) {
    MergeMemNode *new_mbase = base_memory()->as_MergeMem();
    Node *m = phase->transform(new_mbase);  // Rollup any cycles
!   if( m != NULL && (m->is_top() ||
!       m->is_MergeMem() && m->as_MergeMem()->base_memory() == empty_mem) ) {
      // propagate rollup of dead cycle to self
      set_req(Compile::AliasIdxBot, empty_mem);
    }
  }

--- 4306,4318 ----
  // Resolve external cycles by calling Ideal on a MergeMem base_memory
  // Recursion must occur after the self cycle check above
  if( base_memory()->is_MergeMem() ) {
    MergeMemNode *new_mbase = base_memory()->as_MergeMem();
    Node *m = phase->transform(new_mbase);  // Rollup any cycles
!   if( m != NULL &&
!       (m->is_top() ||
!        (m->is_MergeMem() && m->as_MergeMem()->base_memory() == empty_mem)) ) {
      // propagate rollup of dead cycle to self
      set_req(Compile::AliasIdxBot, empty_mem);
    }
  }