890 if( !Verbose && !WizardMode ) {
891 // standard dump does this in Verbose and WizardMode
892 st->print(" #"); _type->dump_on(st);
893 }
894 }
895 #endif
896
897 #ifdef ASSERT
898 //----------------------------is_immutable_value-------------------------------
899 // Helper function to allow a raw load without control edge for some cases
900 bool LoadNode::is_immutable_value(Node* adr) {
901 return (adr->is_AddP() && adr->in(AddPNode::Base)->is_top() &&
902 adr->in(AddPNode::Address)->Opcode() == Op_ThreadLocal &&
903 (adr->in(AddPNode::Offset)->find_intptr_t_con(-1) ==
904 in_bytes(JavaThread::osthread_offset())));
905 }
906 #endif
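// The address shape accepted above is, in ideal-graph terms:
//   AddP(top, ThreadLocal, #in_bytes(JavaThread::osthread_offset()))
// i.e. a raw load of the current thread's OSThread*, a field that is set once
// at thread start and never changes, so the load is safe to float without a
// control edge. (Sketch inferred from the checks in the function itself.)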
907
908 //----------------------------LoadNode::make-----------------------------------
909 // Polymorphic factory method:
910 Node *LoadNode::make( PhaseGVN& gvn, Node *ctl, Node *mem, Node *adr, const TypePtr* adr_type, const Type *rt, BasicType bt ) {
911 Compile* C = gvn.C;
912
913 // sanity check the alias category against the created node type
914 assert(!(adr_type->isa_oopptr() &&
915 adr_type->offset() == oopDesc::klass_offset_in_bytes()),
916 "use LoadKlassNode instead");
917 assert(!(adr_type->isa_aryptr() &&
918 adr_type->offset() == arrayOopDesc::length_offset_in_bytes()),
919 "use LoadRangeNode instead");
920 // Check control edge of raw loads
921 assert( ctl != NULL || C->get_alias_index(adr_type) != Compile::AliasIdxRaw ||
922 // oop will be recorded in oop map if load crosses safepoint
923 rt->isa_oopptr() || is_immutable_value(adr),
924 "raw memory operations should have control edge");
925 switch (bt) {
926 case T_BOOLEAN: return new (C) LoadUBNode(ctl, mem, adr, adr_type, rt->is_int() );
927 case T_BYTE: return new (C) LoadBNode (ctl, mem, adr, adr_type, rt->is_int() );
928 case T_INT: return new (C) LoadINode (ctl, mem, adr, adr_type, rt->is_int() );
929 case T_CHAR: return new (C) LoadUSNode(ctl, mem, adr, adr_type, rt->is_int() );
930 case T_SHORT: return new (C) LoadSNode (ctl, mem, adr, adr_type, rt->is_int() );
931 case T_LONG: return new (C) LoadLNode (ctl, mem, adr, adr_type, rt->is_long() );
932 case T_FLOAT: return new (C) LoadFNode (ctl, mem, adr, adr_type, rt );
933 case T_DOUBLE: return new (C) LoadDNode (ctl, mem, adr, adr_type, rt );
934 case T_ADDRESS: return new (C) LoadPNode (ctl, mem, adr, adr_type, rt->is_ptr() );
935 case T_OBJECT:
936 #ifdef _LP64
937 if (adr->bottom_type()->is_ptr_to_narrowoop()) {
938 Node* load = gvn.transform(new (C) LoadNNode(ctl, mem, adr, adr_type, rt->make_narrowoop()));
939 return new (C) DecodeNNode(load, load->bottom_type()->make_ptr());
940 } else
941 #endif
942 {
943 assert(!adr->bottom_type()->is_ptr_to_narrowoop() && !adr->bottom_type()->is_ptr_to_narrowklass(), "should have got back a narrow oop");
944 return new (C) LoadPNode(ctl, mem, adr, adr_type, rt->is_oopptr());
945 }
946 }
947 ShouldNotReachHere();
948 return (LoadNode*)NULL;
949 }
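// A minimal usage sketch (hypothetical caller; the names are illustrative,
// not from this file): a GraphKit-style client would typically write
//   Node* ld = LoadNode::make(gvn, ctl, mem, adr, adr_type,
//                             TypeInt::INT, T_INT);
//   ld = gvn.transform(ld);
// relying on the T_OBJECT case above to insert DecodeN for narrow oops.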
950
951 LoadLNode* LoadLNode::make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt) {
952 bool require_atomic = true;
953 return new (C) LoadLNode(ctl, mem, adr, adr_type, rt->is_long(), require_atomic);
954 }
955
956
957
958
959 //------------------------------hash-------------------------------------------
960 uint LoadNode::hash() const {
961 // unroll addition of interesting fields
962 return (uintptr_t)in(Control) + (uintptr_t)in(Memory) + (uintptr_t)in(Address);
963 }
964
965 static bool skip_through_membars(Compile::AliasType* atp, const TypeInstPtr* tp, bool eliminate_boxing) {
966 if ((atp != NULL) && (atp->index() >= Compile::AliasIdxRaw)) {
967 bool non_volatile = (atp->field() != NULL) && !atp->field()->is_volatile();
968 bool is_stable_ary = FoldStableValues &&
969 (tp != NULL) && (tp->isa_aryptr() != NULL) &&
970 tp->isa_aryptr()->is_stable();
971
972 return (eliminate_boxing && non_volatile) || is_stable_ary;
973 }
2015 // it must be truncated. We can't delay until Ideal call since
2016 // a singleton Value is needed for split_thru_phi optimization.
2017 int con = value->get_int();
2018 return TypeInt::make((con << 16) >> 16);
2019 }
2020 return LoadNode::Value(phase);
2021 }
2022
2023 //=============================================================================
2024 //----------------------------LoadKlassNode::make------------------------------
2025 // Polymorphic factory method:
2026 Node *LoadKlassNode::make( PhaseGVN& gvn, Node *mem, Node *adr, const TypePtr* at, const TypeKlassPtr *tk ) {
2027 Compile* C = gvn.C;
2028 Node *ctl = NULL;
2029 // sanity check the alias category against the created node type
2030 const TypePtr *adr_type = adr->bottom_type()->isa_ptr();
2031 assert(adr_type != NULL, "expecting TypeKlassPtr");
2032 #ifdef _LP64
2033 if (adr_type->is_ptr_to_narrowklass()) {
2034 assert(UseCompressedClassPointers, "no compressed klasses");
2035 Node* load_klass = gvn.transform(new (C) LoadNKlassNode(ctl, mem, adr, at, tk->make_narrowklass()));
2036 return new (C) DecodeNKlassNode(load_klass, load_klass->bottom_type()->make_ptr());
2037 }
2038 #endif
2039 assert(!adr_type->is_ptr_to_narrowklass() && !adr_type->is_ptr_to_narrowoop(), "should have got back a narrow oop");
2040 return new (C) LoadKlassNode(ctl, mem, adr, at, tk);
2041 }
2042
2043 //------------------------------Value------------------------------------------
2044 const Type *LoadKlassNode::Value( PhaseTransform *phase ) const {
2045 return klass_value_common(phase);
2046 }
2047
2048 const Type *LoadNode::klass_value_common( PhaseTransform *phase ) const {
2049 // Either input is TOP ==> the result is TOP
2050 const Type *t1 = phase->type( in(MemNode::Memory) );
2051 if (t1 == Type::TOP) return Type::TOP;
2052 Node *adr = in(MemNode::Address);
2053 const Type *t2 = phase->type( adr );
2054 if (t2 == Type::TOP) return Type::TOP;
2055 const TypePtr *tp = t2->is_ptr();
2056 if (TypePtr::above_centerline(tp->ptr()) ||
2057 tp->ptr() == TypePtr::Null) return Type::TOP;
2058
2059 // Return a more precise klass, if possible
2060 const TypeInstPtr *tinst = tp->isa_instptr();
2330 if (offset == arrayOopDesc::length_offset_in_bytes()) {
2331 AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(base, phase);
2332 if (alloc != NULL) {
2333 Node* allocated_length = alloc->Ideal_length();
2334 // Do not allow make_ideal_length to allocate a CastII node.
2335 Node* len = alloc->make_ideal_length(tary, phase, false);
2336 if (allocated_length == len) {
2337 // Return allocated_length only if it would not be improved by a CastII.
2338 return allocated_length;
2339 }
2340 }
2341 }
2342
2343 return this;
2344
2345 }
2346
2347 //=============================================================================
2348 //---------------------------StoreNode::make-----------------------------------
2349 // Polymorphic factory method:
2350 StoreNode* StoreNode::make( PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, BasicType bt ) {
2351 Compile* C = gvn.C;
2352 assert( C->get_alias_index(adr_type) != Compile::AliasIdxRaw ||
2353 ctl != NULL, "raw memory operations should have control edge");
2354
2355 switch (bt) {
2356 case T_BOOLEAN:
2357 case T_BYTE: return new (C) StoreBNode(ctl, mem, adr, adr_type, val);
2358 case T_INT: return new (C) StoreINode(ctl, mem, adr, adr_type, val);
2359 case T_CHAR:
2360 case T_SHORT: return new (C) StoreCNode(ctl, mem, adr, adr_type, val);
2361 case T_LONG: return new (C) StoreLNode(ctl, mem, adr, adr_type, val);
2362 case T_FLOAT: return new (C) StoreFNode(ctl, mem, adr, adr_type, val);
2363 case T_DOUBLE: return new (C) StoreDNode(ctl, mem, adr, adr_type, val);
2364 case T_METADATA:
2365 case T_ADDRESS:
2366 case T_OBJECT:
2367 #ifdef _LP64
2368 if (adr->bottom_type()->is_ptr_to_narrowoop()) {
2369 val = gvn.transform(new (C) EncodePNode(val, val->bottom_type()->make_narrowoop()));
2370 return new (C) StoreNNode(ctl, mem, adr, adr_type, val);
2371 } else if (adr->bottom_type()->is_ptr_to_narrowklass() ||
2372 (UseCompressedClassPointers && val->bottom_type()->isa_klassptr() &&
2373 adr->bottom_type()->isa_rawptr())) {
2374 val = gvn.transform(new (C) EncodePKlassNode(val, val->bottom_type()->make_narrowklass()));
2375 return new (C) StoreNKlassNode(ctl, mem, adr, adr_type, val);
2376 }
2377 #endif
2378 {
2379 return new (C) StorePNode(ctl, mem, adr, adr_type, val);
2380 }
2381 }
2382 ShouldNotReachHere();
2383 return (StoreNode*)NULL;
2384 }
2385
2386 StoreLNode* StoreLNode::make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val) {
2387 bool require_atomic = true;
2388 return new (C) StoreLNode(ctl, mem, adr, adr_type, val, require_atomic);
2389 }
2390
2391
2392 //--------------------------bottom_type----------------------------------------
2393 const Type *StoreNode::bottom_type() const {
2394 return Type::MEMORY;
2395 }
2396
2397 //------------------------------hash-------------------------------------------
2398 uint StoreNode::hash() const {
2399 // unroll addition of interesting fields
2400 //return (uintptr_t)in(Control) + (uintptr_t)in(Memory) + (uintptr_t)in(Address) + (uintptr_t)in(ValueIn);
2401
2402 // Since they are not commoned, do not hash them:
2403 return NO_HASH;
2404 }
2405
2406 //------------------------------Ideal------------------------------------------
2407 // Change back-to-back stores Store(Store(m, p, y), p, x) into Store(m, p, x).
2408 // When a store immediately follows a relevant allocation/initialization,
2761 // (see jck test stmt114.stmt11402.val).
2762 if (size <= 0 || size % unit != 0) return NULL;
2763 intptr_t count = size / unit;
2764 // Length too long; use fast hardware clear
2765 if (size > Matcher::init_array_short_size) return NULL;
2766 Node *mem = in(1);
2767 if( phase->type(mem)==Type::TOP ) return NULL;
2768 Node *adr = in(3);
2769 const Type* at = phase->type(adr);
2770 if( at==Type::TOP ) return NULL;
2771 const TypePtr* atp = at->isa_ptr();
2772 // adjust atp to be the correct array element address type
2773 if (atp == NULL) atp = TypePtr::BOTTOM;
2774 else atp = atp->add_offset(Type::OffsetBot);
2775 // Get base for derived pointer purposes
2776 if( adr->Opcode() != Op_AddP ) Unimplemented();
2777 Node *base = adr->in(1);
2778
2779 Node *zero = phase->makecon(TypeLong::ZERO);
2780 Node *off = phase->MakeConX(BytesPerLong);
2781 mem = new (phase->C) StoreLNode(in(0),mem,adr,atp,zero);
2782 count--;
2783 while( count-- ) {
2784 mem = phase->transform(mem);
2785 adr = phase->transform(new (phase->C) AddPNode(base,adr,off));
2786 mem = new (phase->C) StoreLNode(in(0),mem,adr,atp,zero);
2787 }
2788 return mem;
2789 }
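// For example, with unit == BytesPerLong == 8, a 32-byte clear (count == 4)
// unrolls into a chain of four StoreL-of-zero nodes:
//   mem -> StoreL(adr+0, 0) -> StoreL(adr+8, 0)
//       -> StoreL(adr+16, 0) -> StoreL(adr+24, 0)
// Anything larger than Matcher::init_array_short_size was already rejected
// above in favor of the fast hardware clear.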
2790
2791 //----------------------------step_through----------------------------------
2792 // Advance *np to the allocation's incoming memory edge and return true if
2793 // this ClearArray covers a different instance; return false if it is ours.
2794 bool ClearArrayNode::step_through(Node** np, uint instance_id, PhaseTransform* phase) {
2795 Node* n = *np;
2796 assert(n->is_ClearArray(), "sanity");
2797 intptr_t offset;
2798 AllocateNode* alloc = AllocateNode::Ideal_allocation(n->in(3), phase, offset);
2799 // This method is called only before Allocate nodes are expanded during
2800 // macro nodes expansion. Before that ClearArray nodes are only generated
2801 // in LibraryCallKit::generate_arraycopy() which follows allocations.
2802 assert(alloc != NULL, "should have allocation");
2803 if (alloc->_idx == instance_id) {
2804 // Can not bypass initialization of the instance we are looking for.
2805 return false;
2806 }
2810 *np = init->in(TypeFunc::Memory);
2811 else
2812 *np = alloc->in(TypeFunc::Memory);
2813 return true;
2814 }
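// A hedged usage sketch (loop and variable names are illustrative): callers
// walk a memory chain and bypass unrelated ClearArrays like
//   Node* mem = start;
//   while (mem->is_ClearArray() &&
//          ClearArrayNode::step_through(&mem, instance_id, phase)) {
//     // mem now points at the allocation's incoming memory state
//   }
// stopping as soon as step_through returns false (our own initialization).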
2815
2816 //----------------------------clear_memory-------------------------------------
2817 // Generate code to initialize object storage to zero.
2818 Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
2819 intptr_t start_offset,
2820 Node* end_offset,
2821 PhaseGVN* phase) {
2822 Compile* C = phase->C;
2823 intptr_t offset = start_offset;
2824
2825 int unit = BytesPerLong;
2826 if ((offset % unit) != 0) {
2827 Node* adr = new (C) AddPNode(dest, dest, phase->MakeConX(offset));
2828 adr = phase->transform(adr);
2829 const TypePtr* atp = TypeRawPtr::BOTTOM;
2830 mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT);
2831 mem = phase->transform(mem);
2832 offset += BytesPerInt;
2833 }
2834 assert((offset % unit) == 0, "");
2835
2836 // Initialize the remaining stuff, if any, with a ClearArray.
2837 return clear_memory(ctl, mem, dest, phase->MakeConX(offset), end_offset, phase);
2838 }
2839
2840 Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
2841 Node* start_offset,
2842 Node* end_offset,
2843 PhaseGVN* phase) {
2844 if (start_offset == end_offset) {
2845 // nothing to do
2846 return mem;
2847 }
2848
2849 Compile* C = phase->C;
2850 int unit = BytesPerLong;
2871 PhaseGVN* phase) {
2872 if (start_offset == end_offset) {
2873 // nothing to do
2874 return mem;
2875 }
2876
2877 Compile* C = phase->C;
2878 assert((end_offset % BytesPerInt) == 0, "odd end offset");
2879 intptr_t done_offset = end_offset;
2880 if ((done_offset % BytesPerLong) != 0) {
2881 done_offset -= BytesPerInt;
2882 }
2883 if (done_offset > start_offset) {
2884 mem = clear_memory(ctl, mem, dest,
2885 start_offset, phase->MakeConX(done_offset), phase);
2886 }
2887 if (done_offset < end_offset) { // emit the final 32-bit store
2888 Node* adr = new (C) AddPNode(dest, dest, phase->MakeConX(done_offset));
2889 adr = phase->transform(adr);
2890 const TypePtr* atp = TypeRawPtr::BOTTOM;
2891 mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT);
2892 mem = phase->transform(mem);
2893 done_offset += BytesPerInt;
2894 }
2895 assert(done_offset == end_offset, "");
2896 return mem;
2897 }
2898
2899 //=============================================================================
2900 // Do not match memory edge.
2901 uint StrIntrinsicNode::match_edge(uint idx) const {
2902 return idx == 2 || idx == 3;
2903 }
2904
2905 //------------------------------Ideal------------------------------------------
2906 // Return a node which is more "ideal" than the current node. Strip out
2907 // control copies
2908 Node *StrIntrinsicNode::Ideal(PhaseGVN *phase, bool can_reshape) {
2909 if (remove_dead_region(phase, can_reshape)) return this;
2910 // Don't bother trying to transform a dead node
2911 if (in(0) && in(0)->is_top()) return NULL;
3745
3746 // Here's a case where init0 is neither 0 nor -1:
3747 // byte a[] = { ... 0,0,foo(),0, 0,0,0,42 ... }
3748 // Assuming big-endian memory, init0, init1 are 0x0000FF00, 0x000000FF.
3749 // In this case the tile is not split; it is (jlong)42.
3750 // The big tile is stored down, and then the foo() value is inserted.
3751 // (If there were foo(),foo() instead of foo(),0, init0 would be -1.)
3752
3753 Node* ctl = old->in(MemNode::Control);
3754 Node* adr = make_raw_address(offset, phase);
3755 const TypePtr* atp = TypeRawPtr::BOTTOM;
3756
3757 // One or two coalesced stores to plop down.
3758 Node* st[2];
3759 intptr_t off[2];
3760 int nst = 0;
3761 if (!split) {
3762 ++new_long;
3763 off[nst] = offset;
3764 st[nst++] = StoreNode::make(*phase, ctl, zmem, adr, atp,
3765 phase->longcon(con), T_LONG);
3766 } else {
3767 // Omit either if it is a zero.
3768 if (con0 != 0) {
3769 ++new_int;
3770 off[nst] = offset;
3771 st[nst++] = StoreNode::make(*phase, ctl, zmem, adr, atp,
3772 phase->intcon(con0), T_INT);
3773 }
3774 if (con1 != 0) {
3775 ++new_int;
3776 offset += BytesPerInt;
3777 adr = make_raw_address(offset, phase);
3778 off[nst] = offset;
3779 st[nst++] = StoreNode::make(*phase, ctl, zmem, adr, atp,
3780 phase->intcon(con1), T_INT);
3781 }
3782 }
3783
3784 // Insert second store first, then the first before the second.
3785 // Insert each one just before any overlapping non-constant stores.
3786 while (nst > 0) {
3787 Node* st1 = st[--nst];
3788 C->copy_node_notes_to(st1, old);
3789 st1 = phase->transform(st1);
3790 offset = off[nst];
3791 assert(offset >= header_size, "do not smash header");
3792 int ins_idx = captured_store_insertion_point(offset, /*size:*/0, phase);
3793 guarantee(ins_idx != 0, "must re-insert constant store");
3794 if (ins_idx < 0) ins_idx = -ins_idx; // never overlap
3795 if (ins_idx > InitializeNode::RawStores && in(ins_idx-1) == zmem)
3796 set_req(--ins_idx, st1);
3797 else
3798 ins_req(ins_idx, st1);
3799 }
3800 }
890 if( !Verbose && !WizardMode ) {
891 // standard dump does this in Verbose and WizardMode
892 st->print(" #"); _type->dump_on(st);
893 }
894 }
895 #endif
896
897 #ifdef ASSERT
898 //----------------------------is_immutable_value-------------------------------
899 // Helper function to allow a raw load without control edge for some cases
900 bool LoadNode::is_immutable_value(Node* adr) {
901 return (adr->is_AddP() && adr->in(AddPNode::Base)->is_top() &&
902 adr->in(AddPNode::Address)->Opcode() == Op_ThreadLocal &&
903 (adr->in(AddPNode::Offset)->find_intptr_t_con(-1) ==
904 in_bytes(JavaThread::osthread_offset())));
905 }
906 #endif
907
908 //----------------------------LoadNode::make-----------------------------------
909 // Polymorphic factory method:
910 Node *LoadNode::make(PhaseGVN& gvn, Node *ctl, Node *mem, Node *adr, const TypePtr* adr_type, const Type *rt, BasicType bt, Sem sem) {
911 Compile* C = gvn.C;
912
913 // sanity check the alias category against the created node type
914 assert(!(adr_type->isa_oopptr() &&
915 adr_type->offset() == oopDesc::klass_offset_in_bytes()),
916 "use LoadKlassNode instead");
917 assert(!(adr_type->isa_aryptr() &&
918 adr_type->offset() == arrayOopDesc::length_offset_in_bytes()),
919 "use LoadRangeNode instead");
920 // Check control edge of raw loads
921 assert( ctl != NULL || C->get_alias_index(adr_type) != Compile::AliasIdxRaw ||
922 // oop will be recorded in oop map if load crosses safepoint
923 rt->isa_oopptr() || is_immutable_value(adr),
924 "raw memory operations should have control edge");
925 switch (bt) {
926 case T_BOOLEAN: return new (C) LoadUBNode(ctl, mem, adr, adr_type, rt->is_int(), sem);
927 case T_BYTE: return new (C) LoadBNode (ctl, mem, adr, adr_type, rt->is_int(), sem);
928 case T_INT: return new (C) LoadINode (ctl, mem, adr, adr_type, rt->is_int(), sem);
929 case T_CHAR: return new (C) LoadUSNode(ctl, mem, adr, adr_type, rt->is_int(), sem);
930 case T_SHORT: return new (C) LoadSNode (ctl, mem, adr, adr_type, rt->is_int(), sem);
931 case T_LONG: return new (C) LoadLNode (ctl, mem, adr, adr_type, rt->is_long(), false, sem);
932 case T_FLOAT: return new (C) LoadFNode (ctl, mem, adr, adr_type, rt, sem);
933 case T_DOUBLE: return new (C) LoadDNode (ctl, mem, adr, adr_type, rt, sem);
934 case T_ADDRESS: return new (C) LoadPNode (ctl, mem, adr, adr_type, rt->is_ptr(), sem);
935 case T_OBJECT:
936 #ifdef _LP64
937 if (adr->bottom_type()->is_ptr_to_narrowoop()) {
938 Node* load = gvn.transform(new (C) LoadNNode(ctl, mem, adr, adr_type, rt->make_narrowoop(), sem));
939 return new (C) DecodeNNode(load, load->bottom_type()->make_ptr());
940 } else
941 #endif
942 {
943 assert(!adr->bottom_type()->is_ptr_to_narrowoop() && !adr->bottom_type()->is_ptr_to_narrowklass(), "should have got back a narrow oop");
944 return new (C) LoadPNode(ctl, mem, adr, adr_type, rt->is_oopptr(), sem);
945 }
946 }
947 ShouldNotReachHere();
948 return (LoadNode*)NULL;
949 }
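// With the added Sem parameter, an ordinary (non-volatile) load is created as
// (sketch; caller names are illustrative, not from this file):
//   Node* ld = LoadNode::make(gvn, ctl, mem, adr, adr_type,
//                             TypeInt::INT, T_INT, LoadNode::unordered);
// A volatile load would pass a stronger Sem value instead (the acquiring
// variant is assumed here; it is not shown in this excerpt).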
950
951 LoadLNode* LoadLNode::make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt, Sem sem) {
952 bool require_atomic = true;
953 return new (C) LoadLNode(ctl, mem, adr, adr_type, rt->is_long(), require_atomic, sem);
954 }
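// make_atomic exists so callers can request a single indivisible 64-bit load,
// e.g. for Java volatile longs on 32-bit platforms where a plain LoadL could
// legally be split into two 32-bit halves. (Rationale inferred from the
// require_atomic flag; it is not stated in this excerpt.)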
955
956
957
958
959 //------------------------------hash-------------------------------------------
960 uint LoadNode::hash() const {
961 // unroll addition of interesting fields
962 return (uintptr_t)in(Control) + (uintptr_t)in(Memory) + (uintptr_t)in(Address);
963 }
964
965 static bool skip_through_membars(Compile::AliasType* atp, const TypeInstPtr* tp, bool eliminate_boxing) {
966 if ((atp != NULL) && (atp->index() >= Compile::AliasIdxRaw)) {
967 bool non_volatile = (atp->field() != NULL) && !atp->field()->is_volatile();
968 bool is_stable_ary = FoldStableValues &&
969 (tp != NULL) && (tp->isa_aryptr() != NULL) &&
970 tp->isa_aryptr()->is_stable();
971
972 return (eliminate_boxing && non_volatile) || is_stable_ary;
973 }
2015 // it must be truncated. We can't delay until Ideal call since
2016 // a singleton Value is needed for split_thru_phi optimization.
2017 int con = value->get_int();
2018 return TypeInt::make((con << 16) >> 16);
2019 }
2020 return LoadNode::Value(phase);
2021 }
2022
2023 //=============================================================================
2024 //----------------------------LoadKlassNode::make------------------------------
2025 // Polymorphic factory method:
2026 Node *LoadKlassNode::make( PhaseGVN& gvn, Node *mem, Node *adr, const TypePtr* at, const TypeKlassPtr *tk ) {
2027 Compile* C = gvn.C;
2028 Node *ctl = NULL;
2029 // sanity check the alias category against the created node type
2030 const TypePtr *adr_type = adr->bottom_type()->isa_ptr();
2031 assert(adr_type != NULL, "expecting TypeKlassPtr");
2032 #ifdef _LP64
2033 if (adr_type->is_ptr_to_narrowklass()) {
2034 assert(UseCompressedClassPointers, "no compressed klasses");
2035 Node* load_klass = gvn.transform(new (C) LoadNKlassNode(ctl, mem, adr, at, tk->make_narrowklass(), LoadNode::unordered));
2036 return new (C) DecodeNKlassNode(load_klass, load_klass->bottom_type()->make_ptr());
2037 }
2038 #endif
2039 assert(!adr_type->is_ptr_to_narrowklass() && !adr_type->is_ptr_to_narrowoop(), "should have got back a narrow oop");
2040 return new (C) LoadKlassNode(ctl, mem, adr, at, tk, LoadNode::unordered);
2041 }
2042
2043 //------------------------------Value------------------------------------------
2044 const Type *LoadKlassNode::Value( PhaseTransform *phase ) const {
2045 return klass_value_common(phase);
2046 }
2047
2048 const Type *LoadNode::klass_value_common( PhaseTransform *phase ) const {
2049 // Either input is TOP ==> the result is TOP
2050 const Type *t1 = phase->type( in(MemNode::Memory) );
2051 if (t1 == Type::TOP) return Type::TOP;
2052 Node *adr = in(MemNode::Address);
2053 const Type *t2 = phase->type( adr );
2054 if (t2 == Type::TOP) return Type::TOP;
2055 const TypePtr *tp = t2->is_ptr();
2056 if (TypePtr::above_centerline(tp->ptr()) ||
2057 tp->ptr() == TypePtr::Null) return Type::TOP;
2058
2059 // Return a more precise klass, if possible
2060 const TypeInstPtr *tinst = tp->isa_instptr();
2330 if (offset == arrayOopDesc::length_offset_in_bytes()) {
2331 AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(base, phase);
2332 if (alloc != NULL) {
2333 Node* allocated_length = alloc->Ideal_length();
2334 // Do not allow make_ideal_length to allocate a CastII node.
2335 Node* len = alloc->make_ideal_length(tary, phase, false);
2336 if (allocated_length == len) {
2337 // Return allocated_length only if it would not be improved by a CastII.
2338 return allocated_length;
2339 }
2340 }
2341 }
2342
2343 return this;
2344
2345 }
2346
2347 //=============================================================================
2348 //---------------------------StoreNode::make-----------------------------------
2349 // Polymorphic factory method:
2350 StoreNode* StoreNode::make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, BasicType bt, Sem sem) {
2351 assert((sem == unordered || sem == release), "unexpected");
2352 Compile* C = gvn.C;
2353 assert(C->get_alias_index(adr_type) != Compile::AliasIdxRaw ||
2354 ctl != NULL, "raw memory operations should have control edge");
2355
2356 switch (bt) {
2357 case T_BOOLEAN:
2358 case T_BYTE: return new (C) StoreBNode(ctl, mem, adr, adr_type, val, sem);
2359 case T_INT: return new (C) StoreINode(ctl, mem, adr, adr_type, val, sem);
2360 case T_CHAR:
2361 case T_SHORT: return new (C) StoreCNode(ctl, mem, adr, adr_type, val, sem);
2362 case T_LONG: return new (C) StoreLNode(ctl, mem, adr, adr_type, val, false, sem);
2363 case T_FLOAT: return new (C) StoreFNode(ctl, mem, adr, adr_type, val, sem);
2364 case T_DOUBLE: return new (C) StoreDNode(ctl, mem, adr, adr_type, val, sem);
2365 case T_METADATA:
2366 case T_ADDRESS:
2367 case T_OBJECT:
2368 #ifdef _LP64
2369 if (adr->bottom_type()->is_ptr_to_narrowoop()) {
2370 val = gvn.transform(new (C) EncodePNode(val, val->bottom_type()->make_narrowoop()));
2371 return new (C) StoreNNode(ctl, mem, adr, adr_type, val, sem);
2372 } else if (adr->bottom_type()->is_ptr_to_narrowklass() ||
2373 (UseCompressedClassPointers && val->bottom_type()->isa_klassptr() &&
2374 adr->bottom_type()->isa_rawptr())) {
2375 val = gvn.transform(new (C) EncodePKlassNode(val, val->bottom_type()->make_narrowklass()));
2376 return new (C) StoreNKlassNode(ctl, mem, adr, adr_type, val, sem);
2377 }
2378 #endif
2379 {
2380 return new (C) StorePNode(ctl, mem, adr, adr_type, val, sem);
2381 }
2382 }
2383 ShouldNotReachHere();
2384 return (StoreNode*)NULL;
2385 }
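// A minimal sketch of the two orderings accepted by the assert above
// (caller names are illustrative, not from this file):
//   StoreNode::make(gvn, ctl, mem, adr, adr_type, val, T_INT, StoreNode::unordered);
//   StoreNode::make(gvn, ctl, mem, adr, adr_type, val, T_INT, StoreNode::release);
// A plain field store uses unordered; a volatile store uses release so the
// platform can emit the required barrier or instruction form.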
2386
2387 StoreLNode* StoreLNode::make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, Sem sem) {
2388 bool require_atomic = true;
2389 return new (C) StoreLNode(ctl, mem, adr, adr_type, val, require_atomic, sem);
2390 }
2391
2392
2393 //--------------------------bottom_type----------------------------------------
2394 const Type *StoreNode::bottom_type() const {
2395 return Type::MEMORY;
2396 }
2397
2398 //------------------------------hash-------------------------------------------
2399 uint StoreNode::hash() const {
2400 // unroll addition of interesting fields
2401 //return (uintptr_t)in(Control) + (uintptr_t)in(Memory) + (uintptr_t)in(Address) + (uintptr_t)in(ValueIn);
2402
2403 // Since they are not commoned, do not hash them:
2404 return NO_HASH;
2405 }
2406
2407 //------------------------------Ideal------------------------------------------
2408 // Change back-to-back stores Store(Store(m, p, y), p, x) into Store(m, p, x).
2409 // When a store immediately follows a relevant allocation/initialization,
2762 // (see jck test stmt114.stmt11402.val).
2763 if (size <= 0 || size % unit != 0) return NULL;
2764 intptr_t count = size / unit;
2765 // Length too long; use fast hardware clear
2766 if (size > Matcher::init_array_short_size) return NULL;
2767 Node *mem = in(1);
2768 if( phase->type(mem)==Type::TOP ) return NULL;
2769 Node *adr = in(3);
2770 const Type* at = phase->type(adr);
2771 if( at==Type::TOP ) return NULL;
2772 const TypePtr* atp = at->isa_ptr();
2773 // adjust atp to be the correct array element address type
2774 if (atp == NULL) atp = TypePtr::BOTTOM;
2775 else atp = atp->add_offset(Type::OffsetBot);
2776 // Get base for derived pointer purposes
2777 if( adr->Opcode() != Op_AddP ) Unimplemented();
2778 Node *base = adr->in(1);
2779
2780 Node *zero = phase->makecon(TypeLong::ZERO);
2781 Node *off = phase->MakeConX(BytesPerLong);
2782 mem = new (phase->C) StoreLNode(in(0),mem,adr,atp,zero,false,StoreNode::unordered);
2783 count--;
2784 while( count-- ) {
2785 mem = phase->transform(mem);
2786 adr = phase->transform(new (phase->C) AddPNode(base,adr,off));
2787 mem = new (phase->C) StoreLNode(in(0),mem,adr,atp,zero,false,StoreNode::unordered);
2788 }
2789 return mem;
2790 }
2791
2792 //----------------------------step_through----------------------------------
2793 // Advance *np to the allocation's incoming memory edge and return true if
2794 // this ClearArray covers a different instance; return false if it is ours.
2795 bool ClearArrayNode::step_through(Node** np, uint instance_id, PhaseTransform* phase) {
2796 Node* n = *np;
2797 assert(n->is_ClearArray(), "sanity");
2798 intptr_t offset;
2799 AllocateNode* alloc = AllocateNode::Ideal_allocation(n->in(3), phase, offset);
2800 // This method is called only before Allocate nodes are expanded during
2801 // macro nodes expansion. Before that ClearArray nodes are only generated
2802 // in LibraryCallKit::generate_arraycopy() which follows allocations.
2803 assert(alloc != NULL, "should have allocation");
2804 if (alloc->_idx == instance_id) {
2805 // Can not bypass initialization of the instance we are looking for.
2806 return false;
2807 }
2811 *np = init->in(TypeFunc::Memory);
2812 else
2813 *np = alloc->in(TypeFunc::Memory);
2814 return true;
2815 }
2816
2817 //----------------------------clear_memory-------------------------------------
2818 // Generate code to initialize object storage to zero.
2819 Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
2820 intptr_t start_offset,
2821 Node* end_offset,
2822 PhaseGVN* phase) {
2823 Compile* C = phase->C;
2824 intptr_t offset = start_offset;
2825
2826 int unit = BytesPerLong;
2827 if ((offset % unit) != 0) {
2828 Node* adr = new (C) AddPNode(dest, dest, phase->MakeConX(offset));
2829 adr = phase->transform(adr);
2830 const TypePtr* atp = TypeRawPtr::BOTTOM;
2831 mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, StoreNode::unordered);
2832 mem = phase->transform(mem);
2833 offset += BytesPerInt;
2834 }
2835 assert((offset % unit) == 0, "");
2836
2837 // Initialize the remaining stuff, if any, with a ClearArray.
2838 return clear_memory(ctl, mem, dest, phase->MakeConX(offset), end_offset, phase);
2839 }
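// Worked example: with start_offset == 12 (not 8-byte aligned) the code above
// emits one 4-byte zero store at offset 12, bumps offset to 16, and hands the
// now 8-byte-aligned remainder [16, end_offset) to the ClearArray overload.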
2840
2841 Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
2842 Node* start_offset,
2843 Node* end_offset,
2844 PhaseGVN* phase) {
2845 if (start_offset == end_offset) {
2846 // nothing to do
2847 return mem;
2848 }
2849
2850 Compile* C = phase->C;
2851 int unit = BytesPerLong;
2872 PhaseGVN* phase) {
2873 if (start_offset == end_offset) {
2874 // nothing to do
2875 return mem;
2876 }
2877
2878 Compile* C = phase->C;
2879 assert((end_offset % BytesPerInt) == 0, "odd end offset");
2880 intptr_t done_offset = end_offset;
2881 if ((done_offset % BytesPerLong) != 0) {
2882 done_offset -= BytesPerInt;
2883 }
2884 if (done_offset > start_offset) {
2885 mem = clear_memory(ctl, mem, dest,
2886 start_offset, phase->MakeConX(done_offset), phase);
2887 }
2888 if (done_offset < end_offset) { // emit the final 32-bit store
2889 Node* adr = new (C) AddPNode(dest, dest, phase->MakeConX(done_offset));
2890 adr = phase->transform(adr);
2891 const TypePtr* atp = TypeRawPtr::BOTTOM;
2892 mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, StoreNode::unordered);
2893 mem = phase->transform(mem);
2894 done_offset += BytesPerInt;
2895 }
2896 assert(done_offset == end_offset, "");
2897 return mem;
2898 }
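// Worked example: start_offset == 8, end_offset == 20. done_offset is rounded
// down from 20 to 16 (20 % 8 != 0), the range [8, 16) is cleared with 8-byte
// stores, and the final 4 bytes [16, 20) get the single 32-bit zero store
// emitted above, after which done_offset == end_offset == 20.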
2899
2900 //=============================================================================
2901 // Do not match memory edge.
2902 uint StrIntrinsicNode::match_edge(uint idx) const {
2903 return idx == 2 || idx == 3;
2904 }
2905
2906 //------------------------------Ideal------------------------------------------
2907 // Return a node which is more "ideal" than the current node. Strip out
2908 // control copies
2909 Node *StrIntrinsicNode::Ideal(PhaseGVN *phase, bool can_reshape) {
2910 if (remove_dead_region(phase, can_reshape)) return this;
2911 // Don't bother trying to transform a dead node
2912 if (in(0) && in(0)->is_top()) return NULL;
3746
3747 // Here's a case where init0 is neither 0 nor -1:
3748 // byte a[] = { ... 0,0,foo(),0, 0,0,0,42 ... }
3749 // Assuming big-endian memory, init0, init1 are 0x0000FF00, 0x000000FF.
3750 // In this case the tile is not split; it is (jlong)42.
3751 // The big tile is stored down, and then the foo() value is inserted.
3752 // (If there were foo(),foo() instead of foo(),0, init0 would be -1.)
3753
3754 Node* ctl = old->in(MemNode::Control);
3755 Node* adr = make_raw_address(offset, phase);
3756 const TypePtr* atp = TypeRawPtr::BOTTOM;
3757
3758 // One or two coalesced stores to plop down.
3759 Node* st[2];
3760 intptr_t off[2];
3761 int nst = 0;
3762 if (!split) {
3763 ++new_long;
3764 off[nst] = offset;
3765 st[nst++] = StoreNode::make(*phase, ctl, zmem, adr, atp,
3766 phase->longcon(con), T_LONG, StoreNode::unordered);
3767 } else {
3768 // Omit either if it is a zero.
3769 if (con0 != 0) {
3770 ++new_int;
3771 off[nst] = offset;
3772 st[nst++] = StoreNode::make(*phase, ctl, zmem, adr, atp,
3773 phase->intcon(con0), T_INT, StoreNode::unordered);
3774 }
3775 if (con1 != 0) {
3776 ++new_int;
3777 offset += BytesPerInt;
3778 adr = make_raw_address(offset, phase);
3779 off[nst] = offset;
3780 st[nst++] = StoreNode::make(*phase, ctl, zmem, adr, atp,
3781 phase->intcon(con1), T_INT, StoreNode::unordered);
3782 }
3783 }
3784
3785 // Insert second store first, then the first before the second.
3786 // Insert each one just before any overlapping non-constant stores.
3787 while (nst > 0) {
3788 Node* st1 = st[--nst];
3789 C->copy_node_notes_to(st1, old);
3790 st1 = phase->transform(st1);
3791 offset = off[nst];
3792 assert(offset >= header_size, "do not smash header");
3793 int ins_idx = captured_store_insertion_point(offset, /*size:*/0, phase);
3794 guarantee(ins_idx != 0, "must re-insert constant store");
3795 if (ins_idx < 0) ins_idx = -ins_idx; // never overlap
3796 if (ins_idx > InitializeNode::RawStores && in(ins_idx-1) == zmem)
3797 set_req(--ins_idx, st1);
3798 else
3799 ins_req(ins_idx, st1);
3800 }
3801 }