  // Create Debug Information Recorder to record scopes, oopmaps, etc.
  env()->set_oop_recorder(new OopRecorder(env()->arena()));
  env()->set_debug_info(new DebugInformationRecorder(env()->oop_recorder()));
  env()->set_dependencies(new Dependencies(env()));

  _fixed_slots = 0;
  set_has_split_ifs(false);
  set_has_loops(has_method() && method()->has_loops()); // first approximation
  set_has_stringbuilder(false);
  set_has_boxed_value(false);
  _trap_can_recompile = false;  // no traps emitted yet
  _major_progress = true;       // start out assuming good things will happen
  set_has_unsafe_access(false);
  set_max_vector_size(0);
  set_clear_upper_avx(false);   // false by default: whether to clear the upper bits of ymm registers
  Copy::zero_to_bytes(_trap_hist, sizeof(_trap_hist));
  set_decompile_count(0);

  set_do_freq_based_layout(_directive->BlockLayoutByFrequencyOption);
  _loop_opts_cnt = LoopOptsCount;
  _has_flattened_accesses = false;
  _flattened_accesses_share_alias = true;
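  // Flattened (value type) array accesses all start out in a single shared
  // alias slice; adjust_flattened_array_access_aliases() moves them to
  // per-field slices once parsing is over (see below).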

  set_do_inlining(Inline);
  set_max_inline_size(MaxInlineSize);
  set_freq_inline_size(FreqInlineSize);
  set_do_scheduling(OptoScheduling);
  set_do_count_invocations(false);
  set_do_method_data_update(false);

  set_do_vector_loop(false);

  if (AllowVectorizeOnDemand) {
    if (has_method() && (_directive->VectorizeOption || _directive->VectorizeDebugOption)) {
      set_do_vector_loop(true);
      NOT_PRODUCT(if (do_vector_loop() && Verbose) {tty->print("Compile::Init: do vectorized loops (SIMD like) for method %s\n", method()->name()->as_quoted_ascii());})
    } else if (has_method() && method()->name() != 0 &&
               method()->intrinsic_id() == vmIntrinsics::_forEachRemaining) {
      set_do_vector_loop(true);
    }
  }
  set_use_cmove(UseCMoveUnconditionally /* || do_vector_loop()*/); // TODO: consider whether do_vector_loop() should mandate use_cmove unconditionally
  NOT_PRODUCT(if (use_cmove() && Verbose && has_method()) {tty->print("Compile::Init: use CMove without profitability tests for method %s\n", method()->name()->as_quoted_ascii());})
// ...

        ta = tj->isa_aryptr();
      } else {                  // Random constant offset into array body
        offset = Type::OffsetBot;   // Flatten constant access into array body
        tj = ta = TypeAryPtr::make(ptr,ta->ary(),ta->klass(),false,Type::Offset(offset), ta->field_offset());
      }
    }
    // Arrays of fixed size alias with arrays of unknown size.
    if (ta->size() != TypeInt::POS) {
      const TypeAry *tary = TypeAry::make(ta->elem(), TypeInt::POS);
      tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,ta->klass(),false,Type::Offset(offset), ta->field_offset());
    }
    // Arrays of known objects become arrays of unknown objects.
    if (ta->elem()->isa_narrowoop() && ta->elem() != TypeNarrowOop::BOTTOM) {
      const TypeAry *tary = TypeAry::make(TypeNarrowOop::BOTTOM, ta->size());
      tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,Type::Offset(offset), ta->field_offset());
    }
    if (ta->elem()->isa_oopptr() && ta->elem() != TypeInstPtr::BOTTOM) {
      const TypeAry *tary = TypeAry::make(TypeInstPtr::BOTTOM, ta->size());
      tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,Type::Offset(offset), ta->field_offset());
    }
    // Initially all flattened array accesses share a single slice
    if (ta->elem()->isa_valuetype() && ta->elem() != TypeValueType::BOTTOM && _flattened_accesses_share_alias) {
      const TypeAry *tary = TypeAry::make(TypeValueType::BOTTOM, ta->size());
      tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,Type::Offset(offset), Type::Offset(Type::OffsetBot));
    }
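    // (These accesses are moved to per-field slices later, in
    // adjust_flattened_array_access_aliases(), once parsing is complete.)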
    // Arrays of bytes and of booleans both use 'bastore' and 'baload' so
    // cannot be distinguished by bytecode alone.
    if (ta->elem() == TypeInt::BOOL) {
      const TypeAry *tary = TypeAry::make(TypeInt::BYTE, ta->size());
      ciKlass* aklass = ciTypeArrayKlass::make(T_BYTE);
      tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,aklass,false,Type::Offset(offset), ta->field_offset());
    }
    // During the 2nd round of IterGVN, NotNull castings are removed.
    // Make sure the Bottom and NotNull variants alias the same.
    // Also, make sure exact and non-exact variants alias the same.
    if (ptr == TypePtr::NotNull || ta->klass_is_exact() || ta->speculative() != NULL) {
      tj = ta = TypeAryPtr::make(TypePtr::BotPTR,ta->ary(),ta->klass(),false,Type::Offset(offset), ta->field_offset());
    }
  }

  // Oop pointers need some flattening
  const TypeInstPtr *to = tj->isa_instptr();
  if( to && _AliasLevel >= 2 && to != TypeOopPtr::BOTTOM ) {
    ciInstanceKlass *k = to->klass()->as_instance_klass();
    if( ptr == TypePtr::Constant ) {
// ...

  intptr_t key = (intptr_t) adr_type;
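  // Mix the pointer's high bits into the low bits so they contribute to the
  // direct-mapped cache index.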
  key ^= key >> logAliasCacheSize;
  return &_alias_cache[key & right_n_bits(logAliasCacheSize)];
}


//-----------------------------grow_alias_types--------------------------------
void Compile::grow_alias_types() {
  const int old_ats  = _max_alias_types; // how many before?
  const int new_ats  = old_ats;          // how many more?
  const int grow_ats = old_ats+new_ats;  // how many now?
  _max_alias_types = grow_ats;
  _alias_types = REALLOC_ARENA_ARRAY(comp_arena(), AliasType*, _alias_types, old_ats, grow_ats);
  AliasType* ats = NEW_ARENA_ARRAY(comp_arena(), AliasType, new_ats);
  Copy::zero_to_bytes(ats, sizeof(AliasType)*new_ats);
  for (int i = 0; i < new_ats; i++)  _alias_types[old_ats+i] = &ats[i];
}


//--------------------------------find_alias_type------------------------------
Compile::AliasType* Compile::find_alias_type(const TypePtr* adr_type, bool no_create, ciField* original_field, bool uncached) {
  if (_AliasLevel == 0)
    return alias_type(AliasIdxBot);

  AliasCacheEntry* ace = NULL;
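  // With uncached == true the alias cache is bypassed entirely (no lookup
  // here, no fill below); this appears intended for callers that must not
  // pick up entries left stale by re-slicing, e.g. after
  // adjust_flattened_array_access_aliases() has split flattened accesses.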
  if (!uncached) {
    ace = probe_alias_cache(adr_type);
    if (ace->_adr_type == adr_type) {
      return alias_type(ace->_index);
    }
  }

  // Handle special cases.
  if (adr_type == NULL)            return alias_type(AliasIdxTop);
  if (adr_type == TypePtr::BOTTOM) return alias_type(AliasIdxBot);

  // Do it the slow way.
  const TypePtr* flat = flatten_alias_type(adr_type);

#ifdef ASSERT
  {
    ResourceMark rm;
    assert(flat == flatten_alias_type(flat), "not idempotent: adr_type = %s; flat = %s => %s",
           Type::str(adr_type), Type::str(flat), Type::str(flatten_alias_type(flat)));
    assert(flat != TypePtr::BOTTOM, "cannot alias-analyze an untyped ptr: adr_type = %s",
           Type::str(adr_type));
    if (flat->isa_oopptr() && !flat->isa_klassptr()) {
      const TypeOopPtr* foop = flat->is_oopptr();
      // Scalarizable allocations have exact klass always.
      bool exact = !foop->klass_is_exact() || foop->is_known_instance();
      const TypePtr* xoop = foop->cast_to_exactness(exact)->is_ptr();
// ...

    _alias_types[idx]->Init(idx, flat);
    if (flat == TypeInstPtr::KLASS) alias_type(idx)->set_rewritable(false);
    if (flat == TypeAryPtr::RANGE)  alias_type(idx)->set_rewritable(false);
    if (flat->isa_instptr()) {
      if (flat->offset() == java_lang_Class::klass_offset_in_bytes()
          && flat->is_instptr()->klass() == env()->Class_klass())
        alias_type(idx)->set_rewritable(false);
    }
    ciField* field = NULL;
    if (flat->isa_aryptr()) {
#ifdef ASSERT
      const int header_size_min = arrayOopDesc::base_offset_in_bytes(T_BYTE);
      // (T_BYTE has the weakest alignment and size restrictions...)
      assert(flat->offset() < header_size_min, "array body reference must be OffsetBot");
#endif
      const Type* elemtype = flat->is_aryptr()->elem();
      if (flat->offset() == TypePtr::OffsetBot) {
        alias_type(idx)->set_element(elemtype);
      }
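      // For a flattened value type array, field_offset is relative to the
      // element payload; adding first_field_offset() converts it to the
      // object-relative offset that get_field_by_offset() expects.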
      int field_offset = flat->is_aryptr()->field_offset().get();
      if (elemtype->isa_valuetype() &&
          elemtype->value_klass() != NULL &&
          field_offset != Type::OffsetBot) {
        ciValueKlass* vk = elemtype->value_klass();
        field_offset += vk->first_field_offset();
        field = vk->get_field_by_offset(field_offset, false);
      }
    }
    if (flat->isa_klassptr()) {
      if (flat->offset() == in_bytes(Klass::super_check_offset_offset()))
        alias_type(idx)->set_rewritable(false);
      if (flat->offset() == in_bytes(Klass::modifier_flags_offset()))
        alias_type(idx)->set_rewritable(false);
      if (flat->offset() == in_bytes(Klass::access_flags_offset()))
        alias_type(idx)->set_rewritable(false);
      if (flat->offset() == in_bytes(Klass::java_mirror_offset()))
        alias_type(idx)->set_rewritable(false);
      if (flat->offset() == in_bytes(Klass::layout_helper_offset()))
        alias_type(idx)->set_rewritable(false);
    }
    // %%% (We would like to finalize JavaThread::threadObj_offset(),
    // but the base pointer type is not distinctive enough to identify
    // references into JavaThread.)

    // Check for final fields.
    const TypeInstPtr* tinst = flat->isa_instptr();
    if (tinst && tinst->offset() >= instanceOopDesc::base_offset_in_bytes()) {
      if (tinst->const_oop() != NULL &&
          tinst->klass() == ciEnv::current()->Class_klass() &&
          tinst->offset() >= (tinst->klass()->as_instance_klass()->size_helper() * wordSize)) {
        // static field
        ciInstanceKlass* k = tinst->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass();
        field = k->get_field_by_offset(tinst->offset(), true);
      } else if (tinst->klass()->is_valuetype()) {
        // Value type field
        ciValueKlass* vk = tinst->value_klass();
        field = vk->get_field_by_offset(tinst->offset(), false);
      } else {
        ciInstanceKlass* k = tinst->klass()->as_instance_klass();
        field = k->get_field_by_offset(tinst->offset(), false);
      }
    }
    assert(field == NULL ||
           original_field == NULL ||
           (field->holder() == original_field->holder() &&
            field->offset() == original_field->offset() &&
            field->is_static() == original_field->is_static()), "wrong field?");
    // Set field() and is_rewritable() attributes.
    if (field != NULL) alias_type(idx)->set_field(field);
  }

  // Fill the cache for next time.
  if (!uncached) {
    ace->_adr_type = adr_type;
    ace->_index = idx;
    assert(alias_type(adr_type) == alias_type(idx), "type must be installed");

    // Might as well try to fill the cache for the flattened version, too.
    AliasCacheEntry* face = probe_alias_cache(flat);
    if (face->_adr_type == NULL) {
      face->_adr_type = flat;
      face->_index = idx;
      assert(alias_type(flat) == alias_type(idx), "flat type must work too");
    }
  }

  return alias_type(idx);
}


Compile::AliasType* Compile::alias_type(ciField* field) {
  const TypeOopPtr* t;
  if (field->is_static())
    t = TypeInstPtr::make(field->holder()->java_mirror());
  else
    t = TypeOopPtr::make_from_klass_raw(field->holder());
  AliasType* atp = alias_type(t->add_offset(field->offset_in_bytes()), field);
  assert((field->is_final() || field->is_stable()) == !atp->is_rewritable(), "must get the rewritable bits correct");
  return atp;
}


//------------------------------have_alias_type--------------------------------
bool Compile::have_alias_type(const TypePtr* adr_type) {
  AliasCacheEntry* ace = probe_alias_cache(adr_type);
// ...

    for (uint i = 1; i < root()->req(); i++) {
      Node* in = root()->in(i);
      if (in->Opcode() == Op_Return) {
        assert(ret == NULL, "only one return");
        ret = in;
      }
    }
    if (ret != NULL) {
      Node* ret_val = ret->in(TypeFunc::Parms);
      if (igvn.type(ret_val)->isa_oopptr() &&
          return_val_keeps_allocations_alive(ret_val)) {
        igvn.replace_input_of(ret, TypeFunc::Parms, ValueTypeNode::tagged_klass(igvn.type(ret_val)->value_klass(), igvn));
        assert(ret_val->outcnt() == 0, "should be dead now");
        igvn.remove_dead_node(ret_val);
      }
    }
  }
  igvn.optimize();
}

void Compile::adjust_flattened_array_access_aliases(PhaseIterGVN& igvn) {
  if (!_has_flattened_accesses) {
    return;
  }
  // Initially, all flattened array accesses share the same slice to
  // keep dependencies with Object[] array accesses (that could be
  // to a flattened array) correct. We're done with parsing so we
  // now know all flattened array accesses in this compile
  // unit. Let's move flattened array accesses to their own slice,
  // one per element field. This should help memory access
  // optimizations.
  ResourceMark rm;
  Unique_Node_List wq;
  wq.push(root());

  Node_List mergememnodes;
  Node_List memnodes;

  // Alias index currently shared by all flattened memory accesses
  int index = get_alias_index(TypeAryPtr::VALUES);

  // Find MergeMem nodes and flattened array accesses
  for (uint i = 0; i < wq.size(); i++) {
    Node* n = wq.at(i);
    if (n->is_Mem()) {
      const TypePtr* adr_type = get_adr_type(get_alias_index(n->adr_type()));
      if (adr_type == TypeAryPtr::VALUES) {
        memnodes.push(n);
      }
    } else if (n->is_MergeMem()) {
      MergeMemNode* mm = n->as_MergeMem();
      if (mm->memory_at(index) != mm->base_memory()) {
        mergememnodes.push(n);
      }
    }
    for (uint j = 0; j < n->req(); j++) {
      Node* m = n->in(j);
      if (m != NULL) {
        wq.push(m);
      }
    }
  }

  if (memnodes.size() > 0) {
    _flattened_accesses_share_alias = false;

    // We are going to change the slice for the flattened array
    // accesses so we need to clear the cache entries that refer to
    // them.
    for (uint i = 0; i < AliasCacheSize; i++) {
      AliasCacheEntry* ace = &_alias_cache[i];
      if (ace->_adr_type != NULL &&
          ace->_adr_type->isa_aryptr() &&
          ace->_adr_type->is_aryptr()->elem()->isa_valuetype()) {
        ace->_adr_type = NULL;
        ace->_index = 0;
      }
    }

    // Find what aliases we are going to add
    int start_alias = num_alias_types()-1;
    int stop_alias = 0;
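    // (Seeded past each other so the MIN2/MAX2 accumulation below narrows
    // them to the range of newly created alias indices.)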

    for (uint i = 0; i < memnodes.size(); i++) {
      Node* m = memnodes.at(i);
      const TypePtr* adr_type = m->adr_type();
#ifdef ASSERT
      m->as_Mem()->set_adr_type(adr_type);
#endif
      int idx = get_alias_index(adr_type);
      start_alias = MIN2(start_alias, idx);
      stop_alias = MAX2(stop_alias, idx);
    }

    assert(stop_alias >= start_alias, "should have expanded aliases");

    Node_Stack stack(0);
#ifdef ASSERT
    VectorSet seen(Thread::current()->resource_area());
#endif
    // Now let's fix the memory graph so each flattened array access
    // is moved to the right slice. Start from the MergeMem nodes.
    uint last = unique();
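    // Nodes with _idx >= last are created by this pass (place holders and
    // new memory state); the rewiring below uses this bound to skip them.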
    for (uint i = 0; i < mergememnodes.size(); i++) {
      MergeMemNode* current = mergememnodes.at(i)->as_MergeMem();
      Node* n = current->memory_at(index);
      MergeMemNode* mm = NULL;
      do {
        // Follow memory edges through memory accesses, phis and
        // narrow membars and push nodes on the stack. Once we hit
        // bottom memory, we pop elements off the stack one at a
        // time, in reverse order, and move them to the right slice
        // by changing their memory edges.
        if ((n->is_Phi() && n->adr_type() != TypePtr::BOTTOM) || n->is_Mem() || n->adr_type() == TypeAryPtr::VALUES) {
          assert(!seen.test_set(n->_idx), "");
          // Uses (a load for instance) will need to be moved to the
          // right slice as well and will get a new memory state
          // that we don't know yet. The use could also be the
          // backedge of a loop. We put a place holder node between
          // the memory node and its uses. We replace that place
          // holder with the correct memory state once we know it,
          // i.e. when nodes are popped off the stack. Using the
          // place holder makes the logic work in the presence of
          // loops.
          if (n->outcnt() > 1) {
            Node* place_holder = NULL;
            assert(!n->has_out_with(Op_Node), "");
            for (DUIterator k = n->outs(); n->has_out(k); k++) {
              Node* u = n->out(k);
              if (u != current && u->_idx < last) {
                bool success = false;
                for (uint l = 0; l < u->req(); l++) {
                  if (!stack.is_empty() && u == stack.node() && l == stack.index()) {
                    continue;
                  }
                  Node* in = u->in(l);
                  if (in == n) {
                    if (place_holder == NULL) {
                      place_holder = new Node(1);
                      place_holder->init_req(0, n);
                    }
                    igvn.replace_input_of(u, l, place_holder);
                    success = true;
                  }
                }
                if (success) {
                  --k;
                }
              }
            }
          }
          if (n->is_Phi()) {
            stack.push(n, 1);
            n = n->in(1);
          } else if (n->is_Mem()) {
            stack.push(n, n->req());
            n = n->in(MemNode::Memory);
          } else {
            assert(n->is_Proj() && n->in(0)->Opcode() == Op_MemBarCPUOrder, "");
            stack.push(n, n->req());
            n = n->in(0)->in(TypeFunc::Memory);
          }
        } else {
          assert(n->adr_type() == TypePtr::BOTTOM || (n->Opcode() == Op_Node && n->_idx >= last) || (n->is_Proj() && n->in(0)->is_Initialize()), "");
          // Build a new MergeMem node to carry the new memory state
          // as we build it. IGVN should fold extraneous MergeMem
          // nodes.
          mm = MergeMemNode::make(n);
          igvn.register_new_node_with_optimizer(mm);
          while (stack.size() > 0) {
            Node* m = stack.node();
            uint idx = stack.index();
            if (m->is_Mem()) {
              // Move memory node to its new slice
              const TypePtr* adr_type = m->adr_type();
              int alias = get_alias_index(adr_type);
              Node* prev = mm->memory_at(alias);
              igvn.replace_input_of(m, MemNode::Memory, prev);
              mm->set_memory_at(alias, m);
            } else if (m->is_Phi()) {
              // We need as many new phis as there are new aliases
              igvn.replace_input_of(m, idx, mm);
              if (idx == m->req()-1) {
                Node* r = m->in(0);
                for (uint j = (uint)start_alias; j <= (uint)stop_alias; j++) {
                  const Type* adr_type = get_adr_type(j);
                  if (!adr_type->isa_aryptr() || !adr_type->is_aryptr()->elem()->isa_valuetype()) {
                    continue;
                  }
                  Node* phi = new PhiNode(r, Type::MEMORY, get_adr_type(j));
                  igvn.register_new_node_with_optimizer(phi);
                  for (uint k = 1; k < m->req(); k++) {
                    phi->init_req(k, m->in(k)->as_MergeMem()->memory_at(j));
                  }
                  mm->set_memory_at(j, phi);
                }
                Node* base_phi = new PhiNode(r, Type::MEMORY, TypePtr::BOTTOM);
                igvn.register_new_node_with_optimizer(base_phi);
                for (uint k = 1; k < m->req(); k++) {
                  base_phi->init_req(k, m->in(k)->as_MergeMem()->base_memory());
                }
                mm->set_base_memory(base_phi);
              }
            } else {
              // This is a MemBarCPUOrder node from
              // Parse::array_load()/Parse::array_store(), in the
              // branch that handles flattened arrays hidden under
              // an Object[] array. We also need one new membar per
              // new alias to keep the unknown access that the
              // membars protect properly ordered with accesses to
              // known flattened arrays.
              assert(m->is_Proj(), "projection expected");
              Node* ctrl = m->in(0)->in(TypeFunc::Control);
              igvn.replace_input_of(m->in(0), TypeFunc::Control, top());
              for (uint j = (uint)start_alias; j <= (uint)stop_alias; j++) {
                const Type* adr_type = get_adr_type(j);
                if (!adr_type->isa_aryptr() || !adr_type->is_aryptr()->elem()->isa_valuetype()) {
                  continue;
                }
                MemBarNode* mb = new MemBarCPUOrderNode(this, j, NULL);
                igvn.register_new_node_with_optimizer(mb);
                Node* mem = mm->memory_at(j);
                mb->init_req(TypeFunc::Control, ctrl);
                mb->init_req(TypeFunc::Memory, mem);
                ctrl = new ProjNode(mb, TypeFunc::Control);
                igvn.register_new_node_with_optimizer(ctrl);
                mem = new ProjNode(mb, TypeFunc::Memory);
                igvn.register_new_node_with_optimizer(mem);
                mm->set_memory_at(j, mem);
              }
              igvn.replace_node(m->in(0)->as_Multi()->proj_out(TypeFunc::Control), ctrl);
            }
            if (idx < m->req()-1) {
              idx += 1;
              stack.set_index(idx);
              n = m->in(idx);
              break;
            }
            // Take care of place holder nodes
            if (m->has_out_with(Op_Node)) {
              Node* place_holder = m->find_out_with(Op_Node);
              if (place_holder != NULL) {
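                // Swap in a clone of the memory state built so far; the hook
                // node appears intended to keep mm alive across the
                // replace_node() call.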
                Node* mm_clone = mm->clone();
                igvn.register_new_node_with_optimizer(mm_clone);
                Node* hook = new Node(1);
                hook->init_req(0, mm);
                igvn.replace_node(place_holder, mm_clone);
                hook->destruct();
              }
              assert(!m->has_out_with(Op_Node), "place holder should be gone now");
            }
            stack.pop();
          }
        }
      } while (stack.size() > 0);
      // Fix the memory state at the MergeMem we started from
      igvn.rehash_node_delayed(current);
      for (uint j = (uint)start_alias; j <= (uint)stop_alias; j++) {
        const Type* adr_type = get_adr_type(j);
        if (!adr_type->isa_aryptr() || !adr_type->is_aryptr()->elem()->isa_valuetype()) {
          continue;
        }
        current->set_memory_at(j, mm);
      }
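      // The shared VALUES slice no longer has dedicated accesses; let it
      // default to the base memory.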
      current->set_memory_at(index, current->base_memory());
    }
    igvn.optimize();
  }
  print_method(PHASE_SPLIT_VALUES_ARRAY, 2);
}


// StringOpts and late inlining of string methods
void Compile::inline_string_calls(bool parse_time) {
  {
    // remove useless nodes to make the usage analysis simpler
    ResourceMark rm;
    PhaseRemoveUseless pru(initial_gvn(), for_igvn());
  }

  {
    ResourceMark rm;
    print_method(PHASE_BEFORE_STRINGOPTS, 3);
    PhaseStringOpts pso(initial_gvn(), for_igvn());
    print_method(PHASE_AFTER_STRINGOPTS, 3);
  }

  // now inline anything that we skipped the first time around
  if (!parse_time) {
    _late_inlines_pos = _late_inlines.length();
  }

// ...

  if (!failing() && RenumberLiveNodes && live_nodes() + NodeLimitFudgeFactor < unique()) {
    Compile::TracePhase tp("", &timers[_t_renumberLive]);
    initial_gvn()->replace_with(&igvn);
    for_igvn()->clear();
    Unique_Node_List new_worklist(C->comp_arena());
    {
      ResourceMark rm;
      PhaseRenumberLive prl = PhaseRenumberLive(initial_gvn(), for_igvn(), &new_worklist);
    }
    set_for_igvn(&new_worklist);
    igvn = PhaseIterGVN(initial_gvn());
    igvn.optimize();
  }

  if (_value_type_nodes->size() > 0) {
    // Do this once all inlining is over to avoid getting inconsistent debug info
    process_value_types(igvn);
  }

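  // All inlining is over, so flattened array accesses can now be moved out
  // of the single shared slice (a no-op if none were created during parsing).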
  adjust_flattened_array_access_aliases(igvn);

  // Perform escape analysis
  if (_do_escape_analysis && ConnectionGraph::has_candidates(this)) {
    if (has_loops()) {
      // Cleanup graph (remove dead nodes).
      TracePhase tp("idealLoop", &timers[_t_idealLoop]);
      PhaseIdealLoop::optimize(igvn, LoopOptsNone);
      if (major_progress()) print_method(PHASE_PHASEIDEAL_BEFORE_EA, 2);
      if (failing()) return;
    }
    ConnectionGraph::do_analysis(this, &igvn);

    if (failing()) return;

    // Optimize out field loads from scalar replaceable allocations.
    igvn.optimize();
    print_method(PHASE_ITER_GVN_AFTER_EA, 2);

    if (failing()) return;

    if (congraph() != NULL && macro_count() > 0) {
// ...

  case Op_LoadUS:
  case Op_LoadI:
  case Op_LoadKlass:
  case Op_LoadNKlass:
  case Op_LoadL:
  case Op_LoadL_unaligned:
  case Op_LoadPLocked:
  case Op_LoadP:
  case Op_LoadN:
  case Op_LoadRange:
  case Op_LoadS: {
  handle_mem:
#ifdef ASSERT
    if( VerifyOptoOopOffsets ) {
      MemNode* mem = n->as_Mem();
      // Check to see if address types have grounded out somehow.
      const TypeInstPtr *tp = mem->in(MemNode::Address)->bottom_type()->isa_instptr();
      assert( !tp || oop_offset_is_sane(tp), "" );
    }
#endif
    if (nop == Op_LoadKlass || nop == Op_LoadNKlass) {
      const TypeKlassPtr* tk = n->bottom_type()->make_ptr()->is_klassptr();
      assert(!tk->klass_is_exact(), "should have been folded");
      if (tk->klass()->is_obj_array_klass() || tk->klass()->is_java_lang_Object()) {
        bool maybe_value_array = tk->klass()->is_java_lang_Object();
        if (!maybe_value_array) {
          ciArrayKlass* ak = tk->klass()->as_array_klass();
          ciKlass* elem = ak->element_klass();
          maybe_value_array = elem->is_java_lang_Object() || elem->is_interface() || elem->is_valuetype();
        }
        if (maybe_value_array) {
          // An array klass load needs to filter out the storage property
          // bits (but not for GetNullFreeProperty, which needs to extract
          // the null-free bit from them).
          uint last = unique();
          Node* pointer = NULL;
          if (nop == Op_LoadKlass) {
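            // The storage property bits occupy the upper bits of the klass
            // pointer; shifting left and then back right by
            // storage_props_nof_bits discards them.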
            Node* cast = new CastP2XNode(NULL, n);
            Node* masked = new LShiftXNode(cast, new ConINode(TypeInt::make(oopDesc::storage_props_nof_bits)));
            masked = new RShiftXNode(masked, new ConINode(TypeInt::make(oopDesc::storage_props_nof_bits)));
            pointer = new CastX2PNode(masked);
            pointer = new CheckCastPPNode(NULL, pointer, n->bottom_type());
          } else {
            Node* cast = new CastN2INode(n);
            Node* masked = new AndINode(cast, new ConINode(TypeInt::make(oopDesc::compressed_klass_mask())));
            pointer = new CastI2NNode(masked, n->bottom_type());
          }
          for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
            Node* u = n->fast_out(i);
            if (u->_idx < last && u->Opcode() != Op_GetNullFreeProperty) {
              int nb = u->replace_edge(n, pointer);
              --i, imax -= nb;
            }
          }
        }
      }
    }
    break;
  }

  case Op_AddP: {               // Assert sane base pointers
    Node *addp = n->in(AddPNode::Address);
    assert( !addp->is_AddP() ||
            addp->in(AddPNode::Base)->is_top() || // Top OK for allocation
            addp->in(AddPNode::Base) == n->in(AddPNode::Base),
            "Base pointers must match (addp %u)", addp->_idx );
#ifdef _LP64
    if ((UseCompressedOops || UseCompressedClassPointers) &&
        addp->Opcode() == Op_ConP &&
        addp == n->in(AddPNode::Base) &&
        n->in(AddPNode::Offset)->is_Con()) {
      // Whether the transformation of ConP to ConN+DecodeN is beneficial
      // depends on the platform and on the compressed oops mode.
      // Use addressing with narrow klass to load with offset on x86.
      // Some platforms can use the constant pool to load ConP.
      // Do this transformation here since IGVN will convert ConN back to ConP.
      const Type* t = addp->bottom_type();
// ...

    if (!Matcher::has_match_rule(Op_CmpUL)) {
      // No support for unsigned long comparisons
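      // Rewrite as a signed CmpL: a negative (unsigned-huge) first operand
      // is saturated to max_jlong via (x | (x >> 63)) & max_jlong before
      // the comparison.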
      ConINode* sign_pos = new ConINode(TypeInt::make(BitsPerLong - 1));
      Node* sign_bit_mask = new RShiftLNode(n->in(1), sign_pos);
      Node* orl = new OrLNode(n->in(1), sign_bit_mask);
      ConLNode* remove_sign_mask = new ConLNode(TypeLong::make(max_jlong));
      Node* andl = new AndLNode(orl, remove_sign_mask);
      Node* cmp = new CmpLNode(andl, n->in(2));
      n->subsume_by(cmp, this);
    }
    break;
  }
#ifdef ASSERT
  case Op_ValueTypePtr:
  case Op_ValueType: {
    n->dump(-1);
    assert(false, "value type node was not removed");
    break;
  }
#endif
  case Op_GetNullFreeProperty: {
    // Extract the null free bits
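    // The storage property bits are encoded in the (possibly compressed)
    // klass word; a single mask isolates the null-free bit.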
    uint last = unique();
    Node* null_free = NULL;
    if (n->in(1)->Opcode() == Op_LoadKlass) {
      Node* cast = new CastP2XNode(NULL, n->in(1));
      null_free = new AndLNode(cast, new ConLNode(TypeLong::make(((jlong)1)<<(oopDesc::wide_storage_props_shift + ArrayStorageProperties::null_free_bit))));
    } else {
      assert(n->in(1)->Opcode() == Op_LoadNKlass, "not a compressed klass?");
      Node* cast = new CastN2INode(n->in(1));
      null_free = new AndINode(cast, new ConINode(TypeInt::make(1<<(oopDesc::narrow_storage_props_shift + ArrayStorageProperties::null_free_bit))));
    }
    n->replace_by(null_free);
    break;
  }
  default:
    assert(!n->is_Call(), "");
    assert(!n->is_Mem(), "");
    assert(nop != Op_ProfileBoolean, "should be eliminated during IGVN");
    break;
  }
}

//------------------------------final_graph_reshaping_walk---------------------
// Replacing Opaque nodes with their input in final_graph_reshaping_impl()
// requires that the walk visits a node's inputs before visiting the node.
void Compile::final_graph_reshaping_walk( Node_Stack &nstack, Node *root, Final_Reshape_Counts &frc ) {
  ResourceArea *area = Thread::current()->resource_area();
  Unique_Node_List sfpt(area);

  frc._visited.set(root->_idx); // first, mark node as visited
  uint cnt = root->req();
  Node *n = root;
  uint i = 0;
  while (true) {