/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "ci/ciReplay.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/exceptionHandlerTable.hpp"
#include "code/nmethod.hpp"
#include "compiler/compileLog.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/oopMap.hpp"
#include "opto/addnode.hpp"
#include "opto/block.hpp"
#include "opto/c2compiler.hpp"
#include "opto/callGenerator.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/chaitin.hpp"
#include "opto/compile.hpp"
#include "opto/connode.hpp"
#include "opto/divnode.hpp"
#include "opto/escape.hpp"
#include "opto/idealGraphPrinter.hpp"
#include "opto/loopnode.hpp"
#include "opto/machnode.hpp"
#include "opto/macro.hpp"
#include "opto/matcher.hpp"
#include "opto/mathexactnode.hpp"
#include "opto/memnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/narrowptrnode.hpp"
#include "opto/node.hpp"
#include "opto/opcodes.hpp"
#include "opto/output.hpp"
#include "opto/parse.hpp"
#include "opto/phaseX.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/stringopts.hpp"
#include "opto/type.hpp"
#include "opto/vectornode.hpp"
#include "runtime/arguments.hpp"
#include "runtime/signature.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/timer.hpp"
#include "trace/tracing.hpp"
#include "utilities/copy.hpp"


// -------------------- Compile::mach_constant_base_node -----------------------
// Constant table base node singleton.
MachConstantBaseNode* Compile::mach_constant_base_node() {
  if (_mach_constant_base_node == NULL) {
    _mach_constant_base_node = new MachConstantBaseNode();
    _mach_constant_base_node->add_req(C->root());
  }
  return _mach_constant_base_node;
}


/// Support for intrinsics.

// Return the index at which m must be inserted (or already exists).
// The sort order is by the address of the ciMethod, with is_virtual as minor key.
int Compile::intrinsic_insertion_index(ciMethod* m, bool is_virtual) {
#ifdef ASSERT
  for (int i = 1; i < _intrinsics->length(); i++) {
    CallGenerator* cg1 = _intrinsics->at(i-1);
    CallGenerator* cg2 = _intrinsics->at(i);
    assert(cg1->method() != cg2->method()
           ? cg1->method()     < cg2->method()
           : cg1->is_virtual() < cg2->is_virtual(),
           "compiler intrinsics list must stay sorted");
  }
#endif
  // Binary search sorted list, in decreasing intervals [lo, hi].
  int lo = 0, hi = _intrinsics->length()-1;
  while (lo <= hi) {
    int mid = (uint)(hi + lo) / 2;
    ciMethod* mid_m = _intrinsics->at(mid)->method();
    if (m < mid_m) {
      hi = mid-1;
    } else if (m > mid_m) {
      lo = mid+1;
    } else {
      // look at minor sort key
      bool mid_virt = _intrinsics->at(mid)->is_virtual();
      if (is_virtual < mid_virt) {
        hi = mid-1;
      } else if (is_virtual > mid_virt) {
        lo = mid+1;
      } else {
        return mid;  // exact match
      }
    }
  }
  return lo;  // inexact match
}
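
// Note: as with a lower-bound search, a miss returns 'lo', the unique
// position at which the (method address, is_virtual) pair can be inserted
// without breaking the sort order. Illustrative sketch of the invariant
// the callers below rely on (not compiled):
//
//   int idx = intrinsic_insertion_index(m, is_virtual);
//   // entries in [0, idx)   compare strictly less than (m, is_virtual)
//   // entries in [idx, len) compare greater than or equal to it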

void Compile::register_intrinsic(CallGenerator* cg) {
  if (_intrinsics == NULL) {
    _intrinsics = new (comp_arena())GrowableArray<CallGenerator*>(comp_arena(), 60, 0, NULL);
  }
  // This code is stolen from ciObjectFactory::insert.
  // Really, GrowableArray should have methods for
  // insert_at, remove_at, and binary_search.
  int len = _intrinsics->length();
  int index = intrinsic_insertion_index(cg->method(), cg->is_virtual());
  if (index == len) {
    _intrinsics->append(cg);
  } else {
#ifdef ASSERT
    CallGenerator* oldcg = _intrinsics->at(index);
    assert(oldcg->method() != cg->method() || oldcg->is_virtual() != cg->is_virtual(), "don't register twice");
#endif
    _intrinsics->append(_intrinsics->at(len-1));
    int pos;
    for (pos = len-2; pos >= index; pos--) {
      _intrinsics->at_put(pos+1,_intrinsics->at(pos));
    }
    _intrinsics->at_put(index, cg);
  }
  assert(find_intrinsic(cg->method(), cg->is_virtual()) == cg, "registration worked");
}
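
// Note: the insertion above is O(n) per registration: appending a copy of
// the last element grows the array by one slot, then elements shift right
// one at a time. Illustrative example: inserting X at index 1 into [A, B, C]
// proceeds as [A, B, C, C] -> [A, B, B, C] -> [A, X, B, C]. This is
// acceptable because the intrinsics list stays small.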

CallGenerator* Compile::find_intrinsic(ciMethod* m, bool is_virtual) {
  assert(m->is_loaded(), "don't try this on unloaded methods");
  if (_intrinsics != NULL) {
    int index = intrinsic_insertion_index(m, is_virtual);
    if (index < _intrinsics->length()
        && _intrinsics->at(index)->method() == m
        && _intrinsics->at(index)->is_virtual() == is_virtual) {
      return _intrinsics->at(index);
    }
  }
  // Lazily create intrinsics for intrinsic IDs well-known in the runtime.
  if (m->intrinsic_id() != vmIntrinsics::_none &&
      m->intrinsic_id() <= vmIntrinsics::LAST_COMPILER_INLINE) {
    CallGenerator* cg = make_vm_intrinsic(m, is_virtual);
    if (cg != NULL) {
      // Save it for next time:
      register_intrinsic(cg);
      return cg;
    } else {
      gather_intrinsic_statistics(m->intrinsic_id(), is_virtual, _intrinsic_disabled);
    }
  }
  return NULL;
}

// Compile:: register_library_intrinsics and make_vm_intrinsic are defined
// in library_call.cpp.


#ifndef PRODUCT
// statistics gathering...

juint  Compile::_intrinsic_hist_count[vmIntrinsics::ID_LIMIT] = {0};
jubyte Compile::_intrinsic_hist_flags[vmIntrinsics::ID_LIMIT] = {0};

bool Compile::gather_intrinsic_statistics(vmIntrinsics::ID id, bool is_virtual, int flags) {
  assert(id > vmIntrinsics::_none && id < vmIntrinsics::ID_LIMIT, "oob");
  int oflags = _intrinsic_hist_flags[id];
  assert(flags != 0, "what happened?");
  if (is_virtual) {
    flags |= _intrinsic_virtual;
  }
  bool changed = (flags != oflags);
  if ((flags & _intrinsic_worked) != 0) {
    juint count = (_intrinsic_hist_count[id] += 1);
    if (count == 1) {
      changed = true;           // first time
    }
    // increment the overall count also:
    _intrinsic_hist_count[vmIntrinsics::_none] += 1;
  }
  if (changed) {
    if (((oflags ^ flags) & _intrinsic_virtual) != 0) {
      // Something changed about the intrinsic's virtuality.
      if ((flags & _intrinsic_virtual) != 0) {
        // This is the first use of this intrinsic as a virtual call.
        if (oflags != 0) {
          // We already saw it as a non-virtual, so note both cases.
          flags |= _intrinsic_both;
        }
      } else if ((oflags & _intrinsic_both) == 0) {
        // This is the first use of this intrinsic as a non-virtual
        flags |= _intrinsic_both;
      }
    }
    _intrinsic_hist_flags[id] = (jubyte) (oflags | flags);
  }
  // update the overall flags also:
  _intrinsic_hist_flags[vmIntrinsics::_none] |= (jubyte) flags;
  return changed;
}

static char* format_flags(int flags, char* buf) {
  buf[0] = 0;
  if ((flags & Compile::_intrinsic_worked) != 0)    strcat(buf, ",worked");
  if ((flags & Compile::_intrinsic_failed) != 0)    strcat(buf, ",failed");
  if ((flags & Compile::_intrinsic_disabled) != 0)  strcat(buf, ",disabled");
  if ((flags & Compile::_intrinsic_virtual) != 0)   strcat(buf, ",virtual");
  if ((flags & Compile::_intrinsic_both) != 0)      strcat(buf, ",nonvirtual");
  if (buf[0] == 0)  strcat(buf, ",");
  assert(buf[0] == ',', "must be");
  return &buf[1];
}
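
// Note: format_flags builds the string with a leading comma and returns
// &buf[1] to skip it, so callers see a clean comma-separated list. For
// example, _intrinsic_worked | _intrinsic_virtual formats buf as
// ",worked,virtual" and the caller sees "worked,virtual"; with no bits
// set, the lone "," yields an empty string.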

void Compile::print_intrinsic_statistics() {
  char flagsbuf[100];
  ttyLocker ttyl;
  if (xtty != NULL)  xtty->head("statistics type='intrinsic'");
  tty->print_cr("Compiler intrinsic usage:");
  juint total = _intrinsic_hist_count[vmIntrinsics::_none];
  if (total == 0)  total = 1;  // avoid div0 in case of no successes
  #define PRINT_STAT_LINE(name, c, f) \
    tty->print_cr("  %4d (%4.1f%%) %s (%s)", (int)(c), ((c) * 100.0) / total, name, f);
  for (int index = 1 + (int)vmIntrinsics::_none; index < (int)vmIntrinsics::ID_LIMIT; index++) {
    vmIntrinsics::ID id = (vmIntrinsics::ID) index;
    int   flags = _intrinsic_hist_flags[id];
    juint count = _intrinsic_hist_count[id];
    if ((flags | count) != 0) {
      PRINT_STAT_LINE(vmIntrinsics::name_at(id), count, format_flags(flags, flagsbuf));
    }
  }
  PRINT_STAT_LINE("total", total, format_flags(_intrinsic_hist_flags[vmIntrinsics::_none], flagsbuf));
  if (xtty != NULL)  xtty->tail("statistics");
}

void Compile::print_statistics() {
  { ttyLocker ttyl;
    if (xtty != NULL)  xtty->head("statistics type='opto'");
    Parse::print_statistics();
    PhaseCCP::print_statistics();
    PhaseRegAlloc::print_statistics();
    Scheduling::print_statistics();
    PhasePeephole::print_statistics();
    PhaseIdealLoop::print_statistics();
    if (xtty != NULL)  xtty->tail("statistics");
  }
  if (_intrinsic_hist_flags[vmIntrinsics::_none] != 0) {
    // put this under its own <statistics> element.
    print_intrinsic_statistics();
  }
}
#endif //PRODUCT

// Support for bundling info
Bundle* Compile::node_bundling(const Node *n) {
  assert(valid_bundle_info(n), "oob");
  return &_node_bundling_base[n->_idx];
}

bool Compile::valid_bundle_info(const Node *n) {
  return (_node_bundling_limit > n->_idx);
}


void Compile::gvn_replace_by(Node* n, Node* nn) {
  for (DUIterator_Last imin, i = n->last_outs(imin); i >= imin; ) {
    Node* use = n->last_out(i);
    bool is_in_table = initial_gvn()->hash_delete(use);
    uint uses_found = 0;
    for (uint j = 0; j < use->len(); j++) {
      if (use->in(j) == n) {
        if (j < use->req())
          use->set_req(j, nn);
        else
          use->set_prec(j, nn);
        uses_found++;
      }
    }
    if (is_in_table) {
      // reinsert into table
      initial_gvn()->hash_find_insert(use);
    }
    record_for_igvn(use);
    i -= uses_found;    // we deleted 1 or more copies of this edge
  }
}
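
// Note: gvn_replace_by iterates from the last out-edge downward because
// set_req/set_prec delete out-edges of 'n' as uses are retargeted to 'nn'.
// Decrementing 'i' by 'uses_found' (a use may reference 'n' through more
// than one input slot) keeps the iterator valid as the out array shrinks.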


static inline bool not_a_node(const Node* n) {
  if (n == NULL)                   return true;
  if (((intptr_t)n & 1) != 0)      return true;  // uninitialized, etc.
  if (*(address*)n == badAddress)  return true;  // kill by Node::destruct
  return false;
}
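
// Note: not_a_node is a heap-safety filter rather than a type check:
// nodes are arena-allocated and word-aligned, so a set low bit cannot be
// a valid Node*, and Node::destruct writes badAddress over the first word
// of a freed node, which the last test recognizes. Raw input walks such
// as identify_useful_nodes below use it to skip stale pointers.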

// Identify all nodes that are reachable from below, useful.
// Use a breadth-first pass that records state in a Unique_Node_List;
// recursive traversal is slower.
void Compile::identify_useful_nodes(Unique_Node_List &useful) {
  int estimated_worklist_size = unique();
  useful.map( estimated_worklist_size, NULL );  // preallocate space

  // Initialize worklist
  if (root() != NULL)     { useful.push(root()); }
  // If 'top' is cached, declare it useful to preserve cached node
  if( cached_top_node() ) { useful.push(cached_top_node()); }

  // Push all useful nodes onto the list, breadth-first
  for( uint next = 0; next < useful.size(); ++next ) {
    assert( next < unique(), "Unique useful nodes < total nodes");
    Node *n  = useful.at(next);
    uint max = n->len();
    for( uint i = 0; i < max; ++i ) {
      Node *m = n->in(i);
      if (not_a_node(m))  continue;
      useful.push(m);
    }
  }
}

// Update dead_node_list with any missing dead nodes using the useful
// list. Consider all non-useful nodes to be useless, i.e., dead nodes.
void Compile::update_dead_node_list(Unique_Node_List &useful) {
  uint max_idx = unique();
  VectorSet& useful_node_set = useful.member_set();

  for (uint node_idx = 0; node_idx < max_idx; node_idx++) {
    // If node with index node_idx is not in useful set,
    // mark it as dead in dead node list.
    if (! useful_node_set.test(node_idx) ) {
      record_dead_node(node_idx);
    }
  }
}

void Compile::remove_useless_late_inlines(GrowableArray<CallGenerator*>* inlines, Unique_Node_List &useful) {
  int shift = 0;
  for (int i = 0; i < inlines->length(); i++) {
    CallGenerator* cg = inlines->at(i);
    CallNode* call = cg->call_node();
    if (shift > 0) {
      inlines->at_put(i-shift, cg);
    }
    if (!useful.member(call)) {
      shift++;
    }
  }
  inlines->trunc_to(inlines->length()-shift);
}
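
// Note: the filter above is a stable in-place compaction: 'shift' counts
// dead entries seen so far and each entry moves left by the current shift.
// Illustrative example: with inlines [A, B, C, D] where the call nodes of
// B and C are no longer useful, the loop leaves [A, D, C, D] and
// trunc_to(2) keeps [A, D], preserving the original order.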

// Disconnect all useless nodes by disconnecting those at the boundary.
void Compile::remove_useless_nodes(Unique_Node_List &useful) {
  uint next = 0;
  while (next < useful.size()) {
    Node *n = useful.at(next++);
    if (n->is_SafePoint()) {
      // We're done with a parsing phase. Replaced nodes are not valid
      // beyond that point.
      n->as_SafePoint()->delete_replaced_nodes();
    }
    // Use raw traversal of out edges since this code removes out edges
    int max = n->outcnt();
    for (int j = 0; j < max; ++j) {
      Node* child = n->raw_out(j);
      if (! useful.member(child)) {
        assert(!child->is_top() || child != top(),
               "If top is cached in Compile object it is in useful list");
        // Only need to remove this out-edge to the useless node
        n->raw_del_out(j);
        --j;
        --max;
      }
    }
    if (n->outcnt() == 1 && n->has_special_unique_user()) {
      record_for_igvn(n->unique_out());
    }
  }
  // Remove useless macro and predicate opaq nodes
  for (int i = C->macro_count()-1; i >= 0; i--) {
    Node* n = C->macro_node(i);
    if (!useful.member(n)) {
      remove_macro_node(n);
    }
  }
  // Remove useless expensive nodes
  for (int i = C->expensive_count()-1; i >= 0; i--) {
    Node* n = C->expensive_node(i);
    if (!useful.member(n)) {
      remove_expensive_node(n);
    }
  }
  // clean up the late inline lists
  remove_useless_late_inlines(&_string_late_inlines, useful);
  remove_useless_late_inlines(&_boxing_late_inlines, useful);
  remove_useless_late_inlines(&_late_inlines, useful);
  debug_only(verify_graph_edges(true/*check for no_dead_code*/);)
}

//------------------------------frame_size_in_words-----------------------------
// frame_slots in units of words
int Compile::frame_size_in_words() const {
  // shift is 0 in LP32 and 1 in LP64
  const int shift = (LogBytesPerWord - LogBytesPerInt);
  int words = _frame_slots >> shift;
  assert( words << shift == _frame_slots, "frame size must be properly aligned in LP64" );
  return words;
}
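
// Note: _frame_slots counts 32-bit VMReg stack slots (stack_slot_size is
// four bytes). Worked example: on LP64, shift = LogBytesPerWord -
// LogBytesPerInt = 3 - 2 = 1, so a frame of 14 slots is 14 >> 1 = 7
// words; the assert fires on an odd slot count, i.e., a frame that is
// not word-aligned.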

// To bang the stack of this compiled method we use the stack size
// that the interpreter would need in case of a deoptimization. This
// removes the need to bang the stack in the deoptimization blob which
// in turn simplifies stack overflow handling.
int Compile::bang_size_in_bytes() const {
  return MAX2(_interpreter_frame_size, frame_size_in_bytes());
}

// ============================================================================
//------------------------------CompileWrapper---------------------------------
class CompileWrapper : public StackObj {
  Compile *const _compile;
 public:
  CompileWrapper(Compile* compile);

  ~CompileWrapper();
};

CompileWrapper::CompileWrapper(Compile* compile) : _compile(compile) {
  // the Compile* pointer is stored in the current ciEnv:
  ciEnv* env = compile->env();
  assert(env == ciEnv::current(), "must already be a ciEnv active");
  assert(env->compiler_data() == NULL, "compile already active?");
  env->set_compiler_data(compile);
  assert(compile == Compile::current(), "sanity");

  compile->set_type_dict(NULL);
  compile->set_type_hwm(NULL);
  compile->set_type_last_size(0);
  compile->set_last_tf(NULL, NULL);
  compile->set_indexSet_arena(NULL);
  compile->set_indexSet_free_block_list(NULL);
  compile->init_type_arena();
  Type::Initialize(compile);
  _compile->set_scratch_buffer_blob(NULL);
  _compile->begin_method();
}
CompileWrapper::~CompileWrapper() {
  _compile->end_method();
  if (_compile->scratch_buffer_blob() != NULL)
    BufferBlob::free(_compile->scratch_buffer_blob());
  _compile->env()->set_compiler_data(NULL);
}


//----------------------------print_compile_messages---------------------------
void Compile::print_compile_messages() {
#ifndef PRODUCT
  // Check if recompiling
  if (_subsume_loads == false && PrintOpto) {
    // Recompiling without allowing machine instructions to subsume loads
    tty->print_cr("*********************************************************");
    tty->print_cr("** Bailout: Recompile without subsuming loads          **");
    tty->print_cr("*********************************************************");
  }
  if (_do_escape_analysis != DoEscapeAnalysis && PrintOpto) {
    // Recompiling without escape analysis
    tty->print_cr("*********************************************************");
    tty->print_cr("** Bailout: Recompile without escape analysis          **");
    tty->print_cr("*********************************************************");
  }
  if (_eliminate_boxing != EliminateAutoBox && PrintOpto) {
    // Recompiling without boxing elimination
    tty->print_cr("*********************************************************");
    tty->print_cr("** Bailout: Recompile without boxing elimination       **");
    tty->print_cr("*********************************************************");
  }
  if (env()->break_at_compile()) {
    // Open the debugger when compiling this method.
    tty->print("### Breaking when compiling: ");
    method()->print_short_name();
    tty->cr();
    BREAKPOINT;
  }

  if( PrintOpto ) {
    if (is_osr_compilation()) {
      tty->print("[OSR]%3d", _compile_id);
    } else {
      tty->print("%3d", _compile_id);
    }
  }
#endif
}


//-----------------------init_scratch_buffer_blob------------------------------
// Construct a temporary BufferBlob and cache it for this compile.
void Compile::init_scratch_buffer_blob(int const_size) {
  // If there is already a scratch buffer blob allocated and the
  // constant section is big enough, use it.  Otherwise free the
  // current and allocate a new one.
  BufferBlob* blob = scratch_buffer_blob();
  if ((blob != NULL) && (const_size <= _scratch_const_size)) {
    // Use the current blob.
  } else {
    if (blob != NULL) {
      BufferBlob::free(blob);
    }

    ResourceMark rm;
    _scratch_const_size = const_size;
    int size = (MAX_inst_size + MAX_stubs_size + _scratch_const_size);
    blob = BufferBlob::create("Compile::scratch_buffer", size);
    // Record the buffer blob for next time.
    set_scratch_buffer_blob(blob);
    // Have we run out of code space?
    if (scratch_buffer_blob() == NULL) {
      // Let CompileBroker disable further compilations.
      record_failure("Not enough space for scratch buffer in CodeCache");
      return;
    }
  }

  // Initialize the relocation buffers
  relocInfo* locs_buf = (relocInfo*) blob->content_end() - MAX_locs_size;
  set_scratch_locs_memory(locs_buf);
}
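
// Note: the relocation scratch area is carved from the tail of the same
// blob: locs_buf points MAX_locs_size relocInfo units before content_end().
// scratch_emit_size below treats the start of that area as the end of the
// usable code buffer, so scratch instructions and relocations cannot
// overlap as long as the MAX_inst_size/MAX_stubs_size estimates hold.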


//-----------------------scratch_emit_size-------------------------------------
// Helper function that computes size by emitting code
uint Compile::scratch_emit_size(const Node* n) {
  // Start scratch_emit_size section.
  set_in_scratch_emit_size(true);

  // Emit into a trash buffer and count bytes emitted.
  // This is a pretty expensive way to compute a size,
  // but it works well enough if seldom used.
  // All common fixed-size instructions are given a size
  // method by the AD file.
  // Note that the scratch buffer blob and locs memory are
  // allocated at the beginning of the compile task, and
  // may be shared by several calls to scratch_emit_size.
  // The allocation of the scratch buffer blob is particularly
  // expensive, since it has to grab the code cache lock.
  BufferBlob* blob = this->scratch_buffer_blob();
  assert(blob != NULL, "Initialize BufferBlob at start");
  assert(blob->size() > MAX_inst_size, "sanity");
  relocInfo* locs_buf = scratch_locs_memory();
  address blob_begin = blob->content_begin();
  address blob_end   = (address)locs_buf;
  assert(blob->content_contains(blob_end), "sanity");
  CodeBuffer buf(blob_begin, blob_end - blob_begin);
  buf.initialize_consts_size(_scratch_const_size);
  buf.initialize_stubs_size(MAX_stubs_size);
  assert(locs_buf != NULL, "sanity");
  int lsize = MAX_locs_size / 3;
  buf.consts()->initialize_shared_locs(&locs_buf[lsize * 0], lsize);
  buf.insts()->initialize_shared_locs( &locs_buf[lsize * 1], lsize);
  buf.stubs()->initialize_shared_locs( &locs_buf[lsize * 2], lsize);

  // Do the emission.

  Label fakeL; // Fake label for branch instructions.
  Label*   saveL = NULL;
  uint save_bnum = 0;
  bool is_branch = n->is_MachBranch();
  if (is_branch) {
    MacroAssembler masm(&buf);
    masm.bind(fakeL);
    n->as_MachBranch()->save_label(&saveL, &save_bnum);
    n->as_MachBranch()->label_set(&fakeL, 0);
  }
  n->emit(buf, this->regalloc());
  if (is_branch) // Restore label.
    n->as_MachBranch()->label_set(saveL, save_bnum);

  // End scratch_emit_size section.
  set_in_scratch_emit_size(false);

  return buf.insts_size();
}
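
// Note: branches are sized against 'fakeL' because their real targets are
// not known while sizing; binding the fake label at the buffer start keeps
// the encoding well-defined, and save_label/label_set restore the original
// target afterwards. Apart from the scratch buffer itself the emission is
// intended to be side-effect free, which is why it is bracketed by
// set_in_scratch_emit_size.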


// ============================================================================
//------------------------------Compile standard-------------------------------
debug_only( int Compile::_debug_idx = 100000; )

// Compile a method.  entry_bci is -1 for normal compilations and indicates
// the continuation bci for on stack replacement.


Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr_bci,
                  bool subsume_loads, bool do_escape_analysis, bool eliminate_boxing )
                : Phase(Compiler),
                  _env(ci_env),
                  _log(ci_env->log()),
                  _compile_id(ci_env->compile_id()),
                  _save_argument_registers(false),
                  _stub_name(NULL),
                  _stub_function(NULL),
                  _stub_entry_point(NULL),
                  _method(target),
                  _entry_bci(osr_bci),
                  _initial_gvn(NULL),
                  _for_igvn(NULL),
                  _warm_calls(NULL),
                  _subsume_loads(subsume_loads),
                  _do_escape_analysis(do_escape_analysis),
                  _eliminate_boxing(eliminate_boxing),
                  _failure_reason(NULL),
                  _code_buffer("Compile::Fill_buffer"),
                  _orig_pc_slot(0),
                  _orig_pc_slot_offset_in_bytes(0),
                  _has_method_handle_invokes(false),
                  _mach_constant_base_node(NULL),
                  _node_bundling_limit(0),
                  _node_bundling_base(NULL),
                  _java_calls(0),
                  _inner_loops(0),
                  _scratch_const_size(-1),
                  _in_scratch_emit_size(false),
                  _dead_node_list(comp_arena()),
                  _dead_node_count(0),
#ifndef PRODUCT
                  _trace_opto_output(TraceOptoOutput || method()->has_option("TraceOptoOutput")),
                  _in_dump_cnt(0),
                  _printer(IdealGraphPrinter::printer()),
#endif
                  _congraph(NULL),
                  _replay_inline_data(NULL),
                  _late_inlines(comp_arena(), 2, 0, NULL),
                  _string_late_inlines(comp_arena(), 2, 0, NULL),
                  _boxing_late_inlines(comp_arena(), 2, 0, NULL),
                  _late_inlines_pos(0),
                  _number_of_mh_late_inlines(0),
                  _inlining_progress(false),
                  _inlining_incrementally(false),
                  _print_inlining_list(NULL),
                  _print_inlining_stream(NULL),
                  _print_inlining_idx(0),
                  _print_inlining_output(NULL),
                  _interpreter_frame_size(0) {
  C = this;

  CompileWrapper cw(this);
#ifndef PRODUCT
  if (TimeCompiler2) {
    tty->print(" ");
    target->holder()->name()->print();
    tty->print(".");
    target->print_short_name();
    tty->print("  ");
  }
  TraceTime t1("Total compilation time", &_t_totalCompilation, TimeCompiler, TimeCompiler2);
  TraceTime t2(NULL, &_t_methodCompilation, TimeCompiler, false);
  bool print_opto_assembly = PrintOptoAssembly || _method->has_option("PrintOptoAssembly");
  if (!print_opto_assembly) {
    bool print_assembly = (PrintAssembly || _method->should_print_assembly());
    if (print_assembly && !Disassembler::can_decode()) {
      tty->print_cr("PrintAssembly request changed to PrintOptoAssembly");
      print_opto_assembly = true;
    }
  }
  set_print_assembly(print_opto_assembly);
  set_parsed_irreducible_loop(false);

  if (method()->has_option("ReplayInline")) {
    _replay_inline_data = ciReplay::load_inline_data(method(), entry_bci(), ci_env->comp_level());
  }
#endif
  set_print_inlining(PrintInlining || method()->has_option("PrintInlining") NOT_PRODUCT( || PrintOptoInlining));
  set_print_intrinsics(PrintIntrinsics || method()->has_option("PrintIntrinsics"));
  set_has_irreducible_loop(true); // conservative until build_loop_tree() resets it

  if (ProfileTraps RTM_OPT_ONLY( || UseRTMLocking )) {
    // Make sure the method being compiled gets its own MDO,
    // so we can at least track the decompile_count().
    // Need MDO to record RTM code generation state.
    method()->ensure_method_data();
  }

  Init(::AliasLevel);


  print_compile_messages();

  _ilt = InlineTree::build_inline_tree_root();

  // Even if NO memory addresses are used, MergeMem nodes must have at least 1 slice
  assert(num_alias_types() >= AliasIdxRaw, "");

#define MINIMUM_NODE_HASH  1023
  // Node list that Iterative GVN will start with
  Unique_Node_List for_igvn(comp_arena());
  set_for_igvn(&for_igvn);

  // GVN that will be run immediately on new nodes
  uint estimated_size = method()->code_size()*4+64;
  estimated_size = (estimated_size < MINIMUM_NODE_HASH ? MINIMUM_NODE_HASH : estimated_size);
  PhaseGVN gvn(node_arena(), estimated_size);
  set_initial_gvn(&gvn);

  print_inlining_init();
  { // Scope for timing the parser
    TracePhase t3("parse", &_t_parser, true);

    // Put top into the hash table ASAP.
    initial_gvn()->transform_no_reclaim(top());

    // Set up tf(), start(), and find a CallGenerator.
    CallGenerator* cg = NULL;
    if (is_osr_compilation()) {
      const TypeTuple *domain = StartOSRNode::osr_domain();
      const TypeTuple *range = TypeTuple::make_range(method()->signature());
      init_tf(TypeFunc::make(domain, range));
      StartNode* s = new StartOSRNode(root(), domain);
      initial_gvn()->set_type_bottom(s);
      init_start(s);
      cg = CallGenerator::for_osr(method(), entry_bci());
    } else {
      // Normal case.
      init_tf(TypeFunc::make(method()));
      StartNode* s = new StartNode(root(), tf()->domain());
      initial_gvn()->set_type_bottom(s);
      init_start(s);
      if (method()->intrinsic_id() == vmIntrinsics::_Reference_get && UseG1GC) {
        // With java.lang.ref.Reference.get() we must go through the
        // intrinsic when G1 is enabled - even when get() is the root
        // method of the compile - so that, if necessary, the value in
        // the referent field of the reference object gets recorded by
        // the pre-barrier code.
        // Specifically, if G1 is enabled, the value in the referent
        // field is recorded by the G1 SATB pre-barrier. This will
        // result in the referent being marked live and the reference
        // object removed from the list of discovered references during
        // reference processing.
        cg = find_intrinsic(method(), false);
      }
      if (cg == NULL) {
        float past_uses = method()->interpreter_invocation_count();
        float expected_uses = past_uses;
        cg = CallGenerator::for_inline(method(), expected_uses);
      }
    }
    if (failing())  return;
    if (cg == NULL) {
      record_method_not_compilable_all_tiers("cannot parse method");
      return;
    }
    JVMState* jvms = build_start_state(start(), tf());
    if ((jvms = cg->generate(jvms)) == NULL) {
      record_method_not_compilable("method parse failed");
      return;
    }
    GraphKit kit(jvms);

    if (!kit.stopped()) {
      // Accept return values, and transfer control we know not where.
      // This is done by a special, unique ReturnNode bound to root.
      return_values(kit.jvms());
    }

    if (kit.has_exceptions()) {
      // Any exceptions that escape from this call must be rethrown
      // to whatever caller is dynamically above us on the stack.
      // This is done by a special, unique RethrowNode bound to root.
      rethrow_exceptions(kit.transfer_exceptions_into_jvms());
    }

    assert(IncrementalInline || (_late_inlines.length() == 0 && !has_mh_late_inlines()), "incremental inlining is off");

    if (_late_inlines.length() == 0 && !has_mh_late_inlines() && !failing() && has_stringbuilder()) {
      inline_string_calls(true);
    }

    if (failing())  return;

    print_method(PHASE_BEFORE_REMOVEUSELESS, 3);

    // Remove clutter produced by parsing.
    if (!failing()) {
      ResourceMark rm;
      PhaseRemoveUseless pru(initial_gvn(), &for_igvn);
    }
  }

  // Note:  Large methods are capped off in do_one_bytecode().
  if (failing())  return;

  // After parsing, node notes are no longer automagic.
  // They must be propagated by register_new_node_with_optimizer(),
  // clone(), or the like.
  set_default_node_notes(NULL);

  for (;;) {
    int successes = Inline_Warm();
    if (failing())  return;
    if (successes == 0)  break;
  }

  // Drain the list.
  Finish_Warm();
#ifndef PRODUCT
  if (_printer) {
    _printer->print_inlining(this);
  }
#endif

  if (failing())  return;
  NOT_PRODUCT( verify_graph_edges(); )

  // Now optimize
  Optimize();
  if (failing())  return;
  NOT_PRODUCT( verify_graph_edges(); )

#ifndef PRODUCT
  if (PrintIdeal) {
    ttyLocker ttyl;  // keep the following output all in one block
    // This output goes directly to the tty, not the compiler log.
    // To enable tools to match it up with the compilation activity,
    // be sure to tag this tty output with the compile ID.
    if (xtty != NULL) {
      xtty->head("ideal compile_id='%d'%s", compile_id(),
                 is_osr_compilation()    ? " compile_kind='osr'" :
                 "");
    }
    root()->dump(9999);
    if (xtty != NULL) {
      xtty->tail("ideal");
    }
  }
#endif

  NOT_PRODUCT( verify_barriers(); )

  // Dump compilation data to replay it.
  if (method()->has_option("DumpReplay")) {
    env()->dump_replay_data(_compile_id);
  }
  if (method()->has_option("DumpInline") && (ilt() != NULL)) {
    env()->dump_inline_data(_compile_id);
  }

  // Now that we know the size of all the monitors we can add a fixed slot
  // for the original deopt pc.

  _orig_pc_slot =  fixed_slots();
  int next_slot = _orig_pc_slot + (sizeof(address) / VMRegImpl::stack_slot_size);
  set_fixed_slots(next_slot);

  // Compute when to use implicit null checks. Used by matching trap based
  // nodes and NullCheck optimization.
  set_allowed_deopt_reasons();

  // Now generate code
  Code_Gen();
  if (failing())  return;

  // Check if we want to skip execution of all compiled code.
  {
#ifndef PRODUCT
    if (OptoNoExecute) {
      record_method_not_compilable("+OptoNoExecute");  // Flag as failed
      return;
    }
    TracePhase t2("install_code", &_t_registerMethod, TimeCompiler);
#endif

    if (is_osr_compilation()) {
      _code_offsets.set_value(CodeOffsets::Verified_Entry, 0);
      _code_offsets.set_value(CodeOffsets::OSR_Entry, _first_block_size);
    } else {
      _code_offsets.set_value(CodeOffsets::Verified_Entry, _first_block_size);
      _code_offsets.set_value(CodeOffsets::OSR_Entry, 0);
    }

    env()->register_method(_method, _entry_bci,
                           &_code_offsets,
                           _orig_pc_slot_offset_in_bytes,
                           code_buffer(),
                           frame_size_in_words(), _oop_map_set,
                           &_handler_table, &_inc_table,
                           compiler,
                           env()->comp_level(),
                           has_unsafe_access(),
                           SharedRuntime::is_wide_vector(max_vector_size()),
                           rtm_state()
                           );

    if (log() != NULL) // Print code cache state into compiler log
      log()->code_cache_state();
  }
}

//------------------------------Compile----------------------------------------
// Compile a runtime stub
Compile::Compile( ciEnv* ci_env,
                  TypeFunc_generator generator,
                  address stub_function,
                  const char *stub_name,
                  int is_fancy_jump,
                  bool pass_tls,
                  bool save_arg_registers,
                  bool return_pc )
  : Phase(Compiler),
    _env(ci_env),
    _log(ci_env->log()),
    _compile_id(0),
    _save_argument_registers(save_arg_registers),
    _method(NULL),
    _stub_name(stub_name),
    _stub_function(stub_function),
    _stub_entry_point(NULL),
    _entry_bci(InvocationEntryBci),
    _initial_gvn(NULL),
    _for_igvn(NULL),
    _warm_calls(NULL),
    _orig_pc_slot(0),
    _orig_pc_slot_offset_in_bytes(0),
    _subsume_loads(true),
    _do_escape_analysis(false),
    _eliminate_boxing(false),
    _failure_reason(NULL),
    _code_buffer("Compile::Fill_buffer"),
    _has_method_handle_invokes(false),
    _mach_constant_base_node(NULL),
    _node_bundling_limit(0),
    _node_bundling_base(NULL),
    _java_calls(0),
    _inner_loops(0),
#ifndef PRODUCT
    _trace_opto_output(TraceOptoOutput),
    _in_dump_cnt(0),
    _printer(NULL),
#endif
    _dead_node_list(comp_arena()),
    _dead_node_count(0),
    _congraph(NULL),
    _replay_inline_data(NULL),
    _number_of_mh_late_inlines(0),
    _inlining_progress(false),
    _inlining_incrementally(false),
    _print_inlining_list(NULL),
    _print_inlining_stream(NULL),
    _print_inlining_idx(0),
    _print_inlining_output(NULL),
    _allowed_reasons(0),
    _interpreter_frame_size(0) {
  C = this;

#ifndef PRODUCT
  TraceTime t1(NULL, &_t_totalCompilation, TimeCompiler, false);
  TraceTime t2(NULL, &_t_stubCompilation, TimeCompiler, false);
  set_print_assembly(PrintFrameConverterAssembly);
  set_parsed_irreducible_loop(false);
#endif
  set_has_irreducible_loop(false); // no loops

  CompileWrapper cw(this);
  Init(/*AliasLevel=*/ 0);
  init_tf((*generator)());

  {
    // The following is a dummy for the sake of GraphKit::gen_stub
    Unique_Node_List for_igvn(comp_arena());
    set_for_igvn(&for_igvn);  // not used, but some GraphKit guys push on this
    PhaseGVN gvn(Thread::current()->resource_area(),255);
    set_initial_gvn(&gvn);    // not significant, but GraphKit guys use it pervasively
    gvn.transform_no_reclaim(top());

    GraphKit kit;
    kit.gen_stub(stub_function, stub_name, is_fancy_jump, pass_tls, return_pc);
  }

  NOT_PRODUCT( verify_graph_edges(); )
  Code_Gen();
  if (failing())  return;


  // Entry point will be accessed using compile->stub_entry_point();
  if (code_buffer() == NULL) {
    Matcher::soft_match_failure();
  } else {
    if (PrintAssembly && (WizardMode || Verbose))
      tty->print_cr("### Stub::%s", stub_name);

    if (!failing()) {
      assert(_fixed_slots == 0, "no fixed slots used for runtime stubs");

      // Make the NMethod
      // For now we mark the frame as never safe for profile stackwalking
      RuntimeStub *rs = RuntimeStub::new_runtime_stub(stub_name,
                                                      code_buffer(),
                                                      CodeOffsets::frame_never_safe,
                                                      // _code_offsets.value(CodeOffsets::Frame_Complete),
                                                      frame_size_in_words(),
                                                      _oop_map_set,
                                                      save_arg_registers);
      assert(rs != NULL && rs->is_runtime_stub(), "sanity check");

      _stub_entry_point = rs->entry_point();
    }
  }
}

//------------------------------Init-------------------------------------------
// Prepare for a single compilation
void Compile::Init(int aliaslevel) {
  _unique  = 0;
  _regalloc = NULL;

  _tf      = NULL;  // filled in later
  _top     = NULL;  // cached later
  _matcher = NULL;  // filled in later
  _cfg     = NULL;  // filled in later

  set_24_bit_selection_and_mode(Use24BitFP, false);

  _node_note_array = NULL;
  _default_node_notes = NULL;

  _immutable_memory = NULL; // filled in at first inquiry

  // Globally visible Nodes
  // First set TOP to NULL to give safe behavior during creation of RootNode
  set_cached_top_node(NULL);
  set_root(new RootNode());
  // Now that you have a Root to point to, create the real TOP
  set_cached_top_node( new ConNode(Type::TOP) );
  set_recent_alloc(NULL, NULL);

  // Create Debug Information Recorder to record scopes, oopmaps, etc.
  env()->set_oop_recorder(new OopRecorder(env()->arena()));
  env()->set_debug_info(new DebugInformationRecorder(env()->oop_recorder()));
  env()->set_dependencies(new Dependencies(env()));

  _fixed_slots = 0;
  set_has_split_ifs(false);
  set_has_loops(has_method() && method()->has_loops()); // first approximation
  set_has_stringbuilder(false);
  set_has_boxed_value(false);
  _trap_can_recompile = false;  // no traps emitted yet
  _major_progress = true; // start out assuming good things will happen
  set_has_unsafe_access(false);
  set_max_vector_size(0);
  Copy::zero_to_bytes(_trap_hist, sizeof(_trap_hist));
  set_decompile_count(0);

  set_do_freq_based_layout(BlockLayoutByFrequency || method_has_option("BlockLayoutByFrequency"));
  set_num_loop_opts(LoopOptsCount);
  set_do_inlining(Inline);
  set_max_inline_size(MaxInlineSize);
  set_freq_inline_size(FreqInlineSize);
  set_do_scheduling(OptoScheduling);
  set_do_count_invocations(false);
  set_do_method_data_update(false);
  set_age_code(has_method() && method()->profile_aging());
  set_rtm_state(NoRTM); // No RTM lock eliding by default
#if INCLUDE_RTM_OPT
  if (UseRTMLocking && has_method() && (method()->method_data_or_null() != NULL)) {
    int rtm_state = method()->method_data()->rtm_state();
    if (method_has_option("NoRTMLockEliding") || ((rtm_state & NoRTM) != 0)) {
      // Don't generate RTM lock eliding code.
      set_rtm_state(NoRTM);
    } else if (method_has_option("UseRTMLockEliding") || ((rtm_state & UseRTM) != 0) || !UseRTMDeopt) {
      // Generate RTM lock eliding code without abort ratio calculation code.
      set_rtm_state(UseRTM);
    } else if (UseRTMDeopt) {
      // Generate RTM lock eliding code and include abort ratio calculation
      // code if UseRTMDeopt is on.
      set_rtm_state(ProfileRTM);
    }
  }
#endif
  if (debug_info()->recording_non_safepoints()) {
    set_node_note_array(new(comp_arena()) GrowableArray<Node_Notes*>
                        (comp_arena(), 8, 0, NULL));
    set_default_node_notes(Node_Notes::make(this));
  }

  // // -- Initialize types before each compile --
  // // Update cached type information
  // if( _method && _method->constants() )
  //   Type::update_loaded_types(_method, _method->constants());

  // Init alias_type map.
  if (!_do_escape_analysis && aliaslevel == 3)
    aliaslevel = 2;  // No unique types without escape analysis
  _AliasLevel = aliaslevel;
  const int grow_ats = 16;
  _max_alias_types = grow_ats;
  _alias_types   = NEW_ARENA_ARRAY(comp_arena(), AliasType*, grow_ats);
  AliasType* ats = NEW_ARENA_ARRAY(comp_arena(), AliasType,  grow_ats);
  Copy::zero_to_bytes(ats, sizeof(AliasType)*grow_ats);
  {
    for (int i = 0; i < grow_ats; i++)  _alias_types[i] = &ats[i];
  }
  // Initialize the first few types.
  _alias_types[AliasIdxTop]->Init(AliasIdxTop, NULL);
  _alias_types[AliasIdxBot]->Init(AliasIdxBot, TypePtr::BOTTOM);
  _alias_types[AliasIdxRaw]->Init(AliasIdxRaw, TypeRawPtr::BOTTOM);
  _num_alias_types = AliasIdxRaw+1;
  // Zero out the alias type cache.
  Copy::zero_to_bytes(_alias_cache, sizeof(_alias_cache));
  // A NULL adr_type hits in the cache right away.  Preload the right answer.
  probe_alias_cache(NULL)->_index = AliasIdxTop;

  _intrinsics = NULL;
  _macro_nodes = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
  _predicate_opaqs = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
  _expensive_nodes = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
  register_library_intrinsics();
}

//---------------------------init_start----------------------------------------
// Install the StartNode on this compile object.
void Compile::init_start(StartNode* s) {
  if (failing())
    return; // already failing
  assert(s == start(), "");
}

StartNode* Compile::start() const {
  assert(!failing(), "");
  for (DUIterator_Fast imax, i = root()->fast_outs(imax); i < imax; i++) {
    Node* start = root()->fast_out(i);
    if( start->is_Start() )
      return start->as_Start();
  }
  fatal("Did not find Start node!");
  return NULL;
}

//-------------------------------immutable_memory-------------------------------------
// Access immutable memory
Node* Compile::immutable_memory() {
  if (_immutable_memory != NULL) {
    return _immutable_memory;
  }
  StartNode* s = start();
  for (DUIterator_Fast imax, i = s->fast_outs(imax); true; i++) {
    Node *p = s->fast_out(i);
    if (p != s && p->as_Proj()->_con == TypeFunc::Memory) {
      _immutable_memory = p;
      return _immutable_memory;
    }
  }
  ShouldNotReachHere();
  return NULL;
}

//----------------------set_cached_top_node------------------------------------
// Install the cached top node, and make sure Node::is_top works correctly.
void Compile::set_cached_top_node(Node* tn) {
  if (tn != NULL)  verify_top(tn);
  Node* old_top = _top;
  _top = tn;
  // Calling Node::setup_is_top allows the nodes the chance to adjust
  // their _out arrays.
  if (_top != NULL)     _top->setup_is_top();
  if (old_top != NULL)  old_top->setup_is_top();
  assert(_top == NULL || top()->is_top(), "");
}

#ifdef ASSERT
uint Compile::count_live_nodes_by_graph_walk() {
  Unique_Node_List useful(comp_arena());
  // Get useful node list by walking the graph.
  identify_useful_nodes(useful);
  return useful.size();
}

void Compile::print_missing_nodes() {

  // Return if CompileLog is NULL and PrintIdealNodeCount is false.
  if ((_log == NULL) && (! PrintIdealNodeCount)) {
    return;
  }

  // This is an expensive function. It is executed only when the user
  // specifies the VerifyIdealNodeCount option or otherwise knows that
  // additional work needs to be done to identify reachable nodes by
  // walking the flow graph and to find the missing ones using
  // _dead_node_list.

  Unique_Node_List useful(comp_arena());
  // Get useful node list by walking the graph.
  identify_useful_nodes(useful);

  uint l_nodes = C->live_nodes();
  uint l_nodes_by_walk = useful.size();

  if (l_nodes != l_nodes_by_walk) {
    if (_log != NULL) {
      _log->begin_head("mismatched_nodes count='%d'", abs((int) (l_nodes - l_nodes_by_walk)));
      _log->stamp();
      _log->end_head();
    }
    VectorSet& useful_member_set = useful.member_set();
    int last_idx = l_nodes_by_walk;
    for (int i = 0; i < last_idx; i++) {
      if (useful_member_set.test(i)) {
        if (_dead_node_list.test(i)) {
          if (_log != NULL) {
            _log->elem("mismatched_node_info node_idx='%d' type='both live and dead'", i);
          }
          if (PrintIdealNodeCount) {
            // Print the log message to tty
            tty->print_cr("mismatched_node idx='%d' type='both live and dead'", i);
            useful.at(i)->dump();
          }
        }
      }
      else if (! _dead_node_list.test(i)) {
        if (_log != NULL) {
          _log->elem("mismatched_node_info node_idx='%d' type='neither live nor dead'", i);
        }
        if (PrintIdealNodeCount) {
          // Print the log message to tty
          tty->print_cr("mismatched_node idx='%d' type='neither live nor dead'", i);
        }
      }
    }
    if (_log != NULL) {
      _log->tail("mismatched_nodes");
    }
  }
}
#endif

#ifndef PRODUCT
void Compile::verify_top(Node* tn) const {
  if (tn != NULL) {
    assert(tn->is_Con(), "top node must be a constant");
    assert(((ConNode*)tn)->type() == Type::TOP, "top node must have correct type");
    assert(tn->in(0) != NULL, "must have live top node");
  }
}
#endif


///-------------------Managing Per-Node Debug & Profile Info-------------------

void Compile::grow_node_notes(GrowableArray<Node_Notes*>* arr, int grow_by) {
  guarantee(arr != NULL, "");
  int num_blocks = arr->length();
  if (grow_by < num_blocks)  grow_by = num_blocks;
  int num_notes = grow_by * _node_notes_block_size;
  Node_Notes* notes = NEW_ARENA_ARRAY(node_arena(), Node_Notes, num_notes);
  Copy::zero_to_bytes(notes, num_notes * sizeof(Node_Notes));
  while (num_notes > 0) {
    arr->append(notes);
    notes     += _node_notes_block_size;
    num_notes -= _node_notes_block_size;
  }
  assert(num_notes == 0, "exact multiple, please");
}
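
// Note: node notes are handed out in fixed blocks of _node_notes_block_size
// entries, and 'grow_by' is bumped to at least the current block count, so
// repeated growth at least doubles the array (amortized O(1) appends).
// Illustrative example (assuming a block size of 256): with 3 existing
// blocks, a request of grow_by = 1 allocates 3 blocks, i.e., 768 zeroed notes.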

bool Compile::copy_node_notes_to(Node* dest, Node* source) {
  if (source == NULL || dest == NULL)  return false;

  if (dest->is_Con())
    return false;               // Do not push debug info onto constants.

#ifdef ASSERT
  // Leave a bread crumb trail pointing to the original node:
  if (dest != NULL && dest != source && dest->debug_orig() == NULL) {
    dest->set_debug_orig(source);
  }
#endif

  if (node_note_array() == NULL)
    return false;               // Not collecting any notes now.

  // This is a copy onto a pre-existing node, which may already have notes.
  // If both nodes have notes, do not overwrite any pre-existing notes.
  Node_Notes* source_notes = node_notes_at(source->_idx);
  if (source_notes == NULL || source_notes->is_clear())  return false;
  Node_Notes* dest_notes   = node_notes_at(dest->_idx);
  if (dest_notes == NULL || dest_notes->is_clear()) {
    return set_node_notes_at(dest->_idx, source_notes);
  }

  Node_Notes merged_notes = (*source_notes);
  // The order of operations here ensures that dest notes will win...
  merged_notes.update_from(dest_notes);
  return set_node_notes_at(dest->_idx, &merged_notes);
}


//--------------------------allow_range_check_smearing-------------------------
// Gating condition for coalescing similar range checks.
// Sometimes we try 'speculatively' replacing a series of range checks by a
// single covering check that is at least as strong as any of them.
// If the optimization succeeds, the simplified (strengthened) range check
// will always succeed.  If it fails, we will deopt, and then give up
// on the optimization.
bool Compile::allow_range_check_smearing() const {
  // If this method has already thrown a range-check,
  // assume it was because we already tried range smearing
  // and it failed.
  uint already_trapped = trap_count(Deoptimization::Reason_range_check);
  return !already_trapped;
}
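
// Note (illustrative): smearing may replace dependent checks for a[i],
// a[i+1], a[i+2] with covering checks on the extreme offsets; if the
// covering check ever deopts with Reason_range_check, the trap count
// consulted above becomes non-zero and the next compilation of this
// method keeps the range checks separate.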


//------------------------------flatten_alias_type-----------------------------
const TypePtr *Compile::flatten_alias_type( const TypePtr *tj ) const {
  int offset = tj->offset();
  TypePtr::PTR ptr = tj->ptr();

  // Known instance (scalarizable allocation) alias only with itself.
  bool is_known_inst = tj->isa_oopptr() != NULL &&
                       tj->is_oopptr()->is_known_instance();

  // Process weird unsafe references.
  if (offset == Type::OffsetBot && (tj->isa_instptr() /*|| tj->isa_klassptr()*/)) {
    assert(InlineUnsafeOps, "indeterminate pointers come only from unsafe ops");
    assert(!is_known_inst, "scalarizable allocation should not have unsafe references");
    tj = TypeOopPtr::BOTTOM;
    ptr = tj->ptr();
    offset = tj->offset();
  }

  // Array pointers need some flattening
  const TypeAryPtr *ta = tj->isa_aryptr();
  if (ta && ta->is_stable()) {
    // Erase stability property for alias analysis.
    tj = ta = ta->cast_to_stable(false);
  }
  if( ta && is_known_inst ) {
    if ( offset != Type::OffsetBot &&
         offset > arrayOopDesc::length_offset_in_bytes() ) {
      offset = Type::OffsetBot; // Flatten constant access into array body only
      tj = ta = TypeAryPtr::make(ptr, ta->ary(), ta->klass(), true, offset, ta->instance_id());
    }
  } else if( ta && _AliasLevel >= 2 ) {
    // For arrays indexed by constant indices, we flatten the alias
    // space to include all of the array body.  Only the header, klass
    // and array length can be accessed un-aliased.
    if( offset != Type::OffsetBot ) {
      if( ta->const_oop() ) { // MethodData* or Method*
        offset = Type::OffsetBot;   // Flatten constant access into array body
        tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),ta->ary(),ta->klass(),false,offset);
      } else if( offset == arrayOopDesc::length_offset_in_bytes() ) {
        // range is OK as-is.
        tj = ta = TypeAryPtr::RANGE;
      } else if( offset == oopDesc::klass_offset_in_bytes() ) {
        tj = TypeInstPtr::KLASS; // all klass loads look alike
        ta = TypeAryPtr::RANGE; // generic ignored junk
        ptr = TypePtr::BotPTR;
      } else if( offset == oopDesc::mark_offset_in_bytes() ) {
        tj = TypeInstPtr::MARK;
        ta = TypeAryPtr::RANGE; // generic ignored junk
        ptr = TypePtr::BotPTR;
      } else {                  // Random constant offset into array body
        offset = Type::OffsetBot;   // Flatten constant access into array body
        tj = ta = TypeAryPtr::make(ptr,ta->ary(),ta->klass(),false,offset);
      }
    }
    // Arrays of fixed size alias with arrays of unknown size.
    if (ta->size() != TypeInt::POS) {
      const TypeAry *tary = TypeAry::make(ta->elem(), TypeInt::POS);
      tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,ta->klass(),false,offset);
    }
    // Arrays of known objects become arrays of unknown objects.
    if (ta->elem()->isa_narrowoop() && ta->elem() != TypeNarrowOop::BOTTOM) {
      const TypeAry *tary = TypeAry::make(TypeNarrowOop::BOTTOM, ta->size());
      tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,offset);
    }
    if (ta->elem()->isa_oopptr() && ta->elem() != TypeInstPtr::BOTTOM) {
      const TypeAry *tary = TypeAry::make(TypeInstPtr::BOTTOM, ta->size());
      tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,offset);
    }
    // Arrays of bytes and of booleans both use 'bastore' and 'baload' so
    // cannot be distinguished by bytecode alone.
    if (ta->elem() == TypeInt::BOOL) {
      const TypeAry *tary = TypeAry::make(TypeInt::BYTE, ta->size());
      ciKlass* aklass = ciTypeArrayKlass::make(T_BYTE);
      tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,aklass,false,offset);
    }
    // During the 2nd round of IterGVN, NotNull castings are removed.
    // Make sure the Bottom and NotNull variants alias the same.
    // Also, make sure exact and non-exact variants alias the same.
    if (ptr == TypePtr::NotNull || ta->klass_is_exact() || ta->speculative() != NULL) {
      tj = ta = TypeAryPtr::make(TypePtr::BotPTR,ta->ary(),ta->klass(),false,offset);
    }
  }

  // Oop pointers need some flattening
  const TypeInstPtr *to = tj->isa_instptr();
  if( to && _AliasLevel >= 2 && to != TypeOopPtr::BOTTOM ) {
    ciInstanceKlass *k = to->klass()->as_instance_klass();
    if( ptr == TypePtr::Constant ) {
      if (to->klass() != ciEnv::current()->Class_klass() ||
          offset < k->size_helper() * wordSize) {
        // No constant oop pointers (such as Strings); they alias with
        // unknown strings.
        assert(!is_known_inst, "not scalarizable allocation");
        tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,offset);
      }
    } else if( is_known_inst ) {
      tj = to; // Keep NotNull and klass_is_exact for instance type
    } else if( ptr == TypePtr::NotNull || to->klass_is_exact() ) {
      // During the 2nd round of IterGVN, NotNull castings are removed.
      // Make sure the Bottom and NotNull variants alias the same.
      // Also, make sure exact and non-exact variants alias the same.
      tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,offset);
    }
    if (to->speculative() != NULL) {
      tj = to = TypeInstPtr::make(to->ptr(),to->klass(),to->klass_is_exact(),to->const_oop(),to->offset(), to->instance_id());
    }
    // Canonicalize the holder of this field
    if (offset >= 0 && offset < instanceOopDesc::base_offset_in_bytes()) {
      // First handle header references such as a LoadKlassNode, even if the
      // object's klass is unloaded at compile time (4965979).
      if (!is_known_inst) { // Do it only for non-instance types
        tj = to = TypeInstPtr::make(TypePtr::BotPTR, env()->Object_klass(), false, NULL, offset);
      }
    } else if (offset < 0 || offset >= k->size_helper() * wordSize) {
1440       // Static fields are in the space above the normal instance
1441       // fields in the java.lang.Class instance.
1442       if (to->klass() != ciEnv::current()->Class_klass()) {
1443         to = NULL;
1444         tj = TypeOopPtr::BOTTOM;
1445         offset = tj->offset();
1446       }
1447     } else {
1448       ciInstanceKlass *canonical_holder = k->get_canonical_holder(offset);
1449       if (!k->equals(canonical_holder) || tj->offset() != offset) {
1450         if( is_known_inst ) {
1451           tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, true, NULL, offset, to->instance_id());
1452         } else {
1453           tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, false, NULL, offset);
1454         }
1455       }
1456     }
1457   }
1458 
1459   // Klass pointers to object array klasses need some flattening
1460   const TypeKlassPtr *tk = tj->isa_klassptr();
1461   if( tk ) {
1462     // If we are referencing a field within a Klass, we need
1463     // to assume the worst case of an Object.  Both exact and
1464     // inexact types must flatten to the same alias class so
1465     // use NotNull as the PTR.
1466     if ( offset == Type::OffsetBot || (offset >= 0 && (size_t)offset < sizeof(Klass)) ) {
1467 
1468       tj = tk = TypeKlassPtr::make(TypePtr::NotNull,
1469                                    TypeKlassPtr::OBJECT->klass(),
1470                                    offset);
1471     }
1472 
1473     ciKlass* klass = tk->klass();
1474     if( klass->is_obj_array_klass() ) {
1475       ciKlass* k = TypeAryPtr::OOPS->klass();
1476       if( !k || !k->is_loaded() )                  // Only fails for some -Xcomp runs
1477         k = TypeInstPtr::BOTTOM->klass();
1478       tj = tk = TypeKlassPtr::make( TypePtr::NotNull, k, offset );
1479     }
1480 
1481     // Check for precise loads from the primary supertype array and force them
1482     // to the supertype cache alias index.  Check for generic array loads from
1483     // the primary supertype array and also force them to the supertype cache
1484     // alias index.  Since the same load can reach both, we need to merge
1485     // these 2 disparate memories into the same alias class.  Since the
1486     // primary supertype array is read-only, there's no chance of confusion
1487     // where we bypass an array load and an array store.
1488     int primary_supers_offset = in_bytes(Klass::primary_supers_offset());
1489     if (offset == Type::OffsetBot ||
1490         (offset >= primary_supers_offset &&
1491          offset < (int)(primary_supers_offset + Klass::primary_super_limit() * wordSize)) ||
1492         offset == (int)in_bytes(Klass::secondary_super_cache_offset())) {
1493       offset = in_bytes(Klass::secondary_super_cache_offset());
1494       tj = tk = TypeKlassPtr::make( TypePtr::NotNull, tk->klass(), offset );
1495     }
1496   }
1497 
1498   // Flatten all Raw pointers together.
1499   if (tj->base() == Type::RawPtr)
1500     tj = TypeRawPtr::BOTTOM;
1501 
1502   if (tj->base() == Type::AnyPtr)
1503     tj = TypePtr::BOTTOM;      // An error, which the caller must check for.
1504 
1505   // Flatten all to bottom for now
1506   switch( _AliasLevel ) {
1507   case 0:
1508     tj = TypePtr::BOTTOM;
1509     break;
1510   case 1:                       // Flatten to: oop, static, field or array
1511     switch (tj->base()) {
1512     //case Type::AryPtr: tj = TypeAryPtr::RANGE;    break;
1513     case Type::RawPtr:   tj = TypeRawPtr::BOTTOM;   break;
1514     case Type::AryPtr:   // do not distinguish arrays at all
1515     case Type::InstPtr:  tj = TypeInstPtr::BOTTOM;  break;
1516     case Type::KlassPtr: tj = TypeKlassPtr::OBJECT; break;
1517     case Type::AnyPtr:   tj = TypePtr::BOTTOM;      break;  // caller checks it
1518     default: ShouldNotReachHere();
1519     }
1520     break;
1521   case 2:                       // No collapsing at level 2; keep all splits
1522   case 3:                       // No collapsing at level 3; keep all splits
1523     break;
1524   default:
1525     Unimplemented();
1526   }
1527 
1528   offset = tj->offset();
1529   assert( offset != Type::OffsetTop, "Offset has fallen from constant" );
1530 
1531   assert( (offset != Type::OffsetBot && tj->base() != Type::AryPtr) ||
1532           (offset == Type::OffsetBot && tj->base() == Type::AryPtr) ||
1533           (offset == Type::OffsetBot && tj == TypeOopPtr::BOTTOM) ||
1534           (offset == Type::OffsetBot && tj == TypePtr::BOTTOM) ||
1535           (offset == oopDesc::mark_offset_in_bytes() && tj->base() == Type::AryPtr) ||
1536           (offset == oopDesc::klass_offset_in_bytes() && tj->base() == Type::AryPtr) ||
1537           (offset == arrayOopDesc::length_offset_in_bytes() && tj->base() == Type::AryPtr)  ,
1538           "For oops, klasses, raw offset must be constant; for arrays the offset is never known" );
1539   assert( tj->ptr() != TypePtr::TopPTR &&
1540           tj->ptr() != TypePtr::AnyNull &&
1541           tj->ptr() != TypePtr::Null, "No imprecise addresses" );
1542 //    assert( tj->ptr() != TypePtr::Constant ||
1543 //            tj->base() == Type::RawPtr ||
1544 //            tj->base() == Type::KlassPtr, "No constant oop addresses" );
1545 
1546   return tj;
1547 }
1548 
1549 void Compile::AliasType::Init(int i, const TypePtr* at) {
1550   _index = i;
1551   _adr_type = at;
1552   _field = NULL;
1553   _element = NULL;
1554   _is_rewritable = true; // default
1555   const TypeOopPtr *atoop = (at != NULL) ? at->isa_oopptr() : NULL;
1556   if (atoop != NULL && atoop->is_known_instance()) {
1557     const TypeOopPtr *gt = atoop->cast_to_instance_id(TypeOopPtr::InstanceBot);
1558     _general_index = Compile::current()->get_alias_index(gt);
1559   } else {
1560     _general_index = 0;
1561   }
1562 }
1563 
1564 //---------------------------------print_on------------------------------------
1565 #ifndef PRODUCT
1566 void Compile::AliasType::print_on(outputStream* st) {
1567   if (index() < 10)
1568         st->print("@ <%d> ", index());
1569   else  st->print("@ <%d>",  index());
1570   st->print(is_rewritable() ? "   " : " RO");
1571   int offset = adr_type()->offset();
1572   if (offset == Type::OffsetBot)
1573         st->print(" +any");
1574   else  st->print(" +%-3d", offset);
1575   st->print(" in ");
1576   adr_type()->dump_on(st);
1577   const TypeOopPtr* tjp = adr_type()->isa_oopptr();
1578   if (field() != NULL && tjp) {
1579     if (tjp->klass()  != field()->holder() ||
1580         tjp->offset() != field()->offset_in_bytes()) {
1581       st->print(" != ");
1582       field()->print();
1583       st->print(" ***");
1584     }
1585   }
1586 }
1587 
1588 void print_alias_types() {
1589   Compile* C = Compile::current();
1590   tty->print_cr("--- Alias types, AliasIdxBot .. %d", C->num_alias_types()-1);
1591   for (int idx = Compile::AliasIdxBot; idx < C->num_alias_types(); idx++) {
1592     C->alias_type(idx)->print_on(tty);
1593     tty->cr();
1594   }
1595 }
1596 #endif
1597 
1598 
1599 //----------------------------probe_alias_cache--------------------------------
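     // The alias cache is a small direct-mapped table keyed on the address of
     // the TypePtr itself: the pointer bits are folded with an xor-shift and
     // masked down to logAliasCacheSize bits to select a slot.  Each slot
     // holds a single (adr_type, index) pair, so a colliding probe simply
     // misses and falls back to the slow path in find_alias_type.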
1600 Compile::AliasCacheEntry* Compile::probe_alias_cache(const TypePtr* adr_type) {
1601   intptr_t key = (intptr_t) adr_type;
1602   key ^= key >> logAliasCacheSize;
1603   return &_alias_cache[key & right_n_bits(logAliasCacheSize)];
1604 }
1605 
1606 
1607 //-----------------------------grow_alias_types--------------------------------
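     // Double the capacity of the alias type table: the pointer array is
     // reallocated in the compile arena and the new slots are backed by a
     // freshly zeroed block of AliasType records.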
1608 void Compile::grow_alias_types() {
1609   const int old_ats  = _max_alias_types; // how many before?
1610   const int new_ats  = old_ats;          // how many more?
1611   const int grow_ats = old_ats+new_ats;  // how many now?
1612   _max_alias_types = grow_ats;
1613   _alias_types =  REALLOC_ARENA_ARRAY(comp_arena(), AliasType*, _alias_types, old_ats, grow_ats);
1614   AliasType* ats =    NEW_ARENA_ARRAY(comp_arena(), AliasType, new_ats);
1615   Copy::zero_to_bytes(ats, sizeof(AliasType)*new_ats);
1616   for (int i = 0; i < new_ats; i++)  _alias_types[old_ats+i] = &ats[i];
1617 }
1618 
1619 
1620 //--------------------------------find_alias_type------------------------------
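     // Look up (or, unless no_create is set, create) the AliasType for an
     // address type.  The fast path probes the direct-mapped alias cache; on
     // a miss the type is flattened and matched by linear search against the
     // existing alias types before a new entry is created.  Entries for
     // known-immutable slots (klass loads, array range, final/stable fields,
     // selected Klass fields) are marked non-rewritable as they are created.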
1621 Compile::AliasType* Compile::find_alias_type(const TypePtr* adr_type, bool no_create, ciField* original_field) {
1622   if (_AliasLevel == 0)
1623     return alias_type(AliasIdxBot);
1624 
1625   AliasCacheEntry* ace = probe_alias_cache(adr_type);
1626   if (ace->_adr_type == adr_type) {
1627     return alias_type(ace->_index);
1628   }
1629 
1630   // Handle special cases.
1631   if (adr_type == NULL)             return alias_type(AliasIdxTop);
1632   if (adr_type == TypePtr::BOTTOM)  return alias_type(AliasIdxBot);
1633 
1634   // Do it the slow way.
1635   const TypePtr* flat = flatten_alias_type(adr_type);
1636 
1637 #ifdef ASSERT
1638   assert(flat == flatten_alias_type(flat), "idempotent");
1639   assert(flat != TypePtr::BOTTOM,     "cannot alias-analyze an untyped ptr");
1640   if (flat->isa_oopptr() && !flat->isa_klassptr()) {
1641     const TypeOopPtr* foop = flat->is_oopptr();
1642     // Scalarizable allocations have exact klass always.
1643     bool exact = !foop->klass_is_exact() || foop->is_known_instance();
1644     const TypePtr* xoop = foop->cast_to_exactness(exact)->is_ptr();
1645     assert(foop == flatten_alias_type(xoop), "exactness must not affect alias type");
1646   }
1647   assert(flat == flatten_alias_type(flat), "exact bit doesn't matter");
1648 #endif
1649 
1650   int idx = AliasIdxTop;
1651   for (int i = 0; i < num_alias_types(); i++) {
1652     if (alias_type(i)->adr_type() == flat) {
1653       idx = i;
1654       break;
1655     }
1656   }
1657 
1658   if (idx == AliasIdxTop) {
1659     if (no_create)  return NULL;
1660     // Grow the array if necessary.
1661     if (_num_alias_types == _max_alias_types)  grow_alias_types();
1662     // Add a new alias type.
1663     idx = _num_alias_types++;
1664     _alias_types[idx]->Init(idx, flat);
1665     if (flat == TypeInstPtr::KLASS)  alias_type(idx)->set_rewritable(false);
1666     if (flat == TypeAryPtr::RANGE)   alias_type(idx)->set_rewritable(false);
1667     if (flat->isa_instptr()) {
1668       if (flat->offset() == java_lang_Class::klass_offset_in_bytes()
1669           && flat->is_instptr()->klass() == env()->Class_klass())
1670         alias_type(idx)->set_rewritable(false);
1671     }
1672     if (flat->isa_aryptr()) {
1673 #ifdef ASSERT
1674       const int header_size_min  = arrayOopDesc::base_offset_in_bytes(T_BYTE);
1675       // (T_BYTE has the weakest alignment and size restrictions...)
1676       assert(flat->offset() < header_size_min, "array body reference must be OffsetBot");
1677 #endif
1678       if (flat->offset() == TypePtr::OffsetBot) {
1679         alias_type(idx)->set_element(flat->is_aryptr()->elem());
1680       }
1681     }
1682     if (flat->isa_klassptr()) {
1683       if (flat->offset() == in_bytes(Klass::super_check_offset_offset()))
1684         alias_type(idx)->set_rewritable(false);
1685       if (flat->offset() == in_bytes(Klass::modifier_flags_offset()))
1686         alias_type(idx)->set_rewritable(false);
1687       if (flat->offset() == in_bytes(Klass::access_flags_offset()))
1688         alias_type(idx)->set_rewritable(false);
1689       if (flat->offset() == in_bytes(Klass::java_mirror_offset()))
1690         alias_type(idx)->set_rewritable(false);
1691     }
1692     // %%% (We would like to finalize JavaThread::threadObj_offset(),
1693     // but the base pointer type is not distinctive enough to identify
1694     // references into JavaThread.)
1695 
1696     // Check for final fields.
1697     const TypeInstPtr* tinst = flat->isa_instptr();
1698     if (tinst && tinst->offset() >= instanceOopDesc::base_offset_in_bytes()) {
1699       ciField* field;
1700       if (tinst->const_oop() != NULL &&
1701           tinst->klass() == ciEnv::current()->Class_klass() &&
1702           tinst->offset() >= (tinst->klass()->as_instance_klass()->size_helper() * wordSize)) {
1703         // static field
1704         ciInstanceKlass* k = tinst->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass();
1705         field = k->get_field_by_offset(tinst->offset(), true);
1706       } else {
1707         ciInstanceKlass *k = tinst->klass()->as_instance_klass();
1708         field = k->get_field_by_offset(tinst->offset(), false);
1709       }
1710       assert(field == NULL ||
1711              original_field == NULL ||
1712              (field->holder() == original_field->holder() &&
1713               field->offset() == original_field->offset() &&
1714               field->is_static() == original_field->is_static()), "wrong field?");
1715       // Set field() and is_rewritable() attributes.
1716       if (field != NULL)  alias_type(idx)->set_field(field);
1717     }
1718   }
1719 
1720   // Fill the cache for next time.
1721   ace->_adr_type = adr_type;
1722   ace->_index    = idx;
1723   assert(alias_type(adr_type) == alias_type(idx),  "type must be installed");
1724 
1725   // Might as well try to fill the cache for the flattened version, too.
1726   AliasCacheEntry* face = probe_alias_cache(flat);
1727   if (face->_adr_type == NULL) {
1728     face->_adr_type = flat;
1729     face->_index    = idx;
1730     assert(alias_type(flat) == alias_type(idx), "flat type must work too");
1731   }
1732 
1733   return alias_type(idx);
1734 }
1735 
1736 
1737 Compile::AliasType* Compile::alias_type(ciField* field) {
1738   const TypeOopPtr* t;
1739   if (field->is_static())
1740     t = TypeInstPtr::make(field->holder()->java_mirror());
1741   else
1742     t = TypeOopPtr::make_from_klass_raw(field->holder());
1743   AliasType* atp = alias_type(t->add_offset(field->offset_in_bytes()), field);
1744   assert((field->is_final() || field->is_stable()) == !atp->is_rewritable(), "must get the rewritable bits correct");
1745   return atp;
1746 }
1747 
1748 
1749 //------------------------------have_alias_type--------------------------------
1750 bool Compile::have_alias_type(const TypePtr* adr_type) {
1751   AliasCacheEntry* ace = probe_alias_cache(adr_type);
1752   if (ace->_adr_type == adr_type) {
1753     return true;
1754   }
1755 
1756   // Handle special cases.
1757   if (adr_type == NULL)             return true;
1758   if (adr_type == TypePtr::BOTTOM)  return true;
1759 
1760   return find_alias_type(adr_type, true, NULL) != NULL;
1761 }
1762 
1763 //-----------------------------must_alias--------------------------------------
1764 // True if all values of the given address type are in the given alias category.
1765 bool Compile::must_alias(const TypePtr* adr_type, int alias_idx) {
1766   if (alias_idx == AliasIdxBot)         return true;  // the universal category
1767   if (adr_type == NULL)                 return true;  // NULL serves as TypePtr::TOP
1768   if (alias_idx == AliasIdxTop)         return false; // the empty category
1769   if (adr_type->base() == Type::AnyPtr) return false; // TypePtr::BOTTOM or its twins
1770 
1771   // the only remaining possible overlap is identity
1772   int adr_idx = get_alias_index(adr_type);
1773   assert(adr_idx != AliasIdxBot && adr_idx != AliasIdxTop, "");
1774   assert(adr_idx == alias_idx ||
1775          (alias_type(alias_idx)->adr_type() != TypeOopPtr::BOTTOM
1776           && adr_type                       != TypeOopPtr::BOTTOM),
1777          "should not be testing for overlap with an unsafe pointer");
1778   return adr_idx == alias_idx;
1779 }
1780 
1781 //------------------------------can_alias--------------------------------------
1782 // True if any values of the given address type are in the given alias category.
1783 bool Compile::can_alias(const TypePtr* adr_type, int alias_idx) {
1784   if (alias_idx == AliasIdxTop)         return false; // the empty category
1785   if (adr_type == NULL)                 return false; // NULL serves as TypePtr::TOP
1786   if (alias_idx == AliasIdxBot)         return true;  // the universal category
1787   if (adr_type->base() == Type::AnyPtr) return true;  // TypePtr::BOTTOM or its twins
1788 
1789   // the only remaining possible overlap is identity
1790   int adr_idx = get_alias_index(adr_type);
1791   assert(adr_idx != AliasIdxBot && adr_idx != AliasIdxTop, "");
1792   return adr_idx == alias_idx;
1793 }
1794 
1795 
1796 
1797 //---------------------------pop_warm_call-------------------------------------
1798 WarmCallInfo* Compile::pop_warm_call() {
1799   WarmCallInfo* wci = _warm_calls;
1800   if (wci != NULL)  _warm_calls = wci->remove_from(wci);
1801   return wci;
1802 }
1803 
1804 //----------------------------Inline_Warm--------------------------------------
1805 int Compile::Inline_Warm() {
1806   // If there is room, try to inline some more warm call sites.
1807   // %%% Do a graph index compaction pass when we think we're out of space?
1808   if (!InlineWarmCalls)  return 0;
1809 
1810   int calls_made_hot = 0;
1811   int room_to_grow   = NodeCountInliningCutoff - unique();
1812   int amount_to_grow = MIN2(room_to_grow, (int)NodeCountInliningStep);
1813   int amount_grown   = 0;
1814   WarmCallInfo* call;
1815   while (amount_to_grow > 0 && (call = pop_warm_call()) != NULL) {
1816     int est_size = (int)call->size();
1817     if (est_size > (room_to_grow - amount_grown)) {
1818       // This one won't fit anyway.  Get rid of it.
1819       call->make_cold();
1820       continue;
1821     }
1822     call->make_hot();
1823     calls_made_hot++;
1824     amount_grown   += est_size;
1825     amount_to_grow -= est_size;
1826   }
1827 
1828   if (calls_made_hot > 0)  set_major_progress();
1829   return calls_made_hot;
1830 }
1831 
1832 
1833 //----------------------------Finish_Warm--------------------------------------
1834 void Compile::Finish_Warm() {
1835   if (!InlineWarmCalls)  return;
1836   if (failing())  return;
1837   if (warm_calls() == NULL)  return;
1838 
1839   // Clean up loose ends, if we are out of space for inlining.
1840   WarmCallInfo* call;
1841   while ((call = pop_warm_call()) != NULL) {
1842     call->make_cold();
1843   }
1844 }
1845 
1846 //---------------------cleanup_loop_predicates-----------------------
1847 // Remove the opaque nodes that protect the predicates so that all unused
1848 // checks and uncommon_traps will be eliminated from the ideal graph
1849 void Compile::cleanup_loop_predicates(PhaseIterGVN &igvn) {
1850   if (predicate_count()==0) return;
1851   for (int i = predicate_count(); i > 0; i--) {
1852     Node * n = predicate_opaque1_node(i-1);
1853     assert(n->Opcode() == Op_Opaque1, "must be");
1854     igvn.replace_node(n, n->in(1));
1855   }
1856   assert(predicate_count()==0, "should be clean!");
1857 }
1858 
1859 // StringOpts and late inlining of string methods
1860 void Compile::inline_string_calls(bool parse_time) {
1861   {
1862     // remove useless nodes to make the usage analysis simpler
1863     ResourceMark rm;
1864     PhaseRemoveUseless pru(initial_gvn(), for_igvn());
1865   }
1866 
1867   {
1868     ResourceMark rm;
1869     print_method(PHASE_BEFORE_STRINGOPTS, 3);
1870     PhaseStringOpts pso(initial_gvn(), for_igvn());
1871     print_method(PHASE_AFTER_STRINGOPTS, 3);
1872   }
1873 
1874   // now inline anything that we skipped the first time around
1875   if (!parse_time) {
1876     _late_inlines_pos = _late_inlines.length();
1877   }
1878 
1879   while (_string_late_inlines.length() > 0) {
1880     CallGenerator* cg = _string_late_inlines.pop();
1881     cg->do_late_inline();
1882     if (failing())  return;
1883   }
1884   _string_late_inlines.trunc_to(0);
1885 }
1886 
1887 // Late inlining of boxing methods
1888 void Compile::inline_boxing_calls(PhaseIterGVN& igvn) {
1889   if (_boxing_late_inlines.length() > 0) {
1890     assert(has_boxed_value(), "inconsistent");
1891 
1892     PhaseGVN* gvn = initial_gvn();
1893     set_inlining_incrementally(true);
1894 
1895     assert( igvn._worklist.size() == 0, "should be done with igvn" );
1896     for_igvn()->clear();
1897     gvn->replace_with(&igvn);
1898 
1899     _late_inlines_pos = _late_inlines.length();
1900 
1901     while (_boxing_late_inlines.length() > 0) {
1902       CallGenerator* cg = _boxing_late_inlines.pop();
1903       cg->do_late_inline();
1904       if (failing())  return;
1905     }
1906     _boxing_late_inlines.trunc_to(0);
1907 
1908     {
1909       ResourceMark rm;
1910       PhaseRemoveUseless pru(gvn, for_igvn());
1911     }
1912 
1913     igvn = PhaseIterGVN(gvn);
1914     igvn.optimize();
1915 
1916     set_inlining_progress(false);
1917     set_inlining_incrementally(false);
1918   }
1919 }
1920 
1921 void Compile::inline_incrementally_one(PhaseIterGVN& igvn) {
1922   assert(IncrementalInline, "incremental inlining should be on");
1923   PhaseGVN* gvn = initial_gvn();
1924 
1925   set_inlining_progress(false);
1926   for_igvn()->clear();
1927   gvn->replace_with(&igvn);
1928 
1929   int i = 0;
1930 
1931   for (; i <_late_inlines.length() && !inlining_progress(); i++) {
1932     CallGenerator* cg = _late_inlines.at(i);
1933     _late_inlines_pos = i+1;
1934     cg->do_late_inline();
1935     if (failing())  return;
1936   }
1937   int j = 0;
1938   for (; i < _late_inlines.length(); i++, j++) {
1939     _late_inlines.at_put(j, _late_inlines.at(i));
1940   }
1941   _late_inlines.trunc_to(j);
1942 
1943   {
1944     ResourceMark rm;
1945     PhaseRemoveUseless pru(gvn, for_igvn());
1946   }
1947 
1948   igvn = PhaseIterGVN(gvn);
1949 }
1950 
1951 // Perform incremental inlining until bound on number of live nodes is reached
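     // Each round inlines late call sites until one of them makes progress and
     // then runs IGVN to clean up.  When the live node count approaches
     // LiveNodeCountInliningCutoff, a PhaseIdealLoop pass is run in the hope of
     // shrinking the graph before giving up; any pending string late inlines
     // are processed at the end.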
1952 void Compile::inline_incrementally(PhaseIterGVN& igvn) {
1953   PhaseGVN* gvn = initial_gvn();
1954 
1955   set_inlining_incrementally(true);
1956   set_inlining_progress(true);
1957   uint low_live_nodes = 0;
1958 
1959   while(inlining_progress() && _late_inlines.length() > 0) {
1960 
1961     if (live_nodes() > (uint)LiveNodeCountInliningCutoff) {
1962       if (low_live_nodes < (uint)LiveNodeCountInliningCutoff * 8 / 10) {
1963         // PhaseIdealLoop is expensive, so we only try it once we are
1964         // running out of live nodes, and we only try it again if the
1965         // previous pass helped get the number of nodes down significantly.
1966         PhaseIdealLoop ideal_loop( igvn, false, true );
1967         if (failing())  return;
1968         low_live_nodes = live_nodes();
1969         _major_progress = true;
1970       }
1971 
1972       if (live_nodes() > (uint)LiveNodeCountInliningCutoff) {
1973         break;
1974       }
1975     }
1976 
1977     inline_incrementally_one(igvn);
1978 
1979     if (failing())  return;
1980 
1981     igvn.optimize();
1982 
1983     if (failing())  return;
1984   }
1985 
1986   assert( igvn._worklist.size() == 0, "should be done with igvn" );
1987 
1988   if (_string_late_inlines.length() > 0) {
1989     assert(has_stringbuilder(), "inconsistent");
1990     for_igvn()->clear();
1991     initial_gvn()->replace_with(&igvn);
1992 
1993     inline_string_calls(false);
1994 
1995     if (failing())  return;
1996 
1997     {
1998       ResourceMark rm;
1999       PhaseRemoveUseless pru(initial_gvn(), for_igvn());
2000     }
2001 
2002     igvn = PhaseIterGVN(gvn);
2003 
2004     igvn.optimize();
2005   }
2006 
2007   set_inlining_incrementally(false);
2008 }
2009 
2010 
2011 //------------------------------Optimize---------------------------------------
2012 // Given a graph, optimize it.
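     // The overall pipeline is roughly: iterative GVN, incremental inlining
     // (including boxing elimination), removal of speculative types, escape
     // analysis with macro-node elimination, several rounds of loop
     // optimizations, conditional constant propagation, a second round of
     // IGVN and loop optimizations, macro expansion, and finally
     // final_graph_reshaping.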
2013 void Compile::Optimize() {
2014   TracePhase t1("optimizer", &_t_optimizer, true);
2015 
2016 #ifndef PRODUCT
2017   if (env()->break_at_compile()) {
2018     BREAKPOINT;
2019   }
2020 
2021 #endif
2022 
2023   ResourceMark rm;
2024   int          loop_opts_cnt;
2025 
2026   print_inlining_reinit();
2027 
2028   NOT_PRODUCT( verify_graph_edges(); )
2029 
2030   print_method(PHASE_AFTER_PARSING);
2031 
2032  {
2033   // Iterative Global Value Numbering, including ideal transforms
2034   // Initialize IterGVN with types and values from parse-time GVN
2035   PhaseIterGVN igvn(initial_gvn());
2036   {
2037     NOT_PRODUCT( TracePhase t2("iterGVN", &_t_iterGVN, TimeCompiler); )
2038     igvn.optimize();
2039   }
2040 
2041   print_method(PHASE_ITER_GVN1, 2);
2042 
2043   if (failing())  return;
2044 
2045   {
2046     NOT_PRODUCT( TracePhase t2("incrementalInline", &_t_incrInline, TimeCompiler); )
2047     inline_incrementally(igvn);
2048   }
2049 
2050   print_method(PHASE_INCREMENTAL_INLINE, 2);
2051 
2052   if (failing())  return;
2053 
2054   if (eliminate_boxing()) {
2055     NOT_PRODUCT( TracePhase t2("incrementalInline", &_t_incrInline, TimeCompiler); )
2056     // Inline valueOf() methods now.
2057     inline_boxing_calls(igvn);
2058 
2059     if (AlwaysIncrementalInline) {
2060       inline_incrementally(igvn);
2061     }
2062 
2063     print_method(PHASE_INCREMENTAL_BOXING_INLINE, 2);
2064 
2065     if (failing())  return;
2066   }
2067 
2068   // Remove the speculative part of types and clean up the graph from
2069   // the extra CastPP nodes whose only purpose is to carry them. Do
2070   // that early so that optimizations are not disrupted by the extra
2071   // CastPP nodes.
2072   remove_speculative_types(igvn);
2073 
2074   // No more new expensive nodes will be added to the list from here
2075   // so keep only the actual candidates for optimizations.
2076   cleanup_expensive_nodes(igvn);
2077 
2078   // Perform escape analysis
2079   if (_do_escape_analysis && ConnectionGraph::has_candidates(this)) {
2080     if (has_loops()) {
2081       // Cleanup graph (remove dead nodes).
2082       TracePhase t2("idealLoop", &_t_idealLoop, true);
2083       PhaseIdealLoop ideal_loop( igvn, false, true );
2084       if (major_progress()) print_method(PHASE_PHASEIDEAL_BEFORE_EA, 2);
2085       if (failing())  return;
2086     }
2087     ConnectionGraph::do_analysis(this, &igvn);
2088 
2089     if (failing())  return;
2090 
2091     // Optimize out field loads from scalar replaceable allocations.
2092     igvn.optimize();
2093     print_method(PHASE_ITER_GVN_AFTER_EA, 2);
2094 
2095     if (failing())  return;
2096 
2097     if (congraph() != NULL && macro_count() > 0) {
2098       NOT_PRODUCT( TracePhase t2("macroEliminate", &_t_macroEliminate, TimeCompiler); )
2099       PhaseMacroExpand mexp(igvn);
2100       mexp.eliminate_macro_nodes();
2101       igvn.set_delay_transform(false);
2102 
2103       igvn.optimize();
2104       print_method(PHASE_ITER_GVN_AFTER_ELIMINATION, 2);
2105 
2106       if (failing())  return;
2107     }
2108   }
2109 
2110   // Loop transforms on the ideal graph.  Range Check Elimination,
2111   // peeling, unrolling, etc.
2112 
2113   // Set loop opts counter
2114   loop_opts_cnt = num_loop_opts();
2115   if((loop_opts_cnt > 0) && (has_loops() || has_split_ifs())) {
2116     {
2117       TracePhase t2("idealLoop", &_t_idealLoop, true);
2118       PhaseIdealLoop ideal_loop( igvn, true );
2119       loop_opts_cnt--;
2120       if (major_progress()) print_method(PHASE_PHASEIDEALLOOP1, 2);
2121       if (failing())  return;
2122     }
2123     // Loop opts pass if partial peeling occurred in previous pass
2124     if(PartialPeelLoop && major_progress() && (loop_opts_cnt > 0)) {
2125       TracePhase t3("idealLoop", &_t_idealLoop, true);
2126       PhaseIdealLoop ideal_loop( igvn, false );
2127       loop_opts_cnt--;
2128       if (major_progress()) print_method(PHASE_PHASEIDEALLOOP2, 2);
2129       if (failing())  return;
2130     }
2131     // Loop opts pass for loop-unrolling before CCP
2132     if(major_progress() && (loop_opts_cnt > 0)) {
2133       TracePhase t4("idealLoop", &_t_idealLoop, true);
2134       PhaseIdealLoop ideal_loop( igvn, false );
2135       loop_opts_cnt--;
2136       if (major_progress()) print_method(PHASE_PHASEIDEALLOOP3, 2);
2137     }
2138     if (!failing()) {
2139       // Verify that last round of loop opts produced a valid graph
2140       NOT_PRODUCT( TracePhase t2("idealLoopVerify", &_t_idealLoopVerify, TimeCompiler); )
2141       PhaseIdealLoop::verify(igvn);
2142     }
2143   }
2144   if (failing())  return;
2145 
2146   // Conditional Constant Propagation.
2147   PhaseCCP ccp( &igvn );
2148   assert( true, "Break here to ccp.dump_nodes_and_types(_root,999,1)");
2149   {
2150     TracePhase t2("ccp", &_t_ccp, true);
2151     ccp.do_transform();
2152   }
2153   print_method(PHASE_CPP1, 2);
2154 
2155   assert( true, "Break here to ccp.dump_old2new_map()");
2156 
2157   // Iterative Global Value Numbering, including ideal transforms
2158   {
2159     NOT_PRODUCT( TracePhase t2("iterGVN2", &_t_iterGVN2, TimeCompiler); )
2160     igvn = ccp;
2161     igvn.optimize();
2162   }
2163 
2164   print_method(PHASE_ITER_GVN2, 2);
2165 
2166   if (failing())  return;
2167 
2168   // Loop transforms on the ideal graph.  Range Check Elimination,
2169   // peeling, unrolling, etc.
2170   if(loop_opts_cnt > 0) {
2171     debug_only( int cnt = 0; );
2172     while(major_progress() && (loop_opts_cnt > 0)) {
2173       TracePhase t2("idealLoop", &_t_idealLoop, true);
2174       assert( cnt++ < 40, "infinite cycle in loop optimization" );
2175       PhaseIdealLoop ideal_loop( igvn, true);
2176       loop_opts_cnt--;
2177       if (major_progress()) print_method(PHASE_PHASEIDEALLOOP_ITERATIONS, 2);
2178       if (failing())  return;
2179     }
2180   }
2181 
2182   {
2183     // Verify that all previous optimizations produced a valid graph
2184     // at least to this point, even if no loop optimizations were done.
2185     NOT_PRODUCT( TracePhase t2("idealLoopVerify", &_t_idealLoopVerify, TimeCompiler); )
2186     PhaseIdealLoop::verify(igvn);
2187   }
2188 
2189   {
2190     NOT_PRODUCT( TracePhase t2("macroExpand", &_t_macroExpand, TimeCompiler); )
2191     PhaseMacroExpand  mex(igvn);
2192     if (mex.expand_macro_nodes()) {
2193       assert(failing(), "must bail out w/ explicit message");
2194       return;
2195     }
2196   }
2197 
2198  } // (End scope of igvn; run destructor if necessary for asserts.)
2199 
2200   process_print_inlining();
2201   // A method with only infinite loops has no edges entering loops from root
2202   {
2203     NOT_PRODUCT( TracePhase t2("graphReshape", &_t_graphReshaping, TimeCompiler); )
2204     if (final_graph_reshaping()) {
2205       assert(failing(), "must bail out w/ explicit message");
2206       return;
2207     }
2208   }
2209 
2210   print_method(PHASE_OPTIMIZE_FINISHED, 2);
2211 }
2212 
2213 
2214 //------------------------------Code_Gen---------------------------------------
2215 // Given a graph, generate code for it
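     // Stages: instruction selection (Matcher), global code motion and
     // scheduling (PhaseCFG), register allocation (PhaseChaitin), empty-block
     // removal and block layout, optional peephole and post-allocation
     // expansion, and finally Output(), which emits the instruction bits.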
2216 void Compile::Code_Gen() {
2217   if (failing()) {
2218     return;
2219   }
2220 
2221   // Perform instruction selection.  You might think we could reclaim Matcher
2222   // memory PDQ, but actually the Matcher is used in generating spill code.
2223   // Internals of the Matcher (including some VectorSets) must remain live
2224   // for a while - thus I cannot reclaim Matcher memory lest a VectorSet usage
2225   // set a bit in reclaimed memory.
2226 
2227   // In debug mode can dump m._nodes.dump() for mapping of ideal to machine
2228   // nodes.  Mapping is only valid at the root of each matched subtree.
2229   NOT_PRODUCT( verify_graph_edges(); )
2230 
2231   Matcher matcher;
2232   _matcher = &matcher;
2233   {
2234     TracePhase t2("matcher", &_t_matcher, true);
2235     matcher.match();
2236   }
2237   // In debug mode can dump m._nodes.dump() for mapping of ideal to machine
2238   // nodes.  Mapping is only valid at the root of each matched subtree.
2239   NOT_PRODUCT( verify_graph_edges(); )
2240 
2241   // If you have too many nodes, or if matching has failed, bail out
2242   check_node_count(0, "out of nodes matching instructions");
2243   if (failing()) {
2244     return;
2245   }
2246 
2247   // Build a proper-looking CFG
2248   PhaseCFG cfg(node_arena(), root(), matcher);
2249   _cfg = &cfg;
2250   {
2251     NOT_PRODUCT( TracePhase t2("scheduler", &_t_scheduler, TimeCompiler); )
2252     bool success = cfg.do_global_code_motion();
2253     if (!success) {
2254       return;
2255     }
2256 
2257     print_method(PHASE_GLOBAL_CODE_MOTION, 2);
2258     NOT_PRODUCT( verify_graph_edges(); )
2259     debug_only( cfg.verify(); )
2260   }
2261 
2262   PhaseChaitin regalloc(unique(), cfg, matcher);
2263   _regalloc = &regalloc;
2264   {
2265     TracePhase t2("regalloc", &_t_registerAllocation, true);
2266     // Perform register allocation.  After Chaitin, use-def chains are
2267     // no longer accurate (at spill code) and so must be ignored.
2268     // Node->LRG->reg mappings are still accurate.
2269     _regalloc->Register_Allocate();
2270 
2271     // Bail out if the allocator builds too many nodes
2272     if (failing()) {
2273       return;
2274     }
2275   }
2276 
2277   // Prior to register allocation we kept empty basic blocks in case the
2278   // allocator needed a place to spill.  After register allocation we
2279   // are not adding any new instructions.  If any basic block is empty, we
2280   // can now safely remove it.
2281   {
2282     NOT_PRODUCT( TracePhase t2("blockOrdering", &_t_blockOrdering, TimeCompiler); )
2283     cfg.remove_empty_blocks();
2284     if (do_freq_based_layout()) {
2285       PhaseBlockLayout layout(cfg);
2286     } else {
2287       cfg.set_loop_alignment();
2288     }
2289     cfg.fixup_flow();
2290   }
2291 
2292   // Apply peephole optimizations
2293   if( OptoPeephole ) {
2294     NOT_PRODUCT( TracePhase t2("peephole", &_t_peephole, TimeCompiler); )
2295     PhasePeephole peep( _regalloc, cfg);
2296     peep.do_transform();
2297   }
2298 
2299   // Do late expand if CPU requires this.
2300   if (Matcher::require_postalloc_expand) {
2301     NOT_PRODUCT(TracePhase t2c("postalloc_expand", &_t_postalloc_expand, true));
2302     cfg.postalloc_expand(_regalloc);
2303   }
2304 
2305   // Convert Nodes to instruction bits in a buffer
2306   {
2307     // %%%% workspace merge brought two timers together for one job
2308     TracePhase t2a("output", &_t_output, true);
2309     NOT_PRODUCT( TraceTime t2b(NULL, &_t_codeGeneration, TimeCompiler, false); )
2310     Output();
2311   }
2312 
2313   print_method(PHASE_FINAL_CODE);
2314 
2315   // He's dead, Jim.
2316   _cfg     = (PhaseCFG*)0xdeadbeef;
2317   _regalloc = (PhaseChaitin*)0xdeadbeef;
2318 }
2319 
2320 
2321 //------------------------------dump_asm---------------------------------------
2322 // Dump formatted assembly
2323 #ifndef PRODUCT
2324 void Compile::dump_asm(int *pcs, uint pc_limit) {
2325   bool cut_short = false;
2326   tty->print_cr("#");
2327   tty->print("#  ");  _tf->dump();  tty->cr();
2328   tty->print_cr("#");
2329 
2330   // For all blocks
2331   int pc = 0x0;                 // Program counter
2332   char starts_bundle = ' ';
2333   _regalloc->dump_frame();
2334 
2335   Node *n = NULL;
2336   for (uint i = 0; i < _cfg->number_of_blocks(); i++) {
2337     if (VMThread::should_terminate()) {
2338       cut_short = true;
2339       break;
2340     }
2341     Block* block = _cfg->get_block(i);
2342     if (block->is_connector() && !Verbose) {
2343       continue;
2344     }
2345     n = block->head();
2346     if (pcs && n->_idx < pc_limit) {
2347       tty->print("%3.3x   ", pcs[n->_idx]);
2348     } else {
2349       tty->print("      ");
2350     }
2351     block->dump_head(_cfg);
2352     if (block->is_connector()) {
2353       tty->print_cr("        # Empty connector block");
2354     } else if (block->num_preds() == 2 && block->pred(1)->is_CatchProj() && block->pred(1)->as_CatchProj()->_con == CatchProjNode::fall_through_index) {
2355       tty->print_cr("        # Block is sole successor of call");
2356     }
2357 
2358     // For all instructions
2359     Node *delay = NULL;
2360     for (uint j = 0; j < block->number_of_nodes(); j++) {
2361       if (VMThread::should_terminate()) {
2362         cut_short = true;
2363         break;
2364       }
2365       n = block->get_node(j);
2366       if (valid_bundle_info(n)) {
2367         Bundle* bundle = node_bundling(n);
2368         if (bundle->used_in_unconditional_delay()) {
2369           delay = n;
2370           continue;
2371         }
2372         if (bundle->starts_bundle()) {
2373           starts_bundle = '+';
2374         }
2375       }
2376 
2377       if (WizardMode) {
2378         n->dump();
2379       }
2380 
2381       if( !n->is_Region() &&    // Don't print in the Assembly
2382           !n->is_Phi() &&       // a few noisily useless nodes
2383           !n->is_Proj() &&
2384           !n->is_MachTemp() &&
2385           !n->is_SafePointScalarObject() &&
2386           !n->is_Catch() &&     // Would be nice to print exception table targets
2387           !n->is_MergeMem() &&  // Not very interesting
2388           !n->is_top() &&       // Debug info table constants
2389           !(n->is_Con() && !n->is_Mach()) // Debug info table constants
2390           ) {
2391         if (pcs && n->_idx < pc_limit)
2392           tty->print("%3.3x", pcs[n->_idx]);
2393         else
2394           tty->print("   ");
2395         tty->print(" %c ", starts_bundle);
2396         starts_bundle = ' ';
2397         tty->print("\t");
2398         n->format(_regalloc, tty);
2399         tty->cr();
2400       }
2401 
2402       // If we have an instruction with a delay slot, and have seen a delay,
2403       // then back up and print it
2404       if (valid_bundle_info(n) && node_bundling(n)->use_unconditional_delay()) {
2405         assert(delay != NULL, "no unconditional delay instruction");
2406         if (WizardMode) delay->dump();
2407 
2408         if (node_bundling(delay)->starts_bundle())
2409           starts_bundle = '+';
2410         if (pcs && n->_idx < pc_limit)
2411           tty->print("%3.3x", pcs[n->_idx]);
2412         else
2413           tty->print("   ");
2414         tty->print(" %c ", starts_bundle);
2415         starts_bundle = ' ';
2416         tty->print("\t");
2417         delay->format(_regalloc, tty);
2418         tty->cr();
2419         delay = NULL;
2420       }
2421 
2422       // Dump the exception table as well
2423       if( n->is_Catch() && (Verbose || WizardMode) ) {
2424         // Print the exception table for this offset
2425         _handler_table.print_subtable_for(pc);
2426       }
2427     }
2428 
2429     if (pcs && n->_idx < pc_limit)
2430       tty->print_cr("%3.3x", pcs[n->_idx]);
2431     else
2432       tty->cr();
2433 
2434     assert(cut_short || delay == NULL, "no unconditional delay branch");
2435 
2436   } // End of per-block dump
2437   tty->cr();
2438 
2439   if (cut_short)  tty->print_cr("*** disassembly is cut short ***");
2440 }
2441 #endif
2442 
2443 //------------------------------Final_Reshape_Counts---------------------------
2444 // This class defines counters to help identify when a method
2445 // may/must be executed using hardware with only 24-bit precision.
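     // (On x86 this corresponds to the x87 precision-control setting: a method
     // that performs only float arithmetic and no double arithmetic can run
     // with the FPU left in 24-bit mode.)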
2446 struct Final_Reshape_Counts : public StackObj {
2447   int  _call_count;             // count non-inlined 'common' calls
2448   int  _float_count;            // count float ops requiring 24-bit precision
2449   int  _double_count;           // count double ops requiring more precision
2450   int  _java_call_count;        // count non-inlined 'java' calls
2451   int  _inner_loop_count;       // count loops which need alignment
2452   VectorSet _visited;           // Visitation flags
2453   Node_List _tests;             // Set of IfNodes & PCTableNodes
2454 
2455   Final_Reshape_Counts() :
2456     _call_count(0), _float_count(0), _double_count(0),
2457     _java_call_count(0), _inner_loop_count(0),
2458     _visited( Thread::current()->resource_area() ) { }
2459 
2460   void inc_call_count  () { _call_count  ++; }
2461   void inc_float_count () { _float_count ++; }
2462   void inc_double_count() { _double_count++; }
2463   void inc_java_call_count() { _java_call_count++; }
2464   void inc_inner_loop_count() { _inner_loop_count++; }
2465 
2466   int  get_call_count  () const { return _call_count  ; }
2467   int  get_float_count () const { return _float_count ; }
2468   int  get_double_count() const { return _double_count; }
2469   int  get_java_call_count() const { return _java_call_count; }
2470   int  get_inner_loop_count() const { return _inner_loop_count; }
2471 };
2472 
2473 #ifdef ASSERT
2474 static bool oop_offset_is_sane(const TypeInstPtr* tp) {
2475   ciInstanceKlass *k = tp->klass()->as_instance_klass();
2476   // Make sure the offset goes inside the instance layout.
2477   return k->contains_field_offset(tp->offset());
2478   // Note that OffsetBot and OffsetTop are very negative.
2479 }
2480 #endif
2481 
2482 // Eliminate trivially redundant StoreCMs and accumulate their
2483 // precedence edges.
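     // For example (informal sketch): two marks of the same card,
     //   StoreCM #1 (mem, adr, val)  <--memory--  StoreCM #2 (adr, val)
     // collapse into a single StoreCM; the oop-store inputs of the eliminated
     // mark are kept as precedence edges on the survivor so ordering against
     // the original oop stores is not lost.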
2484 void Compile::eliminate_redundant_card_marks(Node* n) {
2485   assert(n->Opcode() == Op_StoreCM, "expected StoreCM");
2486   if (n->in(MemNode::Address)->outcnt() > 1) {
2487     // There are multiple users of the same address, so it might be
2488     // possible to eliminate some of the StoreCMs.
2489     Node* mem = n->in(MemNode::Memory);
2490     Node* adr = n->in(MemNode::Address);
2491     Node* val = n->in(MemNode::ValueIn);
2492     Node* prev = n;
2493     bool done = false;
2494     // Walk the chain of StoreCMs eliminating ones that match.  As
2495     // long as it's a chain of single users then the optimization is
2496     // safe.  Eliminating partially redundant StoreCMs would require
2497     // cloning copies down the other paths.
2498     while (mem->Opcode() == Op_StoreCM && mem->outcnt() == 1 && !done) {
2499       if (adr == mem->in(MemNode::Address) &&
2500           val == mem->in(MemNode::ValueIn)) {
2501         // redundant StoreCM
2502         if (mem->req() > MemNode::OopStore) {
2503           // Hasn't been processed by this code yet.
2504           n->add_prec(mem->in(MemNode::OopStore));
2505         } else {
2506           // Already converted to precedence edge
2507           for (uint i = mem->req(); i < mem->len(); i++) {
2508             // Accumulate any precedence edges
2509             if (mem->in(i) != NULL) {
2510               n->add_prec(mem->in(i));
2511             }
2512           }
2513           // Everything above this point has been processed.
2514           done = true;
2515         }
2516         // Eliminate the previous StoreCM
2517         prev->set_req(MemNode::Memory, mem->in(MemNode::Memory));
2518         assert(mem->outcnt() == 0, "should be dead");
2519         mem->disconnect_inputs(NULL, this);
2520       } else {
2521         prev = mem;
2522       }
2523       mem = prev->in(MemNode::Memory);
2524     }
2525   }
2526 }
2527 
2528 //------------------------------final_graph_reshaping_impl----------------------
2529 // Implement items 1-5 from final_graph_reshaping below.
2530 void Compile::final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc) {
2531 
2532   if ( n->outcnt() == 0 ) return; // dead node
2533   uint nop = n->Opcode();
2534 
2535   // Check for 2-input instruction with "last use" on right input.
2536   // Swap to left input.  Implements item (2).
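       // (Putting the last-use value on the left lets a two-operand machine
       // overwrite it in place rather than first copying the still-live left
       // input, which is presumably why commutative ops are normalized here.)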
2537   if( n->req() == 3 &&          // two-input instruction
2538       n->in(1)->outcnt() > 1 && // left use is NOT a last use
2539       (!n->in(1)->is_Phi() || n->in(1)->in(2) != n) && // it is not data loop
2540       n->in(2)->outcnt() == 1 &&// right use IS a last use
2541       !n->in(2)->is_Con() ) {   // right use is not a constant
2542     // Check for commutative opcode
2543     switch( nop ) {
2544     case Op_AddI:  case Op_AddF:  case Op_AddD:  case Op_AddL:
2545     case Op_MaxI:  case Op_MinI:
2546     case Op_MulI:  case Op_MulF:  case Op_MulD:  case Op_MulL:
2547     case Op_AndL:  case Op_XorL:  case Op_OrL:
2548     case Op_AndI:  case Op_XorI:  case Op_OrI: {
2549       // Move "last use" input to left by swapping inputs
2550       n->swap_edges(1, 2);
2551       break;
2552     }
2553     default:
2554       break;
2555     }
2556   }
2557 
2558 #ifdef ASSERT
2559   if( n->is_Mem() ) {
2560     int alias_idx = get_alias_index(n->as_Mem()->adr_type());
2561     assert( n->in(0) != NULL || alias_idx != Compile::AliasIdxRaw ||
2562             // oop will be recorded in oop map if load crosses safepoint
2563             n->is_Load() && (n->as_Load()->bottom_type()->isa_oopptr() ||
2564                              LoadNode::is_immutable_value(n->in(MemNode::Address))),
2565             "raw memory operations should have control edge");
2566   }
2567 #endif
2568   // Count FPU ops and common calls, implements item (3)
2569   switch( nop ) {
2570   // Count all float operations that may use FPU
2571   case Op_AddF:
2572   case Op_SubF:
2573   case Op_MulF:
2574   case Op_DivF:
2575   case Op_NegF:
2576   case Op_ModF:
2577   case Op_ConvI2F:
2578   case Op_ConF:
2579   case Op_CmpF:
2580   case Op_CmpF3:
2581   // case Op_ConvL2F: // longs are split into 32-bit halves
2582     frc.inc_float_count();
2583     break;
2584 
2585   case Op_ConvF2D:
2586   case Op_ConvD2F:
2587     frc.inc_float_count();
2588     frc.inc_double_count();
2589     break;
2590 
2591   // Count all double operations that may use FPU
2592   case Op_AddD:
2593   case Op_SubD:
2594   case Op_MulD:
2595   case Op_DivD:
2596   case Op_NegD:
2597   case Op_ModD:
2598   case Op_ConvI2D:
2599   case Op_ConvD2I:
2600   // case Op_ConvL2D: // handled by leaf call
2601   // case Op_ConvD2L: // handled by leaf call
2602   case Op_ConD:
2603   case Op_CmpD:
2604   case Op_CmpD3:
2605     frc.inc_double_count();
2606     break;
2607   case Op_Opaque1:              // Remove Opaque Nodes before matching
2608   case Op_Opaque2:              // Remove Opaque Nodes before matching
2609   case Op_Opaque3:
2610     n->subsume_by(n->in(1), this);
2611     break;
2612   case Op_CallStaticJava:
2613   case Op_CallJava:
2614   case Op_CallDynamicJava:
2615     frc.inc_java_call_count(); // Count java call site; fall through to common call handling
2616   case Op_CallRuntime:
2617   case Op_CallLeaf:
2618   case Op_CallLeafNoFP: {
2619     assert( n->is_Call(), "" );
2620     CallNode *call = n->as_Call();
2621     // Count call sites where the FP mode bit would have to be flipped.
2622     // Do not count uncommon runtime calls:
2623     // uncommon_trap, _complete_monitor_locking, _complete_monitor_unlocking,
2624     // _new_Java, _new_typeArray, _new_objArray, _rethrow_Java, ...
2625     if( !call->is_CallStaticJava() || !call->as_CallStaticJava()->_name ) {
2626       frc.inc_call_count();   // Count the call site
2627     } else {                  // See if uncommon argument is shared
2628       Node *n = call->in(TypeFunc::Parms);
2629       int nop = n->Opcode();
2630       // Clone shared simple arguments to uncommon calls, item (1).
2631       if( n->outcnt() > 1 &&
2632           !n->is_Proj() &&
2633           nop != Op_CreateEx &&
2634           nop != Op_CheckCastPP &&
2635           nop != Op_DecodeN &&
2636           nop != Op_DecodeNKlass &&
2637           !n->is_Mem() ) {
2638         Node *x = n->clone();
2639         call->set_req( TypeFunc::Parms, x );
2640       }
2641     }
2642     break;
2643   }
2644 
2645   case Op_StoreD:
2646   case Op_LoadD:
2647   case Op_LoadD_unaligned:
2648     frc.inc_double_count();
2649     goto handle_mem;
2650   case Op_StoreF:
2651   case Op_LoadF:
2652     frc.inc_float_count();
2653     goto handle_mem;
2654 
2655   case Op_StoreCM:
2656     {
2657       // Convert OopStore dependence into precedence edge
2658       Node* prec = n->in(MemNode::OopStore);
2659       n->del_req(MemNode::OopStore);
2660       n->add_prec(prec);
2661       eliminate_redundant_card_marks(n);
2662     }
2663 
2664     // fall through
2665 
2666   case Op_StoreB:
2667   case Op_StoreC:
2668   case Op_StorePConditional:
2669   case Op_StoreI:
2670   case Op_StoreL:
2671   case Op_StoreIConditional:
2672   case Op_StoreLConditional:
2673   case Op_CompareAndSwapI:
2674   case Op_CompareAndSwapL:
2675   case Op_CompareAndSwapP:
2676   case Op_CompareAndSwapN:
2677   case Op_GetAndAddI:
2678   case Op_GetAndAddL:
2679   case Op_GetAndSetI:
2680   case Op_GetAndSetL:
2681   case Op_GetAndSetP:
2682   case Op_GetAndSetN:
2683   case Op_StoreP:
2684   case Op_StoreN:
2685   case Op_StoreNKlass:
2686   case Op_LoadB:
2687   case Op_LoadUB:
2688   case Op_LoadUS:
2689   case Op_LoadI:
2690   case Op_LoadKlass:
2691   case Op_LoadNKlass:
2692   case Op_LoadL:
2693   case Op_LoadL_unaligned:
2694   case Op_LoadPLocked:
2695   case Op_LoadP:
2696   case Op_LoadN:
2697   case Op_LoadRange:
2698   case Op_LoadS: {
2699   handle_mem:
2700 #ifdef ASSERT
2701     if( VerifyOptoOopOffsets ) {
2702       assert( n->is_Mem(), "" );
2703       MemNode *mem  = (MemNode*)n;
2704       // Check to see if address types have grounded out somehow.
2705       const TypeInstPtr *tp = mem->in(MemNode::Address)->bottom_type()->isa_instptr();
2706       assert( !tp || oop_offset_is_sane(tp), "" );
2707     }
2708 #endif
2709     break;
2710   }
2711 
2712   case Op_AddP: {               // Assert sane base pointers
2713     Node *addp = n->in(AddPNode::Address);
2714     assert( !addp->is_AddP() ||
2715             addp->in(AddPNode::Base)->is_top() || // Top OK for allocation
2716             addp->in(AddPNode::Base) == n->in(AddPNode::Base),
2717             "Base pointers must match" );
2718 #ifdef _LP64
2719     if ((UseCompressedOops || UseCompressedClassPointers) &&
2720         addp->Opcode() == Op_ConP &&
2721         addp == n->in(AddPNode::Base) &&
2722         n->in(AddPNode::Offset)->is_Con()) {
2723       // Use addressing with narrow klass to load with offset on x86.
2724       // On sparc, loading a 32-bit constant and decoding it takes fewer
2725       // instructions (4) than loading a 64-bit constant (7).
2726       // Do this transformation here since IGVN will convert ConN back to ConP.
2727       const Type* t = addp->bottom_type();
2728       if (t->isa_oopptr() || t->isa_klassptr()) {
2729         Node* nn = NULL;
2730 
2731         int op = t->isa_oopptr() ? Op_ConN : Op_ConNKlass;
2732 
2733         // Look for existing ConN node of the same exact type.
2734         Node* r  = root();
2735         uint cnt = r->outcnt();
2736         for (uint i = 0; i < cnt; i++) {
2737           Node* m = r->raw_out(i);
2738           if (m!= NULL && m->Opcode() == op &&
2739               m->bottom_type()->make_ptr() == t) {
2740             nn = m;
2741             break;
2742           }
2743         }
2744         if (nn != NULL) {
2745           // Decode a narrow oop to match address
2746           // [R12 + narrow_oop_reg<<3 + offset]
2747           if (t->isa_oopptr()) {
2748             nn = new DecodeNNode(nn, t);
2749           } else {
2750             nn = new DecodeNKlassNode(nn, t);
2751           }
2752           n->set_req(AddPNode::Base, nn);
2753           n->set_req(AddPNode::Address, nn);
2754           if (addp->outcnt() == 0) {
2755             addp->disconnect_inputs(NULL, this);
2756           }
2757         }
2758       }
2759     }
2760 #endif
2761     break;
2762   }
2763 
2764 #ifdef _LP64
2765   case Op_CastPP:
2766     if (n->in(1)->is_DecodeN() && Matcher::gen_narrow_oop_implicit_null_checks()) {
2767       Node* in1 = n->in(1);
2768       const Type* t = n->bottom_type();
2769       Node* new_in1 = in1->clone();
2770       new_in1->as_DecodeN()->set_type(t);
2771 
2772       if (!Matcher::narrow_oop_use_complex_address()) {
2773         //
2774         // x86, ARM and friends can handle 2 adds in addressing mode
2775         // and Matcher can fold a DecodeN node into address by using
2776         // a narrow oop directly and do implicit NULL check in address:
2777         //
2778         // [R12 + narrow_oop_reg<<3 + offset]
2779         // NullCheck narrow_oop_reg
2780         //
2781         // On other platforms (Sparc) we have to keep the new DecodeN node and
2782         // use it to do the implicit NULL check in the address:
2783         //
2784         // decode_not_null narrow_oop_reg, base_reg
2785         // [base_reg + offset]
2786         // NullCheck base_reg
2787         //
2788         // Pin the new DecodeN node to the non-null path on these platforms
2789         // (Sparc) to record which NULL check the new DecodeN node corresponds
2790         // to, so it can be used as the value in implicit_null_check().
2791         //
2792         new_in1->set_req(0, n->in(0));
2793       }
2794 
2795       n->subsume_by(new_in1, this);
2796       if (in1->outcnt() == 0) {
2797         in1->disconnect_inputs(NULL, this);
2798       }
2799     }
2800     break;
2801 
2802   case Op_CmpP:
2803     // Do this transformation here to preserve CmpPNode::sub() and
2804     // other TypePtr related Ideal optimizations (for example, ptr nullness).
2805     if (n->in(1)->is_DecodeNarrowPtr() || n->in(2)->is_DecodeNarrowPtr()) {
2806       Node* in1 = n->in(1);
2807       Node* in2 = n->in(2);
2808       if (!in1->is_DecodeNarrowPtr()) {
2809         in2 = in1;
2810         in1 = n->in(2);
2811       }
2812       assert(in1->is_DecodeNarrowPtr(), "sanity");
2813 
2814       Node* new_in2 = NULL;
2815       if (in2->is_DecodeNarrowPtr()) {
2816         assert(in2->Opcode() == in1->Opcode(), "must be same node type");
2817         new_in2 = in2->in(1);
2818       } else if (in2->Opcode() == Op_ConP) {
2819         const Type* t = in2->bottom_type();
2820         if (t == TypePtr::NULL_PTR) {
2821           assert(in1->is_DecodeN(), "compare klass to null?");
2822           // Don't convert the CmpP null check into a CmpN if a compressed
2823           // oop implicit null check is not generated; this allows a normal
2824           // oop implicit null check to be generated instead.
2825           if (Matcher::gen_narrow_oop_implicit_null_checks())
2826             new_in2 = ConNode::make(this, TypeNarrowOop::NULL_PTR);
2827           //
2828           // This transformation, together with the CastPP transformation above,
2829           // generates code for implicit NULL checks for compressed oops.
2830           //
2831           // The original code after Optimize()
2832           //
2833           //    LoadN memory, narrow_oop_reg
2834           //    decode narrow_oop_reg, base_reg
2835           //    CmpP base_reg, NULL
2836           //    CastPP base_reg // NotNull
2837           //    Load [base_reg + offset], val_reg
2838           //
2839           // after these transformations will be
2840           //
2841           //    LoadN memory, narrow_oop_reg
2842           //    CmpN narrow_oop_reg, NULL
2843           //    decode_not_null narrow_oop_reg, base_reg
2844           //    Load [base_reg + offset], val_reg
2845           //
2846           // and the uncommon path (== NULL) will use narrow_oop_reg directly
2847           // since narrow oops can be used in debug info now (see the code in
2848           // final_graph_reshaping_walk()).
2849           //
2850           // At the end the code will be matched to
2851           // on x86:
2852           //
2853           //    Load_narrow_oop memory, narrow_oop_reg
2854           //    Load [R12 + narrow_oop_reg<<3 + offset], val_reg
2855           //    NullCheck narrow_oop_reg
2856           //
2857           // and on sparc:
2858           //
2859           //    Load_narrow_oop memory, narrow_oop_reg
2860           //    decode_not_null narrow_oop_reg, base_reg
2861           //    Load [base_reg + offset], val_reg
2862           //    NullCheck base_reg
2863           //
2864         } else if (t->isa_oopptr()) {
2865           new_in2 = ConNode::make(this, t->make_narrowoop());
2866         } else if (t->isa_klassptr()) {
2867           new_in2 = ConNode::make(this, t->make_narrowklass());
2868         }
2869       }
2870       if (new_in2 != NULL) {
2871         Node* cmpN = new CmpNNode(in1->in(1), new_in2);
2872         n->subsume_by(cmpN, this);
2873         if (in1->outcnt() == 0) {
2874           in1->disconnect_inputs(NULL, this);
2875         }
2876         if (in2->outcnt() == 0) {
2877           in2->disconnect_inputs(NULL, this);
2878         }
2879       }
2880     }
2881     break;
2882 
2883   case Op_DecodeN:
2884   case Op_DecodeNKlass:
2885     assert(!n->in(1)->is_EncodeNarrowPtr(), "should be optimized out");
2886     // DecodeN could be pinned when it can't be folded into
2887     // an address expression; see the code for Op_CastPP above.
2888     assert(n->in(0) == NULL || (UseCompressedOops && !Matcher::narrow_oop_use_complex_address()), "no control");
2889     break;
2890 
2891   case Op_EncodeP:
2892   case Op_EncodePKlass: {
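    // Strength-reduce redundant narrow-pointer conversions. Illustrative
    // sketch of the rewrites performed below:
    //   EncodeP(DecodeN(narrow_x))   =>  narrow_x
    //   EncodeP(ConP oop constant)   =>  ConN(narrowed oop constant)
    //   EncodePKlass(ConP klass)     =>  ConNKlass(narrowed klass constant)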
2893     Node* in1 = n->in(1);
2894     if (in1->is_DecodeNarrowPtr()) {
2895       n->subsume_by(in1->in(1), this);
2896     } else if (in1->Opcode() == Op_ConP) {
2897       const Type* t = in1->bottom_type();
2898       if (t == TypePtr::NULL_PTR) {
2899         assert(t->isa_oopptr(), "null klass?");
2900         n->subsume_by(ConNode::make(this, TypeNarrowOop::NULL_PTR), this);
2901       } else if (t->isa_oopptr()) {
2902         n->subsume_by(ConNode::make(this, t->make_narrowoop()), this);
2903       } else if (t->isa_klassptr()) {
2904         n->subsume_by(ConNode::make(this, t->make_narrowklass()), this);
2905       }
2906     }
2907     if (in1->outcnt() == 0) {
2908       in1->disconnect_inputs(NULL, this);
2909     }
2910     break;
2911   }
2912 
2913   case Op_Proj: {
2914     if (OptimizeStringConcat) {
2915       ProjNode* p = n->as_Proj();
2916       if (p->_is_io_use) {
2917         // Separate projections were used for the exception path; these
2918         // are normally removed by a late inline.  If the call wasn't
2919         // inlined they hang around and should just be replaced with
2920         // the original projection.
2921         Node* proj = NULL;
2922         // Replace with just one
2923         for (SimpleDUIterator i(p->in(0)); i.has_next(); i.next()) {
2924           Node *use = i.get();
2925           if (use->is_Proj() && p != use && use->as_Proj()->_con == p->_con) {
2926             proj = use;
2927             break;
2928           }
2929         }
2930         assert(proj != NULL, "must be found");
2931         p->subsume_by(proj, this);
2932       }
2933     }
2934     break;
2935   }
2936 
2937   case Op_Phi:
2938     if (n->as_Phi()->bottom_type()->isa_narrowoop() || n->as_Phi()->bottom_type()->isa_narrowklass()) {
2939       // The EncodeP optimization may create a Phi with the same edge
2940       // on all paths. Such a Phi is not handled well by the register allocator.
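      // For illustration: after EncodeP has been pushed through a diamond, a
      // Phi(region, narrow_x, narrow_x) has identical data edges on every
      // path and can simply be replaced by narrow_x.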
2941       Node* unique_in = n->in(1);
2942       assert(unique_in != NULL, "");
2943       uint cnt = n->req();
2944       for (uint i = 2; i < cnt; i++) {
2945         Node* m = n->in(i);
2946         assert(m != NULL, "");
2947         if (unique_in != m)
2948           unique_in = NULL;
2949       }
2950       if (unique_in != NULL) {
2951         n->subsume_by(unique_in, this);
2952       }
2953     }
2954     break;
2955 
2956 #endif
2957 
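  // Illustration for the Op_ModI/Op_ModL cases below: a hypothetical Java
  // fragment such as
  //
  //     int q = a / b;
  //     int r = a % b;
  //
  // leaves a DivI and a ModI with identical inputs in the graph.  On platforms
  // with a DivModI match rule both are subsumed by the two projections of one
  // fused divmod node; otherwise the ModI is rewritten to a - ((a / b) * b),
  // reusing the existing DivI.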
2958   case Op_ModI:
2959     if (UseDivMod) {
2960       // Check if a%b and a/b both exist
2961       Node* d = n->find_similar(Op_DivI);
2962       if (d) {
2963         // Replace them with a fused divmod if supported
2964         if (Matcher::has_match_rule(Op_DivModI)) {
2965           DivModINode* divmod = DivModINode::make(this, n);
2966           d->subsume_by(divmod->div_proj(), this);
2967           n->subsume_by(divmod->mod_proj(), this);
2968         } else {
2969           // replace a%b with a-((a/b)*b)
2970           Node* mult = new MulINode(d, d->in(2));
2971           Node* sub  = new SubINode(d->in(1), mult);
2972           n->subsume_by(sub, this);
2973         }
2974       }
2975     }
2976     break;
2977 
2978   case Op_ModL:
2979     if (UseDivMod) {
2980       // Check if a%b and a/b both exist
2981       Node* d = n->find_similar(Op_DivL);
2982       if (d) {
2983         // Replace them with a fused divmod if supported
2984         if (Matcher::has_match_rule(Op_DivModL)) {
2985           DivModLNode* divmod = DivModLNode::make(this, n);
2986           d->subsume_by(divmod->div_proj(), this);
2987           n->subsume_by(divmod->mod_proj(), this);
2988         } else {
2989           // replace a%b with a-((a/b)*b)
2990           Node* mult = new MulLNode(d, d->in(2));
2991           Node* sub  = new SubLNode(d->in(1), mult);
2992           n->subsume_by(sub, this);
2993         }
2994       }
2995     }
2996     break;
2997 
2998   case Op_LoadVector:
2999   case Op_StoreVector:
3000     break;
3001 
3002   case Op_PackB:
3003   case Op_PackS:
3004   case Op_PackI:
3005   case Op_PackF:
3006   case Op_PackL:
3007   case Op_PackD:
3008     if (n->req()-1 > 2) {
3009       // Replace many-operand PackNodes with a binary tree for matching
3010       PackNode* p = (PackNode*) n;
3011       Node* btp = p->binary_tree_pack(this, 1, n->req());
3012       n->subsume_by(btp, this);
3013     }
3014     break;
3015   case Op_Loop:
3016   case Op_CountedLoop:
3017     if (n->as_Loop()->is_inner_loop()) {
3018       frc.inc_inner_loop_count();
3019     }
3020     break;
3021   case Op_LShiftI:
3022   case Op_RShiftI:
3023   case Op_URShiftI:
3024   case Op_LShiftL:
3025   case Op_RShiftL:
3026   case Op_URShiftL:
3027     if (Matcher::need_masked_shift_count) {
3028       // The cpu's shift instructions don't restrict the count to the
3029       // lower 5/6 bits. We need to do the masking ourselves.
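      // For example (illustrative): for an Op_LShiftI the mask is 31, so a
      // constant count of 40 is folded to 40 & 31 == 8, while a variable count
      // whose type is not known to be within [0, 31] gets an explicit AndI
      // with 31.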
3030       Node* in2 = n->in(2);
3031       juint mask = (n->bottom_type() == TypeInt::INT) ? (BitsPerInt - 1) : (BitsPerLong - 1);
3032       const TypeInt* t = in2->find_int_type();
3033       if (t != NULL && t->is_con()) {
3034         juint shift = t->get_con();
3035         if (shift > mask) { // Unsigned cmp
3036           n->set_req(2, ConNode::make(this, TypeInt::make(shift & mask)));
3037         }
3038       } else {
3039         if (t == NULL || t->_lo < 0 || t->_hi > (int)mask) {
3040           Node* shift = new AndINode(in2, ConNode::make(this, TypeInt::make(mask)));
3041           n->set_req(2, shift);
3042         }
3043       }
3044       if (in2->outcnt() == 0) { // Remove dead node
3045         in2->disconnect_inputs(NULL, this);
3046       }
3047     }
3048     break;
3049   case Op_MemBarStoreStore:
3050   case Op_MemBarRelease:
3051     // Break the link with AllocateNode: it is no longer useful and
3052     // confuses register allocation.
3053     if (n->req() > MemBarNode::Precedent) {
3054       n->set_req(MemBarNode::Precedent, top());
3055     }
3056     break;
3057   default:
3058     assert( !n->is_Call(), "" );
3059     assert( !n->is_Mem(), "" );
3060     break;
3061   }
3062 
3063   // Collect CFG split points
3064   if (n->is_MultiBranch())
3065     frc._tests.push(n);
3066 }
3067 
3068 //------------------------------final_graph_reshaping_walk---------------------
3069 // Replacing Opaque nodes with their input in final_graph_reshaping_impl()
3070 // requires that the walk visits a node's inputs before visiting the node.
3071 void Compile::final_graph_reshaping_walk( Node_Stack &nstack, Node *root, Final_Reshape_Counts &frc ) {
3072   ResourceArea *area = Thread::current()->resource_area();
3073   Unique_Node_List sfpt(area);
3074 
3075   frc._visited.set(root->_idx); // first, mark node as visited
3076   uint cnt = root->req();
3077   Node *n = root;
3078   uint  i = 0;
3079   while (true) {
3080     if (i < cnt) {
3081       // Place all non-visited non-null inputs onto stack
3082       Node* m = n->in(i);
3083       ++i;
3084       if (m != NULL && !frc._visited.test_set(m->_idx)) {
3085         if (m->is_SafePoint() && m->as_SafePoint()->jvms() != NULL) {
3086           // compute worst case interpreter size in case of a deoptimization
3087           update_interpreter_frame_size(m->as_SafePoint()->jvms()->interpreter_frame_size());
3088 
3089           sfpt.push(m);
3090         }
3091         cnt = m->req();
3092         nstack.push(n, i); // put on stack parent and next input's index
3093         n = m;
3094         i = 0;
3095       }
3096     } else {
3097       // Now do post-visit work
3098       final_graph_reshaping_impl( n, frc );
3099       if (nstack.is_empty())
3100         break;             // finished
3101       n = nstack.node();   // Get node from stack
3102       cnt = n->req();
3103       i = nstack.index();
3104       nstack.pop();        // Shift to the next node on stack
3105     }
3106   }
3107 
3108   // Skip next transformation if compressed oops are not used.
3109   if ((UseCompressedOops && !Matcher::gen_narrow_oop_implicit_null_checks()) ||
3110       (!UseCompressedOops && !UseCompressedClassPointers))
3111     return;
3112 
3113   // Go over safepoint nodes to skip DecodeN/DecodeNKlass nodes for debug edges.
3114   // It can be done for uncommon traps or any safepoints/calls
3115   // if the DecodeN/DecodeNKlass node is referenced only in debug info.
3116   while (sfpt.size() > 0) {
3117     n = sfpt.pop();
3118     JVMState *jvms = n->as_SafePoint()->jvms();
3119     assert(jvms != NULL, "sanity");
3120     int start = jvms->debug_start();
3121     int end   = n->req();
3122     bool is_uncommon = (n->is_CallStaticJava() &&
3123                         n->as_CallStaticJava()->uncommon_trap_request() != 0);
3124     for (int j = start; j < end; j++) {
3125       Node* in = n->in(j);
3126       if (in->is_DecodeNarrowPtr()) {
3127         bool safe_to_skip = true;
3128         if (!is_uncommon) {
3129           // Is it safe to skip?
3130           for (uint i = 0; i < in->outcnt(); i++) {
3131             Node* u = in->raw_out(i);
3132             if (!u->is_SafePoint() ||
3133                 (u->is_Call() && u->as_Call()->has_non_debug_use(n))) {
3134               safe_to_skip = false;
3135             }
3136           }
3137         }
3138         if (safe_to_skip) {
3139           n->set_req(j, in->in(1));
3140         }
3141         if (in->outcnt() == 0) {
3142           in->disconnect_inputs(NULL, this);
3143         }
3144       }
3145     }
3146   }
3147 }
3148 
3149 //------------------------------final_graph_reshaping--------------------------
3150 // Final Graph Reshaping.
3151 //
3152 // (1) Clone simple inputs to uncommon calls, so they can be scheduled late
3153 //     and not commoned up and forced early.  Must come after regular
3154 //     optimizations to avoid GVN undoing the cloning.  Clone constant
3155 //     inputs to Loop Phis; these will be split by the allocator anyways.
3156 //     Remove Opaque nodes.
3157 // (2) Move last-uses by commutative operations to the left input to encourage
3158 //     Intel update-in-place two-address operations and better register usage
3159 //     on RISCs.  Must come after regular optimizations to avoid GVN Ideal
3160 //     calls canonicalizing them back.
3161 // (3) Count the number of double-precision FP ops, single-precision FP ops
3162 //     and call sites.  On Intel, we can get correct rounding either by
3163 //     forcing singles to memory (requires extra stores and loads after each
3164 //     FP bytecode) or we can set a rounding mode bit (requires setting and
3165 //     clearing the mode bit around call sites).  The mode bit is only used
3166 //     if the relative frequency of single FP ops to calls is high enough.
3167 //     This is a key transform for SPEC mpeg_audio.
3168 // (4) Detect infinite loops; blobs of code reachable from above but not
3169 //     below.  Several of the Code_Gen algorithms fail on such code shapes,
3170 //     so we simply bail out.  Happens a lot in ZKM.jar, but also happens
3171 //     from time to time in other code (such as -Xcomp finalizer loops, etc.).
3172 //     Detection is by looking for IfNodes where only 1 projection is
3173 //     reachable from below or CatchNodes missing some targets.
3174 // (5) Assert for insane oop offsets in debug mode.
3175 
3176 bool Compile::final_graph_reshaping() {
3177   // an infinite loop may have been eliminated by the optimizer,
3178   // in which case the graph will be empty.
3179   if (root()->req() == 1) {
3180     record_method_not_compilable("trivial infinite loop");
3181     return true;
3182   }
3183 
3184   // Expensive nodes have their control input set to prevent the GVN
3185   // from freely commoning them. There's no GVN beyond this point so
3186   // no need to keep the control input. We want the expensive nodes to
3187   // be freely moved to the least frequent code path by gcm.
3188   assert(OptimizeExpensiveOps || expensive_count() == 0, "optimization off but list non empty?");
3189   for (int i = 0; i < expensive_count(); i++) {
3190     _expensive_nodes->at(i)->set_req(0, NULL);
3191   }
3192 
3193   Final_Reshape_Counts frc;
3194 
3195   // Visit everybody reachable!
3196   // Allocate stack of size C->unique()/2 to avoid frequent realloc
3197   Node_Stack nstack(unique() >> 1);
3198   final_graph_reshaping_walk(nstack, root(), frc);
3199 
3200   // Check for unreachable (from below) code (i.e., infinite loops).
3201   for( uint i = 0; i < frc._tests.size(); i++ ) {
3202     MultiBranchNode *n = frc._tests[i]->as_MultiBranch();
3203     // Get number of CFG targets.
3204     // Note that PCTables include exception targets after calls.
3205     uint required_outcnt = n->required_outcnt();
3206     if (n->outcnt() != required_outcnt) {
3207       // Check for a few special cases.  Rethrow Nodes never take the
3208       // 'fall-thru' path, so expected kids is 1 less.
3209       if (n->is_PCTable() && n->in(0) && n->in(0)->in(0)) {
3210         if (n->in(0)->in(0)->is_Call()) {
3211           CallNode *call = n->in(0)->in(0)->as_Call();
3212           if (call->entry_point() == OptoRuntime::rethrow_stub()) {
3213             required_outcnt--;      // Rethrow always has 1 less kid
3214           } else if (call->req() > TypeFunc::Parms &&
3215                      call->is_CallDynamicJava()) {
3216             // Check for null receiver. In such a case, the optimizer has
3217             // detected that the virtual call will always result in a null
3218             // pointer exception. The fall-through projection of this CatchNode
3219             // will not be populated.
3220             Node *arg0 = call->in(TypeFunc::Parms);
3221             if (arg0->is_Type() &&
3222                 arg0->as_Type()->type()->higher_equal(TypePtr::NULL_PTR)) {
3223               required_outcnt--;
3224             }
3225           } else if (call->entry_point() == OptoRuntime::new_array_Java() &&
3226                      call->req() > TypeFunc::Parms+1 &&
3227                      call->is_CallStaticJava()) {
3228             // Check for negative array length. In such a case, the optimizer has
3229             // detected that the allocation attempt will always result in an
3230             // exception. There is no fall-through projection of this CatchNode.
3231             Node *arg1 = call->in(TypeFunc::Parms+1);
3232             if (arg1->is_Type() &&
3233                 arg1->as_Type()->type()->join(TypeInt::POS)->empty()) {
3234               required_outcnt--;
3235             }
3236           }
3237         }
3238       }
3239       // Recheck with a better notion of 'required_outcnt'
3240       if (n->outcnt() != required_outcnt) {
3241         record_method_not_compilable("malformed control flow");
3242         return true;            // Not all targets reachable!
3243       }
3244     }
3245     // Check that I actually visited all kids.  Unreached kids
3246     // must be infinite loops.
3247     for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++)
3248       if (!frc._visited.test(n->fast_out(j)->_idx)) {
3249         record_method_not_compilable("infinite loop");
3250         return true;            // Found unvisited kid; must be unreach
3251       }
3252   }
3253 
3254   // If the original bytecodes contained a mixture of floats and doubles,
3255   // check if the optimizer has made it homogeneous, item (3).
3256   if( Use24BitFPMode && Use24BitFP && UseSSE == 0 &&
3257       frc.get_float_count() > 32 &&
3258       frc.get_double_count() == 0 &&
3259       (10 * frc.get_call_count() < frc.get_float_count()) ) {
3260     set_24_bit_selection_and_mode( false,  true );
3261   }
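  // Worked example of the heuristic above (illustrative numbers): with 100
  // single-precision FP ops, no doubles and 5 call sites, 10 * 5 = 50 < 100,
  // so the rounding-mode-bit scheme is selected rather than forcing singles
  // to memory.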
3262 
3263   set_java_calls(frc.get_java_call_count());
3264   set_inner_loops(frc.get_inner_loop_count());
3265 
3266   // No infinite loops, no reason to bail out.
3267   return false;
3268 }
3269 
3270 //-----------------------------too_many_traps----------------------------------
3271 // Report if there are too many traps at the current method and bci.
3272 // Return true if there was a trap, and/or PerMethodTrapLimit is exceeded.
3273 bool Compile::too_many_traps(ciMethod* method,
3274                              int bci,
3275                              Deoptimization::DeoptReason reason) {
3276   ciMethodData* md = method->method_data();
3277   if (md->is_empty()) {
3278     // Assume the trap has not occurred, or that it occurred only
3279     // because of a transient condition during start-up in the interpreter.
3280     return false;
3281   }
3282   ciMethod* m = Deoptimization::reason_is_speculate(reason) ? this->method() : NULL;
3283   if (md->has_trap_at(bci, m, reason) != 0) {
3284     // Assume PerBytecodeTrapLimit==0, for a more conservative heuristic.
3285     // Also, if there are multiple reasons, or if there is no per-BCI record,
3286     // assume the worst.
3287     if (log())
3288       log()->elem("observe trap='%s' count='%d'",
3289                   Deoptimization::trap_reason_name(reason),
3290                   md->trap_count(reason));
3291     return true;
3292   } else {
3293     // Ignore method/bci and see if there have been too many globally.
3294     return too_many_traps(reason, md);
3295   }
3296 }
3297 
3298 // Less-accurate variant which does not require a method and bci.
3299 bool Compile::too_many_traps(Deoptimization::DeoptReason reason,
3300                              ciMethodData* logmd) {
3301   if (trap_count(reason) >= Deoptimization::per_method_trap_limit(reason)) {
3302     // Too many traps globally.
3303     // Note that we use cumulative trap_count, not just md->trap_count.
3304     if (log()) {
3305       int mcount = (logmd == NULL)? -1: (int)logmd->trap_count(reason);
3306       log()->elem("observe trap='%s' count='0' mcount='%d' ccount='%d'",
3307                   Deoptimization::trap_reason_name(reason),
3308                   mcount, trap_count(reason));
3309     }
3310     return true;
3311   } else {
3312     // The coast is clear.
3313     return false;
3314   }
3315 }
3316 
3317 //--------------------------too_many_recompiles--------------------------------
3318 // Report if there are too many recompiles at the current method and bci.
3319 // Consults PerBytecodeRecompilationCutoff and PerMethodRecompilationCutoff.
3320 // Is not eager to return true, since this will cause the compiler to use
3321 // Action_none for a trap point, to avoid too many recompilations.
3322 bool Compile::too_many_recompiles(ciMethod* method,
3323                                   int bci,
3324                                   Deoptimization::DeoptReason reason) {
3325   ciMethodData* md = method->method_data();
3326   if (md->is_empty()) {
3327     // Assume the trap has not occurred, or that it occurred only
3328     // because of a transient condition during start-up in the interpreter.
3329     return false;
3330   }
3331   // Pick a cutoff point well within PerBytecodeRecompilationCutoff.
3332   uint bc_cutoff = (uint) PerBytecodeRecompilationCutoff / 8;
3333   uint m_cutoff  = (uint) PerMethodRecompilationCutoff / 2 + 1;  // not zero
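  // For example, with PerBytecodeRecompilationCutoff == 200 and
  // PerMethodRecompilationCutoff == 400 this gives bc_cutoff == 25 and
  // m_cutoff == 201.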
3334   Deoptimization::DeoptReason per_bc_reason
3335     = Deoptimization::reason_recorded_per_bytecode_if_any(reason);
3336   ciMethod* m = Deoptimization::reason_is_speculate(reason) ? this->method() : NULL;
3337   if ((per_bc_reason == Deoptimization::Reason_none
3338        || md->has_trap_at(bci, m, reason) != 0)
3339       // The trap frequency measure we care about is the recompile count:
3340       && md->trap_recompiled_at(bci, m)
3341       && md->overflow_recompile_count() >= bc_cutoff) {
3342     // Do not emit a trap here if it has already caused recompilations.
3343     // Also, if there are multiple reasons, or if there is no per-BCI record,
3344     // assume the worst.
3345     if (log())
3346       log()->elem("observe trap='%s recompiled' count='%d' recompiles2='%d'",
3347                   Deoptimization::trap_reason_name(reason),
3348                   md->trap_count(reason),
3349                   md->overflow_recompile_count());
3350     return true;
3351   } else if (trap_count(reason) != 0
3352              && decompile_count() >= m_cutoff) {
3353     // Too many recompiles globally, and we have seen this sort of trap.
3354     // Use cumulative decompile_count, not just md->decompile_count.
3355     if (log())
3356       log()->elem("observe trap='%s' count='%d' mcount='%d' decompiles='%d' mdecompiles='%d'",
3357                   Deoptimization::trap_reason_name(reason),
3358                   md->trap_count(reason), trap_count(reason),
3359                   md->decompile_count(), decompile_count());
3360     return true;
3361   } else {
3362     // The coast is clear.
3363     return false;
3364   }
3365 }
3366 
3367 // Compute when not to trap. Used by matching trap-based nodes and
3368 // the NullCheck optimization.
3369 void Compile::set_allowed_deopt_reasons() {
3370   _allowed_reasons = 0;
3371   if (is_method_compilation()) {
3372     for (int rs = (int)Deoptimization::Reason_none+1; rs < Compile::trapHistLength; rs++) {
3373       assert(rs < BitsPerInt, "recode bit map");
3374       if (!too_many_traps((Deoptimization::DeoptReason) rs)) {
3375         _allowed_reasons |= nth_bit(rs);
3376       }
3377     }
3378   }
3379 }
3380 
3381 #ifndef PRODUCT
3382 //------------------------------verify_graph_edges---------------------------
3383 // Walk the Graph and verify that there is a one-to-one correspondence
3384 // between Use-Def edges and Def-Use edges in the graph.
3385 void Compile::verify_graph_edges(bool no_dead_code) {
3386   if (VerifyGraphEdges) {
3387     ResourceArea *area = Thread::current()->resource_area();
3388     Unique_Node_List visited(area);
3389     // Call recursive graph walk to check edges
3390     _root->verify_edges(visited);
3391     if (no_dead_code) {
3392       // Now make sure that no visited node is used by an unvisited node.
3393       bool dead_nodes = false;
3394       Unique_Node_List checked(area);
3395       while (visited.size() > 0) {
3396         Node* n = visited.pop();
3397         checked.push(n);
3398         for (uint i = 0; i < n->outcnt(); i++) {
3399           Node* use = n->raw_out(i);
3400           if (checked.member(use))  continue;  // already checked
3401           if (visited.member(use))  continue;  // already in the graph
3402           if (use->is_Con())        continue;  // a dead ConNode is OK
3403           // At this point, we have found a dead node which is DU-reachable.
3404           if (!dead_nodes) {
3405             tty->print_cr("*** Dead nodes reachable via DU edges:");
3406             dead_nodes = true;
3407           }
3408           use->dump(2);
3409           tty->print_cr("---");
3410           checked.push(use);  // No repeats; pretend it is now checked.
3411         }
3412       }
3413       assert(!dead_nodes, "using nodes must be reachable from root");
3414     }
3415   }
3416 }
3417 
3418 // Verify GC barrier consistency
3419 // Currently supported:
3420 // - G1 pre-barriers (see GraphKit::g1_write_barrier_pre())
3421 void Compile::verify_barriers() {
3422   if (UseG1GC) {
3423     // Verify G1 pre-barriers
3424     const int marking_offset = in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_active());
3425 
3426     ResourceArea *area = Thread::current()->resource_area();
3427     Unique_Node_List visited(area);
3428     Node_List worklist(area);
3429     // We're going to walk control flow backwards starting from the Root
3430     worklist.push(_root);
3431     while (worklist.size() > 0) {
3432       Node* x = worklist.pop();
3433       if (x == NULL || x == top()) continue;
3434       if (visited.member(x)) {
3435         continue;
3436       } else {
3437         visited.push(x);
3438       }
3439 
3440       if (x->is_Region()) {
3441         for (uint i = 1; i < x->req(); i++) {
3442           worklist.push(x->in(i));
3443         }
3444       } else {
3445         worklist.push(x->in(0));
3446         // We are looking for the pattern:
3447         //                            /->ThreadLocal
3448         // If->Bool->CmpI->LoadB->AddP->ConL(marking_offset)
3449         //              \->ConI(0)
3450         // We want to verify that the If and the LoadB have the same control
3451         // See GraphKit::g1_write_barrier_pre()
3452         if (x->is_If()) {
3453           IfNode *iff = x->as_If();
3454           if (iff->in(1)->is_Bool() && iff->in(1)->in(1)->is_Cmp()) {
3455             CmpNode *cmp = iff->in(1)->in(1)->as_Cmp();
3456             if (cmp->Opcode() == Op_CmpI && cmp->in(2)->is_Con() && cmp->in(2)->bottom_type()->is_int()->get_con() == 0
3457                 && cmp->in(1)->is_Load()) {
3458               LoadNode* load = cmp->in(1)->as_Load();
3459               if (load->Opcode() == Op_LoadB && load->in(2)->is_AddP() && load->in(2)->in(2)->Opcode() == Op_ThreadLocal
3460                   && load->in(2)->in(3)->is_Con()
3461                   && load->in(2)->in(3)->bottom_type()->is_intptr_t()->get_con() == marking_offset) {
3462 
3463                 Node* if_ctrl = iff->in(0);
3464                 Node* load_ctrl = load->in(0);
3465 
3466                 if (if_ctrl != load_ctrl) {
3467                   // Skip possible CProj->NeverBranch in infinite loops
3468                   if ((if_ctrl->is_Proj() && if_ctrl->Opcode() == Op_CProj)
3469                       && (if_ctrl->in(0)->is_MultiBranch() && if_ctrl->in(0)->Opcode() == Op_NeverBranch)) {
3470                     if_ctrl = if_ctrl->in(0)->in(0);
3471                   }
3472                 }
3473                 assert(load_ctrl != NULL && if_ctrl == load_ctrl, "controls must match");
3474               }
3475             }
3476           }
3477         }
3478       }
3479     }
3480   }
3481 }
3482 
3483 #endif
3484 
3485 // The Compile object keeps track of failure reasons separately from the ciEnv.
3486 // This is required because there is not quite a 1-1 relation between the
3487 // ciEnv and its compilation task and the Compile object.  Note that one
3488 // ciEnv might use two Compile objects, if C2Compiler::compile_method decides
3489 // to backtrack and retry without subsuming loads.  Other than this backtracking
3490 // behavior, the Compile's failure reason is quietly copied up to the ciEnv
3491 // by the logic in C2Compiler.
3492 void Compile::record_failure(const char* reason) {
3493   if (log() != NULL) {
3494     log()->elem("failure reason='%s' phase='compile'", reason);
3495   }
3496   if (_failure_reason == NULL) {
3497     // Record the first failure reason.
3498     _failure_reason = reason;
3499   }
3500 
3501   EventCompilerFailure event;
3502   if (event.should_commit()) {
3503     event.set_compileID(Compile::compile_id());
3504     event.set_failure(reason);
3505     event.commit();
3506   }
3507 
3508   if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) {
3509     C->print_method(PHASE_FAILURE);
3510   }
3511   _root = NULL;  // flush the graph, too
3512 }
3513 
3514 Compile::TracePhase::TracePhase(const char* name, elapsedTimer* accumulator, bool dolog)
3515   : TraceTime(NULL, accumulator, false NOT_PRODUCT( || TimeCompiler ), false),
3516     _phase_name(name), _dolog(dolog)
3517 {
3518   if (dolog) {
3519     C = Compile::current();
3520     _log = C->log();
3521   } else {
3522     C = NULL;
3523     _log = NULL;
3524   }
3525   if (_log != NULL) {
3526     _log->begin_head("phase name='%s' nodes='%d' live='%d'", _phase_name, C->unique(), C->live_nodes());
3527     _log->stamp();
3528     _log->end_head();
3529   }
3530 }
3531 
3532 Compile::TracePhase::~TracePhase() {
3533 
3534   C = Compile::current();
3535   if (_dolog) {
3536     _log = C->log();
3537   } else {
3538     _log = NULL;
3539   }
3540 
3541 #ifdef ASSERT
3542   if (PrintIdealNodeCount) {
3543     tty->print_cr("phase name='%s' nodes='%d' live='%d' live_graph_walk='%d'",
3544                   _phase_name, C->unique(), C->live_nodes(), C->count_live_nodes_by_graph_walk());
3545   }
3546 
3547   if (VerifyIdealNodeCount) {
3548     Compile::current()->print_missing_nodes();
3549   }
3550 #endif
3551 
3552   if (_log != NULL) {
3553     _log->done("phase name='%s' nodes='%d' live='%d'", _phase_name, C->unique(), C->live_nodes());
3554   }
3555 }
3556 
3557 //=============================================================================
3558 // Two Constants are equal when the type and the value are equal.
3559 bool Compile::Constant::operator==(const Constant& other) {
3560   if (type()          != other.type()         )  return false;
3561   if (can_be_reused() != other.can_be_reused())  return false;
3562   // For floating point values we compare the bit pattern.
3563   switch (type()) {
3564   case T_FLOAT:   return (_v._value.i == other._v._value.i);
3565   case T_LONG:
3566   case T_DOUBLE:  return (_v._value.j == other._v._value.j);
3567   case T_OBJECT:
3568   case T_ADDRESS: return (_v._value.l == other._v._value.l);
3569   case T_VOID:    return (_v._value.l == other._v._value.l);  // jump-table entries
3570   case T_METADATA: return (_v._metadata == other._v._metadata);
3571   default: ShouldNotReachHere();
3572   }
3573   return false;
3574 }
3575 
3576 static int type_to_size_in_bytes(BasicType t) {
3577   switch (t) {
3578   case T_LONG:    return sizeof(jlong  );
3579   case T_FLOAT:   return sizeof(jfloat );
3580   case T_DOUBLE:  return sizeof(jdouble);
3581   case T_METADATA: return sizeof(Metadata*);
3582     // We use T_VOID as a marker for jump-table entries (labels) which
3583     // need an internal word relocation.
3584   case T_VOID:
3585   case T_ADDRESS:
3586   case T_OBJECT:  return sizeof(jobject);
3587   }
3588 
3589   ShouldNotReachHere();
3590   return -1;
3591 }
3592 
3593 int Compile::ConstantTable::qsort_comparator(Constant* a, Constant* b) {
3594   // sort descending
3595   if (a->freq() > b->freq())  return -1;
3596   if (a->freq() < b->freq())  return  1;
3597   return 0;
3598 }
3599 
3600 void Compile::ConstantTable::calculate_offsets_and_size() {
3601   // First, sort the array by frequencies.
3602   _constants.sort(qsort_comparator);
3603 
3604 #ifdef ASSERT
3605   // Make sure all jump-table entries were sorted to the end of the
3606   // array (they have a negative frequency).
3607   bool found_void = false;
3608   for (int i = 0; i < _constants.length(); i++) {
3609     Constant con = _constants.at(i);
3610     if (con.type() == T_VOID)
3611       found_void = true;  // jump-tables
3612     else
3613       assert(!found_void, "wrong sorting");
3614   }
3615 #endif
3616 
3617   int offset = 0;
3618   for (int i = 0; i < _constants.length(); i++) {
3619     Constant* con = _constants.adr_at(i);
3620 
3621     // Align offset for type.
3622     int typesize = type_to_size_in_bytes(con->type());
3623     offset = align_size_up(offset, typesize);
3624     con->set_offset(offset);   // set constant's offset
3625 
3626     if (con->type() == T_VOID) {
3627       MachConstantNode* n = (MachConstantNode*) con->get_jobject();
3628       offset = offset + typesize * n->outcnt();  // expand jump-table
3629     } else {
3630       offset = offset + typesize;
3631     }
3632   }
3633 
3634   // Align size up to the next section start (which is insts; see
3635   // CodeBuffer::align_at_start).
3636   assert(_size == -1, "already set?");
3637   _size = align_size_up(offset, CodeEntryAlignment);
3638 }
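
// Layout sketch for the offset computation above (hypothetical table): with
// constants sorted by descending frequency as { T_DOUBLE, T_FLOAT, T_LONG },
// the double lands at offset 0, the float at offset 8, and the long is
// aligned up from 12 to 16, for a raw size of 24; the table size is then
// rounded up to CodeEntryAlignment.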
3639 
3640 void Compile::ConstantTable::emit(CodeBuffer& cb) {
3641   MacroAssembler _masm(&cb);
3642   for (int i = 0; i < _constants.length(); i++) {
3643     Constant con = _constants.at(i);
3644     address constant_addr;
3645     switch (con.type()) {
3646     case T_LONG:   constant_addr = _masm.long_constant(  con.get_jlong()  ); break;
3647     case T_FLOAT:  constant_addr = _masm.float_constant( con.get_jfloat() ); break;
3648     case T_DOUBLE: constant_addr = _masm.double_constant(con.get_jdouble()); break;
3649     case T_OBJECT: {
3650       jobject obj = con.get_jobject();
3651       int oop_index = _masm.oop_recorder()->find_index(obj);
3652       constant_addr = _masm.address_constant((address) obj, oop_Relocation::spec(oop_index));
3653       break;
3654     }
3655     case T_ADDRESS: {
3656       address addr = (address) con.get_jobject();
3657       constant_addr = _masm.address_constant(addr);
3658       break;
3659     }
3660     // We use T_VOID as a marker for jump-table entries (labels) which
3661     // need an internal word relocation.
3662     case T_VOID: {
3663       MachConstantNode* n = (MachConstantNode*) con.get_jobject();
3664       // Fill the jump-table with a dummy word.  The real value is
3665       // filled in later in fill_jump_table.
3666       address dummy = (address) n;
3667       constant_addr = _masm.address_constant(dummy);
3668       // Expand jump-table
3669       for (uint i = 1; i < n->outcnt(); i++) {
3670         address temp_addr = _masm.address_constant(dummy + i);
3671         assert(temp_addr, "consts section too small");
3672       }
3673       break;
3674     }
3675     case T_METADATA: {
3676       Metadata* obj = con.get_metadata();
3677       int metadata_index = _masm.oop_recorder()->find_index(obj);
3678       constant_addr = _masm.address_constant((address) obj, metadata_Relocation::spec(metadata_index));
3679       break;
3680     }
3681     default: ShouldNotReachHere();
3682     }
3683     assert(constant_addr, "consts section too small");
3684     assert((constant_addr - _masm.code()->consts()->start()) == con.offset(),
3685             err_msg_res("must be: %d == %d", (int) (constant_addr - _masm.code()->consts()->start()), (int)(con.offset())));
3686   }
3687 }
3688 
3689 int Compile::ConstantTable::find_offset(Constant& con) const {
3690   int idx = _constants.find(con);
3691   assert(idx != -1, "constant must be in constant table");
3692   int offset = _constants.at(idx).offset();
3693   assert(offset != -1, "constant table not emitted yet?");
3694   return offset;
3695 }
3696 
3697 void Compile::ConstantTable::add(Constant& con) {
3698   if (con.can_be_reused()) {
3699     int idx = _constants.find(con);
3700     if (idx != -1 && _constants.at(idx).can_be_reused()) {
3701       _constants.adr_at(idx)->inc_freq(con.freq());  // increase the frequency by the current value
3702       return;
3703     }
3704   }
3705   (void) _constants.append(con);
3706 }
3707 
3708 Compile::Constant Compile::ConstantTable::add(MachConstantNode* n, BasicType type, jvalue value) {
3709   Block* b = Compile::current()->cfg()->get_block_for_node(n);
3710   Constant con(type, value, b->_freq);
3711   add(con);
3712   return con;
3713 }
3714 
3715 Compile::Constant Compile::ConstantTable::add(Metadata* metadata) {
3716   Constant con(metadata);
3717   add(con);
3718   return con;
3719 }
3720 
3721 Compile::Constant Compile::ConstantTable::add(MachConstantNode* n, MachOper* oper) {
3722   jvalue value;
3723   BasicType type = oper->type()->basic_type();
3724   switch (type) {
3725   case T_LONG:    value.j = oper->constantL(); break;
3726   case T_FLOAT:   value.f = oper->constantF(); break;
3727   case T_DOUBLE:  value.d = oper->constantD(); break;
3728   case T_OBJECT:
3729   case T_ADDRESS: value.l = (jobject) oper->constant(); break;
3730   case T_METADATA: return add((Metadata*)oper->constant()); break;
3731   default: guarantee(false, err_msg_res("unhandled type: %s", type2name(type)));
3732   }
3733   return add(n, type, value);
3734 }
3735 
3736 Compile::Constant Compile::ConstantTable::add_jump_table(MachConstantNode* n) {
3737   jvalue value;
3738   // We can use the node pointer here to identify the right jump-table
3739   // as this method is called from Compile::Fill_buffer right before
3740   // the MachNodes are emitted and the jump-table is filled (meaning the
3741   // MachNode pointers do not change anymore).
3742   value.l = (jobject) n;
3743   Constant con(T_VOID, value, next_jump_table_freq(), false);  // Labels of a jump-table cannot be reused.
3744   add(con);
3745   return con;
3746 }
3747 
3748 void Compile::ConstantTable::fill_jump_table(CodeBuffer& cb, MachConstantNode* n, GrowableArray<Label*> labels) const {
3749   // If called from Compile::scratch_emit_size do nothing.
3750   if (Compile::current()->in_scratch_emit_size())  return;
3751 
3752   assert(labels.is_nonempty(), "must be");
3753   assert((uint) labels.length() == n->outcnt(), err_msg_res("must be equal: %d == %d", labels.length(), n->outcnt()));
3754 
3755   // Since MachConstantNode::constant_offset() also contains
3756   // table_base_offset() we need to subtract the table_base_offset()
3757   // to get the plain offset into the constant table.
3758   int offset = n->constant_offset() - table_base_offset();
3759 
3760   MacroAssembler _masm(&cb);
3761   address* jump_table_base = (address*) (_masm.code()->consts()->start() + offset);
3762 
3763   for (uint i = 0; i < n->outcnt(); i++) {
3764     address* constant_addr = &jump_table_base[i];
3765     assert(*constant_addr == (((address) n) + i), err_msg_res("all jump-table entries must contain adjusted node pointer: " INTPTR_FORMAT " == " INTPTR_FORMAT, p2i(*constant_addr), p2i(((address) n) + i)));
3766     *constant_addr = cb.consts()->target(*labels.at(i), (address) constant_addr);
3767     cb.consts()->relocate((address) constant_addr, relocInfo::internal_word_type);
3768   }
3769 }
3770 
3771 // The message about the current inlining is accumulated in
3772 // _print_inlining_stream and transferred into the _print_inlining_list
3773 // once we know whether inlining succeeds or not. For regular
3774 // inlining, messages are appended to the buffer pointed by
3775 // _print_inlining_idx in the _print_inlining_list. For late inlining,
3776 // a new buffer is added after _print_inlining_idx in the list. This
3777 // way we can update the inlining message for late inlining call site
3778 // when the inlining is attempted again.
3779 void Compile::print_inlining_init() {
3780   if (print_inlining() || print_intrinsics()) {
3781     _print_inlining_stream = new stringStream();
3782     _print_inlining_list = new (comp_arena())GrowableArray<PrintInliningBuffer>(comp_arena(), 1, 1, PrintInliningBuffer());
3783   }
3784 }
3785 
3786 void Compile::print_inlining_reinit() {
3787   if (print_inlining() || print_intrinsics()) {
3788     // Reallocate the buffer when we change ResourceMark
3789     _print_inlining_stream = new stringStream();
3790   }
3791 }
3792 
3793 void Compile::print_inlining_reset() {
3794   _print_inlining_stream->reset();
3795 }
3796 
3797 void Compile::print_inlining_commit() {
3798   assert(print_inlining() || print_intrinsics(), "PrintInlining off?");
3799   // Transfer the message from _print_inlining_stream to the current
3800   // _print_inlining_list buffer and clear _print_inlining_stream.
3801   _print_inlining_list->at(_print_inlining_idx).ss()->write(_print_inlining_stream->as_string(), _print_inlining_stream->size());
3802   print_inlining_reset();
3803 }
3804 
3805 void Compile::print_inlining_push() {
3806   // Add new buffer to the _print_inlining_list at current position
3807   _print_inlining_idx++;
3808   _print_inlining_list->insert_before(_print_inlining_idx, PrintInliningBuffer());
3809 }
3810 
3811 Compile::PrintInliningBuffer& Compile::print_inlining_current() {
3812   return _print_inlining_list->at(_print_inlining_idx);
3813 }
3814 
3815 void Compile::print_inlining_update(CallGenerator* cg) {
3816   if (print_inlining() || print_intrinsics()) {
3817     if (!cg->is_late_inline()) {
3818       if (print_inlining_current().cg() != NULL) {
3819         print_inlining_push();
3820       }
3821       print_inlining_commit();
3822     } else {
3823       if (print_inlining_current().cg() != cg &&
3824           (print_inlining_current().cg() != NULL ||
3825            print_inlining_current().ss()->size() != 0)) {
3826         print_inlining_push();
3827       }
3828       print_inlining_commit();
3829       print_inlining_current().set_cg(cg);
3830     }
3831   }
3832 }
3833 
3834 void Compile::print_inlining_move_to(CallGenerator* cg) {
3835   // We resume inlining at a late inlining call site. Locate the
3836   // corresponding inlining buffer so that we can update it.
3837   if (print_inlining()) {
3838     for (int i = 0; i < _print_inlining_list->length(); i++) {
3839       if (_print_inlining_list->adr_at(i)->cg() == cg) {
3840         _print_inlining_idx = i;
3841         return;
3842       }
3843     }
3844     ShouldNotReachHere();
3845   }
3846 }
3847 
3848 void Compile::print_inlining_update_delayed(CallGenerator* cg) {
3849   if (print_inlining()) {
3850     assert(_print_inlining_stream->size() > 0, "missing inlining msg");
3851     assert(print_inlining_current().cg() == cg, "wrong entry");
3852     // replace message with new message
3853     _print_inlining_list->at_put(_print_inlining_idx, PrintInliningBuffer());
3854     print_inlining_commit();
3855     print_inlining_current().set_cg(cg);
3856   }
3857 }
3858 
3859 void Compile::print_inlining_assert_ready() {
3860   assert(!_print_inlining || _print_inlining_stream->size() == 0, "losing data");
3861 }
3862 
3863 void Compile::process_print_inlining() {
3864   bool do_print_inlining = print_inlining() || print_intrinsics();
3865   if (do_print_inlining || log() != NULL) {
3866     // Print inlining message for candidates that we couldn't inline
3867     // for lack of space
3868     for (int i = 0; i < _late_inlines.length(); i++) {
3869       CallGenerator* cg = _late_inlines.at(i);
3870       if (!cg->is_mh_late_inline()) {
3871         const char* msg = "live nodes > LiveNodeCountInliningCutoff";
3872         if (do_print_inlining) {
3873           cg->print_inlining_late(msg);
3874         }
3875         log_late_inline_failure(cg, msg);
3876       }
3877     }
3878   }
3879   if (do_print_inlining) {
3880     ResourceMark rm;
3881     stringStream ss;
3882     for (int i = 0; i < _print_inlining_list->length(); i++) {
3883       ss.print("%s", _print_inlining_list->adr_at(i)->ss()->as_string());
3884     }
3885     size_t end = ss.size();
3886     _print_inlining_output = NEW_ARENA_ARRAY(comp_arena(), char, end+1);
3887     strncpy(_print_inlining_output, ss.base(), end+1);
3888     _print_inlining_output[end] = 0;
3889   }
3890 }
3891 
3892 void Compile::dump_print_inlining() {
3893   if (_print_inlining_output != NULL) {
3894     tty->print_raw(_print_inlining_output);
3895   }
3896 }
3897 
3898 void Compile::log_late_inline(CallGenerator* cg) {
3899   if (log() != NULL) {
3900     log()->head("late_inline method='%d'  inline_id='" JLONG_FORMAT "'", log()->identify(cg->method()),
3901                 cg->unique_id());
3902     JVMState* p = cg->call_node()->jvms();
3903     while (p != NULL) {
3904       log()->elem("jvms bci='%d' method='%d'", p->bci(), log()->identify(p->method()));
3905       p = p->caller();
3906     }
3907     log()->tail("late_inline");
3908   }
3909 }
3910 
3911 void Compile::log_late_inline_failure(CallGenerator* cg, const char* msg) {
3912   log_late_inline(cg);
3913   if (log() != NULL) {
3914     log()->inline_fail(msg);
3915   }
3916 }
3917 
3918 void Compile::log_inline_id(CallGenerator* cg) {
3919   if (log() != NULL) {
3920     // The LogCompilation tool needs a unique way to identify late
3921     // inline call sites. This id must be unique for this call site in
3922     // this compilation. Try to have it unique across compilations as
3923     // well because it can be convenient when grepping through the log
3924     // file.
3925     // Distinguish OSR compilations from others in case CICountOSR is
3926     // on.
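    // Bit layout of the id (as derived from the expression below): bits
    // [0, 32) hold the node count, bit 32 flags an OSR compilation, and
    // bits 33 and up hold the compile id.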
3927     jlong id = ((jlong)unique()) + (((jlong)compile_id()) << 33) + (CICountOSR && is_osr_compilation() ? ((jlong)1) << 32 : 0);
3928     cg->set_unique_id(id);
3929     log()->elem("inline_id id='" JLONG_FORMAT "'", id);
3930   }
3931 }
3932 
3933 void Compile::log_inline_failure(const char* msg) {
3934   if (C->log() != NULL) {
3935     C->log()->inline_fail(msg);
3936   }
3937 }
3938 
3939 
3940 // Dump inlining replay data to the stream.
3941 // Don't change thread state and acquire any locks.
3942 void Compile::dump_inline_data(outputStream* out) {
3943   InlineTree* inl_tree = ilt();
3944   if (inl_tree != NULL) {
3945     out->print(" inline %d", inl_tree->count());
3946     inl_tree->dump_replay_data(out);
3947   }
3948 }
3949 
3950 int Compile::cmp_expensive_nodes(Node* n1, Node* n2) {
3951   if (n1->Opcode() < n2->Opcode())      return -1;
3952   else if (n1->Opcode() > n2->Opcode()) return 1;
3953 
3954   assert(n1->req() == n2->req(), err_msg_res("can't compare %s nodes: n1->req() = %d, n2->req() = %d", NodeClassNames[n1->Opcode()], n1->req(), n2->req()));
3955   for (uint i = 1; i < n1->req(); i++) {
3956     if (n1->in(i) < n2->in(i))      return -1;
3957     else if (n1->in(i) > n2->in(i)) return 1;
3958   }
3959 
3960   return 0;
3961 }
3962 
3963 int Compile::cmp_expensive_nodes(Node** n1p, Node** n2p) {
3964   Node* n1 = *n1p;
3965   Node* n2 = *n2p;
3966 
3967   return cmp_expensive_nodes(n1, n2);
3968 }
3969 
3970 void Compile::sort_expensive_nodes() {
3971   if (!expensive_nodes_sorted()) {
3972     _expensive_nodes->sort(cmp_expensive_nodes);
3973   }
3974 }
3975 
3976 bool Compile::expensive_nodes_sorted() const {
3977   for (int i = 1; i < _expensive_nodes->length(); i++) {
3978     if (cmp_expensive_nodes(_expensive_nodes->adr_at(i), _expensive_nodes->adr_at(i-1)) < 0) {
3979       return false;
3980     }
3981   }
3982   return true;
3983 }
3984 
3985 bool Compile::should_optimize_expensive_nodes(PhaseIterGVN &igvn) {
3986   if (_expensive_nodes->length() == 0) {
3987     return false;
3988   }
3989 
3990   assert(OptimizeExpensiveOps, "optimization off?");
3991 
3992   // Take this opportunity to remove dead nodes from the list
3993   int j = 0;
3994   for (int i = 0; i < _expensive_nodes->length(); i++) {
3995     Node* n = _expensive_nodes->at(i);
3996     if (!n->is_unreachable(igvn)) {
3997       assert(n->is_expensive(), "should be expensive");
3998       _expensive_nodes->at_put(j, n);
3999       j++;
4000     }
4001   }
4002   _expensive_nodes->trunc_to(j);
4003 
4004   // Then sort the list so that similar nodes are next to each other
4005   // and check for at least two nodes of identical kind with same data
4006   // inputs.
4007   sort_expensive_nodes();
4008 
4009   for (int i = 0; i < _expensive_nodes->length()-1; i++) {
4010     if (cmp_expensive_nodes(_expensive_nodes->adr_at(i), _expensive_nodes->adr_at(i+1)) == 0) {
4011       return true;
4012     }
4013   }
4014 
4015   return false;
4016 }
4017 
4018 void Compile::cleanup_expensive_nodes(PhaseIterGVN &igvn) {
4019   if (_expensive_nodes->length() == 0) {
4020     return;
4021   }
4022 
4023   assert(OptimizeExpensiveOps, "optimization off?");
4024 
4025   // Sort to bring similar nodes next to each other and clear the
4026   // control input of nodes for which there's only a single copy.
4027   sort_expensive_nodes();
4028 
4029   int j = 0;
4030   int identical = 0;
4031   int i = 0;
4032   for (; i < _expensive_nodes->length()-1; i++) {
4033     assert(j <= i, "can't write beyond current index");
4034     if (_expensive_nodes->at(i)->Opcode() == _expensive_nodes->at(i+1)->Opcode()) {
4035       identical++;
4036       _expensive_nodes->at_put(j++, _expensive_nodes->at(i));
4037       continue;
4038     }
4039     if (identical > 0) {
4040       _expensive_nodes->at_put(j++, _expensive_nodes->at(i));
4041       identical = 0;
4042     } else {
4043       Node* n = _expensive_nodes->at(i);
4044       igvn.hash_delete(n);
4045       n->set_req(0, NULL);
4046       igvn.hash_insert(n);
4047     }
4048   }
4049   if (identical > 0) {
4050     _expensive_nodes->at_put(j++, _expensive_nodes->at(i));
4051   } else if (_expensive_nodes->length() >= 1) {
4052     Node* n = _expensive_nodes->at(i);
4053     igvn.hash_delete(n);
4054     n->set_req(0, NULL);
4055     igvn.hash_insert(n);
4056   }
4057   _expensive_nodes->trunc_to(j);
4058 }
4059 
4060 void Compile::add_expensive_node(Node * n) {
4061   assert(!_expensive_nodes->contains(n), "duplicate entry in expensive list");
4062   assert(n->is_expensive(), "expensive nodes with non-null control here only");
4063   assert(!n->is_CFG() && !n->is_Mem(), "no cfg or memory nodes here");
4064   if (OptimizeExpensiveOps) {
4065     _expensive_nodes->append(n);
4066   } else {
4067     // Clear control input and let IGVN optimize expensive nodes if
4068     // OptimizeExpensiveOps is off.
4069     n->set_req(0, NULL);
4070   }
4071 }
4072 
4073 /**
4074  * Remove the speculative part of types and clean up the graph
4075  */
4076 void Compile::remove_speculative_types(PhaseIterGVN &igvn) {
4077   if (UseTypeSpeculation) {
4078     Unique_Node_List worklist;
4079     worklist.push(root());
4080     int modified = 0;
4081     // Go over all type nodes that carry a speculative type, drop the
4082     // speculative part of the type and enqueue the node for IGVN,
4083     // which may optimize it out.
4084     for (uint next = 0; next < worklist.size(); ++next) {
4085       Node *n  = worklist.at(next);
4086       if (n->is_Type()) {
4087         TypeNode* tn = n->as_Type();
4088         const Type* t = tn->type();
4089         const Type* t_no_spec = t->remove_speculative();
4090         if (t_no_spec != t) {
4091           bool in_hash = igvn.hash_delete(n);
4092           assert(in_hash, "node should be in igvn hash table");
4093           tn->set_type(t_no_spec);
4094           igvn.hash_insert(n);
4095           igvn._worklist.push(n); // give it a chance to go away
4096           modified++;
4097         }
4098       }
4099       uint max = n->len();
4100       for( uint i = 0; i < max; ++i ) {
4101         Node *m = n->in(i);
4102         if (not_a_node(m))  continue;
4103         worklist.push(m);
4104       }
4105     }
4106     // Drop the speculative part of all types in the igvn's type table
4107     igvn.remove_speculative_types();
4108     if (modified > 0) {
4109       igvn.optimize();
4110     }
4111 #ifdef ASSERT
4112     // Verify that after the IGVN is over no speculative type has resurfaced
4113     worklist.clear();
4114     worklist.push(root());
4115     for (uint next = 0; next < worklist.size(); ++next) {
4116       Node *n  = worklist.at(next);
4117       const Type* t = igvn.type_or_null(n);
4118       assert((t == NULL) || (t == t->remove_speculative()), "no more speculative types");
4119       if (n->is_Type()) {
4120         t = n->as_Type()->type();
4121         assert(t == t->remove_speculative(), "no more speculative types");
4122       }
4123       uint max = n->len();
4124       for( uint i = 0; i < max; ++i ) {
4125         Node *m = n->in(i);
4126         if (not_a_node(m))  continue;
4127         worklist.push(m);
4128       }
4129     }
4130     igvn.check_no_speculative_types();
4131 #endif
4132   }
4133 }
4134 
4135 // Auxiliary method to support randomized stressing/fuzzing.
4136 //
4137 // This method can be called an arbitrary number of times, with the current count
4138 // as the argument. The logic allows selecting a single candidate from the
4139 // running list of candidates as follows:
4140 //    int count = 0;
4141 //    Cand* selected = null;
4142 //    while(cand = cand->next()) {
4143 //      if (randomized_select(++count)) {
4144 //        selected = cand;
4145 //      }
4146 //    }
4147 //
4148 // Including count equalizes the chances any candidate is "selected".
4149 // This is useful when we don't have the complete list of candidates to choose
4150 // from uniformly. In this case, we need to adjust the randomness of the
4151 // selection, or else we will end up biasing the selection towards the latter
4152 // candidates.
4153 //
4154 // A quick back-of-the-envelope calculation shows that for a list of n candidates
4155 // an equal probability for each candidate to persist as "best" can be
4156 // achieved by replacing the current best with the k-th "next" candidate with
4157 // probability 1/k. It can easily be shown that by the end of the run the
4158 // probability for any candidate converges to 1/n, thus giving a
4159 // uniform distribution among all the candidates.
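// Sketch of the argument: candidate i survives to the end iff it is selected
// at step i (probability 1/i) and not replaced at any later step k
// (probability (k-1)/k at each such step), so
//   P(i survives) = (1/i) * (i/(i+1)) * ((i+1)/(i+2)) * ... * ((n-1)/n) = 1/n.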
4160 //
4161 // We don't care about the domain size as long as (RANDOMIZED_DOMAIN / count) is large.
4162 #define RANDOMIZED_DOMAIN_POW 29
4163 #define RANDOMIZED_DOMAIN (1 << RANDOMIZED_DOMAIN_POW)
4164 #define RANDOMIZED_DOMAIN_MASK ((1 << (RANDOMIZED_DOMAIN_POW + 1)) - 1)
4165 bool Compile::randomized_select(int count) {
4166   assert(count > 0, "only positive");
4167   return (os::random() & RANDOMIZED_DOMAIN_MASK) < (RANDOMIZED_DOMAIN / count);
4168 }