/*
 * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_CFGPrinter.hpp"
#include "c1/c1_Canonicalizer.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_GraphBuilder.hpp"
#include "c1/c1_InstructionPrinter.hpp"
#include "ci/ciField.hpp"
#include "ci/ciKlass.hpp"
#include "compiler/compileBroker.hpp"
#include "interpreter/bytecode.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/bitMap.inline.hpp"

class BlockListBuilder VALUE_OBJ_CLASS_SPEC {
 private:
  Compilation* _compilation;
  IRScope*     _scope;

  BlockList    _blocks;                // internal list of all blocks
  BlockList*   _bci2block;             // mapping from bci to blocks for GraphBuilder

  // fields used by mark_loops
  BitMap       _active;                // for iteration of control flow graph
  BitMap       _visited;               // for iteration of control flow graph
  intArray     _loop_map;              // caches whether a block is contained in a loop
  int          _next_loop_index;       // next free loop number
  int          _next_block_number;     // for reverse postorder numbering of blocks

  // accessors
  Compilation*  compilation() const              { return _compilation; }
  IRScope*      scope() const                    { return _scope; }
  ciMethod*     method() const                   { return scope()->method(); }
  XHandlers*    xhandlers() const                { return scope()->xhandlers(); }

  // unified bailout support
  void          bailout(const char* msg) const   { compilation()->bailout(msg); }
  bool          bailed_out() const               { return compilation()->bailed_out(); }

  // helper functions
  BlockBegin* make_block_at(int bci, BlockBegin* predecessor);
  void handle_exceptions(BlockBegin* current, int cur_bci);
  void handle_jsr(BlockBegin* current, int sr_bci, int next_bci);
  void store_one(BlockBegin* current, int local);
  void store_two(BlockBegin* current, int local);
  void set_entries(int osr_bci);
  void set_leaders();

  void make_loop_header(BlockBegin* block);
  void mark_loops();
  int  mark_loops(BlockBegin* b, bool in_subroutine);

  // debugging
#ifndef PRODUCT
  void print();
#endif

 public:
  // creation
  BlockListBuilder(Compilation* compilation, IRScope* scope, int osr_bci);

  // accessors for GraphBuilder
  BlockList*    bci2block() const                { return _bci2block; }
};


// Implementation of BlockListBuilder

BlockListBuilder::BlockListBuilder(Compilation* compilation, IRScope* scope, int osr_bci)
 : _compilation(compilation)
 , _scope(scope)
 , _blocks(16)
 , _bci2block(new BlockList(scope->method()->code_size(), NULL))
 , _next_block_number(0)
 , _active()         // size not known yet
 , _visited()        // size not known yet
 , _next_loop_index(0)
 , _loop_map() // size not known yet
{
  set_entries(osr_bci);
  set_leaders();
  CHECK_BAILOUT();

  mark_loops();
  NOT_PRODUCT(if (PrintInitialBlockList) print());

#ifndef PRODUCT
  if (PrintCFGToFile) {
    stringStream title;
    title.print("BlockListBuilder ");
    scope->method()->print_name(&title);
    CFGPrinter::print_cfg(_bci2block, title.as_string(), false, false);
  }
#endif
}


void BlockListBuilder::set_entries(int osr_bci) {
  // generate start blocks
  BlockBegin* std_entry = make_block_at(0, NULL);
  if (scope()->caller() == NULL) {
    std_entry->set(BlockBegin::std_entry_flag);
  }
  if (osr_bci != -1) {
    BlockBegin* osr_entry = make_block_at(osr_bci, NULL);
    osr_entry->set(BlockBegin::osr_entry_flag);
  }

  // generate exception entry blocks
  XHandlers* list = xhandlers();
  const int n = list->length();
  for (int i = 0; i < n; i++) {
    XHandler* h = list->handler_at(i);
    BlockBegin* entry = make_block_at(h->handler_bci(), NULL);
    entry->set(BlockBegin::exception_entry_flag);
    h->set_entry_block(entry);
  }
}


BlockBegin* BlockListBuilder::make_block_at(int cur_bci, BlockBegin* predecessor) {
  assert(method()->bci_block_start().at(cur_bci), "wrong block starts of MethodLivenessAnalyzer");

  BlockBegin* block = _bci2block->at(cur_bci);
  if (block == NULL) {
    block = new BlockBegin(cur_bci);
    block->init_stores_to_locals(method()->max_locals());
    _bci2block->at_put(cur_bci, block);
    _blocks.append(block);

    assert(predecessor == NULL || predecessor->bci() < cur_bci, "targets for backward branches must already exist");
  }

  if (predecessor != NULL) {
    if (block->is_set(BlockBegin::exception_entry_flag)) {
      BAILOUT_("Exception handler can be reached by both normal and exceptional control flow", block);
    }

    predecessor->add_successor(block);
    block->increment_total_preds();
  }

  return block;
}


inline void BlockListBuilder::store_one(BlockBegin* current, int local) {
  current->stores_to_locals().set_bit(local);
}
inline void BlockListBuilder::store_two(BlockBegin* current, int local) {
  store_one(current, local);
  store_one(current, local + 1);
}
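
// Illustrative note (not part of the original source): two-slot values
// occupy a pair of local slots, so store_two marks both bits. For
// example, assuming a long lives in local 3:
//
//   lstore_3  =>  store_two(current, 3)   // marks locals 3 and 4
//   istore_1  =>  store_one(current, 1)   // marks local 1 only
//
// The resulting stores_to_locals() bitmap is later unioned into
// requires_phi_function() for blocks inside loops or subroutines
// (see mark_loops below).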


void BlockListBuilder::handle_exceptions(BlockBegin* current, int cur_bci) {
  // Draws edges from a block to its exception handlers
  XHandlers* list = xhandlers();
  const int n = list->length();

  for (int i = 0; i < n; i++) {
    XHandler* h = list->handler_at(i);

    if (h->covers(cur_bci)) {
      BlockBegin* entry = h->entry_block();
      assert(entry != NULL && entry == _bci2block->at(h->handler_bci()), "entry must be set");
      assert(entry->is_set(BlockBegin::exception_entry_flag), "flag must be set");

      // add each exception handler only once
      if (!current->is_successor(entry)) {
        current->add_successor(entry);
        entry->increment_total_preds();
      }

      // stop when reaching catchall
      if (h->catch_type() == 0) break;
    }
  }
}

void BlockListBuilder::handle_jsr(BlockBegin* current, int sr_bci, int next_bci) {
  // start a new block after the jsr bytecode and link this block into the cfg
  make_block_at(next_bci, current);

  // start a new block at the subroutine entry and mark it with a special flag
  BlockBegin* sr_block = make_block_at(sr_bci, current);
  if (!sr_block->is_set(BlockBegin::subroutine_entry_flag)) {
    sr_block->set(BlockBegin::subroutine_entry_flag);
  }
}
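
// Illustrative sketch (not from the original source): for a bytecode
// sequence such as
//
//   10: jsr 20      // call the subroutine at bci 20
//   13: ...         // continuation after the jsr
//   ...
//   20: astore_1    // subroutine entry, saves the return address
//   ...
//   25: ret 1       // returns to bci 13
//
// handle_jsr(current, 20, 13) creates leader blocks at bci 13 (the
// continuation) and at bci 20 (flagged with subroutine_entry_flag),
// both as successors of the block containing the jsr.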


void BlockListBuilder::set_leaders() {
  bool has_xhandlers = xhandlers()->has_handlers();
  BlockBegin* current = NULL;

  // Knowing which bcis start a new block simplifies the analysis.
  // Without it, backward branches could jump to a bci where no block was created
  // during bytecode iteration. This would require the creation of a new block at the
  // branch target and a modification of the successor lists.
  BitMap bci_block_start = method()->bci_block_start();

  ciBytecodeStream s(method());
  while (s.next() != ciBytecodeStream::EOBC()) {
    int cur_bci = s.cur_bci();

    if (bci_block_start.at(cur_bci)) {
      current = make_block_at(cur_bci, current);
    }
    assert(current != NULL, "must have current block");

    if (has_xhandlers && GraphBuilder::can_trap(method(), s.cur_bc())) {
      handle_exceptions(current, cur_bci);
    }

    switch (s.cur_bc()) {
      // track stores to local variables for selective creation of phi functions
      case Bytecodes::_iinc:     store_one(current, s.get_index()); break;
      case Bytecodes::_istore:   store_one(current, s.get_index()); break;
      case Bytecodes::_lstore:   store_two(current, s.get_index()); break;
      case Bytecodes::_fstore:   store_one(current, s.get_index()); break;
      case Bytecodes::_dstore:   store_two(current, s.get_index()); break;
      case Bytecodes::_astore:   store_one(current, s.get_index()); break;
      case Bytecodes::_istore_0: store_one(current, 0); break;
      case Bytecodes::_istore_1: store_one(current, 1); break;
      case Bytecodes::_istore_2: store_one(current, 2); break;
      case Bytecodes::_istore_3: store_one(current, 3); break;
      case Bytecodes::_lstore_0: store_two(current, 0); break;
      case Bytecodes::_lstore_1: store_two(current, 1); break;
      case Bytecodes::_lstore_2: store_two(current, 2); break;
      case Bytecodes::_lstore_3: store_two(current, 3); break;
      case Bytecodes::_fstore_0: store_one(current, 0); break;
      case Bytecodes::_fstore_1: store_one(current, 1); break;
      case Bytecodes::_fstore_2: store_one(current, 2); break;
      case Bytecodes::_fstore_3: store_one(current, 3); break;
      case Bytecodes::_dstore_0: store_two(current, 0); break;
      case Bytecodes::_dstore_1: store_two(current, 1); break;
      case Bytecodes::_dstore_2: store_two(current, 2); break;
      case Bytecodes::_dstore_3: store_two(current, 3); break;
      case Bytecodes::_astore_0: store_one(current, 0); break;
      case Bytecodes::_astore_1: store_one(current, 1); break;
      case Bytecodes::_astore_2: store_one(current, 2); break;
      case Bytecodes::_astore_3: store_one(current, 3); break;

      // track bytecodes that affect the control flow
      case Bytecodes::_athrow:  // fall through
      case Bytecodes::_ret:     // fall through
      case Bytecodes::_ireturn: // fall through
      case Bytecodes::_lreturn: // fall through
      case Bytecodes::_freturn: // fall through
      case Bytecodes::_dreturn: // fall through
      case Bytecodes::_areturn: // fall through
      case Bytecodes::_return:
        current = NULL;
        break;

      case Bytecodes::_ifeq:      // fall through
      case Bytecodes::_ifne:      // fall through
      case Bytecodes::_iflt:      // fall through
      case Bytecodes::_ifge:      // fall through
      case Bytecodes::_ifgt:      // fall through
      case Bytecodes::_ifle:      // fall through
      case Bytecodes::_if_icmpeq: // fall through
      case Bytecodes::_if_icmpne: // fall through
      case Bytecodes::_if_icmplt: // fall through
      case Bytecodes::_if_icmpge: // fall through
      case Bytecodes::_if_icmpgt: // fall through
      case Bytecodes::_if_icmple: // fall through
      case Bytecodes::_if_acmpeq: // fall through
      case Bytecodes::_if_acmpne: // fall through
      case Bytecodes::_ifnull:    // fall through
      case Bytecodes::_ifnonnull:
        make_block_at(s.next_bci(), current);
        make_block_at(s.get_dest(), current);
        current = NULL;
        break;

      case Bytecodes::_goto:
        make_block_at(s.get_dest(), current);
        current = NULL;
        break;

      case Bytecodes::_goto_w:
        make_block_at(s.get_far_dest(), current);
        current = NULL;
        break;

      case Bytecodes::_jsr:
        handle_jsr(current, s.get_dest(), s.next_bci());
        current = NULL;
        break;

      case Bytecodes::_jsr_w:
        handle_jsr(current, s.get_far_dest(), s.next_bci());
        current = NULL;
        break;

      case Bytecodes::_tableswitch: {
        // set block for each case
        Bytecode_tableswitch sw(&s);
        int l = sw.length();
        for (int i = 0; i < l; i++) {
          make_block_at(cur_bci + sw.dest_offset_at(i), current);
        }
        make_block_at(cur_bci + sw.default_offset(), current);
        current = NULL;
        break;
      }

      case Bytecodes::_lookupswitch: {
        // set block for each case
        Bytecode_lookupswitch sw(&s);
        int l = sw.number_of_pairs();
        for (int i = 0; i < l; i++) {
          make_block_at(cur_bci + sw.pair_at(i).offset(), current);
        }
        make_block_at(cur_bci + sw.default_offset(), current);
        current = NULL;
        break;
      }
    }
  }
}
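
// A small worked example (illustrative only). For Java source like
//
//   if (x == 0) { a(); } else { b(); }
//
// the bytecodes are roughly
//
//    0: iload_0
//    1: ifne 10          => make_block_at(4, current); make_block_at(10, current)
//    4: invokestatic a
//    7: goto 13          => make_block_at(13, current)
//   10: invokestatic b
//   13: ...
//
// so the leaders are bci 0, 4, 10 and 13, and the successor lists of
// the blocks mirror the two arms of the if plus the join point.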


void BlockListBuilder::mark_loops() {
  ResourceMark rm;

  _active = BitMap(BlockBegin::number_of_blocks());         _active.clear();
  _visited = BitMap(BlockBegin::number_of_blocks());        _visited.clear();
  _loop_map = intArray(BlockBegin::number_of_blocks(), 0);
  _next_loop_index = 0;
  _next_block_number = _blocks.length();

  // recursively iterate the control flow graph
  mark_loops(_bci2block->at(0), false);
  assert(_next_block_number >= 0, "invalid block numbers");
}

void BlockListBuilder::make_loop_header(BlockBegin* block) {
  if (block->is_set(BlockBegin::exception_entry_flag)) {
    // exception edges may look like loops but don't mark them as such
    // since it screws up block ordering.
    return;
  }
  if (!block->is_set(BlockBegin::parser_loop_header_flag)) {
    block->set(BlockBegin::parser_loop_header_flag);

    assert(_loop_map.at(block->block_id()) == 0, "must not be set yet");
    assert(0 <= _next_loop_index && _next_loop_index < BitsPerInt, "_next_loop_index is used as a bit-index in integer");
    _loop_map.at_put(block->block_id(), 1 << _next_loop_index);
    if (_next_loop_index < 31) _next_loop_index++;
  } else {
    // block already marked as loop header
    assert(is_power_of_2((unsigned int)_loop_map.at(block->block_id())), "exactly one bit must be set");
  }
}

int BlockListBuilder::mark_loops(BlockBegin* block, bool in_subroutine) {
  int block_id = block->block_id();

  if (_visited.at(block_id)) {
    if (_active.at(block_id)) {
      // reached block via backward branch
      make_loop_header(block);
    }
    // return cached loop information for this block
    return _loop_map.at(block_id);
  }

  if (block->is_set(BlockBegin::subroutine_entry_flag)) {
    in_subroutine = true;
  }

  // set active and visited bits before successors are processed
  _visited.set_bit(block_id);
  _active.set_bit(block_id);

  intptr_t loop_state = 0;
  for (int i = block->number_of_sux() - 1; i >= 0; i--) {
    // recursively process all successors
    loop_state |= mark_loops(block->sux_at(i), in_subroutine);
  }

  // clear active-bit after all successors are processed
  _active.clear_bit(block_id);

  // reverse-post-order numbering of all blocks
  block->set_depth_first_number(_next_block_number);
  _next_block_number--;

  if (loop_state != 0 || in_subroutine) {
    // block is contained in at least one loop, so phi functions are necessary
    // phi functions are also necessary for all locals stored in a subroutine
    scope()->requires_phi_function().set_union(block->stores_to_locals());
  }

  if (block->is_set(BlockBegin::parser_loop_header_flag)) {
    int header_loop_state = _loop_map.at(block_id);
    assert(is_power_of_2((unsigned)header_loop_state), "exactly one bit must be set");

    // If the highest bit is set (i.e. when the integer value is negative), the method
    // has 32 or more loops. This bit is never cleared because it is used for multiple loops.
    if (header_loop_state >= 0) {
      clear_bits(loop_state, header_loop_state);
    }
  }

  // cache and return loop information for this block
  _loop_map.at_put(block_id, loop_state);
  return loop_state;
}
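
// Worked sketch of the loop encoding above (illustrative, not from the
// original source). Each loop header gets one bit in an int, so with
// two nested loops whose headers B1 (outer) and B2 (inner) are found
// in that order:
//
//   make_loop_header(B1)  =>  _loop_map[B1] = 0x1
//   make_loop_header(B2)  =>  _loop_map[B2] = 0x2
//
// A block inside both loops accumulates loop_state 0x1 | 0x2 = 0x3
// from its successors. When the recursion returns to header B2,
// clear_bits removes 0x2, so only 0x1 propagates out of the inner
// loop. Methods with 32 or more loops share the sign bit, which is
// never cleared and thus conservatively keeps such blocks "in a loop".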


#ifndef PRODUCT

int compare_depth_first(BlockBegin** a, BlockBegin** b) {
  return (*a)->depth_first_number() - (*b)->depth_first_number();
}

void BlockListBuilder::print() {
  tty->print("----- initial block list of BlockListBuilder for method ");
  method()->print_short_name();
  tty->cr();

  // better readability if blocks are sorted in processing order
  _blocks.sort(compare_depth_first);

  for (int i = 0; i < _blocks.length(); i++) {
    BlockBegin* cur = _blocks.at(i);
    tty->print("%4d: B%-4d bci: %-4d  preds: %-4d ", cur->depth_first_number(), cur->block_id(), cur->bci(), cur->total_preds());

    tty->print(cur->is_set(BlockBegin::std_entry_flag)               ? " std" : "    ");
    tty->print(cur->is_set(BlockBegin::osr_entry_flag)               ? " osr" : "    ");
    tty->print(cur->is_set(BlockBegin::exception_entry_flag)         ? " ex" : "   ");
    tty->print(cur->is_set(BlockBegin::subroutine_entry_flag)        ? " sr" : "   ");
    tty->print(cur->is_set(BlockBegin::parser_loop_header_flag)      ? " lh" : "   ");

    if (cur->number_of_sux() > 0) {
      tty->print("    sux: ");
      for (int j = 0; j < cur->number_of_sux(); j++) {
        BlockBegin* sux = cur->sux_at(j);
        tty->print("B%d ", sux->block_id());
      }
    }
    tty->cr();
  }
}

#endif


// A simple growable array of Values indexed by ciFields
class FieldBuffer: public CompilationResourceObj {
 private:
  GrowableArray<Value> _values;

 public:
  FieldBuffer() {}

  void kill() {
    _values.trunc_to(0);
  }

  Value at(ciField* field) {
    assert(field->holder()->is_loaded(), "must be a loaded field");
    int offset = field->offset();
    if (offset < _values.length()) {
      return _values.at(offset);
    } else {
      return NULL;
    }
  }

  void at_put(ciField* field, Value value) {
    assert(field->holder()->is_loaded(), "must be a loaded field");
    int offset = field->offset();
    _values.at_put_grow(offset, value, NULL);
  }

};
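
// Note (illustrative): FieldBuffer is indexed by the field's byte
// offset within its holder, not by a dense field number, so _values
// can be sparse. E.g. for a hypothetical field at offset 12, at_put
// grows the array to length 13 with NULL holes in between.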


// MemoryBuffer is a fairly simple model of the current state of memory.
// It partitions memory into several pieces.  The first piece is
// generic memory where little is known about the owner of the memory.
// This is conceptually represented by the tuple <O, F, V> which says
// that the field F of object O has value V.  This is flattened so
// that F is represented by the offset of the field and the parallel
// arrays _objects and _values are used for O and V.  Loads of O.F can
// simply use V.  Newly allocated objects are kept in a separate list
// along with a parallel array for each object which represents the
// current value of its fields.  Stores of the default value to fields
// which have never been stored to before are eliminated since they
// are redundant.  Once newly allocated objects are stored into
// another object or passed out of the current compile, they are
// treated like generic memory.
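//
// An illustrative example (not part of the original comment), in Java:
//
//   Point p = new Point();   // tracked via _newobjects/_fields
//   p.x = 0;                 // store of the default value: eliminated
//   p.y = 5;                 // recorded in p's FieldBuffer
//   int a = p.y;             // load eliminated, replaced by the value 5
//   q.f = p;                 // p escapes: store_value() demotes p to
//                            // generic memory from here on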

class MemoryBuffer: public CompilationResourceObj {
 private:
  FieldBuffer                 _values;
  GrowableArray<Value>        _objects;
  GrowableArray<Value>        _newobjects;
  GrowableArray<FieldBuffer*> _fields;

 public:
  MemoryBuffer() {}

  StoreField* store(StoreField* st) {
    if (!EliminateFieldAccess) {
      return st;
    }

    Value object = st->obj();
    Value value = st->value();
    ciField* field = st->field();
    if (field->holder()->is_loaded()) {
      int offset = field->offset();
      int index = _newobjects.find(object);
      if (index != -1) {
        // newly allocated object with no other stores performed on this field
        FieldBuffer* buf = _fields.at(index);
        if (buf->at(field) == NULL && is_default_value(value)) {
#ifndef PRODUCT
          if (PrintIRDuringConstruction && Verbose) {
            tty->print_cr("Eliminated store for object %d:", index);
            st->print_line();
          }
#endif
          return NULL;
        } else {
          buf->at_put(field, value);
        }
      } else {
        _objects.at_put_grow(offset, object, NULL);
        _values.at_put(field, value);
      }

      store_value(value);
    } else {
      // if we held onto field names we could alias based on names but
      // we don't know what's being stored to so kill it all.
      kill();
    }
    return st;
  }


  // return true if this value corresponds to the default value of a field.
  bool is_default_value(Value value) {
    Constant* con = value->as_Constant();
    if (con) {
      switch (con->type()->tag()) {
        case intTag:    return con->type()->as_IntConstant()->value() == 0;
        case longTag:   return con->type()->as_LongConstant()->value() == 0;
        case floatTag:  return jint_cast(con->type()->as_FloatConstant()->value()) == 0;
        case doubleTag: return jlong_cast(con->type()->as_DoubleConstant()->value()) == jlong_cast(0);
        case objectTag: return con->type() == objectNull;
        default:  ShouldNotReachHere();
      }
    }
    return false;
  }


  // return either the actual value of a load or the load itself
  Value load(LoadField* load) {
    if (!EliminateFieldAccess) {
      return load;
    }

    if (RoundFPResults && UseSSE < 2 && load->type()->is_float_kind()) {
      // can't skip load since value might get rounded as a side effect
      return load;
    }

    ciField* field = load->field();
    Value object   = load->obj();
    if (field->holder()->is_loaded() && !field->is_volatile()) {
      int offset = field->offset();
      Value result = NULL;
      int index = _newobjects.find(object);
      if (index != -1) {
        result = _fields.at(index)->at(field);
      } else if (_objects.at_grow(offset, NULL) == object) {
        result = _values.at(field);
      }
      if (result != NULL) {
#ifndef PRODUCT
        if (PrintIRDuringConstruction && Verbose) {
          tty->print_cr("Eliminated load: ");
          load->print_line();
        }
#endif
        assert(result->type()->tag() == load->type()->tag(), "wrong types");
        return result;
      }
    }
    return load;
  }

  // Record this newly allocated object
  void new_instance(NewInstance* object) {
    int index = _newobjects.length();
    _newobjects.append(object);
    if (_fields.at_grow(index, NULL) == NULL) {
      _fields.at_put(index, new FieldBuffer());
    } else {
      _fields.at(index)->kill();
    }
  }

  void store_value(Value value) {
    int index = _newobjects.find(value);
    if (index != -1) {
      // stored a newly allocated object into another object.
      // Assume we've lost track of it as a separate slice of memory.
      // We could do better by keeping track of whether individual
      // fields could alias each other.
      _newobjects.remove_at(index);
      // pull out the field info and append it to the end of the
      // field info list so it can be reused later.
      _fields.append(_fields.at(index));
      _fields.remove_at(index);
    }
  }

  void kill() {
    _newobjects.trunc_to(0);
    _objects.trunc_to(0);
    _values.kill();
  }
};


// Implementation of GraphBuilder's ScopeData

GraphBuilder::ScopeData::ScopeData(ScopeData* parent)
  : _parent(parent)
  , _bci2block(NULL)
  , _scope(NULL)
  , _has_handler(false)
  , _stream(NULL)
  , _work_list(NULL)
  , _parsing_jsr(false)
  , _jsr_xhandlers(NULL)
  , _caller_stack_size(-1)
  , _continuation(NULL)
  , _num_returns(0)
  , _cleanup_block(NULL)
  , _cleanup_return_prev(NULL)
  , _cleanup_state(NULL)
{
  if (parent != NULL) {
    _max_inline_size = (intx) ((float) NestedInliningSizeRatio * (float) parent->max_inline_size() / 100.0f);
  } else {
    _max_inline_size = MaxInlineSize;
  }
  if (_max_inline_size < MaxTrivialSize) {
    _max_inline_size = MaxTrivialSize;
  }
}
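
// Example of the sizing rule above (illustrative arithmetic, assuming
// the default flag values NestedInliningSizeRatio=90, MaxInlineSize=35
// and MaxTrivialSize=6): inlining depth 1 allows up to 35 bytecodes,
// depth 2 up to 31 (35*90/100), depth 3 up to 27, and so on, never
// dropping below the MaxTrivialSize floor of 6.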


void GraphBuilder::kill_all() {
  if (UseLocalValueNumbering) {
    vmap()->kill_all();
  }
  _memory->kill();
}


BlockBegin* GraphBuilder::ScopeData::block_at(int bci) {
  if (parsing_jsr()) {
    // It is necessary to clone all blocks associated with a
    // subroutine, including those for exception handlers in the scope
    // of the method containing the jsr (because those exception
    // handlers may contain ret instructions in some cases).
    BlockBegin* block = bci2block()->at(bci);
    if (block != NULL && block == parent()->bci2block()->at(bci)) {
      BlockBegin* new_block = new BlockBegin(block->bci());
#ifndef PRODUCT
      if (PrintInitialBlockList) {
        tty->print_cr("CFG: cloned block %d (bci %d) as block %d for jsr",
                      block->block_id(), block->bci(), new_block->block_id());
      }
#endif
      // copy data from the cloned block
      new_block->set_depth_first_number(block->depth_first_number());
      if (block->is_set(BlockBegin::parser_loop_header_flag)) new_block->set(BlockBegin::parser_loop_header_flag);
      // Preserve certain flags for assertion checking
      if (block->is_set(BlockBegin::subroutine_entry_flag)) new_block->set(BlockBegin::subroutine_entry_flag);
      if (block->is_set(BlockBegin::exception_entry_flag))  new_block->set(BlockBegin::exception_entry_flag);

      // copy was_visited_flag to allow early detection of bailouts:
      // if a block that is used in a jsr has already been visited before,
      // it is shared between the normal control flow and a subroutine.
      // BlockBegin::try_merge returns false when the flag is set, which
      // leads to a compilation bailout.
      if (block->is_set(BlockBegin::was_visited_flag))  new_block->set(BlockBegin::was_visited_flag);

      bci2block()->at_put(bci, new_block);
      block = new_block;
    }
    return block;
  } else {
    return bci2block()->at(bci);
  }
}


XHandlers* GraphBuilder::ScopeData::xhandlers() const {
  if (_jsr_xhandlers == NULL) {
    assert(!parsing_jsr(), "");
    return scope()->xhandlers();
  }
  assert(parsing_jsr(), "");
  return _jsr_xhandlers;
}


void GraphBuilder::ScopeData::set_scope(IRScope* scope) {
  _scope = scope;
  bool parent_has_handler = false;
  if (parent() != NULL) {
    parent_has_handler = parent()->has_handler();
  }
  _has_handler = parent_has_handler || scope->xhandlers()->has_handlers();
}


void GraphBuilder::ScopeData::set_inline_cleanup_info(BlockBegin* block,
                                                      Instruction* return_prev,
                                                      ValueStack* return_state) {
  _cleanup_block       = block;
  _cleanup_return_prev = return_prev;
  _cleanup_state       = return_state;
}


void GraphBuilder::ScopeData::add_to_work_list(BlockBegin* block) {
  if (_work_list == NULL) {
    _work_list = new BlockList();
  }

  if (!block->is_set(BlockBegin::is_on_work_list_flag)) {
    // Do not start parsing the continuation block while in a
    // sub-scope
    if (parsing_jsr()) {
      if (block == jsr_continuation()) {
        return;
      }
    } else {
      if (block == continuation()) {
        return;
      }
    }
    block->set(BlockBegin::is_on_work_list_flag);
    _work_list->push(block);

    sort_top_into_worklist(_work_list, block);
  }
}


void GraphBuilder::sort_top_into_worklist(BlockList* worklist, BlockBegin* top) {
  assert(worklist->top() == top, "");
  // sort the top block down into the work list (descending by depth-first number)
  const int dfn = top->depth_first_number();
  assert(dfn != -1, "unknown depth first number");
  int i = worklist->length()-2;
  while (i >= 0) {
    BlockBegin* b = worklist->at(i);
    if (b->depth_first_number() < dfn) {
      worklist->at_put(i+1, b);
    } else {
      break;
    }
    i--;
  }
  if (i >= -1) worklist->at_put(i + 1, top);
}


BlockBegin* GraphBuilder::ScopeData::remove_from_work_list() {
  if (is_work_list_empty()) {
    return NULL;
  }
  return _work_list->pop();
}


bool GraphBuilder::ScopeData::is_work_list_empty() const {
  return (_work_list == NULL || _work_list->length() == 0);
}


void GraphBuilder::ScopeData::setup_jsr_xhandlers() {
  assert(parsing_jsr(), "");
  // clone all the exception handlers from the scope
  XHandlers* handlers = new XHandlers(scope()->xhandlers());
  const int n = handlers->length();
  for (int i = 0; i < n; i++) {
    // The XHandlers need to be adjusted to dispatch to the cloned
    // handler block instead of the default one but the synthetic
    // unlocker needs to be handled specially.  The synthetic unlocker
    // should be left alone since there can be only one and all code
    // should dispatch to the same one.
    XHandler* h = handlers->handler_at(i);
    assert(h->handler_bci() != SynchronizationEntryBCI, "must be real");
    h->set_entry_block(block_at(h->handler_bci()));
  }
  _jsr_xhandlers = handlers;
}


int GraphBuilder::ScopeData::num_returns() {
  if (parsing_jsr()) {
    return parent()->num_returns();
  }
  return _num_returns;
}


void GraphBuilder::ScopeData::incr_num_returns() {
  if (parsing_jsr()) {
    parent()->incr_num_returns();
  } else {
    ++_num_returns;
  }
}


// Implementation of GraphBuilder

#define INLINE_BAILOUT(msg)        { inline_bailout(msg); return false; }


void GraphBuilder::load_constant() {
  ciConstant con = stream()->get_constant();
  if (con.basic_type() == T_ILLEGAL) {
    BAILOUT("could not resolve a constant");
  } else {
    ValueType* t = illegalType;
    ValueStack* patch_state = NULL;
    switch (con.basic_type()) {
      case T_BOOLEAN: t = new IntConstant     (con.as_boolean()); break;
      case T_BYTE   : t = new IntConstant     (con.as_byte   ()); break;
      case T_CHAR   : t = new IntConstant     (con.as_char   ()); break;
      case T_SHORT  : t = new IntConstant     (con.as_short  ()); break;
      case T_INT    : t = new IntConstant     (con.as_int    ()); break;
      case T_LONG   : t = new LongConstant    (con.as_long   ()); break;
      case T_FLOAT  : t = new FloatConstant   (con.as_float  ()); break;
      case T_DOUBLE : t = new DoubleConstant  (con.as_double ()); break;
      case T_ARRAY  : t = new ArrayConstant   (con.as_object ()->as_array   ()); break;
      case T_OBJECT :
       {
        ciObject* obj = con.as_object();
        if (!obj->is_loaded()
            || (PatchALot && obj->klass() != ciEnv::current()->String_klass())) {
          patch_state = copy_state_before();
          t = new ObjectConstant(obj);
        } else {
          assert(!obj->is_klass(), "must be java_mirror of klass");
          t = new InstanceConstant(obj->as_instance());
        }
        break;
       }
      default       : ShouldNotReachHere();
    }
    Value x;
    if (patch_state != NULL) {
      x = new Constant(t, patch_state);
    } else {
      x = new Constant(t);
    }
    push(t, append(x));
  }
}


void GraphBuilder::load_local(ValueType* type, int index) {
  Value x = state()->local_at(index);
  assert(x != NULL && !x->type()->is_illegal(), "access of illegal local variable");
  push(type, x);
}


void GraphBuilder::store_local(ValueType* type, int index) {
  Value x = pop(type);
  store_local(state(), x, type, index);
}


void GraphBuilder::store_local(ValueStack* state, Value x, ValueType* type, int index) {
  if (parsing_jsr()) {
    // We need to do additional tracking of the location of the return
    // address for jsrs since we don't handle arbitrary jsr/ret
    // constructs. Here we are figuring out in which circumstances we
    // need to bail out.
    if (x->type()->is_address()) {
      scope_data()->set_jsr_return_address_local(index);

      // Also check parent jsrs (if any) at this time to see whether
      // they are using this local. We don't handle skipping over a
      // ret.
      for (ScopeData* cur_scope_data = scope_data()->parent();
           cur_scope_data != NULL && cur_scope_data->parsing_jsr() && cur_scope_data->scope() == scope();
           cur_scope_data = cur_scope_data->parent()) {
        if (cur_scope_data->jsr_return_address_local() == index) {
          BAILOUT("subroutine overwrites return address from previous subroutine");
        }
      }
    } else if (index == scope_data()->jsr_return_address_local()) {
      scope_data()->set_jsr_return_address_local(-1);
    }
  }

  state->store_local(index, round_fp(x));
}


void GraphBuilder::load_indexed(BasicType type) {
  ValueStack* state_before = copy_state_for_exception();
  Value index = ipop();
  Value array = apop();
  Value length = NULL;
  if (CSEArrayLength ||
      (array->as_AccessField() && array->as_AccessField()->field()->is_constant()) ||
      (array->as_NewArray() && array->as_NewArray()->length() && array->as_NewArray()->length()->type()->is_constant())) {
    length = append(new ArrayLength(array, state_before));
  }
  push(as_ValueType(type), append(new LoadIndexed(array, index, length, type, state_before)));
}


void GraphBuilder::store_indexed(BasicType type) {
  ValueStack* state_before = copy_state_for_exception();
  Value value = pop(as_ValueType(type));
  Value index = ipop();
  Value array = apop();
  Value length = NULL;
  if (CSEArrayLength ||
      (array->as_AccessField() && array->as_AccessField()->field()->is_constant()) ||
      (array->as_NewArray() && array->as_NewArray()->length() && array->as_NewArray()->length()->type()->is_constant())) {
    length = append(new ArrayLength(array, state_before));
  }
  StoreIndexed* result = new StoreIndexed(array, index, length, type, value, state_before);
  append(result);
  _memory->store_value(value);

  if (type == T_OBJECT && is_profiling()) {
    // Note that we'd collect profile data in this method if we wanted it.
    compilation()->set_would_profile(true);

    if (profile_checkcasts()) {
      result->set_profiled_method(method());
      result->set_profiled_bci(bci());
      result->set_should_profile(true);
    }
  }
}


void GraphBuilder::stack_op(Bytecodes::Code code) {
  switch (code) {
    case Bytecodes::_pop:
      { state()->raw_pop();
      }
      break;
    case Bytecodes::_pop2:
      { state()->raw_pop();
        state()->raw_pop();
      }
      break;
    case Bytecodes::_dup:
      { Value w = state()->raw_pop();
        state()->raw_push(w);
        state()->raw_push(w);
      }
      break;
    case Bytecodes::_dup_x1:
      { Value w1 = state()->raw_pop();
        Value w2 = state()->raw_pop();
        state()->raw_push(w1);
        state()->raw_push(w2);
        state()->raw_push(w1);
      }
      break;
    case Bytecodes::_dup_x2:
      { Value w1 = state()->raw_pop();
        Value w2 = state()->raw_pop();
        Value w3 = state()->raw_pop();
        state()->raw_push(w1);
        state()->raw_push(w3);
        state()->raw_push(w2);
        state()->raw_push(w1);
      }
      break;
    case Bytecodes::_dup2:
      { Value w1 = state()->raw_pop();
        Value w2 = state()->raw_pop();
        state()->raw_push(w2);
        state()->raw_push(w1);
        state()->raw_push(w2);
        state()->raw_push(w1);
      }
      break;
    case Bytecodes::_dup2_x1:
      { Value w1 = state()->raw_pop();
        Value w2 = state()->raw_pop();
        Value w3 = state()->raw_pop();
        state()->raw_push(w2);
        state()->raw_push(w1);
        state()->raw_push(w3);
        state()->raw_push(w2);
        state()->raw_push(w1);
      }
      break;
    case Bytecodes::_dup2_x2:
      { Value w1 = state()->raw_pop();
        Value w2 = state()->raw_pop();
        Value w3 = state()->raw_pop();
        Value w4 = state()->raw_pop();
        state()->raw_push(w2);
        state()->raw_push(w1);
        state()->raw_push(w4);
        state()->raw_push(w3);
        state()->raw_push(w2);
        state()->raw_push(w1);
      }
      break;
    case Bytecodes::_swap:
      { Value w1 = state()->raw_pop();
        Value w2 = state()->raw_pop();
        state()->raw_push(w1);
        state()->raw_push(w2);
      }
      break;
    default:
      ShouldNotReachHere();
      break;
  }
}
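
// Stack effect sketch for the cases above (illustrative; top of stack
// on the right):
//
//   dup_x1 : ..., w2, w1      =>  ..., w1, w2, w1
//   dup2_x1: ..., w3, w2, w1  =>  ..., w2, w1, w3, w2, w1
//   swap   : ..., w2, w1      =>  ..., w1, w2
//
// raw_push/raw_pop operate on single stack words, so the long/double
// forms of dup2 etc. fall out of the same word-based code.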


void GraphBuilder::arithmetic_op(ValueType* type, Bytecodes::Code code, ValueStack* state_before) {
  Value y = pop(type);
  Value x = pop(type);
  // NOTE: strictfp can be queried from the current method since we don't
  // inline methods with differing strictfp bits
  Value res = new ArithmeticOp(code, x, y, method()->is_strict(), state_before);
  // Note: currently single-precision floating-point rounding on Intel is handled at the LIRGenerator level
  res = append(res);
  if (method()->is_strict()) {
    res = round_fp(res);
  }
  push(type, res);
}


void GraphBuilder::negate_op(ValueType* type) {
  push(type, append(new NegateOp(pop(type))));
}


void GraphBuilder::shift_op(ValueType* type, Bytecodes::Code code) {
  Value s = ipop();
  Value x = pop(type);
  // try to simplify
  // Note: This code should go into the canonicalizer as soon as it
  //       can handle canonicalized forms that contain more than one node.
  if (CanonicalizeNodes && code == Bytecodes::_iushr) {
    // pattern: x >>> s
    IntConstant* s1 = s->type()->as_IntConstant();
    if (s1 != NULL) {
      // pattern: x >>> s1, with s1 constant
      ShiftOp* l = x->as_ShiftOp();
      if (l != NULL && l->op() == Bytecodes::_ishl) {
        // pattern: (a << b) >>> s1
        IntConstant* s0 = l->y()->type()->as_IntConstant();
        if (s0 != NULL) {
          // pattern: (a << s0) >>> s1
          const int s0c = s0->value() & 0x1F; // only the low 5 bits are significant for shifts
          const int s1c = s1->value() & 0x1F; // only the low 5 bits are significant for shifts
          if (s0c == s1c) {
            if (s0c == 0) {
              // pattern: (a << 0) >>> 0 => simplify to: a
              ipush(l->x());
            } else {
              // pattern: (a << s0c) >>> s0c => simplify to: a & m, with m constant
              assert(0 < s0c && s0c < BitsPerInt, "adjust code below to handle corner cases");
              const int m = (1 << (BitsPerInt - s0c)) - 1;
              Value s = append(new Constant(new IntConstant(m)));
              ipush(append(new LogicOp(Bytecodes::_iand, l->x(), s)));
            }
            return;
          }
        }
      }
    }
  }
  // could not simplify
  push(type, append(new ShiftOp(code, x, s)));
}
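
// Concrete instance of the pattern above (illustrative): for
// "(a << 16) >>> 16", s0c == s1c == 16, so the shift pair is replaced
// by "a & m" with m = (1 << (32 - 16)) - 1 = 0xFFFF, i.e. the common
// char/short truncation idiom collapses into a single mask.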


void GraphBuilder::logic_op(ValueType* type, Bytecodes::Code code) {
  Value y = pop(type);
  Value x = pop(type);
  push(type, append(new LogicOp(code, x, y)));
}


void GraphBuilder::compare_op(ValueType* type, Bytecodes::Code code) {
  ValueStack* state_before = copy_state_before();
  Value y = pop(type);
  Value x = pop(type);
  ipush(append(new CompareOp(code, x, y, state_before)));
}


void GraphBuilder::convert(Bytecodes::Code op, BasicType from, BasicType to) {
  push(as_ValueType(to), append(new Convert(op, pop(as_ValueType(from)), as_ValueType(to))));
}


void GraphBuilder::increment() {
  int index = stream()->get_index();
  int delta = stream()->is_wide() ? (signed short)Bytes::get_Java_u2(stream()->cur_bcp() + 4) : (signed char)(stream()->cur_bcp()[2]);
  load_local(intType, index);
  ipush(append(new Constant(new IntConstant(delta))));
  arithmetic_op(intType, Bytecodes::_iadd);
  store_local(intType, index);
}
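
// Encoding note (illustrative): for the normal form "iinc index const"
// the delta is the signed byte at cur_bcp()[2]; for the wide form
// "wide iinc index const" it is the signed 16-bit value at offset 4
// (after the wide prefix, the opcode and the two index bytes), which
// is exactly what the expression above decodes.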


void GraphBuilder::_goto(int from_bci, int to_bci) {
  Goto *x = new Goto(block_at(to_bci), to_bci <= from_bci);
  if (is_profiling()) {
    compilation()->set_would_profile(true);
  }
  if (profile_branches()) {
    x->set_profiled_method(method());
    x->set_profiled_bci(bci());
    x->set_should_profile(true);
  }
  append(x);
}


void GraphBuilder::if_node(Value x, If::Condition cond, Value y, ValueStack* state_before) {
  BlockBegin* tsux = block_at(stream()->get_dest());
  BlockBegin* fsux = block_at(stream()->next_bci());
  bool is_bb = tsux->bci() < stream()->cur_bci() || fsux->bci() < stream()->cur_bci();
  Instruction *i = append(new If(x, cond, false, y, tsux, fsux, is_bb ? state_before : NULL, is_bb));

  if (is_profiling()) {
    If* if_node = i->as_If();
    if (if_node != NULL) {
      // Note that we'd collect profile data in this method if we wanted it.
      compilation()->set_would_profile(true);
      // At level 2 we need the proper bci to count backedges
      if_node->set_profiled_bci(bci());
      if (profile_branches()) {
        // Successors can be rotated by the canonicalizer, check for this case.
        if_node->set_profiled_method(method());
        if_node->set_should_profile(true);
        if (if_node->tsux() == fsux) {
          if_node->set_swapped(true);
        }
      }
      return;
    }

    // Check if this If was reduced to Goto.
    Goto *goto_node = i->as_Goto();
    if (goto_node != NULL) {
      compilation()->set_would_profile(true);
      if (profile_branches()) {
        goto_node->set_profiled_method(method());
        goto_node->set_profiled_bci(bci());
        goto_node->set_should_profile(true);
        // Find out which successor is used.
        if (goto_node->default_sux() == tsux) {
          goto_node->set_direction(Goto::taken);
        } else if (goto_node->default_sux() == fsux) {
          goto_node->set_direction(Goto::not_taken);
        } else {
          ShouldNotReachHere();
        }
      }
      return;
    }
  }
}


void GraphBuilder::if_zero(ValueType* type, If::Condition cond) {
  Value y = append(new Constant(intZero));
  ValueStack* state_before = copy_state_before();
  Value x = ipop();
  if_node(x, cond, y, state_before);
}


void GraphBuilder::if_null(ValueType* type, If::Condition cond) {
  Value y = append(new Constant(objectNull));
  ValueStack* state_before = copy_state_before();
  Value x = apop();
  if_node(x, cond, y, state_before);
}


void GraphBuilder::if_same(ValueType* type, If::Condition cond) {
  ValueStack* state_before = copy_state_before();
  Value y = pop(type);
  Value x = pop(type);
  if_node(x, cond, y, state_before);
}


void GraphBuilder::jsr(int dest) {
  // We only handle well-formed jsrs (those which are "block-structured").
  // If the bytecodes are strange (jumping out of a jsr block) then we
  // might end up trying to re-parse a block containing a jsr which
  // has already been activated. Watch for this case and bail out.
  for (ScopeData* cur_scope_data = scope_data();
       cur_scope_data != NULL && cur_scope_data->parsing_jsr() && cur_scope_data->scope() == scope();
       cur_scope_data = cur_scope_data->parent()) {
    if (cur_scope_data->jsr_entry_bci() == dest) {
      BAILOUT("too-complicated jsr/ret structure");
    }
  }

  push(addressType, append(new Constant(new AddressConstant(next_bci()))));
  if (!try_inline_jsr(dest)) {
    return; // bailed out while parsing and inlining subroutine
  }
}


void GraphBuilder::ret(int local_index) {
  if (!parsing_jsr()) BAILOUT("ret encountered while not parsing subroutine");

  if (local_index != scope_data()->jsr_return_address_local()) {
    BAILOUT("can not handle complicated jsr/ret constructs");
  }

  // Rets simply become (NON-SAFEPOINT) gotos to the jsr continuation
  append(new Goto(scope_data()->jsr_continuation(), false));
}


void GraphBuilder::table_switch() {
  Bytecode_tableswitch sw(stream());
  const int l = sw.length();
  if (CanonicalizeNodes && l == 1) {
    // total of 2 successors => use If instead of switch
    // Note: This code should go into the canonicalizer as soon as it
    //       can handle canonicalized forms that contain more than one node.
    Value key = append(new Constant(new IntConstant(sw.low_key())));
    BlockBegin* tsux = block_at(bci() + sw.dest_offset_at(0));
    BlockBegin* fsux = block_at(bci() + sw.default_offset());
    bool is_bb = tsux->bci() < bci() || fsux->bci() < bci();
    ValueStack* state_before = is_bb ? copy_state_before() : NULL;
    append(new If(ipop(), If::eql, true, key, tsux, fsux, state_before, is_bb));
  } else {
    // collect successors
    BlockList* sux = new BlockList(l + 1, NULL);
    int i;
    bool has_bb = false;
    for (i = 0; i < l; i++) {
      sux->at_put(i, block_at(bci() + sw.dest_offset_at(i)));
      if (sw.dest_offset_at(i) < 0) has_bb = true;
    }
    // add default successor
    sux->at_put(i, block_at(bci() + sw.default_offset()));
    ValueStack* state_before = has_bb ? copy_state_before() : NULL;
    append(new TableSwitch(ipop(), sux, sw.low_key(), state_before, has_bb));
  }
}
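
// Illustrative example of the single-case shortcut above: a Java
// switch with one case, e.g.
//
//   switch (x) { case 3: ...; break; default: ...; }
//
// typically compiles to a tableswitch with l == 1, which is then built
// as the equivalent of "if (x == 3) goto tsux else goto fsux" instead
// of a TableSwitch node, provided CanonicalizeNodes is enabled.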
1306 
1307 
1308 void GraphBuilder::lookup_switch() {
1309   Bytecode_lookupswitch sw(stream());
1310   const int l = sw.number_of_pairs();
1311   if (CanonicalizeNodes && l == 1) {
1312     // total of 2 successors => use If instead of switch
1313     // Note: This code should go into the canonicalizer as soon as it can
1314     //       can handle canonicalized forms that contain more than one node.
1315     // simplify to If
1316     LookupswitchPair pair = sw.pair_at(0);
1317     Value key = append(new Constant(new IntConstant(pair.match())));
1318     BlockBegin* tsux = block_at(bci() + pair.offset());
1319     BlockBegin* fsux = block_at(bci() + sw.default_offset());
1320     bool is_bb = tsux->bci() < bci() || fsux->bci() < bci();
1321     ValueStack* state_before = is_bb ? copy_state_before() : NULL;
1322     append(new If(ipop(), If::eql, true, key, tsux, fsux, state_before, is_bb));
1323   } else {
1324     // collect successors & keys
1325     BlockList* sux = new BlockList(l + 1, NULL);
1326     intArray* keys = new intArray(l, 0);
1327     int i;
1328     bool has_bb = false;
1329     for (i = 0; i < l; i++) {
1330       LookupswitchPair pair = sw.pair_at(i);
1331       if (pair.offset() < 0) has_bb = true;
1332       sux->at_put(i, block_at(bci() + pair.offset()));
1333       keys->at_put(i, pair.match());
1334     }
1335     // add default successor
1336     sux->at_put(i, block_at(bci() + sw.default_offset()));
1337     ValueStack* state_before = has_bb ? copy_state_before() : NULL;
1338     append(new LookupSwitch(ipop(), sux, keys, state_before, has_bb));
1339   }
1340 }
1341 
1342 void GraphBuilder::call_register_finalizer() {
1343   // If the receiver requires finalization then emit code to perform
1344   // the registration on return.
1345 
1346   // Gather some type information about the receiver
1347   Value receiver = state()->local_at(0);
1348   assert(receiver != NULL, "must have a receiver");
1349   ciType* declared_type = receiver->declared_type();
1350   ciType* exact_type = receiver->exact_type();
1351   if (exact_type == NULL &&
1352       receiver->as_Local() &&
1353       receiver->as_Local()->java_index() == 0) {
1354     ciInstanceKlass* ik = compilation()->method()->holder();
1355     if (ik->is_final()) {
1356       exact_type = ik;
1357     } else if (UseCHA && !(ik->has_subklass() || ik->is_interface())) {
1358       // test class is leaf class
1359       compilation()->dependency_recorder()->assert_leaf_type(ik);
1360       exact_type = ik;
1361     } else {
1362       declared_type = ik;
1363     }
1364   }
1365 
1366   // see if we know statically that registration isn't required
1367   bool needs_check = true;
1368   if (exact_type != NULL) {
1369     needs_check = exact_type->as_instance_klass()->has_finalizer();
1370   } else if (declared_type != NULL) {
1371     ciInstanceKlass* ik = declared_type->as_instance_klass();
1372     if (!Dependencies::has_finalizable_subclass(ik)) {
1373       compilation()->dependency_recorder()->assert_has_no_finalizable_subclasses(ik);
1374       needs_check = false;
1375     }
1376   }
1377 
1378   if (needs_check) {
1379     // Perform the registration of finalizable objects.
1380     ValueStack* state_before = copy_state_for_exception();
1381     load_local(objectType, 0);
1382     append_split(new Intrinsic(voidType, vmIntrinsics::_Object_init,
1383                                state()->pop_arguments(1),
1384                                true, state_before, true));
1385   }
1386 }
1387 
1388 
1389 void GraphBuilder::method_return(Value x) {
1390   if (RegisterFinalizersAtInit &&
1391       method()->intrinsic_id() == vmIntrinsics::_Object_init) {
1392     call_register_finalizer();
1393   }
1394 
1395   // Check to see whether we are inlining. If so, Return
1396   // instructions become Gotos to the continuation point.
1397   if (continuation() != NULL) {
1398     assert(!method()->is_synchronized() || InlineSynchronizedMethods, "can not inline synchronized methods yet");
1399 
1400     if (compilation()->env()->dtrace_method_probes()) {
1401       // Report exit from inline methods
1402       Values* args = new Values(1);
1403       args->push(append(new Constant(new ObjectConstant(method()))));
1404       append(new RuntimeCall(voidType, "dtrace_method_exit", CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), args));
1405     }
1406 
1407     // If the inlined method is synchronized, the monitor must be
1408     // released before we jump to the continuation block.
1409     if (method()->is_synchronized()) {
1410       assert(state()->locks_size() == 1, "receiver must be locked here");
1411       monitorexit(state()->lock_at(0), SynchronizationEntryBCI);
1412     }
1413 
1414     // State at end of inlined method is the state of the caller
1415     // without the method parameters on stack, including the
1416     // return value, if any, of the inlined method on operand stack.
1417     set_state(state()->caller_state()->copy_for_parsing());
1418     if (x != NULL) {
1419       state()->push(x->type(), x);
1420     }
1421     Goto* goto_callee = new Goto(continuation(), false);
1422 
1423     // See whether this is the first return; if so, store off some
1424     // of the state for later examination
1425     if (num_returns() == 0) {
1426       set_inline_cleanup_info(_block, _last, state());
1427     }
1428 
1429     // The current bci() is in the wrong scope, so use the bci() of
1430     // the continuation point.
1431     append_with_bci(goto_callee, scope_data()->continuation()->bci());
1432     incr_num_returns();
1433 
1434     return;
1435   }
1436 
1437   state()->truncate_stack(0);
1438   if (method()->is_synchronized()) {
1439     // perform the unlocking before exiting the method
1440     Value receiver;
1441     if (!method()->is_static()) {
1442       receiver = _initial_state->local_at(0);
1443     } else {
1444       receiver = append(new Constant(new ClassConstant(method()->holder())));
1445     }
1446     append_split(new MonitorExit(receiver, state()->unlock()));
1447   }
1448 
1449   append(new Return(x));
1450 }
1451 
1452 
1453 void GraphBuilder::access_field(Bytecodes::Code code) {
1454   bool will_link;
1455   ciField* field = stream()->get_field(will_link);
1456   ciInstanceKlass* holder = field->holder();
1457   BasicType field_type = field->type()->basic_type();
1458   ValueType* type = as_ValueType(field_type);
1459   // call will_link again to determine if the field is valid.
1460   const bool needs_patching = !holder->is_loaded() ||
1461                               !field->will_link(method()->holder(), code) ||
1462                               PatchALot;
1463 
1464   ValueStack* state_before = NULL;
1465   if (!holder->is_initialized() || needs_patching) {
1466     // save state before instruction for debug info when
1467     // deoptimization happens during patching
1468     state_before = copy_state_before();
1469   }
1470 
1471   Value obj = NULL;
1472   if (code == Bytecodes::_getstatic || code == Bytecodes::_putstatic) {
1473     if (state_before != NULL) {
1474       // build a patching constant
1475       obj = new Constant(new InstanceConstant(holder->java_mirror()), state_before);
1476     } else {
1477       obj = new Constant(new InstanceConstant(holder->java_mirror()));
1478     }
1479   }
1480 
1481 
1482   const int offset = !needs_patching ? field->offset() : -1;
1483   switch (code) {
1484     case Bytecodes::_getstatic: {
1485       // check for compile-time constants, i.e., initialized static final fields
1486       Instruction* constant = NULL;
1487       if (field->is_constant() && !PatchALot) {
1488         ciConstant field_val = field->constant_value();
1489         BasicType field_type = field_val.basic_type();
1490         switch (field_type) {
1491         case T_ARRAY:
1492         case T_OBJECT:
1493           if (field_val.as_object()->should_be_constant()) {
1494             constant = new Constant(as_ValueType(field_val));
1495           }
1496           break;
1497 
1498         default:
1499           constant = new Constant(as_ValueType(field_val));
1500         }
1501       }
1502       if (constant != NULL) {
1503         push(type, append(constant));
1504       } else {
1505         if (state_before == NULL) {
1506           state_before = copy_state_for_exception();
1507         }
1508         push(type, append(new LoadField(append(obj), offset, field, true,
1509                                         state_before, needs_patching)));
1510       }
1511       break;
1512     }
1513     case Bytecodes::_putstatic:
1514       { Value val = pop(type);
1515         if (state_before == NULL) {
1516           state_before = copy_state_for_exception();
1517         }
1518         append(new StoreField(append(obj), offset, field, val, true, state_before, needs_patching));
1519       }
1520       break;
1521     case Bytecodes::_getfield :
1522       {
1523         if (state_before == NULL) {
1524           state_before = copy_state_for_exception();
1525         }
1526         LoadField* load = new LoadField(apop(), offset, field, false, state_before, needs_patching);
1527         Value replacement = !needs_patching ? _memory->load(load) : load;
1528         if (replacement != load) {
1529           assert(replacement->is_linked() || !replacement->can_be_linked(), "should already be linked");
1530           push(type, replacement);
1531         } else {
1532           push(type, append(load));
1533         }
1534         break;
1535       }
1536 
1537     case Bytecodes::_putfield :
1538       { Value val = pop(type);
1539         if (state_before == NULL) {
1540           state_before = copy_state_for_exception();
1541         }
1542         StoreField* store = new StoreField(apop(), offset, field, val, false, state_before, needs_patching);
1543         if (!needs_patching) store = _memory->store(store);
1544         if (store != NULL) {
1545           append(store);
1546         }
1547       }
1548       break;
1549     default                   :
1550       ShouldNotReachHere();
1551       break;
1552   }
1553 }
1554 
1555 
1556 Dependencies* GraphBuilder::dependency_recorder() const {
1557   assert(DeoptC1, "need debug information");
1558   return compilation()->dependency_recorder();
1559 }
1560 
1561 
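// Builds the HIR for the invoke* bytecodes. Several devirtualization steps
// below may rewrite the code to invokespecial before inlining is attempted;
// e.g. (illustrative), given
//   interface I { void m(); }
//   class C implements I { public void m() { ... } }   // sole implementor
// an invokeinterface of I.m() may be bound to C.m(), guarded by a CheckCast
// of the receiver and by dependencies on the recorded CHA facts.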
1562 void GraphBuilder::invoke(Bytecodes::Code code) {
1563   bool will_link;
1564   ciMethod* target = stream()->get_method(will_link);
1565   // we have to make sure the argument size (incl. the receiver)
1566   // is correct for compilation (the call would fail later during
1567   // linkage anyway) - was bug (gri 7/28/99)
1568   if (target->is_loaded() && target->is_static() != (code == Bytecodes::_invokestatic)) BAILOUT("will cause link error");
1569   ciInstanceKlass* klass = target->holder();
1570 
1571   // check if CHA is possible: if so, change the code to invokespecial
1572   ciInstanceKlass* calling_klass = method()->holder();
1573   ciKlass* holder = stream()->get_declared_method_holder();
1574   ciInstanceKlass* callee_holder = ciEnv::get_instance_klass_for_declared_method_holder(holder);
1575   ciInstanceKlass* actual_recv = callee_holder;
1576 
1577   // some methods are obviously bindable without any type checks so
1578   // convert them directly to an invokespecial.
1579   if (target->is_loaded() && !target->is_abstract() &&
1580       target->can_be_statically_bound() && code == Bytecodes::_invokevirtual) {
1581     code = Bytecodes::_invokespecial;
1582   }
1583 
1584   // NEEDS_CLEANUP
1585   // I've added the target->is_loaded() test below but I don't really understand
1586   // how klass->is_loaded() can be true and yet target->is_loaded() is false.
1587   // This happened while running the JCK invokevirtual tests under doit.  TKR
1588   ciMethod* cha_monomorphic_target = NULL;
1589   ciMethod* exact_target = NULL;
1590   if (UseCHA && DeoptC1 && klass->is_loaded() && target->is_loaded() &&
1591       !target->is_method_handle_invoke()) {
1592     Value receiver = NULL;
1593     ciInstanceKlass* receiver_klass = NULL;
1594     bool type_is_exact = false;
1595     // try to find a precise receiver type
1596     if (will_link && !target->is_static()) {
1597       int index = state()->stack_size() - (target->arg_size_no_receiver() + 1);
1598       receiver = state()->stack_at(index);
1599       ciType* type = receiver->exact_type();
1600       if (type != NULL && type->is_loaded() &&
1601           type->is_instance_klass() && !type->as_instance_klass()->is_interface()) {
1602         receiver_klass = (ciInstanceKlass*) type;
1603         type_is_exact = true;
1604       }
1605       if (type == NULL) {
1606         type = receiver->declared_type();
1607         if (type != NULL && type->is_loaded() &&
1608             type->is_instance_klass() && !type->as_instance_klass()->is_interface()) {
1609           receiver_klass = (ciInstanceKlass*) type;
1610           if (receiver_klass->is_leaf_type() && !receiver_klass->is_final()) {
1611             // Insert a dependency on this type since
1612             // find_monomorphic_target may assume it's already done.
1613             dependency_recorder()->assert_leaf_type(receiver_klass);
1614             type_is_exact = true;
1615           }
1616         }
1617       }
1618     }
1619     if (receiver_klass != NULL && type_is_exact &&
1620         receiver_klass->is_loaded() && code != Bytecodes::_invokespecial) {
1621       // If we have the exact receiver type we can bind directly to
1622       // the method to call.
1623       exact_target = target->resolve_invoke(calling_klass, receiver_klass);
1624       if (exact_target != NULL) {
1625         target = exact_target;
1626         code = Bytecodes::_invokespecial;
1627       }
1628     }
1629     if (receiver_klass != NULL &&
1630         receiver_klass->is_subtype_of(actual_recv) &&
1631         actual_recv->is_initialized()) {
1632       actual_recv = receiver_klass;
1633     }
1634 
1635     if ((code == Bytecodes::_invokevirtual && callee_holder->is_initialized()) ||
1636         (code == Bytecodes::_invokeinterface && callee_holder->is_initialized() && !actual_recv->is_interface())) {
1637       // Use CHA on the receiver to select a more precise method.
1638       cha_monomorphic_target = target->find_monomorphic_target(calling_klass, callee_holder, actual_recv);
1639     } else if (code == Bytecodes::_invokeinterface && callee_holder->is_loaded() && receiver != NULL) {
1640       // if there is only one implementor of this interface then we
1641       // may be able to bind this invoke directly to the implementing
1642       // klass but we need both a dependence on the single interface
1643       // and on the method we bind to.  Additionally, since all we know
1644       // about the receiver type is that it's supposed to implement the
1645       // interface, we have to insert a check that it's the class we
1646       // expect.  Interface types are not checked by the verifier so
1647       // they are roughly equivalent to Object.
1648       ciInstanceKlass* singleton = NULL;
1649       if (target->holder()->nof_implementors() == 1) {
1650         singleton = target->holder()->implementor(0);
1651       }
1652       if (singleton) {
1653         cha_monomorphic_target = target->find_monomorphic_target(calling_klass, target->holder(), singleton);
1654         if (cha_monomorphic_target != NULL) {
1655           // If CHA is able to bind this invoke then update the class
1656           // to match that class, otherwise klass will refer to the
1657           // interface.
1658           klass = cha_monomorphic_target->holder();
1659           actual_recv = target->holder();
1660 
1661           // insert a check that it's really the expected class.
1662           CheckCast* c = new CheckCast(klass, receiver, copy_state_for_exception());
1663           c->set_incompatible_class_change_check();
1664           c->set_direct_compare(klass->is_final());
1665           append_split(c);
1666         }
1667       }
1668     }
1669   }
1670 
1671   if (cha_monomorphic_target != NULL) {
1672     if (cha_monomorphic_target->is_abstract()) {
1673       // Do not optimize for abstract methods
1674       cha_monomorphic_target = NULL;
1675     }
1676   }
1677 
1678   if (cha_monomorphic_target != NULL) {
1679     if (!(target->is_final_method())) {
1680       // If we inlined because CHA revealed only a single target method,
1681       // then we are dependent on that target method not getting overridden
1682       // by dynamic class loading.  Be sure to test the "static" receiver
1683       // dest_method here, as opposed to the actual receiver, which may
1684       // falsely lead us to believe that the receiver is final or private.
1685       dependency_recorder()->assert_unique_concrete_method(actual_recv, cha_monomorphic_target);
1686     }
1687     code = Bytecodes::_invokespecial;
1688   }
1689   // check if we could do inlining
1690   if (!PatchALot && Inline && klass->is_loaded() &&
1691       (klass->is_initialized() || (klass->is_interface() && target->holder()->is_initialized()))
1692       && target->will_link(klass, callee_holder, code)) {
1693     // callee is known => check if we have static binding
1694     assert(target->is_loaded(), "callee must be known");
1695     if (code == Bytecodes::_invokestatic
1696      || code == Bytecodes::_invokespecial
1697      || (code == Bytecodes::_invokevirtual && target->is_final_method())
1698     ) {
1699       // static binding => check if callee is ok
1700       ciMethod* inline_target = (cha_monomorphic_target != NULL)
1701                                   ? cha_monomorphic_target
1702                                   : target;
1703       bool res = try_inline(inline_target, (cha_monomorphic_target != NULL) || (exact_target != NULL));
1704       CHECK_BAILOUT();
1705 
1706 #ifndef PRODUCT
1707       // printing
1708       if (PrintInlining && !res) {
1709         // if it was successfully inlined, then it was already printed.
1710         print_inline_result(inline_target, res);
1711       }
1712 #endif
1713       clear_inline_bailout();
1714       if (res) {
1715         // Register dependence if JVMTI has either breakpoint
1716         // setting or hotswapping of methods capabilities since they may
1717         // cause deoptimization.
1718         if (compilation()->env()->jvmti_can_hotswap_or_post_breakpoint()) {
1719           dependency_recorder()->assert_evol_method(inline_target);
1720         }
1721         return;
1722       }
1723     }
1724   }
1725   // If we attempted an inline which did not succeed because of a
1726   // bailout during construction of the callee graph, the entire
1727   // compilation has to be aborted. This is fairly rare and currently
1728   // seems to only occur for jasm-generated classes which contain
1729   // jsr/ret pairs which are not associated with finally clauses and
1730   // do not have exception handlers in the containing method, and are
1731   // therefore not caught early enough to abort the inlining without
1732   // corrupting the graph. (We currently bail out with a non-empty
1733   // stack at a ret in these situations.)
1734   CHECK_BAILOUT();
1735 
1736   // inlining not successful => standard invoke
1737   bool is_loaded = target->is_loaded();
1738   bool has_receiver =
1739     code == Bytecodes::_invokespecial   ||
1740     code == Bytecodes::_invokevirtual   ||
1741     code == Bytecodes::_invokeinterface;
1742   bool is_invokedynamic = code == Bytecodes::_invokedynamic;
1743   ValueType* result_type = as_ValueType(target->return_type());
1744 
1745   // We require the debug info to be the "state before" because
1746   // invokedynamics may deoptimize.
1747   ValueStack* state_before = is_invokedynamic ? copy_state_before() : copy_state_exhandling();
1748 
1749   Values* args = state()->pop_arguments(target->arg_size_no_receiver());
1750   Value recv = has_receiver ? apop() : NULL;
1751   int vtable_index = methodOopDesc::invalid_vtable_index;
1752 
1753 #ifdef SPARC
1754   // Currently only supported on Sparc.
1755   // The UseInlineCaches flag only controls dispatch to invokevirtuals for
1756   // loaded classes which we weren't able to statically bind.
1757   if (!UseInlineCaches && is_loaded && code == Bytecodes::_invokevirtual
1758       && !target->can_be_statically_bound()) {
1759     // Find a vtable index if one is available
1760     vtable_index = target->resolve_vtable_index(calling_klass, callee_holder);
1761   }
1762 #endif
1763 
1764   if (recv != NULL &&
1765       (code == Bytecodes::_invokespecial ||
1766        !is_loaded || target->is_final())) {
1767     // invokespecial always needs a NULL check.  invokevirtual where
1768     // the target is final, or where it's not known whether the
1769     // target is final, requires a NULL check.  Otherwise normal
1770     // invokevirtual will perform the null check during the lookup
1771     // logic or the unverified entry point.  Profiling of calls
1772     // requires that the null check is performed in all cases.
1773     null_check(recv);
1774   }
1775 
1776   if (is_profiling()) {
1777     if (recv != NULL && profile_calls()) {
1778       null_check(recv);
1779     }
1780     // Note that we'd collect profile data in this method if we wanted it.
1781     compilation()->set_would_profile(true);
1782 
1783     if (profile_calls()) {
1784       assert(cha_monomorphic_target == NULL || exact_target == NULL, "both cannot be set");
1785       ciKlass* target_klass = NULL;
1786       if (cha_monomorphic_target != NULL) {
1787         target_klass = cha_monomorphic_target->holder();
1788       } else if (exact_target != NULL) {
1789         target_klass = exact_target->holder();
1790       }
1791       profile_call(recv, target_klass);
1792     }
1793   }
1794 
1795   Invoke* result = new Invoke(code, result_type, recv, args, vtable_index, target, state_before);
1796   // append the invoke; its result, if any, is pushed below
1797   append_split(result);
1798 
1799   if (result_type != voidType) {
1800     if (method()->is_strict()) {
1801       push(result_type, round_fp(result));
1802     } else {
1803       push(result_type, result);
1804     }
1805   }
1806 }
1807 
1808 
1809 void GraphBuilder::new_instance(int klass_index) {
1810   ValueStack* state_before = copy_state_exhandling();
1811   bool will_link;
1812   ciKlass* klass = stream()->get_klass(will_link);
1813   assert(klass->is_instance_klass(), "must be an instance klass");
1814   NewInstance* new_instance = new NewInstance(klass->as_instance_klass(), state_before);
1815   _memory->new_instance(new_instance);
1816   apush(append_split(new_instance));
1817 }
1818 
1819 
1820 void GraphBuilder::new_type_array() {
1821   ValueStack* state_before = copy_state_exhandling();
1822   apush(append_split(new NewTypeArray(ipop(), (BasicType)stream()->get_index(), state_before)));
1823 }
1824 
1825 
1826 void GraphBuilder::new_object_array() {
1827   bool will_link;
1828   ciKlass* klass = stream()->get_klass(will_link);
1829   ValueStack* state_before = !klass->is_loaded() || PatchALot ? copy_state_before() : copy_state_exhandling();
1830   NewArray* n = new NewObjectArray(klass, ipop(), state_before);
1831   apush(append_split(n));
1832 }
1833 
1834 
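// Returns true if a type check against k can be implemented as a single
// klass-pointer comparison: either k is a final class, or (with CHA) k is a
// loaded leaf class and a dependency is recorded so the code is deoptimized
// if a subclass is loaded later; e.g., a checkcast to java.lang.String can
// compare klass pointers directly since String is final.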
1835 bool GraphBuilder::direct_compare(ciKlass* k) {
1836   if (k->is_loaded() && k->is_instance_klass() && !UseSlowPath) {
1837     ciInstanceKlass* ik = k->as_instance_klass();
1838     if (ik->is_final()) {
1839       return true;
1840     } else {
1841       if (DeoptC1 && UseCHA && !(ik->has_subklass() || ik->is_interface())) {
1842         // test class is leaf class
1843         dependency_recorder()->assert_leaf_type(ik);
1844         return true;
1845       }
1846     }
1847   }
1848   return false;
1849 }
1850 
1851 
1852 void GraphBuilder::check_cast(int klass_index) {
1853   bool will_link;
1854   ciKlass* klass = stream()->get_klass(will_link);
1855   ValueStack* state_before = !klass->is_loaded() || PatchALot ? copy_state_before() : copy_state_for_exception();
1856   CheckCast* c = new CheckCast(klass, apop(), state_before);
1857   apush(append_split(c));
1858   c->set_direct_compare(direct_compare(klass));
1859 
1860   if (is_profiling()) {
1861     // Note that we'd collect profile data in this method if we wanted it.
1862     compilation()->set_would_profile(true);
1863 
1864     if (profile_checkcasts()) {
1865       c->set_profiled_method(method());
1866       c->set_profiled_bci(bci());
1867       c->set_should_profile(true);
1868     }
1869   }
1870 }
1871 
1872 
1873 void GraphBuilder::instance_of(int klass_index) {
1874   bool will_link;
1875   ciKlass* klass = stream()->get_klass(will_link);
1876   ValueStack* state_before = !klass->is_loaded() || PatchALot ? copy_state_before() : copy_state_exhandling();
1877   InstanceOf* i = new InstanceOf(klass, apop(), state_before);
1878   ipush(append_split(i));
1879   i->set_direct_compare(direct_compare(klass));
1880 
1881   if (is_profiling()) {
1882     // Note that we'd collect profile data in this method if we wanted it.
1883     compilation()->set_would_profile(true);
1884 
1885     if (profile_checkcasts()) {
1886       i->set_profiled_method(method());
1887       i->set_profiled_bci(bci());
1888       i->set_should_profile(true);
1889     }
1890   }
1891 }
1892 
1893 
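// Appends a MonitorEnter; kill_all() conservatively discards values cached
// for value numbering and load elimination, since acquiring a lock acts as
// a memory barrier and other threads' writes must become visible.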
1894 void GraphBuilder::monitorenter(Value x, int bci) {
1895   // save state before locking in case of deoptimization after a NullPointerException
1896   ValueStack* state_before = copy_state_for_exception_with_bci(bci);
1897   append_with_bci(new MonitorEnter(x, state()->lock(x), state_before), bci);
1898   kill_all();
1899 }
1900 
1901 
1902 void GraphBuilder::monitorexit(Value x, int bci) {
1903   append_with_bci(new MonitorExit(x, state()->unlock()), bci);
1904   kill_all();
1905 }
1906 
1907 
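// Builds the HIR for multianewarray. The dimension counts are popped in
// reverse so that dims->at(0) holds the outermost dimension; e.g., for
// "new int[2][3]" the stack holds 2 and 3 (3 on top) and dims becomes [2, 3].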
1908 void GraphBuilder::new_multi_array(int dimensions) {
1909   bool will_link;
1910   ciKlass* klass = stream()->get_klass(will_link);
1911   ValueStack* state_before = !klass->is_loaded() || PatchALot ? copy_state_before() : copy_state_exhandling();
1912 
1913   Values* dims = new Values(dimensions, NULL);
1914   // fill in all dimensions
1915   int i = dimensions;
1916   while (i-- > 0) dims->at_put(i, ipop());
1917   // create array
1918   NewArray* n = new NewMultiArray(klass, dims, state_before);
1919   apush(append_split(n));
1920 }
1921 
1922 
1923 void GraphBuilder::throw_op(int bci) {
1924   // We require that the debug info for a Throw be the "state before"
1925   // the Throw (i.e., exception oop is still on TOS)
1926   ValueStack* state_before = copy_state_before_with_bci(bci);
1927   Throw* t = new Throw(apop(), state_before);
1928   // operand stack not needed after a throw
1929   state()->truncate_stack(0);
1930   append_with_bci(t, bci);
1931 }
1932 
1933 
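// On x87 (UseSSE < 2) double-precision results of expressions live in 80-bit
// FPU registers and must be rounded explicitly to 64-bit precision, e.g. for
// the results of strict methods; loads, constants, and method parameters are
// already correctly rounded and need no RoundFP node.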
1934 Value GraphBuilder::round_fp(Value fp_value) {
1935   // no rounding needed if SSE2 is used
1936   if (RoundFPResults && UseSSE < 2) {
1937     // Must currently insert rounding node for doubleword values that
1938     // are results of expressions (i.e., not loads from memory or
1939     // constants)
1940     if (fp_value->type()->tag() == doubleTag &&
1941         fp_value->as_Constant() == NULL &&
1942         fp_value->as_Local() == NULL &&       // method parameters need no rounding
1943         fp_value->as_RoundFP() == NULL) {
1944       return append(new RoundFP(fp_value));
1945     }
1946   }
1947   return fp_value;
1948 }
1949 
1950 
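// Central append routine: canonicalizes the instruction, applies local value
// numbering, links the result into the current block, and sets up state and
// exception handlers. E.g. (illustrative), appending "2 + 3" yields a folded
// Constant from the Canonicalizer, and an instruction identical to one
// already appended in this block may be replaced by the existing value via
// the ValueMap.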
1951 Instruction* GraphBuilder::append_with_bci(Instruction* instr, int bci) {
1952   Canonicalizer canon(compilation(), instr, bci);
1953   Instruction* i1 = canon.canonical();
1954   if (i1->is_linked() || !i1->can_be_linked()) {
1955     // Canonicalizer returned an instruction which was already
1956     // appended so simply return it.
1957     return i1;
1958   }
1959 
1960   if (UseLocalValueNumbering) {
1961     // Lookup the instruction in the ValueMap and add it to the map if
1962     // it's not found.
1963     Instruction* i2 = vmap()->find_insert(i1);
1964     if (i2 != i1) {
1965       // found an entry in the value map, so just return it.
1966       assert(i2->is_linked(), "should already be linked");
1967       return i2;
1968     }
1969     ValueNumberingEffects vne(vmap());
1970     i1->visit(&vne);
1971   }
1972 
1973   // i1 was not eliminated => append it
1974   assert(i1->next() == NULL, "shouldn't already be linked");
1975   _last = _last->set_next(i1, canon.bci());
1976 
1977   if (++_instruction_count >= InstructionCountCutoff && !bailed_out()) {
1978     // set the bailout state but complete normal processing.  We
1979     // might do a little more work before noticing the bailout so we
1980     // want processing to continue normally until it's noticed.
1981     bailout("Method and/or inlining is too large");
1982   }
1983 
1984 #ifndef PRODUCT
1985   if (PrintIRDuringConstruction) {
1986     InstructionPrinter ip;
1987     ip.print_line(i1);
1988     if (Verbose) {
1989       state()->print();
1990     }
1991   }
1992 #endif
1993 
1994   // save state after modification of operand stack for StateSplit instructions
1995   StateSplit* s = i1->as_StateSplit();
1996   if (s != NULL) {
1997     if (EliminateFieldAccess) {
1998       Intrinsic* intrinsic = s->as_Intrinsic();
1999       if (s->as_Invoke() != NULL || (intrinsic && !intrinsic->preserves_state())) {
2000         _memory->kill();
2001       }
2002     }
2003     s->set_state(state()->copy(ValueStack::StateAfter, canon.bci()));
2004   }
2005 
2006   // set up exception handlers for this instruction if necessary
2007   if (i1->can_trap()) {
2008     i1->set_exception_handlers(handle_exception(i1));
2009     assert(i1->exception_state() != NULL || !i1->needs_exception_state() || bailed_out(), "handle_exception must set exception state");
2010   }
2011   return i1;
2012 }
2013 
2014 
2015 Instruction* GraphBuilder::append(Instruction* instr) {
2016   assert(instr->as_StateSplit() == NULL || instr->as_BlockEnd() != NULL, "wrong append used");
2017   return append_with_bci(instr, bci());
2018 }
2019 
2020 
2021 Instruction* GraphBuilder::append_split(StateSplit* instr) {
2022   return append_with_bci(instr, bci());
2023 }
2024 
2025 
2026 void GraphBuilder::null_check(Value value) {
2027   if (value->as_NewArray() != NULL || value->as_NewInstance() != NULL) {
2028     return;
2029   } else {
2030     Constant* con = value->as_Constant();
2031     if (con) {
2032       ObjectType* c = con->type()->as_ObjectType();
2033       if (c && c->is_loaded()) {
2034         ObjectConstant* oc = c->as_ObjectConstant();
2035         if (!oc || !oc->value()->is_null_object()) {
2036           return;
2037         }
2038       }
2039     }
2040   }
2041   append(new NullCheck(value, copy_state_for_exception()));
2042 }
2043 
2044 
2045 
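// Computes the list of exception handlers covering this instruction, walking
// from the innermost scope outward; e.g., for an instruction in an inlined
// callee, the callee's handlers are joined first and, unless one of them is
// a catchall, the caller's handlers are appended with an adjusted scope_count.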
2046 XHandlers* GraphBuilder::handle_exception(Instruction* instruction) {
2047   if (!has_handler() && (!instruction->needs_exception_state() || instruction->exception_state() != NULL)) {
2048     assert(instruction->exception_state() == NULL
2049            || instruction->exception_state()->kind() == ValueStack::EmptyExceptionState
2050            || (instruction->exception_state()->kind() == ValueStack::ExceptionState && _compilation->env()->jvmti_can_access_local_variables()),
2051            "exception_state should be of exception kind");
2052     return new XHandlers();
2053   }
2054 
2055   XHandlers*  exception_handlers = new XHandlers();
2056   ScopeData*  cur_scope_data = scope_data();
2057   ValueStack* cur_state = instruction->state_before();
2058   ValueStack* prev_state = NULL;
2059   int scope_count = 0;
2060 
2061   assert(cur_state != NULL, "state_before must be set");
2062   do {
2063     int cur_bci = cur_state->bci();
2064     assert(cur_scope_data->scope() == cur_state->scope(), "scopes do not match");
2065     assert(cur_bci == SynchronizationEntryBCI || cur_bci == cur_scope_data->stream()->cur_bci(), "invalid bci");
2066 
2067     // join with all potential exception handlers
2068     XHandlers* list = cur_scope_data->xhandlers();
2069     const int n = list->length();
2070     for (int i = 0; i < n; i++) {
2071       XHandler* h = list->handler_at(i);
2072       if (h->covers(cur_bci)) {
2073         // h is a potential exception handler => join it
2074         compilation()->set_has_exception_handlers(true);
2075 
2076         BlockBegin* entry = h->entry_block();
2077         if (entry == block()) {
2078           // It's acceptable for an exception handler to cover itself
2079           // but we don't handle that in the parser currently.  It's
2080           // very rare so we bail out instead of trying to handle it.
2081           BAILOUT_("exception handler covers itself", exception_handlers);
2082         }
2083         assert(entry->bci() == h->handler_bci(), "must match");
2084         assert(entry->bci() == -1 || entry == cur_scope_data->block_at(entry->bci()), "blocks must correspond");
2085 
2086         // previously this was a BAILOUT, but this is not necessary
2087         // now because asynchronous exceptions are not handled this way.
2088         assert(entry->state() == NULL || cur_state->total_locks_size() == entry->state()->total_locks_size(), "locks do not match");
2089 
2090         // xhandlers start with an empty expression stack
2091         if (cur_state->stack_size() != 0) {
2092           cur_state = cur_state->copy(ValueStack::ExceptionState, cur_state->bci());
2093         }
2094         if (instruction->exception_state() == NULL) {
2095           instruction->set_exception_state(cur_state);
2096         }
2097 
2098         // Note: Usually this join must work. However, very
2099         // complicated jsr-ret structures where we don't ret from
2100         // the subroutine can cause the objects on the monitor
2101         // stacks to not match because blocks can be parsed twice.
2102         // The only test case we've seen so far which exhibits this
2103         // problem is caught by the infinite recursion test in
2104         // GraphBuilder::jsr() if the join doesn't work.
2105         if (!entry->try_merge(cur_state)) {
2106           BAILOUT_("error while joining with exception handler, prob. due to complicated jsr/rets", exception_handlers);
2107         }
2108 
2109         // add current state for correct handling of phi functions at begin of xhandler
2110         int phi_operand = entry->add_exception_state(cur_state);
2111 
2112         // add entry to the list of xhandlers of this block
2113         _block->add_exception_handler(entry);
2114 
2115         // add back-edge from xhandler entry to this block
2116         if (!entry->is_predecessor(_block)) {
2117           entry->add_predecessor(_block);
2118         }
2119 
2120         // clone XHandler because phi_operand and scope_count cannot be shared
2121         XHandler* new_xhandler = new XHandler(h);
2122         new_xhandler->set_phi_operand(phi_operand);
2123         new_xhandler->set_scope_count(scope_count);
2124         exception_handlers->append(new_xhandler);
2125 
2126         // fill in exception handler subgraph lazily
2127         assert(!entry->is_set(BlockBegin::was_visited_flag), "entry must not be visited yet");
2128         cur_scope_data->add_to_work_list(entry);
2129 
2130         // stop when reaching catchall
2131         if (h->catch_type() == 0) {
2132           return exception_handlers;
2133         }
2134       }
2135     }
2136 
2137     if (exception_handlers->length() == 0) {
2138       // This scope and all callees do not handle exceptions, so the local
2139       // variables of this scope are not needed. However, the scope itself is
2140       // required for a correct exception stack trace -> clear out the locals.
2141       if (_compilation->env()->jvmti_can_access_local_variables()) {
2142         cur_state = cur_state->copy(ValueStack::ExceptionState, cur_state->bci());
2143       } else {
2144         cur_state = cur_state->copy(ValueStack::EmptyExceptionState, cur_state->bci());
2145       }
2146       if (prev_state != NULL) {
2147         prev_state->set_caller_state(cur_state);
2148       }
2149       if (instruction->exception_state() == NULL) {
2150         instruction->set_exception_state(cur_state);
2151       }
2152     }
2153 
2154     // Set up iteration for next time.
2155     // If parsing a jsr, do not grab exception handlers from the
2156     // parent scopes for this method (already got them, and they
2157     // needed to be cloned)
2158 
2159     while (cur_scope_data->parsing_jsr()) {
2160       cur_scope_data = cur_scope_data->parent();
2161     }
2162 
2163     assert(cur_scope_data->scope() == cur_state->scope(), "scopes do not match");
2164     assert(cur_state->locks_size() == 0 || cur_state->locks_size() == 1, "unlocking must be done in a catchall exception handler");
2165 
2166     prev_state = cur_state;
2167     cur_state = cur_state->caller_state();
2168     cur_scope_data = cur_scope_data->parent();
2169     scope_count++;
2170   } while (cur_scope_data != NULL);
2171 
2172   return exception_handlers;
2173 }
2174 
2175 
2176 // Helper class for simplifying Phis.
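// A trivial phi arises e.g. when a loop never reassigns a variable: the merge
// produces x = [y, x] (or x = [y, y]), which can be replaced by y.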
2177 class PhiSimplifier : public BlockClosure {
2178  private:
2179   bool _has_substitutions;
2180   Value simplify(Value v);
2181 
2182  public:
2183   PhiSimplifier(BlockBegin* start) : _has_substitutions(false) {
2184     start->iterate_preorder(this);
2185     if (_has_substitutions) {
2186       SubstitutionResolver sr(start);
2187     }
2188   }
2189   void block_do(BlockBegin* b);
2190   bool has_substitutions() const { return _has_substitutions; }
2191 };
2192 
2193 
2194 Value PhiSimplifier::simplify(Value v) {
2195   Phi* phi = v->as_Phi();
2196 
2197   if (phi == NULL) {
2198     // no phi function
2199     return v;
2200   } else if (v->has_subst()) {
2201     // already substituted; subst can be phi itself -> simplify
2202     return simplify(v->subst());
2203   } else if (phi->is_set(Phi::cannot_simplify)) {
2204     // already tried to simplify phi before
2205     return phi;
2206   } else if (phi->is_set(Phi::visited)) {
2207     // break cycles in phi functions
2208     return phi;
2209   } else if (phi->type()->is_illegal()) {
2210     // illegal phi functions are ignored anyway
2211     return phi;
2212 
2213   } else {
2214     // mark phi function as processed to break cycles in phi functions
2215     phi->set(Phi::visited);
2216 
2217     // simplify x = [y, x] and x = [y, y] to y
2218     Value subst = NULL;
2219     int opd_count = phi->operand_count();
2220     for (int i = 0; i < opd_count; i++) {
2221       Value opd = phi->operand_at(i);
2222       assert(opd != NULL, "Operand must exist!");
2223 
2224       if (opd->type()->is_illegal()) {
2225         // if one operand is illegal, the entire phi function is illegal
2226         phi->make_illegal();
2227         phi->clear(Phi::visited);
2228         return phi;
2229       }
2230 
2231       Value new_opd = simplify(opd);
2232       assert(new_opd != NULL, "Simplified operand must exist!");
2233 
2234       if (new_opd != phi && new_opd != subst) {
2235         if (subst == NULL) {
2236           subst = new_opd;
2237         } else {
2238           // no simplification possible
2239           phi->set(Phi::cannot_simplify);
2240           phi->clear(Phi::visited);
2241           return phi;
2242         }
2243       }
2244     }
2245 
2246     // successfully simplified phi function
2247     assert(subst != NULL, "illegal phi function");
2248     _has_substitutions = true;
2249     phi->clear(Phi::visited);
2250     phi->set_subst(subst);
2251 
2252 #ifndef PRODUCT
2253     if (PrintPhiFunctions) {
2254       tty->print_cr("simplified phi function %c%d to %c%d (Block B%d)", phi->type()->tchar(), phi->id(), subst->type()->tchar(), subst->id(), phi->block()->block_id());
2255     }
2256 #endif
2257 
2258     return subst;
2259   }
2260 }
2261 
2262 
2263 void PhiSimplifier::block_do(BlockBegin* b) {
2264   for_each_phi_fun(b, phi,
2265     simplify(phi);
2266   );
2267 
2268 #ifdef ASSERT
2269   for_each_phi_fun(b, phi,
2270                    assert(phi->operand_count() != 1 || phi->subst() != phi, "missed trivial simplification");
2271   );
2272 
2273   ValueStack* state = b->state()->caller_state();
2274   for_each_state_value(state, value,
2275     Phi* phi = value->as_Phi();
2276     assert(phi == NULL || phi->block() != b, "must not have phi function to simplify in caller state");
2277   );
2278 #endif
2279 }
2280 
2281 // This method is called after all blocks are filled with HIR instructions
2282 // It eliminates all Phi functions of the form x = [y, y] and x = [y, x]
2283 void GraphBuilder::eliminate_redundant_phis(BlockBegin* start) {
2284   PhiSimplifier simplifier(start);
2285 }
2286 
2287 
2288 void GraphBuilder::connect_to_end(BlockBegin* beg) {
2289   // setup iteration
2290   kill_all();
2291   _block = beg;
2292   _state = beg->state()->copy_for_parsing();
2293   _last  = beg;
2294   iterate_bytecodes_for_block(beg->bci());
2295 }
2296 
2297 
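// Parses the bytecodes of one basic block, appending HIR instructions until a
// BlockEnd is generated, the start of another block is reached, or the method
// ends; e.g., a block that falls through into its successor gets an explicit
// Goto appended so that every block ends with a BlockEnd.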
2298 BlockEnd* GraphBuilder::iterate_bytecodes_for_block(int bci) {
2299 #ifndef PRODUCT
2300   if (PrintIRDuringConstruction) {
2301     tty->cr();
2302     InstructionPrinter ip;
2303     ip.print_instr(_block); tty->cr();
2304     ip.print_stack(_block->state()); tty->cr();
2305     ip.print_inline_level(_block);
2306     ip.print_head();
2307     tty->print_cr("locals size: %d stack size: %d", state()->locals_size(), state()->stack_size());
2308   }
2309 #endif
2310   _skip_block = false;
2311   assert(state() != NULL, "ValueStack missing!");
2312   ciBytecodeStream s(method());
2313   s.reset_to_bci(bci);
2314   int prev_bci = bci;
2315   scope_data()->set_stream(&s);
2316   // iterate
2317   Bytecodes::Code code = Bytecodes::_illegal;
2318   bool push_exception = false;
2319 
2320   if (block()->is_set(BlockBegin::exception_entry_flag) && block()->next() == NULL) {
2321     // first thing in the exception entry block should be the exception object.
2322     push_exception = true;
2323   }
2324 
2325   while (!bailed_out() && last()->as_BlockEnd() == NULL &&
2326          (code = stream()->next()) != ciBytecodeStream::EOBC() &&
2327          (block_at(s.cur_bci()) == NULL || block_at(s.cur_bci()) == block())) {
2328     assert(state()->kind() == ValueStack::Parsing, "invalid state kind");
2329 
2330     // Check for active jsr during OSR compilation
2331     if (compilation()->is_osr_compile()
2332         && scope()->is_top_scope()
2333         && parsing_jsr()
2334         && s.cur_bci() == compilation()->osr_bci()) {
2335       bailout("OSR not supported while a jsr is active");
2336     }
2337 
2338     if (push_exception) {
2339       apush(append(new ExceptionObject()));
2340       push_exception = false;
2341     }
2342 
2343     // handle bytecode
2344     switch (code) {
2345       case Bytecodes::_nop            : /* nothing to do */ break;
2346       case Bytecodes::_aconst_null    : apush(append(new Constant(objectNull            ))); break;
2347       case Bytecodes::_iconst_m1      : ipush(append(new Constant(new IntConstant   (-1)))); break;
2348       case Bytecodes::_iconst_0       : ipush(append(new Constant(intZero               ))); break;
2349       case Bytecodes::_iconst_1       : ipush(append(new Constant(intOne                ))); break;
2350       case Bytecodes::_iconst_2       : ipush(append(new Constant(new IntConstant   ( 2)))); break;
2351       case Bytecodes::_iconst_3       : ipush(append(new Constant(new IntConstant   ( 3)))); break;
2352       case Bytecodes::_iconst_4       : ipush(append(new Constant(new IntConstant   ( 4)))); break;
2353       case Bytecodes::_iconst_5       : ipush(append(new Constant(new IntConstant   ( 5)))); break;
2354       case Bytecodes::_lconst_0       : lpush(append(new Constant(new LongConstant  ( 0)))); break;
2355       case Bytecodes::_lconst_1       : lpush(append(new Constant(new LongConstant  ( 1)))); break;
2356       case Bytecodes::_fconst_0       : fpush(append(new Constant(new FloatConstant ( 0)))); break;
2357       case Bytecodes::_fconst_1       : fpush(append(new Constant(new FloatConstant ( 1)))); break;
2358       case Bytecodes::_fconst_2       : fpush(append(new Constant(new FloatConstant ( 2)))); break;
2359       case Bytecodes::_dconst_0       : dpush(append(new Constant(new DoubleConstant( 0)))); break;
2360       case Bytecodes::_dconst_1       : dpush(append(new Constant(new DoubleConstant( 1)))); break;
2361       case Bytecodes::_bipush         : ipush(append(new Constant(new IntConstant(((signed char*)s.cur_bcp())[1])))); break;
2362       case Bytecodes::_sipush         : ipush(append(new Constant(new IntConstant((short)Bytes::get_Java_u2(s.cur_bcp()+1))))); break;
2363       case Bytecodes::_ldc            : // fall through
2364       case Bytecodes::_ldc_w          : // fall through
2365       case Bytecodes::_ldc2_w         : load_constant(); break;
2366       case Bytecodes::_iload          : load_local(intType     , s.get_index()); break;
2367       case Bytecodes::_lload          : load_local(longType    , s.get_index()); break;
2368       case Bytecodes::_fload          : load_local(floatType   , s.get_index()); break;
2369       case Bytecodes::_dload          : load_local(doubleType  , s.get_index()); break;
2370       case Bytecodes::_aload          : load_local(instanceType, s.get_index()); break;
2371       case Bytecodes::_iload_0        : load_local(intType   , 0); break;
2372       case Bytecodes::_iload_1        : load_local(intType   , 1); break;
2373       case Bytecodes::_iload_2        : load_local(intType   , 2); break;
2374       case Bytecodes::_iload_3        : load_local(intType   , 3); break;
2375       case Bytecodes::_lload_0        : load_local(longType  , 0); break;
2376       case Bytecodes::_lload_1        : load_local(longType  , 1); break;
2377       case Bytecodes::_lload_2        : load_local(longType  , 2); break;
2378       case Bytecodes::_lload_3        : load_local(longType  , 3); break;
2379       case Bytecodes::_fload_0        : load_local(floatType , 0); break;
2380       case Bytecodes::_fload_1        : load_local(floatType , 1); break;
2381       case Bytecodes::_fload_2        : load_local(floatType , 2); break;
2382       case Bytecodes::_fload_3        : load_local(floatType , 3); break;
2383       case Bytecodes::_dload_0        : load_local(doubleType, 0); break;
2384       case Bytecodes::_dload_1        : load_local(doubleType, 1); break;
2385       case Bytecodes::_dload_2        : load_local(doubleType, 2); break;
2386       case Bytecodes::_dload_3        : load_local(doubleType, 3); break;
2387       case Bytecodes::_aload_0        : load_local(objectType, 0); break;
2388       case Bytecodes::_aload_1        : load_local(objectType, 1); break;
2389       case Bytecodes::_aload_2        : load_local(objectType, 2); break;
2390       case Bytecodes::_aload_3        : load_local(objectType, 3); break;
2391       case Bytecodes::_iaload         : load_indexed(T_INT   ); break;
2392       case Bytecodes::_laload         : load_indexed(T_LONG  ); break;
2393       case Bytecodes::_faload         : load_indexed(T_FLOAT ); break;
2394       case Bytecodes::_daload         : load_indexed(T_DOUBLE); break;
2395       case Bytecodes::_aaload         : load_indexed(T_OBJECT); break;
2396       case Bytecodes::_baload         : load_indexed(T_BYTE  ); break;
2397       case Bytecodes::_caload         : load_indexed(T_CHAR  ); break;
2398       case Bytecodes::_saload         : load_indexed(T_SHORT ); break;
2399       case Bytecodes::_istore         : store_local(intType   , s.get_index()); break;
2400       case Bytecodes::_lstore         : store_local(longType  , s.get_index()); break;
2401       case Bytecodes::_fstore         : store_local(floatType , s.get_index()); break;
2402       case Bytecodes::_dstore         : store_local(doubleType, s.get_index()); break;
2403       case Bytecodes::_astore         : store_local(objectType, s.get_index()); break;
2404       case Bytecodes::_istore_0       : store_local(intType   , 0); break;
2405       case Bytecodes::_istore_1       : store_local(intType   , 1); break;
2406       case Bytecodes::_istore_2       : store_local(intType   , 2); break;
2407       case Bytecodes::_istore_3       : store_local(intType   , 3); break;
2408       case Bytecodes::_lstore_0       : store_local(longType  , 0); break;
2409       case Bytecodes::_lstore_1       : store_local(longType  , 1); break;
2410       case Bytecodes::_lstore_2       : store_local(longType  , 2); break;
2411       case Bytecodes::_lstore_3       : store_local(longType  , 3); break;
2412       case Bytecodes::_fstore_0       : store_local(floatType , 0); break;
2413       case Bytecodes::_fstore_1       : store_local(floatType , 1); break;
2414       case Bytecodes::_fstore_2       : store_local(floatType , 2); break;
2415       case Bytecodes::_fstore_3       : store_local(floatType , 3); break;
2416       case Bytecodes::_dstore_0       : store_local(doubleType, 0); break;
2417       case Bytecodes::_dstore_1       : store_local(doubleType, 1); break;
2418       case Bytecodes::_dstore_2       : store_local(doubleType, 2); break;
2419       case Bytecodes::_dstore_3       : store_local(doubleType, 3); break;
2420       case Bytecodes::_astore_0       : store_local(objectType, 0); break;
2421       case Bytecodes::_astore_1       : store_local(objectType, 1); break;
2422       case Bytecodes::_astore_2       : store_local(objectType, 2); break;
2423       case Bytecodes::_astore_3       : store_local(objectType, 3); break;
2424       case Bytecodes::_iastore        : store_indexed(T_INT   ); break;
2425       case Bytecodes::_lastore        : store_indexed(T_LONG  ); break;
2426       case Bytecodes::_fastore        : store_indexed(T_FLOAT ); break;
2427       case Bytecodes::_dastore        : store_indexed(T_DOUBLE); break;
2428       case Bytecodes::_aastore        : store_indexed(T_OBJECT); break;
2429       case Bytecodes::_bastore        : store_indexed(T_BYTE  ); break;
2430       case Bytecodes::_castore        : store_indexed(T_CHAR  ); break;
2431       case Bytecodes::_sastore        : store_indexed(T_SHORT ); break;
2432       case Bytecodes::_pop            : // fall through
2433       case Bytecodes::_pop2           : // fall through
2434       case Bytecodes::_dup            : // fall through
2435       case Bytecodes::_dup_x1         : // fall through
2436       case Bytecodes::_dup_x2         : // fall through
2437       case Bytecodes::_dup2           : // fall through
2438       case Bytecodes::_dup2_x1        : // fall through
2439       case Bytecodes::_dup2_x2        : // fall through
2440       case Bytecodes::_swap           : stack_op(code); break;
2441       case Bytecodes::_iadd           : arithmetic_op(intType   , code); break;
2442       case Bytecodes::_ladd           : arithmetic_op(longType  , code); break;
2443       case Bytecodes::_fadd           : arithmetic_op(floatType , code); break;
2444       case Bytecodes::_dadd           : arithmetic_op(doubleType, code); break;
2445       case Bytecodes::_isub           : arithmetic_op(intType   , code); break;
2446       case Bytecodes::_lsub           : arithmetic_op(longType  , code); break;
2447       case Bytecodes::_fsub           : arithmetic_op(floatType , code); break;
2448       case Bytecodes::_dsub           : arithmetic_op(doubleType, code); break;
2449       case Bytecodes::_imul           : arithmetic_op(intType   , code); break;
2450       case Bytecodes::_lmul           : arithmetic_op(longType  , code); break;
2451       case Bytecodes::_fmul           : arithmetic_op(floatType , code); break;
2452       case Bytecodes::_dmul           : arithmetic_op(doubleType, code); break;
2453       case Bytecodes::_idiv           : arithmetic_op(intType   , code, copy_state_for_exception()); break;
2454       case Bytecodes::_ldiv           : arithmetic_op(longType  , code, copy_state_for_exception()); break;
2455       case Bytecodes::_fdiv           : arithmetic_op(floatType , code); break;
2456       case Bytecodes::_ddiv           : arithmetic_op(doubleType, code); break;
2457       case Bytecodes::_irem           : arithmetic_op(intType   , code, copy_state_for_exception()); break;
2458       case Bytecodes::_lrem           : arithmetic_op(longType  , code, copy_state_for_exception()); break;
2459       case Bytecodes::_frem           : arithmetic_op(floatType , code); break;
2460       case Bytecodes::_drem           : arithmetic_op(doubleType, code); break;
2461       case Bytecodes::_ineg           : negate_op(intType   ); break;
2462       case Bytecodes::_lneg           : negate_op(longType  ); break;
2463       case Bytecodes::_fneg           : negate_op(floatType ); break;
2464       case Bytecodes::_dneg           : negate_op(doubleType); break;
2465       case Bytecodes::_ishl           : shift_op(intType , code); break;
2466       case Bytecodes::_lshl           : shift_op(longType, code); break;
2467       case Bytecodes::_ishr           : shift_op(intType , code); break;
2468       case Bytecodes::_lshr           : shift_op(longType, code); break;
2469       case Bytecodes::_iushr          : shift_op(intType , code); break;
2470       case Bytecodes::_lushr          : shift_op(longType, code); break;
2471       case Bytecodes::_iand           : logic_op(intType , code); break;
2472       case Bytecodes::_land           : logic_op(longType, code); break;
2473       case Bytecodes::_ior            : logic_op(intType , code); break;
2474       case Bytecodes::_lor            : logic_op(longType, code); break;
2475       case Bytecodes::_ixor           : logic_op(intType , code); break;
2476       case Bytecodes::_lxor           : logic_op(longType, code); break;
2477       case Bytecodes::_iinc           : increment(); break;
2478       case Bytecodes::_i2l            : convert(code, T_INT   , T_LONG  ); break;
2479       case Bytecodes::_i2f            : convert(code, T_INT   , T_FLOAT ); break;
2480       case Bytecodes::_i2d            : convert(code, T_INT   , T_DOUBLE); break;
2481       case Bytecodes::_l2i            : convert(code, T_LONG  , T_INT   ); break;
2482       case Bytecodes::_l2f            : convert(code, T_LONG  , T_FLOAT ); break;
2483       case Bytecodes::_l2d            : convert(code, T_LONG  , T_DOUBLE); break;
2484       case Bytecodes::_f2i            : convert(code, T_FLOAT , T_INT   ); break;
2485       case Bytecodes::_f2l            : convert(code, T_FLOAT , T_LONG  ); break;
2486       case Bytecodes::_f2d            : convert(code, T_FLOAT , T_DOUBLE); break;
2487       case Bytecodes::_d2i            : convert(code, T_DOUBLE, T_INT   ); break;
2488       case Bytecodes::_d2l            : convert(code, T_DOUBLE, T_LONG  ); break;
2489       case Bytecodes::_d2f            : convert(code, T_DOUBLE, T_FLOAT ); break;
2490       case Bytecodes::_i2b            : convert(code, T_INT   , T_BYTE  ); break;
2491       case Bytecodes::_i2c            : convert(code, T_INT   , T_CHAR  ); break;
2492       case Bytecodes::_i2s            : convert(code, T_INT   , T_SHORT ); break;
2493       case Bytecodes::_lcmp           : compare_op(longType  , code); break;
2494       case Bytecodes::_fcmpl          : compare_op(floatType , code); break;
2495       case Bytecodes::_fcmpg          : compare_op(floatType , code); break;
2496       case Bytecodes::_dcmpl          : compare_op(doubleType, code); break;
2497       case Bytecodes::_dcmpg          : compare_op(doubleType, code); break;
2498       case Bytecodes::_ifeq           : if_zero(intType   , If::eql); break;
2499       case Bytecodes::_ifne           : if_zero(intType   , If::neq); break;
2500       case Bytecodes::_iflt           : if_zero(intType   , If::lss); break;
2501       case Bytecodes::_ifge           : if_zero(intType   , If::geq); break;
2502       case Bytecodes::_ifgt           : if_zero(intType   , If::gtr); break;
2503       case Bytecodes::_ifle           : if_zero(intType   , If::leq); break;
2504       case Bytecodes::_if_icmpeq      : if_same(intType   , If::eql); break;
2505       case Bytecodes::_if_icmpne      : if_same(intType   , If::neq); break;
2506       case Bytecodes::_if_icmplt      : if_same(intType   , If::lss); break;
2507       case Bytecodes::_if_icmpge      : if_same(intType   , If::geq); break;
2508       case Bytecodes::_if_icmpgt      : if_same(intType   , If::gtr); break;
2509       case Bytecodes::_if_icmple      : if_same(intType   , If::leq); break;
2510       case Bytecodes::_if_acmpeq      : if_same(objectType, If::eql); break;
2511       case Bytecodes::_if_acmpne      : if_same(objectType, If::neq); break;
2512       case Bytecodes::_goto           : _goto(s.cur_bci(), s.get_dest()); break;
2513       case Bytecodes::_jsr            : jsr(s.get_dest()); break;
2514       case Bytecodes::_ret            : ret(s.get_index()); break;
2515       case Bytecodes::_tableswitch    : table_switch(); break;
2516       case Bytecodes::_lookupswitch   : lookup_switch(); break;
2517       case Bytecodes::_ireturn        : method_return(ipop()); break;
2518       case Bytecodes::_lreturn        : method_return(lpop()); break;
2519       case Bytecodes::_freturn        : method_return(fpop()); break;
2520       case Bytecodes::_dreturn        : method_return(dpop()); break;
2521       case Bytecodes::_areturn        : method_return(apop()); break;
2522       case Bytecodes::_return         : method_return(NULL  ); break;
2523       case Bytecodes::_getstatic      : // fall through
2524       case Bytecodes::_putstatic      : // fall through
2525       case Bytecodes::_getfield       : // fall through
2526       case Bytecodes::_putfield       : access_field(code); break;
2527       case Bytecodes::_invokevirtual  : // fall through
2528       case Bytecodes::_invokespecial  : // fall through
2529       case Bytecodes::_invokestatic   : // fall through
2530       case Bytecodes::_invokedynamic  : // fall through
2531       case Bytecodes::_invokeinterface: invoke(code); break;
2532       case Bytecodes::_new            : new_instance(s.get_index_u2()); break;
2533       case Bytecodes::_newarray       : new_type_array(); break;
2534       case Bytecodes::_anewarray      : new_object_array(); break;
2535       case Bytecodes::_arraylength    : { ValueStack* state_before = copy_state_for_exception(); ipush(append(new ArrayLength(apop(), state_before))); break; }
2536       case Bytecodes::_athrow         : throw_op(s.cur_bci()); break;
2537       case Bytecodes::_checkcast      : check_cast(s.get_index_u2()); break;
2538       case Bytecodes::_instanceof     : instance_of(s.get_index_u2()); break;
2539       case Bytecodes::_monitorenter   : monitorenter(apop(), s.cur_bci()); break;
2540       case Bytecodes::_monitorexit    : monitorexit (apop(), s.cur_bci()); break;
2541       case Bytecodes::_wide           : ShouldNotReachHere(); break;
2542       case Bytecodes::_multianewarray : new_multi_array(s.cur_bcp()[3]); break;
2543       case Bytecodes::_ifnull         : if_null(objectType, If::eql); break;
2544       case Bytecodes::_ifnonnull      : if_null(objectType, If::neq); break;
2545       case Bytecodes::_goto_w         : _goto(s.cur_bci(), s.get_far_dest()); break;
2546       case Bytecodes::_jsr_w          : jsr(s.get_far_dest()); break;
2547       case Bytecodes::_breakpoint     : BAILOUT_("concurrent setting of breakpoint", NULL);
2548       default                         : ShouldNotReachHere(); break;
2549     }
2550     // save current bci to set up the Goto at the end
2551     prev_bci = s.cur_bci();
2552   }
2553   CHECK_BAILOUT_(NULL);
2554   // stop processing of this block (see try_inline_full)
2555   if (_skip_block) {
2556     _skip_block = false;
2557     assert(_last && _last->as_BlockEnd(), "");
2558     return _last->as_BlockEnd();
2559   }
2560   // check whether the last instruction is a BlockEnd instruction
2561   BlockEnd* end = last()->as_BlockEnd();
2562   if (end == NULL) {
2563     // all blocks must end with a BlockEnd instruction => add a Goto
2564     end = new Goto(block_at(s.cur_bci()), false);
2565     append(end);
2566   }
2567   assert(end == last()->as_BlockEnd(), "inconsistency");
2568 
2569   assert(end->state() != NULL, "state must already be present");
2570   assert((end->as_Return() == NULL && end->as_Throw() == NULL) || end->state()->stack_size() == 0, "stack not needed for return and throw");
2571 
2572   // connect to begin & set state
2573   // NOTE that inlining may have changed the block we are parsing
2574   block()->set_end(end);
2575   // propagate state
2576   for (int i = end->number_of_sux() - 1; i >= 0; i--) {
2577     BlockBegin* sux = end->sux_at(i);
2578     assert(sux->is_predecessor(block()), "predecessor missing");
2579     // be careful, bailout if bytecodes are strange
2580     if (!sux->try_merge(end->state())) BAILOUT_("block join failed", NULL);
2581     scope_data()->add_to_work_list(end->sux_at(i));
2582   }
2583 
2584   scope_data()->set_stream(NULL);
2585 
2586   // done
2587   return end;
2588 }
2589 
2590 
2591 void GraphBuilder::iterate_all_blocks(bool start_in_current_block_for_inlining) {
2592   do {
2593     if (start_in_current_block_for_inlining && !bailed_out()) {
2594       iterate_bytecodes_for_block(0);
2595       start_in_current_block_for_inlining = false;
2596     } else {
2597       BlockBegin* b;
2598       while ((b = scope_data()->remove_from_work_list()) != NULL) {
2599         if (!b->is_set(BlockBegin::was_visited_flag)) {
2600           if (b->is_set(BlockBegin::osr_entry_flag)) {
2601             // we're about to parse the osr entry block, so make sure
2602             // we setup the OSR edge leading into this block so that
2603             // Phis get setup correctly.
2604             setup_osr_entry_block();
2605             // this is no longer the osr entry block, so clear it.
2606             b->clear(BlockBegin::osr_entry_flag);
2607           }
2608           b->set(BlockBegin::was_visited_flag);
2609           connect_to_end(b);
2610         }
2611       }
2612     }
2613   } while (!bailed_out() && !scope_data()->is_work_list_empty());
2614 }
2615 
2616 
2617 bool GraphBuilder::_can_trap      [Bytecodes::number_of_java_codes];
2618 
2619 void GraphBuilder::initialize() {
2620   // the following bytecodes are assumed to potentially
2621   // throw exceptions in compiled code - note that e.g.
2622   // monitorexit & the return bytecodes do not throw
2623   // exceptions when monitor pairing has proved that
2624   // they succeed
2625   Bytecodes::Code can_trap_list[] =
2626     { Bytecodes::_ldc
2627     , Bytecodes::_ldc_w
2628     , Bytecodes::_ldc2_w
2629     , Bytecodes::_iaload
2630     , Bytecodes::_laload
2631     , Bytecodes::_faload
2632     , Bytecodes::_daload
2633     , Bytecodes::_aaload
2634     , Bytecodes::_baload
2635     , Bytecodes::_caload
2636     , Bytecodes::_saload
2637     , Bytecodes::_iastore
2638     , Bytecodes::_lastore
2639     , Bytecodes::_fastore
2640     , Bytecodes::_dastore
2641     , Bytecodes::_aastore
2642     , Bytecodes::_bastore
2643     , Bytecodes::_castore
2644     , Bytecodes::_sastore
2645     , Bytecodes::_idiv
2646     , Bytecodes::_ldiv
2647     , Bytecodes::_irem
2648     , Bytecodes::_lrem
2649     , Bytecodes::_getstatic
2650     , Bytecodes::_putstatic
2651     , Bytecodes::_getfield
2652     , Bytecodes::_putfield
2653     , Bytecodes::_invokevirtual
2654     , Bytecodes::_invokespecial
2655     , Bytecodes::_invokestatic
2656     , Bytecodes::_invokedynamic
2657     , Bytecodes::_invokeinterface
2658     , Bytecodes::_new
2659     , Bytecodes::_newarray
2660     , Bytecodes::_anewarray
2661     , Bytecodes::_arraylength
2662     , Bytecodes::_athrow
2663     , Bytecodes::_checkcast
2664     , Bytecodes::_instanceof
2665     , Bytecodes::_monitorenter
2666     , Bytecodes::_multianewarray
2667     };
2668 
  // initialize the trap table
2670   for (int i = 0; i < Bytecodes::number_of_java_codes; i++) {
2671     _can_trap[i] = false;
2672   }
2673   // set standard trap info
2674   for (uint j = 0; j < ARRAY_SIZE(can_trap_list); j++) {
2675     _can_trap[can_trap_list[j]] = true;
2676   }
2677 }
2678 
2679 
2680 BlockBegin* GraphBuilder::header_block(BlockBegin* entry, BlockBegin::Flag f, ValueStack* state) {
2681   assert(entry->is_set(f), "entry/flag mismatch");
2682   // create header block
2683   BlockBegin* h = new BlockBegin(entry->bci());
2684   h->set_depth_first_number(0);
2685 
2686   Value l = h;
2687   BlockEnd* g = new Goto(entry, false);
2688   l->set_next(g, entry->bci());
2689   h->set_end(g);
2690   h->set(f);
2691   // setup header block end state
2692   ValueStack* s = state->copy(ValueStack::StateAfter, entry->bci()); // can use copy since stack is empty (=> no phis)
2693   assert(s->stack_is_empty(), "must have empty stack at entry point");
2694   g->set_state(s);
2695   return h;
2696 }
2697 
2698 
2699 
2700 BlockBegin* GraphBuilder::setup_start_block(int osr_bci, BlockBegin* std_entry, BlockBegin* osr_entry, ValueStack* state) {
2701   BlockBegin* start = new BlockBegin(0);
2702 
  // This code eliminates the empty start block at the beginning of
  // each method.  Previously, each method started with the
  // start-block created below, and this block was followed by the
  // header block that was always empty.  This header block is only
  // necessary if std_entry is also a backward branch target because
  // then phi functions may be necessary in the header block.  It's
  // also necessary when profiling so that there's a single block that
  // can increment the interpreter_invocation_count.
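  //
  // For example (illustration only), in
  //   void m() { do { ... } while (cond); }
  // the loop branches back to bci 0, so std_entry has a predecessor and
  // needs a header block to hold any phi functions for the loop.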
2711   BlockBegin* new_header_block;
2712   if (std_entry->number_of_preds() > 0 || count_invocations() || count_backedges()) {
2713     new_header_block = header_block(std_entry, BlockBegin::std_entry_flag, state);
2714   } else {
2715     new_header_block = std_entry;
2716   }
2717 
2718   // setup start block (root for the IR graph)
2719   Base* base =
2720     new Base(
2721       new_header_block,
2722       osr_entry
2723     );
2724   start->set_next(base, 0);
2725   start->set_end(base);
2726   // create & setup state for start block
2727   start->set_state(state->copy(ValueStack::StateAfter, std_entry->bci()));
2728   base->set_state(state->copy(ValueStack::StateAfter, std_entry->bci()));
2729 
2730   if (base->std_entry()->state() == NULL) {
2731     // setup states for header blocks
2732     base->std_entry()->merge(state);
2733   }
2734 
2735   assert(base->std_entry()->state() != NULL, "");
2736   return start;
2737 }
2738 
2739 
2740 void GraphBuilder::setup_osr_entry_block() {
2741   assert(compilation()->is_osr_compile(), "only for osrs");
2742 
2743   int osr_bci = compilation()->osr_bci();
2744   ciBytecodeStream s(method());
2745   s.reset_to_bci(osr_bci);
2746   s.next();
2747   scope_data()->set_stream(&s);
2748 
2749   // create a new block to be the osr setup code
2750   _osr_entry = new BlockBegin(osr_bci);
2751   _osr_entry->set(BlockBegin::osr_entry_flag);
2752   _osr_entry->set_depth_first_number(0);
2753   BlockBegin* target = bci2block()->at(osr_bci);
2754   assert(target != NULL && target->is_set(BlockBegin::osr_entry_flag), "must be there");
2755   // the osr entry has no values for locals
2756   ValueStack* state = target->state()->copy();
2757   _osr_entry->set_state(state);
2758 
2759   kill_all();
2760   _block = _osr_entry;
2761   _state = _osr_entry->state()->copy();
2762   assert(_state->bci() == osr_bci, "mismatch");
2763   _last  = _osr_entry;
2764   Value e = append(new OsrEntry());
2765   e->set_needs_null_check(false);
2766 
2767   // OSR buffer is
2768   //
2769   // locals[nlocals-1..0]
2770   // monitors[number_of_locks-1..0]
2771   //
  // locals is a direct copy of the interpreter frame, so in the osr buffer
  // the first slot in the local array is the last local from the interpreter
  // and the last slot is local[0] (the receiver) from the interpreter.
  //
  // Similarly with locks: the first lock slot in the osr buffer is the last
  // lock from the interpreter frame, and the last lock slot in the osr
  // buffer is lock 0 from the interpreter frame (the method lock for a
  // synchronized method).
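  //
  // For example (illustration only), with max_locals == 3 and one lock the
  // buffer is conceptually:
  //   slot 0: local[2]
  //   slot 1: local[1]
  //   slot 2: local[0]   (the receiver for an instance method)
  //   then:   lock[0]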
2779 
2780   // Initialize monitors in the compiled activation.
2781 
2782   int index;
2783   Value local;
2784 
2785   // find all the locals that the interpreter thinks contain live oops
2786   const BitMap live_oops = method()->live_local_oops_at_bci(osr_bci);
2787 
2788   // compute the offset into the locals so that we can treat the buffer
2789   // as if the locals were still in the interpreter frame
2790   int locals_offset = BytesPerWord * (method()->max_locals() - 1);
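  // For example (illustration only), with max_locals == 5, locals_offset is
  // 4 * BytesPerWord; a long occupying locals 1..2 (index 1, size 2) lands
  // at offset 4*BytesPerWord - (1 + 2 - 1)*BytesPerWord = 2*BytesPerWord,
  // and local[0] lands at offset 4*BytesPerWord, matching the layout above.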
2791   for_each_local_value(state, index, local) {
2792     int offset = locals_offset - (index + local->type()->size() - 1) * BytesPerWord;
2793     Value get;
2794     if (local->type()->is_object_kind() && !live_oops.at(index)) {
2795       // The interpreter thinks this local is dead but the compiler
2796       // doesn't so pretend that the interpreter passed in null.
2797       get = append(new Constant(objectNull));
2798     } else {
2799       get = append(new UnsafeGetRaw(as_BasicType(local->type()), e,
2800                                     append(new Constant(new IntConstant(offset))),
2801                                     0,
2802                                     true /*unaligned*/, true /*wide*/));
2803     }
2804     _state->store_local(index, get);
2805   }
2806 
2807   // the storage for the OSR buffer is freed manually in the LIRGenerator.
2808 
2809   assert(state->caller_state() == NULL, "should be top scope");
2810   state->clear_locals();
2811   Goto* g = new Goto(target, false);
2812   append(g);
2813   _osr_entry->set_end(g);
2814   target->merge(_osr_entry->end()->state());
2815 
2816   scope_data()->set_stream(NULL);
2817 }
2818 
2819 
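// For example (illustration only), for a virtual method with signature
// (JLjava/lang/Object;)I the entry state is: local[0] = receiver,
// locals[1..2] = the long argument, local[3] = the Object argument; if the
// method is synchronized, one lock slot is reserved as well.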
2820 ValueStack* GraphBuilder::state_at_entry() {
2821   ValueStack* state = new ValueStack(scope(), NULL);
2822 
2823   // Set up locals for receiver
2824   int idx = 0;
2825   if (!method()->is_static()) {
2826     // we should always see the receiver
2827     state->store_local(idx, new Local(objectType, idx));
2828     idx = 1;
2829   }
2830 
2831   // Set up locals for incoming arguments
2832   ciSignature* sig = method()->signature();
2833   for (int i = 0; i < sig->count(); i++) {
2834     ciType* type = sig->type_at(i);
2835     BasicType basic_type = type->basic_type();
2836     // don't allow T_ARRAY to propagate into locals types
2837     if (basic_type == T_ARRAY) basic_type = T_OBJECT;
2838     ValueType* vt = as_ValueType(basic_type);
2839     state->store_local(idx, new Local(vt, idx));
2840     idx += type->size();
2841   }
2842 
2843   // lock synchronized method
2844   if (method()->is_synchronized()) {
2845     state->lock(NULL);
2846   }
2847 
2848   return state;
2849 }
2850 
2851 
2852 GraphBuilder::GraphBuilder(Compilation* compilation, IRScope* scope)
2853   : _scope_data(NULL)
2854   , _instruction_count(0)
2855   , _osr_entry(NULL)
2856   , _memory(new MemoryBuffer())
2857   , _compilation(compilation)
2858   , _inline_bailout_msg(NULL)
2859 {
2860   int osr_bci = compilation->osr_bci();
2861 
2862   // determine entry points and bci2block mapping
2863   BlockListBuilder blm(compilation, scope, osr_bci);
2864   CHECK_BAILOUT();
2865 
2866   BlockList* bci2block = blm.bci2block();
2867   BlockBegin* start_block = bci2block->at(0);
2868 
2869   push_root_scope(scope, bci2block, start_block);
2870 
2871   // setup state for std entry
2872   _initial_state = state_at_entry();
2873   start_block->merge(_initial_state);
2874 
2875   // complete graph
2876   _vmap        = new ValueMap();
2877   switch (scope->method()->intrinsic_id()) {
2878   case vmIntrinsics::_dabs          : // fall through
2879   case vmIntrinsics::_dsqrt         : // fall through
2880   case vmIntrinsics::_dsin          : // fall through
2881   case vmIntrinsics::_dcos          : // fall through
2882   case vmIntrinsics::_dtan          : // fall through
2883   case vmIntrinsics::_dlog          : // fall through
2884   case vmIntrinsics::_dlog10        : // fall through
2885     {
      // Compiles where the root method is an intrinsic need a special
      // compilation environment because the bytecodes for the method
      // shouldn't be parsed during the compilation; only the special
      // Intrinsic node should be emitted.  If this isn't done, the
      // code for the inlined version will be different from the root
      // compiled version, which could lead to monotonicity problems on
      // Intel.
2893 
2894       // Set up a stream so that appending instructions works properly.
2895       ciBytecodeStream s(scope->method());
2896       s.reset_to_bci(0);
2897       scope_data()->set_stream(&s);
2898       s.next();
2899 
2900       // setup the initial block state
2901       _block = start_block;
2902       _state = start_block->state()->copy_for_parsing();
2903       _last  = start_block;
2904       load_local(doubleType, 0);
2905 
2906       // Emit the intrinsic node.
2907       bool result = try_inline_intrinsics(scope->method());
2908       if (!result) BAILOUT("failed to inline intrinsic");
2909       method_return(dpop());
2910 
2911       // connect the begin and end blocks and we're all done.
2912       BlockEnd* end = last()->as_BlockEnd();
2913       block()->set_end(end);
2914       break;
2915     }
2916   default:
2917     scope_data()->add_to_work_list(start_block);
2918     iterate_all_blocks();
2919     break;
2920   }
2921   CHECK_BAILOUT();
2922 
2923   _start = setup_start_block(osr_bci, start_block, _osr_entry, _initial_state);
2924 
2925   eliminate_redundant_phis(_start);
2926 
2927   NOT_PRODUCT(if (PrintValueNumbering && Verbose) print_stats());
2928   // for osr compile, bailout if some requirements are not fulfilled
2929   if (osr_bci != -1) {
2930     BlockBegin* osr_block = blm.bci2block()->at(osr_bci);
2931     assert(osr_block->is_set(BlockBegin::was_visited_flag),"osr entry must have been visited for osr compile");
2932 
2933     // check if osr entry point has empty stack - we cannot handle non-empty stacks at osr entry points
2934     if (!osr_block->state()->stack_is_empty()) {
2935       BAILOUT("stack not empty at OSR entry point");
2936     }
2937   }
2938 #ifndef PRODUCT
2939   if (PrintCompilation && Verbose) tty->print_cr("Created %d Instructions", _instruction_count);
2940 #endif
2941 }
2942 
2943 
2944 ValueStack* GraphBuilder::copy_state_before() {
2945   return copy_state_before_with_bci(bci());
2946 }
2947 
2948 ValueStack* GraphBuilder::copy_state_exhandling() {
2949   return copy_state_exhandling_with_bci(bci());
2950 }
2951 
2952 ValueStack* GraphBuilder::copy_state_for_exception() {
2953   return copy_state_for_exception_with_bci(bci());
2954 }
2955 
2956 ValueStack* GraphBuilder::copy_state_before_with_bci(int bci) {
2957   return state()->copy(ValueStack::StateBefore, bci);
2958 }
2959 
2960 ValueStack* GraphBuilder::copy_state_exhandling_with_bci(int bci) {
2961   if (!has_handler()) return NULL;
2962   return state()->copy(ValueStack::StateBefore, bci);
2963 }
2964 
2965 ValueStack* GraphBuilder::copy_state_for_exception_with_bci(int bci) {
2966   ValueStack* s = copy_state_exhandling_with_bci(bci);
2967   if (s == NULL) {
2968     if (_compilation->env()->jvmti_can_access_local_variables()) {
2969       s = state()->copy(ValueStack::ExceptionState, bci);
2970     } else {
2971       s = state()->copy(ValueStack::EmptyExceptionState, bci);
2972     }
2973   }
2974   return s;
2975 }
2976 
2977 int GraphBuilder::recursive_inline_level(ciMethod* cur_callee) const {
2978   int recur_level = 0;
2979   for (IRScope* s = scope(); s != NULL; s = s->caller()) {
2980     if (s->method() == cur_callee) {
2981       ++recur_level;
2982     }
2983   }
2984   return recur_level;
2985 }
2986 
2987 
2988 bool GraphBuilder::try_inline(ciMethod* callee, bool holder_known) {
2989   // Clear out any existing inline bailout condition
2990   clear_inline_bailout();
2991 
2992   if (callee->should_exclude()) {
2993     // callee is excluded
2994     INLINE_BAILOUT("excluded by CompilerOracle")
2995   } else if (!callee->can_be_compiled()) {
    // callee is not compilable (probably has breakpoints)
2997     INLINE_BAILOUT("not compilable")
2998   } else if (callee->intrinsic_id() != vmIntrinsics::_none && try_inline_intrinsics(callee)) {
2999     // intrinsics can be native or not
3000     return true;
3001   } else if (callee->is_native()) {
3002     // non-intrinsic natives cannot be inlined
3003     INLINE_BAILOUT("non-intrinsic native")
3004   } else if (callee->is_abstract()) {
3005     INLINE_BAILOUT("abstract")
3006   } else {
3007     return try_inline_full(callee, holder_known);
3008   }
3009 }
3010 
3011 
3012 bool GraphBuilder::try_inline_intrinsics(ciMethod* callee) {
3013   if (!InlineNatives           ) INLINE_BAILOUT("intrinsic method inlining disabled");
3014   if (callee->is_synchronized()) {
3015     // We don't currently support any synchronized intrinsics
3016     return false;
3017   }
3018 
3019   // callee seems like a good candidate
3020   // determine id
3021   bool preserves_state = false;
3022   bool cantrap = true;
3023   vmIntrinsics::ID id = callee->intrinsic_id();
3024   switch (id) {
3025     case vmIntrinsics::_arraycopy     :
3026       if (!InlineArrayCopy) return false;
3027       break;
3028 
3029     case vmIntrinsics::_currentTimeMillis:
3030     case vmIntrinsics::_nanoTime:
3031       preserves_state = true;
3032       cantrap = false;
3033       break;
3034 
3035     case vmIntrinsics::_floatToRawIntBits   :
3036     case vmIntrinsics::_intBitsToFloat      :
3037     case vmIntrinsics::_doubleToRawLongBits :
3038     case vmIntrinsics::_longBitsToDouble    :
3039       if (!InlineMathNatives) return false;
3040       preserves_state = true;
3041       cantrap = false;
3042       break;
3043 
3044     case vmIntrinsics::_getClass      :
3045       if (!InlineClassNatives) return false;
3046       preserves_state = true;
3047       break;
3048 
3049     case vmIntrinsics::_currentThread :
3050       if (!InlineThreadNatives) return false;
3051       preserves_state = true;
3052       cantrap = false;
3053       break;
3054 
3055     case vmIntrinsics::_dabs          : // fall through
3056     case vmIntrinsics::_dsqrt         : // fall through
3057     case vmIntrinsics::_dsin          : // fall through
3058     case vmIntrinsics::_dcos          : // fall through
3059     case vmIntrinsics::_dtan          : // fall through
3060     case vmIntrinsics::_dlog          : // fall through
3061     case vmIntrinsics::_dlog10        : // fall through
3062       if (!InlineMathNatives) return false;
3063       cantrap = false;
3064       preserves_state = true;
3065       break;
3066 
3067     // sun/misc/AtomicLong.attemptUpdate
3068     case vmIntrinsics::_attemptUpdate :
3069       if (!VM_Version::supports_cx8()) return false;
3070       if (!InlineAtomicLong) return false;
3071       preserves_state = true;
3072       break;
3073 
3074     // Use special nodes for Unsafe instructions so we can more easily
3075     // perform an address-mode optimization on the raw variants
3076     case vmIntrinsics::_getObject : return append_unsafe_get_obj(callee, T_OBJECT,  false);
3077     case vmIntrinsics::_getBoolean: return append_unsafe_get_obj(callee, T_BOOLEAN, false);
3078     case vmIntrinsics::_getByte   : return append_unsafe_get_obj(callee, T_BYTE,    false);
3079     case vmIntrinsics::_getShort  : return append_unsafe_get_obj(callee, T_SHORT,   false);
3080     case vmIntrinsics::_getChar   : return append_unsafe_get_obj(callee, T_CHAR,    false);
3081     case vmIntrinsics::_getInt    : return append_unsafe_get_obj(callee, T_INT,     false);
3082     case vmIntrinsics::_getLong   : return append_unsafe_get_obj(callee, T_LONG,    false);
3083     case vmIntrinsics::_getFloat  : return append_unsafe_get_obj(callee, T_FLOAT,   false);
3084     case vmIntrinsics::_getDouble : return append_unsafe_get_obj(callee, T_DOUBLE,  false);
3085 
3086     case vmIntrinsics::_putObject : return append_unsafe_put_obj(callee, T_OBJECT,  false);
3087     case vmIntrinsics::_putBoolean: return append_unsafe_put_obj(callee, T_BOOLEAN, false);
3088     case vmIntrinsics::_putByte   : return append_unsafe_put_obj(callee, T_BYTE,    false);
3089     case vmIntrinsics::_putShort  : return append_unsafe_put_obj(callee, T_SHORT,   false);
3090     case vmIntrinsics::_putChar   : return append_unsafe_put_obj(callee, T_CHAR,    false);
3091     case vmIntrinsics::_putInt    : return append_unsafe_put_obj(callee, T_INT,     false);
3092     case vmIntrinsics::_putLong   : return append_unsafe_put_obj(callee, T_LONG,    false);
3093     case vmIntrinsics::_putFloat  : return append_unsafe_put_obj(callee, T_FLOAT,   false);
3094     case vmIntrinsics::_putDouble : return append_unsafe_put_obj(callee, T_DOUBLE,  false);
3095 
3096     case vmIntrinsics::_getObjectVolatile : return append_unsafe_get_obj(callee, T_OBJECT,  true);
3097     case vmIntrinsics::_getBooleanVolatile: return append_unsafe_get_obj(callee, T_BOOLEAN, true);
3098     case vmIntrinsics::_getByteVolatile   : return append_unsafe_get_obj(callee, T_BYTE,    true);
3099     case vmIntrinsics::_getShortVolatile  : return append_unsafe_get_obj(callee, T_SHORT,   true);
3100     case vmIntrinsics::_getCharVolatile   : return append_unsafe_get_obj(callee, T_CHAR,    true);
3101     case vmIntrinsics::_getIntVolatile    : return append_unsafe_get_obj(callee, T_INT,     true);
3102     case vmIntrinsics::_getLongVolatile   : return append_unsafe_get_obj(callee, T_LONG,    true);
3103     case vmIntrinsics::_getFloatVolatile  : return append_unsafe_get_obj(callee, T_FLOAT,   true);
3104     case vmIntrinsics::_getDoubleVolatile : return append_unsafe_get_obj(callee, T_DOUBLE,  true);
3105 
3106     case vmIntrinsics::_putObjectVolatile : return append_unsafe_put_obj(callee, T_OBJECT,  true);
3107     case vmIntrinsics::_putBooleanVolatile: return append_unsafe_put_obj(callee, T_BOOLEAN, true);
3108     case vmIntrinsics::_putByteVolatile   : return append_unsafe_put_obj(callee, T_BYTE,    true);
3109     case vmIntrinsics::_putShortVolatile  : return append_unsafe_put_obj(callee, T_SHORT,   true);
3110     case vmIntrinsics::_putCharVolatile   : return append_unsafe_put_obj(callee, T_CHAR,    true);
3111     case vmIntrinsics::_putIntVolatile    : return append_unsafe_put_obj(callee, T_INT,     true);
3112     case vmIntrinsics::_putLongVolatile   : return append_unsafe_put_obj(callee, T_LONG,    true);
3113     case vmIntrinsics::_putFloatVolatile  : return append_unsafe_put_obj(callee, T_FLOAT,   true);
3114     case vmIntrinsics::_putDoubleVolatile : return append_unsafe_put_obj(callee, T_DOUBLE,  true);
3115 
3116     case vmIntrinsics::_getByte_raw   : return append_unsafe_get_raw(callee, T_BYTE);
3117     case vmIntrinsics::_getShort_raw  : return append_unsafe_get_raw(callee, T_SHORT);
3118     case vmIntrinsics::_getChar_raw   : return append_unsafe_get_raw(callee, T_CHAR);
3119     case vmIntrinsics::_getInt_raw    : return append_unsafe_get_raw(callee, T_INT);
3120     case vmIntrinsics::_getLong_raw   : return append_unsafe_get_raw(callee, T_LONG);
3121     case vmIntrinsics::_getFloat_raw  : return append_unsafe_get_raw(callee, T_FLOAT);
3122     case vmIntrinsics::_getDouble_raw : return append_unsafe_get_raw(callee, T_DOUBLE);
3123 
3124     case vmIntrinsics::_putByte_raw   : return append_unsafe_put_raw(callee, T_BYTE);
3125     case vmIntrinsics::_putShort_raw  : return append_unsafe_put_raw(callee, T_SHORT);
3126     case vmIntrinsics::_putChar_raw   : return append_unsafe_put_raw(callee, T_CHAR);
3127     case vmIntrinsics::_putInt_raw    : return append_unsafe_put_raw(callee, T_INT);
3128     case vmIntrinsics::_putLong_raw   : return append_unsafe_put_raw(callee, T_LONG);
3129     case vmIntrinsics::_putFloat_raw  : return append_unsafe_put_raw(callee, T_FLOAT);
3130     case vmIntrinsics::_putDouble_raw : return append_unsafe_put_raw(callee, T_DOUBLE);
3131 
3132     case vmIntrinsics::_prefetchRead        : return append_unsafe_prefetch(callee, false, false);
3133     case vmIntrinsics::_prefetchWrite       : return append_unsafe_prefetch(callee, false, true);
3134     case vmIntrinsics::_prefetchReadStatic  : return append_unsafe_prefetch(callee, true,  false);
3135     case vmIntrinsics::_prefetchWriteStatic : return append_unsafe_prefetch(callee, true,  true);
3136 
3137     case vmIntrinsics::_checkIndex    :
3138       if (!InlineNIOCheckIndex) return false;
3139       preserves_state = true;
3140       break;
3141     case vmIntrinsics::_putOrderedObject : return append_unsafe_put_obj(callee, T_OBJECT,  true);
3142     case vmIntrinsics::_putOrderedInt    : return append_unsafe_put_obj(callee, T_INT,     true);
3143     case vmIntrinsics::_putOrderedLong   : return append_unsafe_put_obj(callee, T_LONG,    true);
3144 
3145     case vmIntrinsics::_compareAndSwapLong:
3146       if (!VM_Version::supports_cx8()) return false;
3147       // fall through
3148     case vmIntrinsics::_compareAndSwapInt:
3149     case vmIntrinsics::_compareAndSwapObject:
3150       append_unsafe_CAS(callee);
3151       return true;
3152 
3153     default                       : return false; // do not inline
3154   }
3155   // create intrinsic node
3156   const bool has_receiver = !callee->is_static();
3157   ValueType* result_type = as_ValueType(callee->return_type());
3158   ValueStack* state_before = copy_state_for_exception();
3159 
3160   Values* args = state()->pop_arguments(callee->arg_size());
3161 
3162   if (is_profiling()) {
3163     // Don't profile in the special case where the root method
3164     // is the intrinsic
3165     if (callee != method()) {
3166       // Note that we'd collect profile data in this method if we wanted it.
3167       compilation()->set_would_profile(true);
3168       if (profile_calls()) {
3169         Value recv = NULL;
3170         if (has_receiver) {
3171           recv = args->at(0);
3172           null_check(recv);
3173         }
3174         profile_call(recv, NULL);
3175       }
3176     }
3177   }
3178 
3179   Intrinsic* result = new Intrinsic(result_type, id, args, has_receiver, state_before,
3180                                     preserves_state, cantrap);
3181   // append instruction & push result
3182   Value value = append_split(result);
3183   if (result_type != voidType) push(result_type, value);
3184 
3185 #ifndef PRODUCT
3186   // printing
3187   if (PrintInlining) {
3188     print_inline_result(callee, true);
3189   }
3190 #endif
3191 
3192   // done
3193   return true;
3194 }
3195 
3196 
3197 bool GraphBuilder::try_inline_jsr(int jsr_dest_bci) {
3198   // Introduce a new callee continuation point - all Ret instructions
3199   // will be replaced with Gotos to this point.
3200   BlockBegin* cont = block_at(next_bci());
  assert(cont != NULL, "continuation must exist (BlockListBuilder starts a new block after a jsr)");
3202 
  // Note: we cannot assign state to the continuation yet, as we have to
  // pick up the state from the Ret instructions.
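  //
  // For example (illustration only), a pre-Java-6 finally clause typically
  // compiles to:
  //     jsr   L_sub     // push return address, jump into the subroutine
  //     ...             // <- continuation: block_at(next_bci()) starts here
  //   L_sub:
  //     astore_N        // save the return address in local N
  //     ...             // finally code
  //     ret   N         // replaced by a Goto to the continuation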
3205 
3206   // Push callee scope
3207   push_scope_for_jsr(cont, jsr_dest_bci);
3208 
3209   // Temporarily set up bytecode stream so we can append instructions
3210   // (only using the bci of this stream)
3211   scope_data()->set_stream(scope_data()->parent()->stream());
3212 
3213   BlockBegin* jsr_start_block = block_at(jsr_dest_bci);
3214   assert(jsr_start_block != NULL, "jsr start block must exist");
3215   assert(!jsr_start_block->is_set(BlockBegin::was_visited_flag), "should not have visited jsr yet");
3216   Goto* goto_sub = new Goto(jsr_start_block, false);
3217   // Must copy state to avoid wrong sharing when parsing bytecodes
3218   assert(jsr_start_block->state() == NULL, "should have fresh jsr starting block");
3219   jsr_start_block->set_state(copy_state_before_with_bci(jsr_dest_bci));
3220   append(goto_sub);
3221   _block->set_end(goto_sub);
3222   _last = _block = jsr_start_block;
3223 
3224   // Clear out bytecode stream
3225   scope_data()->set_stream(NULL);
3226 
3227   scope_data()->add_to_work_list(jsr_start_block);
3228 
3229   // Ready to resume parsing in subroutine
3230   iterate_all_blocks();
3231 
3232   // If we bailed out during parsing, return immediately (this is bad news)
3233   CHECK_BAILOUT_(false);
3234 
3235   // Detect whether the continuation can actually be reached. If not,
3236   // it has not had state set by the join() operations in
3237   // iterate_bytecodes_for_block()/ret() and we should not touch the
3238   // iteration state. The calling activation of
3239   // iterate_bytecodes_for_block will then complete normally.
3240   if (cont->state() != NULL) {
3241     if (!cont->is_set(BlockBegin::was_visited_flag)) {
3242       // add continuation to work list instead of parsing it immediately
3243       scope_data()->parent()->add_to_work_list(cont);
3244     }
3245   }
3246 
3247   assert(jsr_continuation() == cont, "continuation must not have changed");
3248   assert(!jsr_continuation()->is_set(BlockBegin::was_visited_flag) ||
3249          jsr_continuation()->is_set(BlockBegin::parser_loop_header_flag),
3250          "continuation can only be visited in case of backward branches");
3251   assert(_last && _last->as_BlockEnd(), "block must have end");
3252 
3253   // continuation is in work list, so end iteration of current block
3254   _skip_block = true;
3255   pop_scope_for_jsr();
3256 
3257   return true;
3258 }
3259 
3260 
3261 // Inline the entry of a synchronized method as a monitor enter and
3262 // register the exception handler which releases the monitor if an
3263 // exception is thrown within the callee. Note that the monitor enter
3264 // cannot throw an exception itself, because the receiver is
3265 // guaranteed to be non-null by the explicit null check at the
3266 // beginning of inlining.
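//
// For example (illustration only), inlining
//   synchronized int m() { return x; }
// conceptually becomes
//   monitorenter(recv); int r = recv.x; monitorexit(recv);
// with sync_handler releasing the monitor and rethrowing if the inlined
// body throws.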
3267 void GraphBuilder::inline_sync_entry(Value lock, BlockBegin* sync_handler) {
3268   assert(lock != NULL && sync_handler != NULL, "lock or handler missing");
3269 
3270   monitorenter(lock, SynchronizationEntryBCI);
3271   assert(_last->as_MonitorEnter() != NULL, "monitor enter expected");
3272   _last->set_needs_null_check(false);
3273 
3274   sync_handler->set(BlockBegin::exception_entry_flag);
3275   sync_handler->set(BlockBegin::is_on_work_list_flag);
3276 
3277   ciExceptionHandler* desc = new ciExceptionHandler(method()->holder(), 0, method()->code_size(), -1, 0);
3278   XHandler* h = new XHandler(desc);
3279   h->set_entry_block(sync_handler);
3280   scope_data()->xhandlers()->append(h);
3281   scope_data()->set_has_handler();
3282 }
3283 
3284 
3285 // If an exception is thrown and not handled within an inlined
3286 // synchronized method, the monitor must be released before the
3287 // exception is rethrown in the outer scope. Generate the appropriate
3288 // instructions here.
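//
// Conceptually (illustration only) the handler filled in here is:
//   e = <incoming exception>; monitorexit(lock); throw e;
// executed as if at the call site of the inlined method.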
3289 void GraphBuilder::fill_sync_handler(Value lock, BlockBegin* sync_handler, bool default_handler) {
3290   BlockBegin* orig_block = _block;
3291   ValueStack* orig_state = _state;
3292   Instruction* orig_last = _last;
3293   _last = _block = sync_handler;
3294   _state = sync_handler->state()->copy();
3295 
3296   assert(sync_handler != NULL, "handler missing");
  assert(!sync_handler->is_set(BlockBegin::was_visited_flag), "sync handler must not have been visited yet");
3298 
3299   assert(lock != NULL || default_handler, "lock or handler missing");
3300 
3301   XHandler* h = scope_data()->xhandlers()->remove_last();
3302   assert(h->entry_block() == sync_handler, "corrupt list of handlers");
3303 
3304   block()->set(BlockBegin::was_visited_flag);
3305   Value exception = append_with_bci(new ExceptionObject(), SynchronizationEntryBCI);
3306   assert(exception->is_pinned(), "must be");
3307 
3308   int bci = SynchronizationEntryBCI;
3309   if (compilation()->env()->dtrace_method_probes()) {
3310     // Report exit from inline methods.  We don't have a stream here
3311     // so pass an explicit bci of SynchronizationEntryBCI.
3312     Values* args = new Values(1);
3313     args->push(append_with_bci(new Constant(new ObjectConstant(method())), bci));
3314     append_with_bci(new RuntimeCall(voidType, "dtrace_method_exit", CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), args), bci);
3315   }
3316 
3317   if (lock) {
3318     assert(state()->locks_size() > 0 && state()->lock_at(state()->locks_size() - 1) == lock, "lock is missing");
3319     if (!lock->is_linked()) {
3320       lock = append_with_bci(lock, bci);
3321     }
3322 
3323     // exit the monitor in the context of the synchronized method
3324     monitorexit(lock, bci);
3325 
3326     // exit the context of the synchronized method
3327     if (!default_handler) {
3328       pop_scope();
3329       bci = _state->caller_state()->bci();
3330       _state = _state->caller_state()->copy_for_parsing();
3331     }
3332   }
3333 
  // perform the throw as if at the call site
3335   apush(exception);
3336   throw_op(bci);
3337 
3338   BlockEnd* end = last()->as_BlockEnd();
3339   block()->set_end(end);
3340 
3341   _block = orig_block;
3342   _state = orig_state;
3343   _last = orig_last;
3344 }
3345 
3346 
3347 bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known) {
3348   assert(!callee->is_native(), "callee must not be native");
3349   if (count_backedges() && callee->has_loops()) {
3350     INLINE_BAILOUT("too complex for tiered");
3351   }
  // first perform tests for conditions that make inlining impossible
3353   if (callee->has_exception_handlers() &&
3354       !InlineMethodsWithExceptionHandlers) INLINE_BAILOUT("callee has exception handlers");
3355   if (callee->is_synchronized() &&
3356       !InlineSynchronizedMethods         ) INLINE_BAILOUT("callee is synchronized");
3357   if (!callee->holder()->is_initialized()) INLINE_BAILOUT("callee's klass not initialized yet");
3358   if (!callee->has_balanced_monitors())    INLINE_BAILOUT("callee's monitors do not match");
3359 
3360   // Proper inlining of methods with jsrs requires a little more work.
3361   if (callee->has_jsrs()                 ) INLINE_BAILOUT("jsrs not handled properly by inliner yet");
3362 
3363   // now perform tests that are based on flag settings
3364   if (inline_level() > MaxInlineLevel                         ) INLINE_BAILOUT("too-deep inlining");
3365   if (recursive_inline_level(callee) > MaxRecursiveInlineLevel) INLINE_BAILOUT("too-deep recursive inlining");
3366   if (callee->code_size() > max_inline_size()                 ) INLINE_BAILOUT("callee is too large");
3367 
3368   // don't inline throwable methods unless the inlining tree is rooted in a throwable class
3369   if (callee->name() == ciSymbol::object_initializer_name() &&
3370       callee->holder()->is_subclass_of(ciEnv::current()->Throwable_klass())) {
3371     // Throwable constructor call
3372     IRScope* top = scope();
3373     while (top->caller() != NULL) {
3374       top = top->caller();
3375     }
3376     if (!top->method()->holder()->is_subclass_of(ciEnv::current()->Throwable_klass())) {
3377       INLINE_BAILOUT("don't inline Throwable constructors");
3378     }
3379   }
3380 
  // When SSE2 is used on Intel, no special handling for strictfp is
  // needed.  strict_fp_requires_explicit_rounding is a compile-time
  // constant, but UseSSE is only known at runtime, so it must be
  // checked here.
3384   if (strict_fp_requires_explicit_rounding && UseSSE < 2 && method()->is_strict() != callee->is_strict()) {
3385     INLINE_BAILOUT("caller and callee have different strict fp requirements");
3386   }
3387 
3388   if (compilation()->env()->num_inlined_bytecodes() > DesiredMethodLimit) {
3389     INLINE_BAILOUT("total inlining greater than DesiredMethodLimit");
3390   }
3391 
3392   if (is_profiling() && !callee->ensure_method_data()) {
3393     INLINE_BAILOUT("mdo allocation failed");
3394   }
3395 #ifndef PRODUCT
  // printing
3397   if (PrintInlining) {
3398     print_inline_result(callee, true);
3399   }
3400 #endif
3401 
3402   // NOTE: Bailouts from this point on, which occur at the
3403   // GraphBuilder level, do not cause bailout just of the inlining but
3404   // in fact of the entire compilation.
3405 
3406   BlockBegin* orig_block = block();
3407 
3408   const int args_base = state()->stack_size() - callee->arg_size();
3409   assert(args_base >= 0, "stack underflow during inlining");
3410 
3411   // Insert null check if necessary
3412   Value recv = NULL;
3413   if (code() != Bytecodes::_invokestatic) {
3414     // note: null check must happen even if first instruction of callee does
3415     //       an implicit null check since the callee is in a different scope
3416     //       and we must make sure exception handling does the right thing
3417     assert(!callee->is_static(), "callee must not be static");
3418     assert(callee->arg_size() > 0, "must have at least a receiver");
3419     recv = state()->stack_at(args_base);
3420     null_check(recv);
3421   }
3422 
3423   if (is_profiling()) {
3424     // Note that we'd collect profile data in this method if we wanted it.
3425     // this may be redundant here...
3426     compilation()->set_would_profile(true);
3427 
3428     if (profile_calls()) {
3429       profile_call(recv, holder_known ? callee->holder() : NULL);
3430     }
3431     if (profile_inlined_calls()) {
3432       profile_invocation(callee, copy_state_before());
3433     }
3434   }
3435 
3436   // Introduce a new callee continuation point - if the callee has
3437   // more than one return instruction or the return does not allow
3438   // fall-through of control flow, all return instructions of the
3439   // callee will need to be replaced by Goto's pointing to this
3440   // continuation point.
3441   BlockBegin* cont = block_at(next_bci());
3442   bool continuation_existed = true;
3443   if (cont == NULL) {
3444     cont = new BlockBegin(next_bci());
3445     // low number so that continuation gets parsed as early as possible
3446     cont->set_depth_first_number(0);
3447 #ifndef PRODUCT
3448     if (PrintInitialBlockList) {
3449       tty->print_cr("CFG: created block %d (bci %d) as continuation for inline at bci %d",
3450                     cont->block_id(), cont->bci(), bci());
3451     }
3452 #endif
3453     continuation_existed = false;
3454   }
3455   // Record number of predecessors of continuation block before
3456   // inlining, to detect if inlined method has edges to its
3457   // continuation after inlining.
3458   int continuation_preds = cont->number_of_preds();
3459 
3460   // Push callee scope
3461   push_scope(callee, cont);
3462 
3463   // the BlockListBuilder for the callee could have bailed out
3464   CHECK_BAILOUT_(false);
3465 
3466   // Temporarily set up bytecode stream so we can append instructions
3467   // (only using the bci of this stream)
3468   scope_data()->set_stream(scope_data()->parent()->stream());
3469 
3470   // Pass parameters into callee state: add assignments
3471   // note: this will also ensure that all arguments are computed before being passed
3472   ValueStack* callee_state = state();
3473   ValueStack* caller_state = state()->caller_state();
3474   { int i = args_base;
3475     while (i < caller_state->stack_size()) {
3476       const int par_no = i - args_base;
3477       Value  arg = caller_state->stack_at_inc(i);
3478       // NOTE: take base() of arg->type() to avoid problems storing
3479       // constants
3480       store_local(callee_state, arg, arg->type()->base(), par_no);
3481     }
3482   }
3483 
3484   // Remove args from stack.
3485   // Note that we preserve locals state in case we can use it later
3486   // (see use of pop_scope() below)
3487   caller_state->truncate_stack(args_base);
3488   assert(callee_state->stack_size() == 0, "callee stack must be empty");
3489 
  Value lock = NULL;
  BlockBegin* sync_handler = NULL;
3492 
3493   // Inline the locking of the receiver if the callee is synchronized
3494   if (callee->is_synchronized()) {
3495     lock = callee->is_static() ? append(new Constant(new InstanceConstant(callee->holder()->java_mirror())))
3496                                : state()->local_at(0);
3497     sync_handler = new BlockBegin(SynchronizationEntryBCI);
3498     inline_sync_entry(lock, sync_handler);
3499   }
3500 
3501   if (compilation()->env()->dtrace_method_probes()) {
3502     Values* args = new Values(1);
3503     args->push(append(new Constant(new ObjectConstant(method()))));
3504     append(new RuntimeCall(voidType, "dtrace_method_entry", CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), args));
3505   }
3506 
3507   BlockBegin* callee_start_block = block_at(0);
3508   if (callee_start_block != NULL) {
3509     assert(callee_start_block->is_set(BlockBegin::parser_loop_header_flag), "must be loop header");
3510     Goto* goto_callee = new Goto(callee_start_block, false);
3511     // The state for this goto is in the scope of the callee, so use
3512     // the entry bci for the callee instead of the call site bci.
3513     append_with_bci(goto_callee, 0);
3514     _block->set_end(goto_callee);
3515     callee_start_block->merge(callee_state);
3516 
3517     _last = _block = callee_start_block;
3518 
3519     scope_data()->add_to_work_list(callee_start_block);
3520   }
3521 
3522   // Clear out bytecode stream
3523   scope_data()->set_stream(NULL);
3524 
3525   // Ready to resume parsing in callee (either in the same block we
3526   // were in before or in the callee's start block)
3527   iterate_all_blocks(callee_start_block == NULL);
3528 
3529   // If we bailed out during parsing, return immediately (this is bad news)
3530   if (bailed_out()) return false;
3531 
3532   // iterate_all_blocks theoretically traverses in random order; in
3533   // practice, we have only traversed the continuation if we are
3534   // inlining into a subroutine
3535   assert(continuation_existed ||
3536          !continuation()->is_set(BlockBegin::was_visited_flag),
3537          "continuation should not have been parsed yet if we created it");
3538 
3539   // If we bailed out during parsing, return immediately (this is bad news)
3540   CHECK_BAILOUT_(false);
3541 
3542   // At this point we are almost ready to return and resume parsing of
3543   // the caller back in the GraphBuilder. The only thing we want to do
3544   // first is an optimization: during parsing of the callee we
3545   // generated at least one Goto to the continuation block. If we
3546   // generated exactly one, and if the inlined method spanned exactly
3547   // one block (and we didn't have to Goto its entry), then we snip
3548   // off the Goto to the continuation, allowing control to fall
3549   // through back into the caller block and effectively performing
3550   // block merging. This allows load elimination and CSE to take place
3551   // across multiple callee scopes if they are relatively simple, and
3552   // is currently essential to making inlining profitable.
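  //
  // For example (illustration only), inlining a trivial getter
  //   int getX() { return x; }
  // produces exactly one return in the block we started in, so the Goto is
  // snipped and the LoadField simply continues the caller's block.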
3553   if (   num_returns() == 1
3554       && block() == orig_block
3555       && block() == inline_cleanup_block()) {
3556     _last = inline_cleanup_return_prev();
3557     _state = inline_cleanup_state();
3558   } else if (continuation_preds == cont->number_of_preds()) {
    // Inlining has made the instructions after the invoke in the
    // caller unreachable, so skip filling this block with
    // instructions.
    assert(cont == continuation(), "");
3563     assert(_last && _last->as_BlockEnd(), "");
3564     _skip_block = true;
3565   } else {
3566     // Resume parsing in continuation block unless it was already parsed.
3567     // Note that if we don't change _last here, iteration in
3568     // iterate_bytecodes_for_block will stop when we return.
3569     if (!continuation()->is_set(BlockBegin::was_visited_flag)) {
3570       // add continuation to work list instead of parsing it immediately
3571       assert(_last && _last->as_BlockEnd(), "");
3572       scope_data()->parent()->add_to_work_list(continuation());
3573       _skip_block = true;
3574     }
3575   }
3576 
3577   // Fill the exception handler for synchronized methods with instructions
3578   if (callee->is_synchronized() && sync_handler->state() != NULL) {
3579     fill_sync_handler(lock, sync_handler);
3580   } else {
3581     pop_scope();
3582   }
3583 
3584   compilation()->notice_inlined_method(callee);
3585 
3586   return true;
3587 }
3588 
3589 
3590 void GraphBuilder::inline_bailout(const char* msg) {
3591   assert(msg != NULL, "inline bailout msg must exist");
3592   _inline_bailout_msg = msg;
3593 }
3594 
3595 
3596 void GraphBuilder::clear_inline_bailout() {
3597   _inline_bailout_msg = NULL;
3598 }
3599 
3600 
3601 void GraphBuilder::push_root_scope(IRScope* scope, BlockList* bci2block, BlockBegin* start) {
3602   ScopeData* data = new ScopeData(NULL);
3603   data->set_scope(scope);
3604   data->set_bci2block(bci2block);
3605   _scope_data = data;
3606   _block = start;
3607 }
3608 
3609 
3610 void GraphBuilder::push_scope(ciMethod* callee, BlockBegin* continuation) {
3611   IRScope* callee_scope = new IRScope(compilation(), scope(), bci(), callee, -1, false);
3612   scope()->add_callee(callee_scope);
3613 
3614   BlockListBuilder blb(compilation(), callee_scope, -1);
3615   CHECK_BAILOUT();
3616 
3617   if (!blb.bci2block()->at(0)->is_set(BlockBegin::parser_loop_header_flag)) {
3618     // this scope can be inlined directly into the caller so remove
3619     // the block at bci 0.
3620     blb.bci2block()->at_put(0, NULL);
3621   }
3622 
3623   set_state(new ValueStack(callee_scope, state()->copy(ValueStack::CallerState, bci())));
3624 
3625   ScopeData* data = new ScopeData(scope_data());
3626   data->set_scope(callee_scope);
3627   data->set_bci2block(blb.bci2block());
3628   data->set_continuation(continuation);
3629   _scope_data = data;
3630 }
3631 
3632 
3633 void GraphBuilder::push_scope_for_jsr(BlockBegin* jsr_continuation, int jsr_dest_bci) {
3634   ScopeData* data = new ScopeData(scope_data());
3635   data->set_parsing_jsr();
3636   data->set_jsr_entry_bci(jsr_dest_bci);
3637   data->set_jsr_return_address_local(-1);
3638   // Must clone bci2block list as we will be mutating it in order to
3639   // properly clone all blocks in jsr region as well as exception
3640   // handlers containing rets
3641   BlockList* new_bci2block = new BlockList(bci2block()->length());
3642   new_bci2block->push_all(bci2block());
3643   data->set_bci2block(new_bci2block);
3644   data->set_scope(scope());
3645   data->setup_jsr_xhandlers();
3646   data->set_continuation(continuation());
3647   data->set_jsr_continuation(jsr_continuation);
3648   _scope_data = data;
3649 }
3650 
3651 
3652 void GraphBuilder::pop_scope() {
3653   int number_of_locks = scope()->number_of_locks();
3654   _scope_data = scope_data()->parent();
3655   // accumulate minimum number of monitor slots to be reserved
3656   scope()->set_min_number_of_locks(number_of_locks);
3657 }
3658 
3659 
3660 void GraphBuilder::pop_scope_for_jsr() {
3661   _scope_data = scope_data()->parent();
3662 }
3663 
3664 bool GraphBuilder::append_unsafe_get_obj(ciMethod* callee, BasicType t, bool is_volatile) {
3665   if (InlineUnsafeOps) {
3666     Values* args = state()->pop_arguments(callee->arg_size());
3667     null_check(args->at(0));
3668     Instruction* offset = args->at(2);
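    // Unsafe offsets are jlong; on 32-bit platforms, narrow the offset
    // to an int for address computation.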
3669 #ifndef _LP64
3670     offset = append(new Convert(Bytecodes::_l2i, offset, as_ValueType(T_INT)));
3671 #endif
3672     Instruction* op = append(new UnsafeGetObject(t, args->at(1), offset, is_volatile));
3673     push(op->type(), op);
3674     compilation()->set_has_unsafe_access(true);
3675   }
3676   return InlineUnsafeOps;
3677 }
3678 
3679 
3680 bool GraphBuilder::append_unsafe_put_obj(ciMethod* callee, BasicType t, bool is_volatile) {
3681   if (InlineUnsafeOps) {
3682     Values* args = state()->pop_arguments(callee->arg_size());
3683     null_check(args->at(0));
3684     Instruction* offset = args->at(2);
3685 #ifndef _LP64
3686     offset = append(new Convert(Bytecodes::_l2i, offset, as_ValueType(T_INT)));
3687 #endif
3688     Instruction* op = append(new UnsafePutObject(t, args->at(1), offset, args->at(3), is_volatile));
3689     compilation()->set_has_unsafe_access(true);
3690     kill_all();
3691   }
3692   return InlineUnsafeOps;
3693 }
3694 
3695 
3696 bool GraphBuilder::append_unsafe_get_raw(ciMethod* callee, BasicType t) {
3697   if (InlineUnsafeOps) {
3698     Values* args = state()->pop_arguments(callee->arg_size());
3699     null_check(args->at(0));
3700     Instruction* op = append(new UnsafeGetRaw(t, args->at(1), false));
3701     push(op->type(), op);
3702     compilation()->set_has_unsafe_access(true);
3703   }
3704   return InlineUnsafeOps;
3705 }
3706 
3707 
3708 bool GraphBuilder::append_unsafe_put_raw(ciMethod* callee, BasicType t) {
3709   if (InlineUnsafeOps) {
3710     Values* args = state()->pop_arguments(callee->arg_size());
3711     null_check(args->at(0));
3712     Instruction* op = append(new UnsafePutRaw(t, args->at(1), args->at(2)));
3713     compilation()->set_has_unsafe_access(true);
3714   }
3715   return InlineUnsafeOps;
3716 }
3717 
3718 
3719 bool GraphBuilder::append_unsafe_prefetch(ciMethod* callee, bool is_static, bool is_store) {
3720   if (InlineUnsafeOps) {
3721     Values* args = state()->pop_arguments(callee->arg_size());
3722     int obj_arg_index = 1; // Assume non-static case
3723     if (is_static) {
3724       obj_arg_index = 0;
3725     } else {
3726       null_check(args->at(0));
3727     }
3728     Instruction* offset = args->at(obj_arg_index + 1);
3729 #ifndef _LP64
3730     offset = append(new Convert(Bytecodes::_l2i, offset, as_ValueType(T_INT)));
3731 #endif
3732     Instruction* op = is_store ? append(new UnsafePrefetchWrite(args->at(obj_arg_index), offset))
3733                                : append(new UnsafePrefetchRead (args->at(obj_arg_index), offset));
3734     compilation()->set_has_unsafe_access(true);
3735   }
3736   return InlineUnsafeOps;
3737 }
3738 
3739 
3740 void GraphBuilder::append_unsafe_CAS(ciMethod* callee) {
3741   ValueStack* state_before = copy_state_for_exception();
3742   ValueType* result_type = as_ValueType(callee->return_type());
3743   assert(result_type->is_int(), "int result");
3744   Values* args = state()->pop_arguments(callee->arg_size());
3745 
  // Pop off some args to handle specially, then push them back
3747   Value newval = args->pop();
3748   Value cmpval = args->pop();
3749   Value offset = args->pop();
3750   Value src = args->pop();
3751   Value unsafe_obj = args->pop();
3752 
3753   // Separately handle the unsafe arg. It is not needed for code
3754   // generation, but must be null checked
3755   null_check(unsafe_obj);
3756 
3757 #ifndef _LP64
3758   offset = append(new Convert(Bytecodes::_l2i, offset, as_ValueType(T_INT)));
3759 #endif
3760 
3761   args->push(src);
3762   args->push(offset);
3763   args->push(cmpval);
3764   args->push(newval);
3765 
  // An unsafe CAS can alias with other field accesses, but we don't
  // know which ones, so mark the state as not preserved.  This will
  // cause CSE to invalidate memory across it.
3769   bool preserves_state = false;
3770   Intrinsic* result = new Intrinsic(result_type, callee->intrinsic_id(), args, false, state_before, preserves_state);
3771   append_split(result);
3772   push(result_type, result);
3773   compilation()->set_has_unsafe_access(true);
3774 }
3775 
3776 
3777 #ifndef PRODUCT
3778 void GraphBuilder::print_inline_result(ciMethod* callee, bool res) {
3779   CompileTask::print_inlining(callee, scope()->level(), bci(), _inline_bailout_msg);
3780   if (res && CIPrintMethodCodes) {
3781     callee->print_codes();
3782   }
3783 }
3784 
3785 
3786 void GraphBuilder::print_stats() {
3787   vmap()->print();
3788 }
3789 #endif // PRODUCT
3790 
3791 void GraphBuilder::profile_call(Value recv, ciKlass* known_holder) {
3792   append(new ProfileCall(method(), bci(), recv, known_holder));
3793 }
3794 
3795 void GraphBuilder::profile_invocation(ciMethod* callee, ValueStack* state) {
3796   append(new ProfileInvoke(callee, state));
3797 }