/*
 * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_CFGPrinter.hpp"
#include "c1/c1_Canonicalizer.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_GraphBuilder.hpp"
#include "c1/c1_InstructionPrinter.hpp"
#include "ci/ciField.hpp"
#include "ci/ciKlass.hpp"
#include "interpreter/bytecode.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/bitMap.inline.hpp"

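// A BlockListBuilder performs a single pass over the bytecodes of a scope to
// determine all basic block boundaries before the IR is built. It creates a
// BlockBegin for every block start, links normal and exceptional successor
// edges, records which locals are stored in each block (for selective phi
// creation), and marks loop headers. The resulting bci -> block mapping is
// consumed by the GraphBuilder.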
class BlockListBuilder VALUE_OBJ_CLASS_SPEC {
 private:
  Compilation* _compilation;
  IRScope*     _scope;

  BlockList    _blocks;                // internal list of all blocks
  BlockList*   _bci2block;             // mapping from bci to blocks for GraphBuilder

  // fields used by mark_loops
  BitMap       _active;                // for iteration of control flow graph
  BitMap       _visited;               // for iteration of control flow graph
  intArray     _loop_map;              // caches whether a block is contained in a loop
  int          _next_loop_index;       // next free loop number
  int          _next_block_number;     // for reverse postorder numbering of blocks

  // accessors
  Compilation*  compilation() const              { return _compilation; }
  IRScope*      scope() const                    { return _scope; }
  ciMethod*     method() const                   { return scope()->method(); }
  XHandlers*    xhandlers() const                { return scope()->xhandlers(); }

  // unified bailout support
  void          bailout(const char* msg) const   { compilation()->bailout(msg); }
  bool          bailed_out() const               { return compilation()->bailed_out(); }

  // helper functions
  BlockBegin* make_block_at(int bci, BlockBegin* predecessor);
  void handle_exceptions(BlockBegin* current, int cur_bci);
  void handle_jsr(BlockBegin* current, int sr_bci, int next_bci);
  void store_one(BlockBegin* current, int local);
  void store_two(BlockBegin* current, int local);
  void set_entries(int osr_bci);
  void set_leaders();

  void make_loop_header(BlockBegin* block);
  void mark_loops();
  int  mark_loops(BlockBegin* b, bool in_subroutine);

  // debugging
#ifndef PRODUCT
  void print();
#endif

 public:
  // creation
  BlockListBuilder(Compilation* compilation, IRScope* scope, int osr_bci);

  // accessors for GraphBuilder
  BlockList*    bci2block() const                { return _bci2block; }
};


// Implementation of BlockListBuilder

BlockListBuilder::BlockListBuilder(Compilation* compilation, IRScope* scope, int osr_bci)
 : _compilation(compilation)
 , _scope(scope)
 , _blocks(16)
 , _bci2block(new BlockList(scope->method()->code_size(), NULL))
 , _next_block_number(0)
 , _active()         // size not known yet
 , _visited()        // size not known yet
 , _next_loop_index(0)
 , _loop_map() // size not known yet
{
  set_entries(osr_bci);
  set_leaders();
  CHECK_BAILOUT();

  mark_loops();
  NOT_PRODUCT(if (PrintInitialBlockList) print());

#ifndef PRODUCT
  if (PrintCFGToFile) {
    stringStream title;
    title.print("BlockListBuilder ");
    scope->method()->print_name(&title);
    CFGPrinter::print_cfg(_bci2block, title.as_string(), false, false);
  }
#endif
}


void BlockListBuilder::set_entries(int osr_bci) {
  // generate start blocks
  BlockBegin* std_entry = make_block_at(0, NULL);
  if (scope()->caller() == NULL) {
    std_entry->set(BlockBegin::std_entry_flag);
  }
  if (osr_bci != -1) {
    BlockBegin* osr_entry = make_block_at(osr_bci, NULL);
    osr_entry->set(BlockBegin::osr_entry_flag);
  }

  // generate exception entry blocks
  XHandlers* list = xhandlers();
  const int n = list->length();
  for (int i = 0; i < n; i++) {
    XHandler* h = list->handler_at(i);
    BlockBegin* entry = make_block_at(h->handler_bci(), NULL);
    entry->set(BlockBegin::exception_entry_flag);
    h->set_entry_block(entry);
  }
}


BlockBegin* BlockListBuilder::make_block_at(int cur_bci, BlockBegin* predecessor) {
  assert(method()->bci_block_start().at(cur_bci), "wrong block starts of MethodLivenessAnalyzer");

  BlockBegin* block = _bci2block->at(cur_bci);
  if (block == NULL) {
    block = new BlockBegin(cur_bci);
    block->init_stores_to_locals(method()->max_locals());
    _bci2block->at_put(cur_bci, block);
    _blocks.append(block);

    assert(predecessor == NULL || predecessor->bci() < cur_bci, "targets for backward branches must already exist");
  }

  if (predecessor != NULL) {
    if (block->is_set(BlockBegin::exception_entry_flag)) {
      BAILOUT_("Exception handler can be reached by both normal and exceptional control flow", block);
    }

    predecessor->add_successor(block);
    block->increment_total_preds();
  }

  return block;
}


inline void BlockListBuilder::store_one(BlockBegin* current, int local) {
  current->stores_to_locals().set_bit(local);
}
inline void BlockListBuilder::store_two(BlockBegin* current, int local) {
  store_one(current, local);
  store_one(current, local + 1);
}


void BlockListBuilder::handle_exceptions(BlockBegin* current, int cur_bci) {
  // Draws edges from a block to its exception handlers
  XHandlers* list = xhandlers();
  const int n = list->length();

  for (int i = 0; i < n; i++) {
    XHandler* h = list->handler_at(i);

    if (h->covers(cur_bci)) {
      BlockBegin* entry = h->entry_block();
      assert(entry != NULL && entry == _bci2block->at(h->handler_bci()), "entry must be set");
      assert(entry->is_set(BlockBegin::exception_entry_flag), "flag must be set");

      // add each exception handler only once
      if (!current->is_successor(entry)) {
        current->add_successor(entry);
        entry->increment_total_preds();
      }

      // stop when reaching catchall
      if (h->catch_type() == 0) break;
    }
  }
}

void BlockListBuilder::handle_jsr(BlockBegin* current, int sr_bci, int next_bci) {
  // start a new block after the jsr bytecode and link this block into the cfg
  make_block_at(next_bci, current);

  // start a new block at the subroutine entry and mark it with a special flag
  BlockBegin* sr_block = make_block_at(sr_bci, current);
  if (!sr_block->is_set(BlockBegin::subroutine_entry_flag)) {
    sr_block->set(BlockBegin::subroutine_entry_flag);
  }
}


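// Iterate over the bytecodes once and create a block at every bci that starts
// a basic block. Successor edges are added for branches, switches, jsr/ret and
// exception handlers, and stores to locals are recorded per block so that phi
// functions can later be created only where they are needed.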
void BlockListBuilder::set_leaders() {
  bool has_xhandlers = xhandlers()->has_handlers();
  BlockBegin* current = NULL;

  // The information about which bcis start a new block simplifies the analysis.
  // Without it, backward branches could jump to a bci where no block was created
  // during bytecode iteration. This would require creating a new block at the
  // branch target and modifying the successor lists.
  BitMap bci_block_start = method()->bci_block_start();

  ciBytecodeStream s(method());
  while (s.next() != ciBytecodeStream::EOBC()) {
    int cur_bci = s.cur_bci();

    if (bci_block_start.at(cur_bci)) {
      current = make_block_at(cur_bci, current);
    }
    assert(current != NULL, "must have current block");

    if (has_xhandlers && GraphBuilder::can_trap(method(), s.cur_bc())) {
      handle_exceptions(current, cur_bci);
    }

    switch (s.cur_bc()) {
      // track stores to local variables for selective creation of phi functions
      case Bytecodes::_iinc:     store_one(current, s.get_index()); break;
      case Bytecodes::_istore:   store_one(current, s.get_index()); break;
      case Bytecodes::_lstore:   store_two(current, s.get_index()); break;
      case Bytecodes::_fstore:   store_one(current, s.get_index()); break;
      case Bytecodes::_dstore:   store_two(current, s.get_index()); break;
      case Bytecodes::_astore:   store_one(current, s.get_index()); break;
      case Bytecodes::_istore_0: store_one(current, 0); break;
      case Bytecodes::_istore_1: store_one(current, 1); break;
      case Bytecodes::_istore_2: store_one(current, 2); break;
      case Bytecodes::_istore_3: store_one(current, 3); break;
      case Bytecodes::_lstore_0: store_two(current, 0); break;
      case Bytecodes::_lstore_1: store_two(current, 1); break;
      case Bytecodes::_lstore_2: store_two(current, 2); break;
      case Bytecodes::_lstore_3: store_two(current, 3); break;
      case Bytecodes::_fstore_0: store_one(current, 0); break;
      case Bytecodes::_fstore_1: store_one(current, 1); break;
      case Bytecodes::_fstore_2: store_one(current, 2); break;
      case Bytecodes::_fstore_3: store_one(current, 3); break;
      case Bytecodes::_dstore_0: store_two(current, 0); break;
      case Bytecodes::_dstore_1: store_two(current, 1); break;
      case Bytecodes::_dstore_2: store_two(current, 2); break;
      case Bytecodes::_dstore_3: store_two(current, 3); break;
      case Bytecodes::_astore_0: store_one(current, 0); break;
      case Bytecodes::_astore_1: store_one(current, 1); break;
      case Bytecodes::_astore_2: store_one(current, 2); break;
      case Bytecodes::_astore_3: store_one(current, 3); break;

      // track bytecodes that affect the control flow
      case Bytecodes::_athrow:  // fall through
      case Bytecodes::_ret:     // fall through
      case Bytecodes::_ireturn: // fall through
      case Bytecodes::_lreturn: // fall through
      case Bytecodes::_freturn: // fall through
      case Bytecodes::_dreturn: // fall through
      case Bytecodes::_areturn: // fall through
      case Bytecodes::_return:
        current = NULL;
        break;

      case Bytecodes::_ifeq:      // fall through
      case Bytecodes::_ifne:      // fall through
      case Bytecodes::_iflt:      // fall through
      case Bytecodes::_ifge:      // fall through
      case Bytecodes::_ifgt:      // fall through
      case Bytecodes::_ifle:      // fall through
      case Bytecodes::_if_icmpeq: // fall through
      case Bytecodes::_if_icmpne: // fall through
      case Bytecodes::_if_icmplt: // fall through
      case Bytecodes::_if_icmpge: // fall through
      case Bytecodes::_if_icmpgt: // fall through
      case Bytecodes::_if_icmple: // fall through
      case Bytecodes::_if_acmpeq: // fall through
      case Bytecodes::_if_acmpne: // fall through
      case Bytecodes::_ifnull:    // fall through
      case Bytecodes::_ifnonnull:
        make_block_at(s.next_bci(), current);
        make_block_at(s.get_dest(), current);
        current = NULL;
        break;

      case Bytecodes::_goto:
        make_block_at(s.get_dest(), current);
        current = NULL;
        break;

      case Bytecodes::_goto_w:
        make_block_at(s.get_far_dest(), current);
        current = NULL;
        break;

      case Bytecodes::_jsr:
        handle_jsr(current, s.get_dest(), s.next_bci());
        current = NULL;
        break;

      case Bytecodes::_jsr_w:
        handle_jsr(current, s.get_far_dest(), s.next_bci());
        current = NULL;
        break;

      case Bytecodes::_tableswitch: {
        // set block for each case
        Bytecode_tableswitch sw(&s);
        int l = sw.length();
        for (int i = 0; i < l; i++) {
          make_block_at(cur_bci + sw.dest_offset_at(i), current);
        }
        make_block_at(cur_bci + sw.default_offset(), current);
        current = NULL;
        break;
      }

      case Bytecodes::_lookupswitch: {
        // set block for each case
        Bytecode_lookupswitch sw(&s);
        int l = sw.number_of_pairs();
        for (int i = 0; i < l; i++) {
          make_block_at(cur_bci + sw.pair_at(i).offset(), current);
        }
        make_block_at(cur_bci + sw.default_offset(), current);
        current = NULL;
        break;
      }
    }
  }
}


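// Loop marking: a depth-first traversal of the CFG detects loop headers as
// blocks that are reached again while they are still active (i.e. via a
// backward branch). Each loop header is assigned its own bit in _loop_map so
// that loop membership can be propagated to all contained blocks as a bit set.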
void BlockListBuilder::mark_loops() {
  ResourceMark rm;

  _active = BitMap(BlockBegin::number_of_blocks());         _active.clear();
  _visited = BitMap(BlockBegin::number_of_blocks());        _visited.clear();
  _loop_map = intArray(BlockBegin::number_of_blocks(), 0);
  _next_loop_index = 0;
  _next_block_number = _blocks.length();

  // recursively iterate the control flow graph
  mark_loops(_bci2block->at(0), false);
  assert(_next_block_number >= 0, "invalid block numbers");
}

void BlockListBuilder::make_loop_header(BlockBegin* block) {
  if (block->is_set(BlockBegin::exception_entry_flag)) {
    // exception edges may look like loops but don't mark them as such
    // since it screws up block ordering.
    return;
  }
  if (!block->is_set(BlockBegin::parser_loop_header_flag)) {
    block->set(BlockBegin::parser_loop_header_flag);

    assert(_loop_map.at(block->block_id()) == 0, "must not be set yet");
    assert(0 <= _next_loop_index && _next_loop_index < BitsPerInt, "_next_loop_index is used as a bit-index in integer");
    _loop_map.at_put(block->block_id(), 1 << _next_loop_index);
    if (_next_loop_index < 31) _next_loop_index++;
  } else {
    // block already marked as loop header
    assert(is_power_of_2((unsigned int)_loop_map.at(block->block_id())), "exactly one bit must be set");
  }
}

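// Returns the set of loops (encoded as a bit mask) that contain this block.
// The bit of a loop header is cleared from the propagated state once the
// traversal leaves the loop, so that enclosing blocks do not appear to be
// part of it.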
int BlockListBuilder::mark_loops(BlockBegin* block, bool in_subroutine) {
  int block_id = block->block_id();

  if (_visited.at(block_id)) {
    if (_active.at(block_id)) {
      // reached block via backward branch
      make_loop_header(block);
    }
    // return cached loop information for this block
    return _loop_map.at(block_id);
  }

  if (block->is_set(BlockBegin::subroutine_entry_flag)) {
    in_subroutine = true;
  }

  // set active and visited bits before successors are processed
  _visited.set_bit(block_id);
  _active.set_bit(block_id);

  intptr_t loop_state = 0;
  for (int i = block->number_of_sux() - 1; i >= 0; i--) {
    // recursively process all successors
    loop_state |= mark_loops(block->sux_at(i), in_subroutine);
  }

  // clear active-bit after all successors are processed
  _active.clear_bit(block_id);

  // reverse-post-order numbering of all blocks
  block->set_depth_first_number(_next_block_number);
  _next_block_number--;

  if (loop_state != 0 || in_subroutine ) {
    // block is contained in at least one loop, so phi functions are necessary
    // phi functions are also necessary for all locals stored in a subroutine
    scope()->requires_phi_function().set_union(block->stores_to_locals());
  }

  if (block->is_set(BlockBegin::parser_loop_header_flag)) {
    int header_loop_state = _loop_map.at(block_id);
    assert(is_power_of_2((unsigned)header_loop_state), "exactly one bit must be set");

    // If the highest bit is set (i.e. when integer value is negative), the method
    // has 32 or more loops. This bit is never cleared because it is used for multiple loops
    if (header_loop_state >= 0) {
      clear_bits(loop_state, header_loop_state);
    }
  }

  // cache and return loop information for this block
  _loop_map.at_put(block_id, loop_state);
  return loop_state;
}


#ifndef PRODUCT

int compare_depth_first(BlockBegin** a, BlockBegin** b) {
  return (*a)->depth_first_number() - (*b)->depth_first_number();
}

void BlockListBuilder::print() {
  tty->print("----- initial block list of BlockListBuilder for method ");
  method()->print_short_name();
  tty->cr();

  // better readability if blocks are sorted in processing order
  _blocks.sort(compare_depth_first);

  for (int i = 0; i < _blocks.length(); i++) {
    BlockBegin* cur = _blocks.at(i);
    tty->print("%4d: B%-4d bci: %-4d  preds: %-4d ", cur->depth_first_number(), cur->block_id(), cur->bci(), cur->total_preds());

    tty->print(cur->is_set(BlockBegin::std_entry_flag)               ? " std" : "    ");
    tty->print(cur->is_set(BlockBegin::osr_entry_flag)               ? " osr" : "    ");
    tty->print(cur->is_set(BlockBegin::exception_entry_flag)         ? " ex" : "   ");
    tty->print(cur->is_set(BlockBegin::subroutine_entry_flag)        ? " sr" : "   ");
    tty->print(cur->is_set(BlockBegin::parser_loop_header_flag)      ? " lh" : "   ");

    if (cur->number_of_sux() > 0) {
      tty->print("    sux: ");
      for (int j = 0; j < cur->number_of_sux(); j++) {
        BlockBegin* sux = cur->sux_at(j);
        tty->print("B%d ", sux->block_id());
      }
    }
    tty->cr();
  }
}

#endif


// A simple growable array of Values indexed by ciFields
class FieldBuffer: public CompilationResourceObj {
 private:
  GrowableArray<Value> _values;

 public:
  FieldBuffer() {}

  void kill() {
    _values.trunc_to(0);
  }

  Value at(ciField* field) {
    assert(field->holder()->is_loaded(), "must be a loaded field");
    int offset = field->offset();
    if (offset < _values.length()) {
      return _values.at(offset);
    } else {
      return NULL;
    }
  }

  void at_put(ciField* field, Value value) {
    assert(field->holder()->is_loaded(), "must be a loaded field");
    int offset = field->offset();
    _values.at_put_grow(offset, value, NULL);
  }

};


// MemoryBuffer is a fairly simple model of the current state of memory.
// It partitions memory into several pieces.  The first piece is
// generic memory where little is known about the owner of the memory.
// This is conceptually represented by the tuple <O, F, V> which says
// that the field F of object O has value V.  This is flattened so
// that F is represented by the offset of the field and the parallel
// arrays _objects and _values are used for O and V.  Loads of O.F can
// simply use V.  Newly allocated objects are kept in a separate list
// along with a parallel array for each object which represents the
// current value of its fields.  Stores of the default value to fields
// which have never been stored to before are eliminated since they
// are redundant.  Once newly allocated objects are stored into
// another object or they are passed out of the current compile they
// are treated like generic memory.

class MemoryBuffer: public CompilationResourceObj {
 private:
  FieldBuffer                 _values;
  GrowableArray<Value>        _objects;
  GrowableArray<Value>        _newobjects;
  GrowableArray<FieldBuffer*> _fields;

 public:
  MemoryBuffer() {}

  StoreField* store(StoreField* st) {
    if (!EliminateFieldAccess) {
      return st;
    }

    Value object = st->obj();
    Value value = st->value();
    ciField* field = st->field();
    if (field->holder()->is_loaded()) {
      int offset = field->offset();
      int index = _newobjects.find(object);
      if (index != -1) {
        // newly allocated object with no other stores performed on this field
        FieldBuffer* buf = _fields.at(index);
        if (buf->at(field) == NULL && is_default_value(value)) {
#ifndef PRODUCT
          if (PrintIRDuringConstruction && Verbose) {
            tty->print_cr("Eliminated store for object %d:", index);
            st->print_line();
          }
#endif
          return NULL;
        } else {
          buf->at_put(field, value);
        }
      } else {
        _objects.at_put_grow(offset, object, NULL);
        _values.at_put(field, value);
      }

      store_value(value);
    } else {
      // if we held onto field names we could alias based on names but
      // we don't know what's being stored to so kill it all.
      kill();
    }
    return st;
  }


  // return true if this value corresponds to the default value of a field.
  bool is_default_value(Value value) {
    Constant* con = value->as_Constant();
    if (con) {
      switch (con->type()->tag()) {
        case intTag:    return con->type()->as_IntConstant()->value() == 0;
        case longTag:   return con->type()->as_LongConstant()->value() == 0;
        case floatTag:  return jint_cast(con->type()->as_FloatConstant()->value()) == 0;
        case doubleTag: return jlong_cast(con->type()->as_DoubleConstant()->value()) == jlong_cast(0);
        case objectTag: return con->type() == objectNull;
        default:  ShouldNotReachHere();
      }
    }
    return false;
  }


  // return either the actual value of a load or the load itself
  Value load(LoadField* load) {
    if (!EliminateFieldAccess) {
      return load;
    }

    if (RoundFPResults && UseSSE < 2 && load->type()->is_float_kind()) {
      // can't skip load since value might get rounded as a side effect
      return load;
    }

    ciField* field = load->field();
    Value object   = load->obj();
    if (field->holder()->is_loaded() && !field->is_volatile()) {
      int offset = field->offset();
      Value result = NULL;
      int index = _newobjects.find(object);
      if (index != -1) {
        result = _fields.at(index)->at(field);
      } else if (_objects.at_grow(offset, NULL) == object) {
        result = _values.at(field);
      }
      if (result != NULL) {
#ifndef PRODUCT
        if (PrintIRDuringConstruction && Verbose) {
          tty->print_cr("Eliminated load: ");
          load->print_line();
        }
#endif
        assert(result->type()->tag() == load->type()->tag(), "wrong types");
        return result;
      }
    }
    return load;
  }

  // Record this newly allocated object
  void new_instance(NewInstance* object) {
    int index = _newobjects.length();
    _newobjects.append(object);
    if (_fields.at_grow(index, NULL) == NULL) {
      _fields.at_put(index, new FieldBuffer());
    } else {
      _fields.at(index)->kill();
    }
  }

  void store_value(Value value) {
    int index = _newobjects.find(value);
    if (index != -1) {
      // stored a newly allocated object into another object.
      // Assume we've lost track of it as a separate slice of memory.
      // We could do better by keeping track of whether individual
      // fields could alias each other.
      _newobjects.remove_at(index);
      // pull out the field info and store it at the end of the list
      // of field buffers so it can be reused later.
      _fields.append(_fields.at(index));
      _fields.remove_at(index);
    }
  }

  void kill() {
    _newobjects.trunc_to(0);
    _objects.trunc_to(0);
    _values.kill();
  }
};


// Implementation of GraphBuilder's ScopeData

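// ScopeData holds the per-scope parsing state of the GraphBuilder: the
// bytecode stream, the bci -> block mapping, the work list of blocks still to
// be parsed, inlining bookkeeping (continuation block, cleanup info, number
// of returns) and jsr-related state. A new ScopeData is used for every
// inlined scope and for every jsr being parsed.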
GraphBuilder::ScopeData::ScopeData(ScopeData* parent)
  : _parent(parent)
  , _bci2block(NULL)
  , _scope(NULL)
  , _has_handler(false)
  , _stream(NULL)
  , _work_list(NULL)
  , _parsing_jsr(false)
  , _jsr_xhandlers(NULL)
  , _caller_stack_size(-1)
  , _continuation(NULL)
  , _num_returns(0)
  , _cleanup_block(NULL)
  , _cleanup_return_prev(NULL)
  , _cleanup_state(NULL)
{
  if (parent != NULL) {
    _max_inline_size = (intx) ((float) NestedInliningSizeRatio * (float) parent->max_inline_size() / 100.0f);
  } else {
    _max_inline_size = MaxInlineSize;
  }
  if (_max_inline_size < MaxTrivialSize) {
    _max_inline_size = MaxTrivialSize;
  }
}


void GraphBuilder::kill_all() {
  if (UseLocalValueNumbering) {
    vmap()->kill_all();
  }
  _memory->kill();
}


BlockBegin* GraphBuilder::ScopeData::block_at(int bci) {
  if (parsing_jsr()) {
    // It is necessary to clone all blocks associated with a
    // subroutine, including those for exception handlers in the scope
    // of the method containing the jsr (because those exception
    // handlers may contain ret instructions in some cases).
    BlockBegin* block = bci2block()->at(bci);
    if (block != NULL && block == parent()->bci2block()->at(bci)) {
      BlockBegin* new_block = new BlockBegin(block->bci());
#ifndef PRODUCT
      if (PrintInitialBlockList) {
        tty->print_cr("CFG: cloned block %d (bci %d) as block %d for jsr",
                      block->block_id(), block->bci(), new_block->block_id());
      }
#endif
      // copy data from the original block to its clone
      new_block->set_depth_first_number(block->depth_first_number());
      if (block->is_set(BlockBegin::parser_loop_header_flag)) new_block->set(BlockBegin::parser_loop_header_flag);
      // Preserve certain flags for assertion checking
      if (block->is_set(BlockBegin::subroutine_entry_flag)) new_block->set(BlockBegin::subroutine_entry_flag);
      if (block->is_set(BlockBegin::exception_entry_flag))  new_block->set(BlockBegin::exception_entry_flag);

      // copy was_visited_flag to allow early detection of bailouts
      // if a block that is used in a jsr has already been visited before,
      // it is shared between the normal control flow and a subroutine
      // BlockBegin::try_merge returns false when the flag is set; this leads
      // to a compilation bailout
      if (block->is_set(BlockBegin::was_visited_flag))  new_block->set(BlockBegin::was_visited_flag);

      bci2block()->at_put(bci, new_block);
      block = new_block;
    }
    return block;
  } else {
    return bci2block()->at(bci);
  }
}


XHandlers* GraphBuilder::ScopeData::xhandlers() const {
  if (_jsr_xhandlers == NULL) {
    assert(!parsing_jsr(), "");
    return scope()->xhandlers();
  }
  assert(parsing_jsr(), "");
  return _jsr_xhandlers;
}


void GraphBuilder::ScopeData::set_scope(IRScope* scope) {
  _scope = scope;
  bool parent_has_handler = false;
  if (parent() != NULL) {
    parent_has_handler = parent()->has_handler();
  }
  _has_handler = parent_has_handler || scope->xhandlers()->has_handlers();
}


void GraphBuilder::ScopeData::set_inline_cleanup_info(BlockBegin* block,
                                                      Instruction* return_prev,
                                                      ValueStack* return_state) {
  _cleanup_block       = block;
  _cleanup_return_prev = return_prev;
  _cleanup_state       = return_state;
}


void GraphBuilder::ScopeData::add_to_work_list(BlockBegin* block) {
  if (_work_list == NULL) {
    _work_list = new BlockList();
  }

  if (!block->is_set(BlockBegin::is_on_work_list_flag)) {
    // Do not start parsing the continuation block while in a
    // sub-scope
    if (parsing_jsr()) {
      if (block == jsr_continuation()) {
        return;
      }
    } else {
      if (block == continuation()) {
        return;
      }
    }
    block->set(BlockBegin::is_on_work_list_flag);
    _work_list->push(block);

    sort_top_into_worklist(_work_list, block);
  }
}


void GraphBuilder::sort_top_into_worklist(BlockList* worklist, BlockBegin* top) {
  assert(worklist->top() == top, "");
  // keep the work list sorted by descending depth-first number so that
  // pop() returns the block with the smallest number first
  const int dfn = top->depth_first_number();
  assert(dfn != -1, "unknown depth first number");
  int i = worklist->length()-2;
  while (i >= 0) {
    BlockBegin* b = worklist->at(i);
    if (b->depth_first_number() < dfn) {
      worklist->at_put(i+1, b);
    } else {
      break;
    }
    i --;
  }
  if (i >= -1) worklist->at_put(i + 1, top);
}


BlockBegin* GraphBuilder::ScopeData::remove_from_work_list() {
  if (is_work_list_empty()) {
    return NULL;
  }
  return _work_list->pop();
}


bool GraphBuilder::ScopeData::is_work_list_empty() const {
  return (_work_list == NULL || _work_list->length() == 0);
}


void GraphBuilder::ScopeData::setup_jsr_xhandlers() {
  assert(parsing_jsr(), "");
  // clone all the exception handlers from the scope
  XHandlers* handlers = new XHandlers(scope()->xhandlers());
  const int n = handlers->length();
  for (int i = 0; i < n; i++) {
    // The XHandlers need to be adjusted to dispatch to the cloned
    // handler block instead of the default one but the synthetic
    // unlocker needs to be handled specially.  The synthetic unlocker
    // should be left alone since there can be only one and all code
    // should dispatch to the same one.
    XHandler* h = handlers->handler_at(i);
    assert(h->handler_bci() != SynchronizationEntryBCI, "must be real");
    h->set_entry_block(block_at(h->handler_bci()));
  }
  _jsr_xhandlers = handlers;
}


int GraphBuilder::ScopeData::num_returns() {
  if (parsing_jsr()) {
    return parent()->num_returns();
  }
  return _num_returns;
}


void GraphBuilder::ScopeData::incr_num_returns() {
  if (parsing_jsr()) {
    parent()->incr_num_returns();
  } else {
    ++_num_returns;
  }
}


// Implementation of GraphBuilder

#define INLINE_BAILOUT(msg)        { inline_bailout(msg); return false; }


void GraphBuilder::load_constant() {
  ciConstant con = stream()->get_constant();
  if (con.basic_type() == T_ILLEGAL) {
    BAILOUT("could not resolve a constant");
  } else {
    ValueType* t = illegalType;
    ValueStack* patch_state = NULL;
    switch (con.basic_type()) {
      case T_BOOLEAN: t = new IntConstant     (con.as_boolean()); break;
      case T_BYTE   : t = new IntConstant     (con.as_byte   ()); break;
      case T_CHAR   : t = new IntConstant     (con.as_char   ()); break;
      case T_SHORT  : t = new IntConstant     (con.as_short  ()); break;
      case T_INT    : t = new IntConstant     (con.as_int    ()); break;
      case T_LONG   : t = new LongConstant    (con.as_long   ()); break;
      case T_FLOAT  : t = new FloatConstant   (con.as_float  ()); break;
      case T_DOUBLE : t = new DoubleConstant  (con.as_double ()); break;
      case T_ARRAY  : t = new ArrayConstant   (con.as_object ()->as_array   ()); break;
      case T_OBJECT :
       {
        ciObject* obj = con.as_object();
        if (!obj->is_loaded()
            || (PatchALot && obj->klass() != ciEnv::current()->String_klass())) {
          patch_state = copy_state_before();
          t = new ObjectConstant(obj);
        } else {
          assert(!obj->is_klass(), "must be java_mirror of klass");
          t = new InstanceConstant(obj->as_instance());
        }
        break;
       }
      default       : ShouldNotReachHere();
    }
    Value x;
    if (patch_state != NULL) {
      x = new Constant(t, patch_state);
    } else {
      x = new Constant(t);
    }
    push(t, append(x));
  }
}


void GraphBuilder::load_local(ValueType* type, int index) {
  Value x = state()->local_at(index);
  assert(x != NULL && !x->type()->is_illegal(), "access of illegal local variable");
  push(type, x);
}


void GraphBuilder::store_local(ValueType* type, int index) {
  Value x = pop(type);
  store_local(state(), x, type, index);
}


void GraphBuilder::store_local(ValueStack* state, Value x, ValueType* type, int index) {
  if (parsing_jsr()) {
    // We need to do additional tracking of the location of the return
    // address for jsrs since we don't handle arbitrary jsr/ret
    // constructs. Here we are figuring out in which circumstances we
    // need to bail out.
    if (x->type()->is_address()) {
      scope_data()->set_jsr_return_address_local(index);

      // Also check parent jsrs (if any) at this time to see whether
      // they are using this local. We don't handle skipping over a
      // ret.
      for (ScopeData* cur_scope_data = scope_data()->parent();
           cur_scope_data != NULL && cur_scope_data->parsing_jsr() && cur_scope_data->scope() == scope();
           cur_scope_data = cur_scope_data->parent()) {
        if (cur_scope_data->jsr_return_address_local() == index) {
          BAILOUT("subroutine overwrites return address from previous subroutine");
        }
      }
    } else if (index == scope_data()->jsr_return_address_local()) {
      scope_data()->set_jsr_return_address_local(-1);
    }
  }

  state->store_local(index, round_fp(x));
}


void GraphBuilder::load_indexed(BasicType type) {
  ValueStack* state_before = copy_state_for_exception();
  Value index = ipop();
  Value array = apop();
  Value length = NULL;
  if (CSEArrayLength ||
      (array->as_AccessField() && array->as_AccessField()->field()->is_constant()) ||
      (array->as_NewArray() && array->as_NewArray()->length() && array->as_NewArray()->length()->type()->is_constant())) {
    length = append(new ArrayLength(array, state_before));
  }
  push(as_ValueType(type), append(new LoadIndexed(array, index, length, type, state_before)));
}


void GraphBuilder::store_indexed(BasicType type) {
  ValueStack* state_before = copy_state_for_exception();
  Value value = pop(as_ValueType(type));
  Value index = ipop();
  Value array = apop();
  Value length = NULL;
  if (CSEArrayLength ||
      (array->as_AccessField() && array->as_AccessField()->field()->is_constant()) ||
      (array->as_NewArray() && array->as_NewArray()->length() && array->as_NewArray()->length()->type()->is_constant())) {
    length = append(new ArrayLength(array, state_before));
  }
  StoreIndexed* result = new StoreIndexed(array, index, length, type, value, state_before);
  append(result);
  _memory->store_value(value);

  if (type == T_OBJECT && is_profiling()) {
    // Note that we'd collect profile data in this method if we wanted it.
    compilation()->set_would_profile(true);

    if (profile_checkcasts()) {
      result->set_profiled_method(method());
      result->set_profiled_bci(bci());
      result->set_should_profile(true);
    }
  }
}


void GraphBuilder::stack_op(Bytecodes::Code code) {
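  // The dup/swap bytecodes are modeled as pure shuffles of raw stack slots.
  // Two-slot values (long, double) occupy two raw slots on the value stack,
  // so moving the slots individually matches the stack transitions defined
  // by the JVM specification for both value categories.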
  switch (code) {
    case Bytecodes::_pop:
      { state()->raw_pop();
      }
      break;
    case Bytecodes::_pop2:
      { state()->raw_pop();
        state()->raw_pop();
      }
      break;
    case Bytecodes::_dup:
      { Value w = state()->raw_pop();
        state()->raw_push(w);
        state()->raw_push(w);
      }
      break;
    case Bytecodes::_dup_x1:
      { Value w1 = state()->raw_pop();
        Value w2 = state()->raw_pop();
        state()->raw_push(w1);
        state()->raw_push(w2);
        state()->raw_push(w1);
      }
      break;
    case Bytecodes::_dup_x2:
      { Value w1 = state()->raw_pop();
        Value w2 = state()->raw_pop();
        Value w3 = state()->raw_pop();
        state()->raw_push(w1);
        state()->raw_push(w3);
        state()->raw_push(w2);
        state()->raw_push(w1);
      }
      break;
    case Bytecodes::_dup2:
      { Value w1 = state()->raw_pop();
        Value w2 = state()->raw_pop();
        state()->raw_push(w2);
        state()->raw_push(w1);
        state()->raw_push(w2);
        state()->raw_push(w1);
      }
      break;
    case Bytecodes::_dup2_x1:
      { Value w1 = state()->raw_pop();
        Value w2 = state()->raw_pop();
        Value w3 = state()->raw_pop();
        state()->raw_push(w2);
        state()->raw_push(w1);
        state()->raw_push(w3);
        state()->raw_push(w2);
        state()->raw_push(w1);
      }
      break;
    case Bytecodes::_dup2_x2:
      { Value w1 = state()->raw_pop();
        Value w2 = state()->raw_pop();
        Value w3 = state()->raw_pop();
        Value w4 = state()->raw_pop();
        state()->raw_push(w2);
        state()->raw_push(w1);
        state()->raw_push(w4);
        state()->raw_push(w3);
        state()->raw_push(w2);
        state()->raw_push(w1);
      }
      break;
    case Bytecodes::_swap:
      { Value w1 = state()->raw_pop();
        Value w2 = state()->raw_pop();
        state()->raw_push(w1);
        state()->raw_push(w2);
      }
      break;
    default:
      ShouldNotReachHere();
      break;
  }
}


void GraphBuilder::arithmetic_op(ValueType* type, Bytecodes::Code code, ValueStack* state_before) {
  Value y = pop(type);
  Value x = pop(type);
  // NOTE: strictfp can be queried from current method since we don't
  // inline methods with differing strictfp bits
  Value res = new ArithmeticOp(code, x, y, method()->is_strict(), state_before);
  // Note: currently single-precision floating-point rounding on Intel is handled at the LIRGenerator level
  res = append(res);
  if (method()->is_strict()) {
    res = round_fp(res);
  }
  push(type, res);
}


void GraphBuilder::negate_op(ValueType* type) {
  push(type, append(new NegateOp(pop(type))));
}


void GraphBuilder::shift_op(ValueType* type, Bytecodes::Code code) {
  Value s = ipop();
  Value x = pop(type);
  // try to simplify
  // Note: This code should go into the canonicalizer as soon as it can
  //       handle canonicalized forms that contain more than one node.
  if (CanonicalizeNodes && code == Bytecodes::_iushr) {
    // pattern: x >>> s
    IntConstant* s1 = s->type()->as_IntConstant();
    if (s1 != NULL) {
      // pattern: x >>> s1, with s1 constant
      ShiftOp* l = x->as_ShiftOp();
      if (l != NULL && l->op() == Bytecodes::_ishl) {
        // pattern: (a << b) >>> s1
        IntConstant* s0 = l->y()->type()->as_IntConstant();
        if (s0 != NULL) {
          // pattern: (a << s0) >>> s1
          const int s0c = s0->value() & 0x1F; // only the low 5 bits are significant for shifts
          const int s1c = s1->value() & 0x1F; // only the low 5 bits are significant for shifts
          if (s0c == s1c) {
            if (s0c == 0) {
              // pattern: (a << 0) >>> 0 => simplify to: a
              ipush(l->x());
            } else {
              // pattern: (a << s0c) >>> s0c => simplify to: a & m, with m constant
              assert(0 < s0c && s0c < BitsPerInt, "adjust code below to handle corner cases");
              const int m = (1 << (BitsPerInt - s0c)) - 1;
              Value s = append(new Constant(new IntConstant(m)));
              ipush(append(new LogicOp(Bytecodes::_iand, l->x(), s)));
            }
            return;
          }
        }
      }
    }
  }
  // could not simplify
  push(type, append(new ShiftOp(code, x, s)));
}


void GraphBuilder::logic_op(ValueType* type, Bytecodes::Code code) {
  Value y = pop(type);
  Value x = pop(type);
  push(type, append(new LogicOp(code, x, y)));
}


void GraphBuilder::compare_op(ValueType* type, Bytecodes::Code code) {
  ValueStack* state_before = copy_state_before();
  Value y = pop(type);
  Value x = pop(type);
  ipush(append(new CompareOp(code, x, y, state_before)));
}


void GraphBuilder::convert(Bytecodes::Code op, BasicType from, BasicType to) {
  push(as_ValueType(to), append(new Convert(op, pop(as_ValueType(from)), as_ValueType(to))));
}


void GraphBuilder::increment() {
  int index = stream()->get_index();
  int delta = stream()->is_wide() ? (signed short)Bytes::get_Java_u2(stream()->cur_bcp() + 4) : (signed char)(stream()->cur_bcp()[2]);
  load_local(intType, index);
  ipush(append(new Constant(new IntConstant(delta))));
  arithmetic_op(intType, Bytecodes::_iadd);
  store_local(intType, index);
}


void GraphBuilder::_goto(int from_bci, int to_bci) {
  Goto *x = new Goto(block_at(to_bci), to_bci <= from_bci);
  if (is_profiling()) {
    compilation()->set_would_profile(true);
  }
  if (profile_branches()) {
    x->set_profiled_method(method());
    x->set_profiled_bci(bci());
    x->set_should_profile(true);
  }
  append(x);
}


void GraphBuilder::if_node(Value x, If::Condition cond, Value y, ValueStack* state_before) {
  BlockBegin* tsux = block_at(stream()->get_dest());
  BlockBegin* fsux = block_at(stream()->next_bci());
  bool is_bb = tsux->bci() < stream()->cur_bci() || fsux->bci() < stream()->cur_bci();
  Instruction *i = append(new If(x, cond, false, y, tsux, fsux, is_bb ? state_before : NULL, is_bb));

  if (is_profiling()) {
    If* if_node = i->as_If();
    if (if_node != NULL) {
      // Note that we'd collect profile data in this method if we wanted it.
      compilation()->set_would_profile(true);
      // At level 2 we need the proper bci to count backedges
      if_node->set_profiled_bci(bci());
      if (profile_branches()) {
        // Successors can be rotated by the canonicalizer, check for this case.
        if_node->set_profiled_method(method());
        if_node->set_should_profile(true);
        if (if_node->tsux() == fsux) {
          if_node->set_swapped(true);
        }
      }
      return;
    }

    // Check if this If was reduced to Goto.
    Goto *goto_node = i->as_Goto();
    if (goto_node != NULL) {
      compilation()->set_would_profile(true);
      if (profile_branches()) {
        goto_node->set_profiled_method(method());
        goto_node->set_profiled_bci(bci());
        goto_node->set_should_profile(true);
        // Find out which successor is used.
        if (goto_node->default_sux() == tsux) {
          goto_node->set_direction(Goto::taken);
        } else if (goto_node->default_sux() == fsux) {
          goto_node->set_direction(Goto::not_taken);
        } else {
          ShouldNotReachHere();
        }
      }
      return;
    }
  }
}


void GraphBuilder::if_zero(ValueType* type, If::Condition cond) {
  Value y = append(new Constant(intZero));
  ValueStack* state_before = copy_state_before();
  Value x = ipop();
  if_node(x, cond, y, state_before);
}


void GraphBuilder::if_null(ValueType* type, If::Condition cond) {
  Value y = append(new Constant(objectNull));
  ValueStack* state_before = copy_state_before();
  Value x = apop();
  if_node(x, cond, y, state_before);
}


void GraphBuilder::if_same(ValueType* type, If::Condition cond) {
  ValueStack* state_before = copy_state_before();
  Value y = pop(type);
  Value x = pop(type);
  if_node(x, cond, y, state_before);
}


void GraphBuilder::jsr(int dest) {
  // We only handle well-formed jsrs (those which are "block-structured").
  // If the bytecodes are strange (jumping out of a jsr block) then we
  // might end up trying to re-parse a block containing a jsr which
  // has already been activated. Watch for this case and bail out.
  for (ScopeData* cur_scope_data = scope_data();
       cur_scope_data != NULL && cur_scope_data->parsing_jsr() && cur_scope_data->scope() == scope();
       cur_scope_data = cur_scope_data->parent()) {
    if (cur_scope_data->jsr_entry_bci() == dest) {
      BAILOUT("too-complicated jsr/ret structure");
    }
  }

  push(addressType, append(new Constant(new AddressConstant(next_bci()))));
  if (!try_inline_jsr(dest)) {
    return; // bailed out while parsing and inlining subroutine
  }
}


void GraphBuilder::ret(int local_index) {
  if (!parsing_jsr()) BAILOUT("ret encountered while not parsing subroutine");

  if (local_index != scope_data()->jsr_return_address_local()) {
    BAILOUT("can not handle complicated jsr/ret constructs");
  }

  // Rets simply become (NON-SAFEPOINT) gotos to the jsr continuation
  append(new Goto(scope_data()->jsr_continuation(), false));
}


void GraphBuilder::table_switch() {
  Bytecode_tableswitch sw(stream());
  const int l = sw.length();
  if (CanonicalizeNodes && l == 1) {
    // total of 2 successors => use If instead of switch
    // Note: This code should go into the canonicalizer as soon as it can
    //       handle canonicalized forms that contain more than one node.
    Value key = append(new Constant(new IntConstant(sw.low_key())));
    BlockBegin* tsux = block_at(bci() + sw.dest_offset_at(0));
    BlockBegin* fsux = block_at(bci() + sw.default_offset());
    bool is_bb = tsux->bci() < bci() || fsux->bci() < bci();
    ValueStack* state_before = is_bb ? copy_state_before() : NULL;
    append(new If(ipop(), If::eql, true, key, tsux, fsux, state_before, is_bb));
  } else {
    // collect successors
    BlockList* sux = new BlockList(l + 1, NULL);
    int i;
    bool has_bb = false;
    for (i = 0; i < l; i++) {
      sux->at_put(i, block_at(bci() + sw.dest_offset_at(i)));
      if (sw.dest_offset_at(i) < 0) has_bb = true;
    }
    // add default successor
    sux->at_put(i, block_at(bci() + sw.default_offset()));
    ValueStack* state_before = has_bb ? copy_state_before() : NULL;
    append(new TableSwitch(ipop(), sux, sw.low_key(), state_before, has_bb));
  }
}


void GraphBuilder::lookup_switch() {
  Bytecode_lookupswitch sw(stream());
  const int l = sw.number_of_pairs();
  if (CanonicalizeNodes && l == 1) {
    // total of 2 successors => use If instead of switch
    // Note: This code should go into the canonicalizer as soon as it can
    //       handle canonicalized forms that contain more than one node.
    // simplify to If
    LookupswitchPair pair = sw.pair_at(0);
    Value key = append(new Constant(new IntConstant(pair.match())));
    BlockBegin* tsux = block_at(bci() + pair.offset());
    BlockBegin* fsux = block_at(bci() + sw.default_offset());
    bool is_bb = tsux->bci() < bci() || fsux->bci() < bci();
    ValueStack* state_before = is_bb ? copy_state_before() : NULL;
    append(new If(ipop(), If::eql, true, key, tsux, fsux, state_before, is_bb));
  } else {
    // collect successors & keys
    BlockList* sux = new BlockList(l + 1, NULL);
    intArray* keys = new intArray(l, 0);
    int i;
    bool has_bb = false;
    for (i = 0; i < l; i++) {
      LookupswitchPair pair = sw.pair_at(i);
      if (pair.offset() < 0) has_bb = true;
      sux->at_put(i, block_at(bci() + pair.offset()));
      keys->at_put(i, pair.match());
    }
    // add default successor
    sux->at_put(i, block_at(bci() + sw.default_offset()));
    ValueStack* state_before = has_bb ? copy_state_before() : NULL;
    append(new LookupSwitch(ipop(), sux, keys, state_before, has_bb));
  }
}

void GraphBuilder::call_register_finalizer() {
  // If the receiver requires finalization then emit code to perform
  // the registration on return.

  // Gather some type information about the receiver
  Value receiver = state()->local_at(0);
  assert(receiver != NULL, "must have a receiver");
  ciType* declared_type = receiver->declared_type();
  ciType* exact_type = receiver->exact_type();
  if (exact_type == NULL &&
      receiver->as_Local() &&
      receiver->as_Local()->java_index() == 0) {
    ciInstanceKlass* ik = compilation()->method()->holder();
    if (ik->is_final()) {
      exact_type = ik;
    } else if (UseCHA && !(ik->has_subklass() || ik->is_interface())) {
      // test class is leaf class
      compilation()->dependency_recorder()->assert_leaf_type(ik);
      exact_type = ik;
    } else {
      declared_type = ik;
    }
  }

  // see if we know statically that registration isn't required
  bool needs_check = true;
  if (exact_type != NULL) {
    needs_check = exact_type->as_instance_klass()->has_finalizer();
  } else if (declared_type != NULL) {
    ciInstanceKlass* ik = declared_type->as_instance_klass();
    if (!Dependencies::has_finalizable_subclass(ik)) {
      compilation()->dependency_recorder()->assert_has_no_finalizable_subclasses(ik);
      needs_check = false;
    }
  }

  if (needs_check) {
    // Perform the registration of finalizable objects.
    ValueStack* state_before = copy_state_for_exception();
    load_local(objectType, 0);
    append_split(new Intrinsic(voidType, vmIntrinsics::_Object_init,
                               state()->pop_arguments(1),
                               true, state_before, true));
  }
}


void GraphBuilder::method_return(Value x) {
  if (RegisterFinalizersAtInit &&
      method()->intrinsic_id() == vmIntrinsics::_Object_init) {
    call_register_finalizer();
  }

  // Check to see whether we are inlining. If so, Return
  // instructions become Gotos to the continuation point.
  if (continuation() != NULL) {
    assert(!method()->is_synchronized() || InlineSynchronizedMethods, "can not inline synchronized methods yet");

    if (compilation()->env()->dtrace_method_probes()) {
      // Report exit from inline methods
      Values* args = new Values(1);
      args->push(append(new Constant(new ObjectConstant(method()))));
      append(new RuntimeCall(voidType, "dtrace_method_exit", CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), args));
    }

    // If the inlined method is synchronized, the monitor must be
    // released before we jump to the continuation block.
    if (method()->is_synchronized()) {
      assert(state()->locks_size() == 1, "receiver must be locked here");
      monitorexit(state()->lock_at(0), SynchronizationEntryBCI);
    }

    // The state at the end of the inlined method is the caller's state
    // without the method parameters on the stack, plus the return
    // value, if any, of the inlined method on the operand stack.
1416     set_state(state()->caller_state()->copy_for_parsing());
1417     if (x != NULL) {
1418       state()->push(x->type(), x);
1419     }
1420     Goto* goto_callee = new Goto(continuation(), false);
1421 
1422     // See whether this is the first return; if so, store off some
1423     // of the state for later examination
1424     if (num_returns() == 0) {
1425       set_inline_cleanup_info(_block, _last, state());
1426     }
1427 
1428     // The current bci() is in the wrong scope, so use the bci() of
1429     // the continuation point.
1430     append_with_bci(goto_callee, scope_data()->continuation()->bci());
1431     incr_num_returns();
1432 
1433     return;
1434   }
1435 
1436   state()->truncate_stack(0);
1437   if (method()->is_synchronized()) {
1438     // perform the unlocking before exiting the method
1439     Value receiver;
1440     if (!method()->is_static()) {
1441       receiver = _initial_state->local_at(0);
1442     } else {
1443       receiver = append(new Constant(new ClassConstant(method()->holder())));
1444     }
1445     append_split(new MonitorExit(receiver, state()->unlock()));
1446   }
1447 
1448   append(new Return(x));
1449 }
1450 
1451 
1452 void GraphBuilder::access_field(Bytecodes::Code code) {
1453   bool will_link;
1454   ciField* field = stream()->get_field(will_link);
1455   ciInstanceKlass* holder = field->holder();
1456   BasicType field_type = field->type()->basic_type();
1457   ValueType* type = as_ValueType(field_type);
1458   // call will_link again to determine if the field is valid.
1459   const bool needs_patching = !holder->is_loaded() ||
1460                               !field->will_link(method()->holder(), code) ||
1461                               PatchALot;
1462 
1463   ValueStack* state_before = NULL;
1464   if (!holder->is_initialized() || needs_patching) {
1465     // save state before instruction for debug info when
1466     // deoptimization happens during patching
1467     state_before = copy_state_before();
1468   }
1469 
1470   Value obj = NULL;
1471   if (code == Bytecodes::_getstatic || code == Bytecodes::_putstatic) {
1472     if (state_before != NULL) {
1473       // build a patching constant
1474       obj = new Constant(new ClassConstant(holder), state_before);
1475     } else {
1476       obj = new Constant(new ClassConstant(holder));
1477     }
1478   }
1479 
1480 
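  // If patching is needed the real field offset is unknown at compile time,
  // so -1 is used as a placeholder below; the assumption is that the runtime
  // patching code supplies the actual offset once the holder is resolved.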
1481   const int offset = !needs_patching ? field->offset() : -1;
1482   switch (code) {
1483     case Bytecodes::_getstatic: {
1484       // check for compile-time constants, i.e., initialized static final fields
1485       Instruction* constant = NULL;
1486       if (field->is_constant() && !PatchALot) {
1487         ciConstant field_val = field->constant_value();
1488         BasicType field_type = field_val.basic_type();
1489         switch (field_type) {
1490         case T_ARRAY:
1491         case T_OBJECT:
1492           if (field_val.as_object()->should_be_constant()) {
1493             constant =  new Constant(as_ValueType(field_val));
1494           }
1495           break;
1496 
1497         default:
1498           constant = new Constant(as_ValueType(field_val));
1499         }
1500       }
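      // Hypothetical Java example of what folds here: an initialized
      //   static final int SIZE = 16;
      // becomes a Constant instead of a LoadField; object-typed finals are
      // only folded when should_be_constant() allows it.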
1501       if (constant != NULL) {
1502         push(type, append(constant));
1503       } else {
1504         if (state_before == NULL) {
1505           state_before = copy_state_for_exception();
1506         }
1507         push(type, append(new LoadField(append(obj), offset, field, true,
1508                                         state_before, needs_patching)));
1509       }
1510       break;
1511     }
1512     case Bytecodes::_putstatic:
1513       { Value val = pop(type);
1514         if (state_before == NULL) {
1515           state_before = copy_state_for_exception();
1516         }
1517         append(new StoreField(append(obj), offset, field, val, true, state_before, needs_patching));
1518       }
1519       break;
1520     case Bytecodes::_getfield :
1521       {
1522         if (state_before == NULL) {
1523           state_before = copy_state_for_exception();
1524         }
1525         LoadField* load = new LoadField(apop(), offset, field, false, state_before, needs_patching);
1526         Value replacement = !needs_patching ? _memory->load(load) : load;
1527         if (replacement != load) {
1528         assert(replacement->is_linked() || !replacement->can_be_linked(), "should already be linked");
1529           push(type, replacement);
1530         } else {
1531           push(type, append(load));
1532         }
1533         break;
1534       }
1535 
1536     case Bytecodes::_putfield :
1537       { Value val = pop(type);
1538         if (state_before == NULL) {
1539           state_before = copy_state_for_exception();
1540         }
1541         StoreField* store = new StoreField(apop(), offset, field, val, false, state_before, needs_patching);
1542         if (!needs_patching) store = _memory->store(store);
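        // (descriptive note, assuming the memory buffer elides redundant
        // stores) _memory->store may return NULL when the value being stored
        // is already known to be in the field, in which case nothing is
        // appended.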
1543         if (store != NULL) {
1544           append(store);
1545         }
1546       }
1547       break;
1548     default                   :
1549       ShouldNotReachHere();
1550       break;
1551   }
1552 }
1553 
1554 
1555 Dependencies* GraphBuilder::dependency_recorder() const {
1556   assert(DeoptC1, "need debug information");
1557   return compilation()->dependency_recorder();
1558 }
1559 
1560 
1561 void GraphBuilder::invoke(Bytecodes::Code code) {
1562   bool will_link;
1563   ciMethod* target = stream()->get_method(will_link);
1564   // we have to make sure the argument size (incl. the receiver)
1565   // is correct for compilation (the call would fail later during
1566   // linkage anyway) - was bug (gri 7/28/99)
1567   if (target->is_loaded() && target->is_static() != (code == Bytecodes::_invokestatic)) BAILOUT("will cause link error");
1568   ciInstanceKlass* klass = target->holder();
1569 
1570   // check if CHA possible: if so, change the code to invokespecial
1571   ciInstanceKlass* calling_klass = method()->holder();
1572   ciKlass* holder = stream()->get_declared_method_holder();
1573   ciInstanceKlass* callee_holder = ciEnv::get_instance_klass_for_declared_method_holder(holder);
1574   ciInstanceKlass* actual_recv = callee_holder;
1575 
1576   // some methods are obviously bindable without any type checks so
1577   // convert them directly to an invokespecial.
1578   if (target->is_loaded() && !target->is_abstract() &&
1579       target->can_be_statically_bound() && code == Bytecodes::_invokevirtual) {
1580     code = Bytecodes::_invokespecial;
1581   }
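  // For example (hypothetical Java): a final method invoked via invokevirtual
  // can only have one target, so treating it as invokespecial lets the static
  // binding checks further down consider it for inlining.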
1582 
1583   // NEEDS_CLEANUP
1584   // I've added the target->is_loaded() test below but I don't really understand
1585   // how klass->is_loaded() can be true and yet target->is_loaded() is false.
1586   // This happened while running the JCK invokevirtual tests under doit.  TKR
1587   ciMethod* cha_monomorphic_target = NULL;
1588   ciMethod* exact_target = NULL;
1589   if (UseCHA && DeoptC1 && klass->is_loaded() && target->is_loaded() &&
1590       !target->is_method_handle_invoke()) {
1591     Value receiver = NULL;
1592     ciInstanceKlass* receiver_klass = NULL;
1593     bool type_is_exact = false;
1594     // try to find a precise receiver type
1595     if (will_link && !target->is_static()) {
1596       int index = state()->stack_size() - (target->arg_size_no_receiver() + 1);
1597       receiver = state()->stack_at(index);
1598       ciType* type = receiver->exact_type();
1599       if (type != NULL && type->is_loaded() &&
1600           type->is_instance_klass() && !type->as_instance_klass()->is_interface()) {
1601         receiver_klass = (ciInstanceKlass*) type;
1602         type_is_exact = true;
1603       }
1604       if (type == NULL) {
1605         type = receiver->declared_type();
1606         if (type != NULL && type->is_loaded() &&
1607             type->is_instance_klass() && !type->as_instance_klass()->is_interface()) {
1608           receiver_klass = (ciInstanceKlass*) type;
1609           if (receiver_klass->is_leaf_type() && !receiver_klass->is_final()) {
1610             // Insert a dependency on this type since
1611             // find_monomorphic_target may assume it's already done.
1612             dependency_recorder()->assert_leaf_type(receiver_klass);
1613             type_is_exact = true;
1614           }
1615         }
1616       }
1617     }
1618     if (receiver_klass != NULL && type_is_exact &&
1619         receiver_klass->is_loaded() && code != Bytecodes::_invokespecial) {
1620       // If we have the exact receiver type we can bind directly to
1621       // the method to call.
1622       exact_target = target->resolve_invoke(calling_klass, receiver_klass);
1623       if (exact_target != NULL) {
1624         target = exact_target;
1625         code = Bytecodes::_invokespecial;
1626       }
1627     }
1628     if (receiver_klass != NULL &&
1629         receiver_klass->is_subtype_of(actual_recv) &&
1630         actual_recv->is_initialized()) {
1631       actual_recv = receiver_klass;
1632     }
1633 
1634     if ((code == Bytecodes::_invokevirtual && callee_holder->is_initialized()) ||
1635         (code == Bytecodes::_invokeinterface && callee_holder->is_initialized() && !actual_recv->is_interface())) {
1636       // Use CHA on the receiver to select a more precise method.
1637       cha_monomorphic_target = target->find_monomorphic_target(calling_klass, callee_holder, actual_recv);
1638     } else if (code == Bytecodes::_invokeinterface && callee_holder->is_loaded() && receiver != NULL) {
1639       // if there is only one implementor of this interface then we
1640       // may be able to bind this invoke directly to the implementing
1641       // klass but we need both a dependence on the single interface
1642       // and on the method we bind to.  Additionally, since all we know
1643       // about the receiver type is that it's supposed to implement the
1644       // interface, we have to insert a check that it's the class we
1645       // expect.  Interface types are not checked by the verifier so
1646       // they are roughly equivalent to Object.
1647       ciInstanceKlass* singleton = NULL;
1648       if (target->holder()->nof_implementors() == 1) {
1649         singleton = target->holder()->implementor(0);
1650       }
1651       if (singleton) {
1652         cha_monomorphic_target = target->find_monomorphic_target(calling_klass, target->holder(), singleton);
1653         if (cha_monomorphic_target != NULL) {
1654           // If CHA is able to bind this invoke then update the class
1655           // to match that class, otherwise klass will refer to the
1656           // interface.
1657           klass = cha_monomorphic_target->holder();
1658           actual_recv = target->holder();
1659 
1660           // insert a check that it's really the expected class.
1661           CheckCast* c = new CheckCast(klass, receiver, copy_state_for_exception());
1662           c->set_incompatible_class_change_check();
1663           c->set_direct_compare(klass->is_final());
1664           append_split(c);
1665         }
1666       }
1667     }
1668   }
1669 
1670   if (cha_monomorphic_target != NULL) {
1671     if (cha_monomorphic_target->is_abstract()) {
1672       // Do not optimize for abstract methods
1673       cha_monomorphic_target = NULL;
1674     }
1675   }
1676 
1677   if (cha_monomorphic_target != NULL) {
1678     if (!(target->is_final_method())) {
1679       // If we inlined because CHA revealed only a single target method,
1680       // then we are dependent on that target method not getting overridden
1681       // by dynamic class loading.  Be sure to test the "static" receiver
1682       // dest_method here, as opposed to the actual receiver, which may
1683       // falsely lead us to believe that the receiver is final or private.
1684       dependency_recorder()->assert_unique_concrete_method(actual_recv, cha_monomorphic_target);
1685     }
1686     code = Bytecodes::_invokespecial;
1687   }
1688   // check if we could do inlining
1689   if (!PatchALot && Inline && klass->is_loaded() &&
1690       (klass->is_initialized() || klass->is_interface() && target->holder()->is_initialized())
1691       && target->will_link(klass, callee_holder, code)) {
1692     // callee is known => check if we have static binding
1693     assert(target->is_loaded(), "callee must be known");
1694     if (code == Bytecodes::_invokestatic
1695      || code == Bytecodes::_invokespecial
1696      || code == Bytecodes::_invokevirtual && target->is_final_method()
1697     ) {
1698       // static binding => check if callee is ok
1699       ciMethod* inline_target = (cha_monomorphic_target != NULL)
1700                                   ? cha_monomorphic_target
1701                                   : target;
1702       bool res = try_inline(inline_target, (cha_monomorphic_target != NULL) || (exact_target != NULL));
1703       CHECK_BAILOUT();
1704 
1705 #ifndef PRODUCT
1706       // printing
1707       if (PrintInlining && !res) {
1708         // if it was successfully inlined, then it was already printed.
1709         print_inline_result(inline_target, res);
1710       }
1711 #endif
1712       clear_inline_bailout();
1713       if (res) {
1714         // Register a dependence if JVMTI has either the breakpoint-
1715         // setting or the method-hotswapping capability, since both may
1716         // cause deoptimization.
1717         if (compilation()->env()->jvmti_can_hotswap_or_post_breakpoint()) {
1718           dependency_recorder()->assert_evol_method(inline_target);
1719         }
1720         return;
1721       }
1722     }
1723   }
1724   // If we attempted an inline which did not succeed because of a
1725   // bailout during construction of the callee graph, the entire
1726   // compilation has to be aborted. This is fairly rare and currently
1727   // seems to only occur for jasm-generated classes which contain
1728   // jsr/ret pairs which are not associated with finally clauses and
1729   // do not have exception handlers in the containing method, and are
1730   // therefore not caught early enough to abort the inlining without
1731   // corrupting the graph. (We currently bail out with a non-empty
1732   // stack at a ret in these situations.)
1733   CHECK_BAILOUT();
1734 
1735   // inlining not successful => standard invoke
1736   bool is_loaded = target->is_loaded();
1737   bool has_receiver =
1738     code == Bytecodes::_invokespecial   ||
1739     code == Bytecodes::_invokevirtual   ||
1740     code == Bytecodes::_invokeinterface;
1741   bool is_invokedynamic = code == Bytecodes::_invokedynamic;
1742   ValueType* result_type = as_ValueType(target->return_type());
1743 
1744   // We require the debug info to be the "state before" because
1745   // invokedynamics may deoptimize.
1746   ValueStack* state_before = is_invokedynamic ? copy_state_before() : copy_state_exhandling();
1747 
1748   Values* args = state()->pop_arguments(target->arg_size_no_receiver());
1749   Value recv = has_receiver ? apop() : NULL;
1750   int vtable_index = methodOopDesc::invalid_vtable_index;
1751 
1752 #ifdef SPARC
1753   // Currently only supported on Sparc.
1754   // The UseInlineCaches flag only controls dispatch to invokevirtuals for
1755   // loaded classes which we weren't able to statically bind.
1756   if (!UseInlineCaches && is_loaded && code == Bytecodes::_invokevirtual
1757       && !target->can_be_statically_bound()) {
1758     // Find a vtable index if one is available
1759     vtable_index = target->resolve_vtable_index(calling_klass, callee_holder);
1760   }
1761 #endif
1762 
1763   if (recv != NULL &&
1764       (code == Bytecodes::_invokespecial ||
1765        !is_loaded || target->is_final())) {
1766     // invokespecial always needs a NULL check.  invokevirtual where
1767     // the target is final or where it's not known whether the
1768     // target is final requires a NULL check.  Otherwise normal
1769     // invokevirtual will perform the null check during the lookup
1770     // logic or the unverified entry point.  Profiling of calls
1771     // requires that the null check is performed in all cases.
1772     null_check(recv);
1773   }
1774 
1775   if (is_profiling()) {
1776     if (recv != NULL && profile_calls()) {
1777       null_check(recv);
1778     }
1779     // Note that we'd collect profile data in this method if we wanted it.
1780     compilation()->set_would_profile(true);
1781 
1782     if (profile_calls()) {
1783       assert(cha_monomorphic_target == NULL || exact_target == NULL, "both can not be set");
1784       ciKlass* target_klass = NULL;
1785       if (cha_monomorphic_target != NULL) {
1786         target_klass = cha_monomorphic_target->holder();
1787       } else if (exact_target != NULL) {
1788         target_klass = exact_target->holder();
1789       }
1790       profile_call(recv, target_klass);
1791     }
1792   }
1793 
1794   Invoke* result = new Invoke(code, result_type, recv, args, vtable_index, target, state_before);
1795   // push result
1796   append_split(result);
1797 
1798   if (result_type != voidType) {
1799     if (method()->is_strict()) {
1800       push(result_type, round_fp(result));
1801     } else {
1802       push(result_type, result);
1803     }
1804   }
1805 }
1806 
1807 
1808 void GraphBuilder::new_instance(int klass_index) {
1809   ValueStack* state_before = copy_state_exhandling();
1810   bool will_link;
1811   ciKlass* klass = stream()->get_klass(will_link);
1812   assert(klass->is_instance_klass(), "must be an instance klass");
1813   NewInstance* new_instance = new NewInstance(klass->as_instance_klass(), state_before);
1814   _memory->new_instance(new_instance);
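  // Registering the fresh allocation with the local memory buffer (an
  // assumption about _memory's role) lets later field accesses on this
  // object be tracked for load/store elimination.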
1815   apush(append_split(new_instance));
1816 }
1817 
1818 
1819 void GraphBuilder::new_type_array() {
1820   ValueStack* state_before = copy_state_exhandling();
1821   apush(append_split(new NewTypeArray(ipop(), (BasicType)stream()->get_index(), state_before)));
1822 }
1823 
1824 
1825 void GraphBuilder::new_object_array() {
1826   bool will_link;
1827   ciKlass* klass = stream()->get_klass(will_link);
1828   ValueStack* state_before = !klass->is_loaded() || PatchALot ? copy_state_before() : copy_state_exhandling();
1829   NewArray* n = new NewObjectArray(klass, ipop(), state_before);
1830   apush(append_split(n));
1831 }
1832 
1833 
1834 bool GraphBuilder::direct_compare(ciKlass* k) {
1835   if (k->is_loaded() && k->is_instance_klass() && !UseSlowPath) {
1836     ciInstanceKlass* ik = k->as_instance_klass();
1837     if (ik->is_final()) {
1838       return true;
1839     } else {
1840       if (DeoptC1 && UseCHA && !(ik->has_subklass() || ik->is_interface())) {
1841         // test class is leaf class
1842         dependency_recorder()->assert_leaf_type(ik);
1843         return true;
1844       }
1845     }
1846   }
1847   return false;
1848 }
1849 
1850 
1851 void GraphBuilder::check_cast(int klass_index) {
1852   bool will_link;
1853   ciKlass* klass = stream()->get_klass(will_link);
1854   ValueStack* state_before = !klass->is_loaded() || PatchALot ? copy_state_before() : copy_state_for_exception();
1855   CheckCast* c = new CheckCast(klass, apop(), state_before);
1856   apush(append_split(c));
1857   c->set_direct_compare(direct_compare(klass));
1858 
1859   if (is_profiling()) {
1860     // Note that we'd collect profile data in this method if we wanted it.
1861     compilation()->set_would_profile(true);
1862 
1863     if (profile_checkcasts()) {
1864       c->set_profiled_method(method());
1865       c->set_profiled_bci(bci());
1866       c->set_should_profile(true);
1867     }
1868   }
1869 }
1870 
1871 
1872 void GraphBuilder::instance_of(int klass_index) {
1873   bool will_link;
1874   ciKlass* klass = stream()->get_klass(will_link);
1875   ValueStack* state_before = !klass->is_loaded() || PatchALot ? copy_state_before() : copy_state_exhandling();
1876   InstanceOf* i = new InstanceOf(klass, apop(), state_before);
1877   ipush(append_split(i));
1878   i->set_direct_compare(direct_compare(klass));
1879 
1880   if (is_profiling()) {
1881     // Note that we'd collect profile data in this method if we wanted it.
1882     compilation()->set_would_profile(true);
1883 
1884     if (profile_checkcasts()) {
1885       i->set_profiled_method(method());
1886       i->set_profiled_bci(bci());
1887       i->set_should_profile(true);
1888     }
1889   }
1890 }
1891 
1892 
1893 void GraphBuilder::monitorenter(Value x, int bci) {
1894   // save state before locking in case of deoptimization after a NullPointerException
1895   ValueStack* state_before = copy_state_for_exception_with_bci(bci);
1896   append_with_bci(new MonitorEnter(x, state()->lock(x), state_before), bci);
1897   kill_all();
1898 }
1899 
1900 
1901 void GraphBuilder::monitorexit(Value x, int bci) {
1902   append_with_bci(new MonitorExit(x, state()->unlock()), bci);
1903   kill_all();
1904 }
1905 
1906 
1907 void GraphBuilder::new_multi_array(int dimensions) {
1908   bool will_link;
1909   ciKlass* klass = stream()->get_klass(will_link);
1910   ValueStack* state_before = !klass->is_loaded() || PatchALot ? copy_state_before() : copy_state_exhandling();
1911 
1912   Values* dims = new Values(dimensions, NULL);
1913   // fill in all dimensions
1914   int i = dimensions;
1915   while (i-- > 0) dims->at_put(i, ipop());
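  // The innermost (last) dimension is on top of the stack, so popping in
  // reverse index order leaves dims in the bytecode's left-to-right order.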
1916   // create array
1917   NewArray* n = new NewMultiArray(klass, dims, state_before);
1918   apush(append_split(n));
1919 }
1920 
1921 
1922 void GraphBuilder::throw_op(int bci) {
1923   // We require that the debug info for a Throw be the "state before"
1924   // the Throw (i.e., exception oop is still on TOS)
1925   ValueStack* state_before = copy_state_before_with_bci(bci);
1926   Throw* t = new Throw(apop(), state_before);
1927   // operand stack not needed after a throw
1928   state()->truncate_stack(0);
1929   append_with_bci(t, bci);
1930 }
1931 
1932 
1933 Value GraphBuilder::round_fp(Value fp_value) {
1934   // no rounding needed if SSE2 is used
1935   if (RoundFPResults && UseSSE < 2) {
1936     // Must currently insert rounding node for doubleword values that
1937     // are results of expressions (i.e., not loads from memory or
1938     // constants)
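    // (Background, hedged) without SSE2 the x87 FPU may keep intermediate
    // results in extended precision, so a RoundFP node forces the value back
    // to the declared double width; loads, constants and parameters are
    // already stored at the correct width and need no rounding node.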
1939     if (fp_value->type()->tag() == doubleTag &&
1940         fp_value->as_Constant() == NULL &&
1941         fp_value->as_Local() == NULL &&       // method parameters need no rounding
1942         fp_value->as_RoundFP() == NULL) {
1943       return append(new RoundFP(fp_value));
1944     }
1945   }
1946   return fp_value;
1947 }
1948 
1949 
1950 Instruction* GraphBuilder::append_with_bci(Instruction* instr, int bci) {
1951   Canonicalizer canon(compilation(), instr, bci);
1952   Instruction* i1 = canon.canonical();
1953   if (i1->is_linked() || !i1->can_be_linked()) {
1954     // Canonicalizer returned an instruction which was already
1955     // appended so simply return it.
1956     return i1;
1957   }
1958 
1959   if (UseLocalValueNumbering) {
1960     // Lookup the instruction in the ValueMap and add it to the map if
1961     // it's not found.
1962     Instruction* i2 = vmap()->find_insert(i1);
1963     if (i2 != i1) {
1964       // found an entry in the value map, so just return it.
1965       assert(i2->is_linked(), "should already be linked");
1966       return i2;
1967     }
1968     ValueNumberingEffects vne(vmap());
1969     i1->visit(&vne);
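    // i1 was not found in the map, so find_insert recorded it; the
    // ValueNumberingEffects visitor then removes map entries that i1's side
    // effects (e.g. stores) may invalidate (a sketch of the intended effect).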
1970   }
1971 
1972   // i1 was not eliminated => append it
1973   assert(i1->next() == NULL, "shouldn't already be linked");
1974   _last = _last->set_next(i1, canon.bci());
1975 
1976   if (++_instruction_count >= InstructionCountCutoff && !bailed_out()) {
1977     // set the bailout state but complete normal processing.  We
1978     // might do a little more work before noticing the bailout so we
1979     // want processing to continue normally until it's noticed.
1980     bailout("Method and/or inlining is too large");
1981   }
1982 
1983 #ifndef PRODUCT
1984   if (PrintIRDuringConstruction) {
1985     InstructionPrinter ip;
1986     ip.print_line(i1);
1987     if (Verbose) {
1988       state()->print();
1989     }
1990   }
1991 #endif
1992 
1993   // save state after modification of operand stack for StateSplit instructions
1994   StateSplit* s = i1->as_StateSplit();
1995   if (s != NULL) {
1996     if (EliminateFieldAccess) {
1997       Intrinsic* intrinsic = s->as_Intrinsic();
1998       if (s->as_Invoke() != NULL || (intrinsic && !intrinsic->preserves_state())) {
1999         _memory->kill();
2000       }
2001     }
2002     s->set_state(state()->copy(ValueStack::StateAfter, canon.bci()));
2003   }
2004 
2005   // set up exception handlers for this instruction if necessary
2006   if (i1->can_trap()) {
2007     i1->set_exception_handlers(handle_exception(i1));
2008     assert(i1->exception_state() != NULL || !i1->needs_exception_state() || bailed_out(), "handle_exception must set exception state");
2009   }
2010   return i1;
2011 }
2012 
2013 
2014 Instruction* GraphBuilder::append(Instruction* instr) {
2015   assert(instr->as_StateSplit() == NULL || instr->as_BlockEnd() != NULL, "wrong append used");
2016   return append_with_bci(instr, bci());
2017 }
2018 
2019 
2020 Instruction* GraphBuilder::append_split(StateSplit* instr) {
2021   return append_with_bci(instr, bci());
2022 }
2023 
2024 
2025 void GraphBuilder::null_check(Value value) {
2026   if (value->as_NewArray() != NULL || value->as_NewInstance() != NULL) {
2027     return;
2028   } else {
2029     Constant* con = value->as_Constant();
2030     if (con) {
2031       ObjectType* c = con->type()->as_ObjectType();
2032       if (c && c->is_loaded()) {
2033         ObjectConstant* oc = c->as_ObjectConstant();
2034         if (!oc || !oc->value()->is_null_object()) {
2035           return;
2036         }
2037       }
2038     }
2039   }
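  // The value is not provably non-null (it is neither a fresh allocation nor
  // a loaded non-null constant), so append an explicit NullCheck with
  // exception state.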
2040   append(new NullCheck(value, copy_state_for_exception()));
2041 }
2042 
2043 
2044 
2045 XHandlers* GraphBuilder::handle_exception(Instruction* instruction) {
2046   if (!has_handler() && (!instruction->needs_exception_state() || instruction->exception_state() != NULL)) {
2047     assert(instruction->exception_state() == NULL
2048            || instruction->exception_state()->kind() == ValueStack::EmptyExceptionState
2049            || (instruction->exception_state()->kind() == ValueStack::ExceptionState && _compilation->env()->jvmti_can_access_local_variables()),
2050            "exception_state should be of exception kind");
2051     return new XHandlers();
2052   }
2053 
2054   XHandlers*  exception_handlers = new XHandlers();
2055   ScopeData*  cur_scope_data = scope_data();
2056   ValueStack* cur_state = instruction->state_before();
2057   ValueStack* prev_state = NULL;
2058   int scope_count = 0;
2059 
2060   assert(cur_state != NULL, "state_before must be set");
2061   do {
2062     int cur_bci = cur_state->bci();
2063     assert(cur_scope_data->scope() == cur_state->scope(), "scopes do not match");
2064     assert(cur_bci == SynchronizationEntryBCI || cur_bci == cur_scope_data->stream()->cur_bci(), "invalid bci");
2065 
2066     // join with all potential exception handlers
2067     XHandlers* list = cur_scope_data->xhandlers();
2068     const int n = list->length();
2069     for (int i = 0; i < n; i++) {
2070       XHandler* h = list->handler_at(i);
2071       if (h->covers(cur_bci)) {
2072         // h is a potential exception handler => join it
2073         compilation()->set_has_exception_handlers(true);
2074 
2075         BlockBegin* entry = h->entry_block();
2076         if (entry == block()) {
2077           // It's acceptable for an exception handler to cover itself
2078           // but we don't handle that in the parser currently.  It's
2079           // very rare so we bail out instead of trying to handle it.
2080           BAILOUT_("exception handler covers itself", exception_handlers);
2081         }
2082         assert(entry->bci() == h->handler_bci(), "must match");
2083         assert(entry->bci() == -1 || entry == cur_scope_data->block_at(entry->bci()), "blocks must correspond");
2084 
2085         // previously this was a BAILOUT, but this is not necessary
2086         // now because asynchronous exceptions are not handled this way.
2087         assert(entry->state() == NULL || cur_state->total_locks_size() == entry->state()->total_locks_size(), "locks do not match");
2088 
2089         // xhandlers start with an empty expression stack
2090         if (cur_state->stack_size() != 0) {
2091           cur_state = cur_state->copy(ValueStack::ExceptionState, cur_state->bci());
2092         }
2093         if (instruction->exception_state() == NULL) {
2094           instruction->set_exception_state(cur_state);
2095         }
2096 
2097         // Note: Usually this join must work. However, very
2098         // complicated jsr-ret structures where we don't ret from
2099         // the subroutine can cause the objects on the monitor
2100         // stacks to not match because blocks can be parsed twice.
2101         // The only test case we've seen so far which exhibits this
2102         // problem is caught by the infinite recursion test in
2103         // GraphBuilder::jsr() if the join doesn't work.
2104         if (!entry->try_merge(cur_state)) {
2105           BAILOUT_("error while joining with exception handler, prob. due to complicated jsr/rets", exception_handlers);
2106         }
2107 
2108         // add current state for correct handling of phi functions at begin of xhandler
2109         int phi_operand = entry->add_exception_state(cur_state);
2110 
2111         // add entry to the list of xhandlers of this block
2112         _block->add_exception_handler(entry);
2113 
2114         // add back-edge from xhandler entry to this block
2115         if (!entry->is_predecessor(_block)) {
2116           entry->add_predecessor(_block);
2117         }
2118 
2119         // clone XHandler because phi_operand and scope_count cannot be shared
2120         XHandler* new_xhandler = new XHandler(h);
2121         new_xhandler->set_phi_operand(phi_operand);
2122         new_xhandler->set_scope_count(scope_count);
2123         exception_handlers->append(new_xhandler);
2124 
2125         // fill in exception handler subgraph lazily
2126         assert(!entry->is_set(BlockBegin::was_visited_flag), "entry must not be visited yet");
2127         cur_scope_data->add_to_work_list(entry);
2128 
2129         // stop when reaching catchall
2130         if (h->catch_type() == 0) {
2131           return exception_handlers;
2132         }
2133       }
2134     }
2135 
2136     if (exception_handlers->length() == 0) {
2137       // This scope and all callees do not handle exceptions, so the local
2138       // variables of this scope are not needed. However, the scope itself is
2139       // required for a correct exception stack trace -> clear out the locals.
2140       if (_compilation->env()->jvmti_can_access_local_variables()) {
2141         cur_state = cur_state->copy(ValueStack::ExceptionState, cur_state->bci());
2142       } else {
2143         cur_state = cur_state->copy(ValueStack::EmptyExceptionState, cur_state->bci());
2144       }
2145       if (prev_state != NULL) {
2146         prev_state->set_caller_state(cur_state);
2147       }
2148       if (instruction->exception_state() == NULL) {
2149         instruction->set_exception_state(cur_state);
2150       }
2151     }
2152 
2153     // Set up iteration for next time.
2154     // If parsing a jsr, do not grab exception handlers from the
2155     // parent scopes for this method (already got them, and they
2156     // needed to be cloned)
2157 
2158     while (cur_scope_data->parsing_jsr()) {
2159       cur_scope_data = cur_scope_data->parent();
2160     }
2161 
2162     assert(cur_scope_data->scope() == cur_state->scope(), "scopes do not match");
2163     assert(cur_state->locks_size() == 0 || cur_state->locks_size() == 1, "unlocking must be done in a catchall exception handler");
2164 
2165     prev_state = cur_state;
2166     cur_state = cur_state->caller_state();
2167     cur_scope_data = cur_scope_data->parent();
2168     scope_count++;
2169   } while (cur_scope_data != NULL);
2170 
2171   return exception_handlers;
2172 }
2173 
2174 
2175 // Helper class for simplifying Phis.
2176 class PhiSimplifier : public BlockClosure {
2177  private:
2178   bool _has_substitutions;
2179   Value simplify(Value v);
2180 
2181  public:
2182   PhiSimplifier(BlockBegin* start) : _has_substitutions(false) {
2183     start->iterate_preorder(this);
2184     if (_has_substitutions) {
2185       SubstitutionResolver sr(start);
2186     }
2187   }
2188   void block_do(BlockBegin* b);
2189   bool has_substitutions() const { return _has_substitutions; }
2190 };
2191 
2192 
2193 Value PhiSimplifier::simplify(Value v) {
2194   Phi* phi = v->as_Phi();
2195 
2196   if (phi == NULL) {
2197     // no phi function
2198     return v;
2199   } else if (v->has_subst()) {
2200     // already substituted; subst can be phi itself -> simplify
2201     return simplify(v->subst());
2202   } else if (phi->is_set(Phi::cannot_simplify)) {
2203     // already tried to simplify phi before
2204     return phi;
2205   } else if (phi->is_set(Phi::visited)) {
2206     // break cycles in phi functions
2207     return phi;
2208   } else if (phi->type()->is_illegal()) {
2209     // illegal phi functions are ignored anyway
2210     return phi;
2211 
2212   } else {
2213     // mark phi function as processed to break cycles in phi functions
2214     phi->set(Phi::visited);
2215 
2216     // simplify x = [y, x] and x = [y, y] to y
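    // e.g. a phi whose operands are only the value y (possibly via the phi
    // itself on a back edge) carries no new information and collapses to y.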
2217     Value subst = NULL;
2218     int opd_count = phi->operand_count();
2219     for (int i = 0; i < opd_count; i++) {
2220       Value opd = phi->operand_at(i);
2221       assert(opd != NULL, "Operand must exist!");
2222 
2223       if (opd->type()->is_illegal()) {
2224         // if one operand is illegal, the entire phi function is illegal
2225         phi->make_illegal();
2226         phi->clear(Phi::visited);
2227         return phi;
2228       }
2229 
2230       Value new_opd = simplify(opd);
2231       assert(new_opd != NULL, "Simplified operand must exist!");
2232 
2233       if (new_opd != phi && new_opd != subst) {
2234         if (subst == NULL) {
2235           subst = new_opd;
2236         } else {
2237           // no simplification possible
2238           phi->set(Phi::cannot_simplify);
2239           phi->clear(Phi::visited);
2240           return phi;
2241         }
2242       }
2243     }
2244 
2245     // successfully simplified phi function
2246     assert(subst != NULL, "illegal phi function");
2247     _has_substitutions = true;
2248     phi->clear(Phi::visited);
2249     phi->set_subst(subst);
2250 
2251 #ifndef PRODUCT
2252     if (PrintPhiFunctions) {
2253       tty->print_cr("simplified phi function %c%d to %c%d (Block B%d)", phi->type()->tchar(), phi->id(), subst->type()->tchar(), subst->id(), phi->block()->block_id());
2254     }
2255 #endif
2256 
2257     return subst;
2258   }
2259 }
2260 
2261 
2262 void PhiSimplifier::block_do(BlockBegin* b) {
2263   for_each_phi_fun(b, phi,
2264     simplify(phi);
2265   );
2266 
2267 #ifdef ASSERT
2268   for_each_phi_fun(b, phi,
2269                    assert(phi->operand_count() != 1 || phi->subst() != phi, "missed trivial simplification");
2270   );
2271 
2272   ValueStack* state = b->state()->caller_state();
2273   for_each_state_value(state, value,
2274     Phi* phi = value->as_Phi();
2275     assert(phi == NULL || phi->block() != b, "must not have phi function to simplify in caller state");
2276   );
2277 #endif
2278 }
2279 
2280 // This method is called after all blocks are filled with HIR instructions
2281 // It eliminates all Phi functions of the form x = [y, y] and x = [y, x]
2282 void GraphBuilder::eliminate_redundant_phis(BlockBegin* start) {
2283   PhiSimplifier simplifier(start);
2284 }
2285 
2286 
2287 void GraphBuilder::connect_to_end(BlockBegin* beg) {
2288   // setup iteration
2289   kill_all();
2290   _block = beg;
2291   _state = beg->state()->copy_for_parsing();
2292   _last  = beg;
2293   iterate_bytecodes_for_block(beg->bci());
2294 }
2295 
2296 
2297 BlockEnd* GraphBuilder::iterate_bytecodes_for_block(int bci) {
2298 #ifndef PRODUCT
2299   if (PrintIRDuringConstruction) {
2300     tty->cr();
2301     InstructionPrinter ip;
2302     ip.print_instr(_block); tty->cr();
2303     ip.print_stack(_block->state()); tty->cr();
2304     ip.print_inline_level(_block);
2305     ip.print_head();
2306     tty->print_cr("locals size: %d stack size: %d", state()->locals_size(), state()->stack_size());
2307   }
2308 #endif
2309   _skip_block = false;
2310   assert(state() != NULL, "ValueStack missing!");
2311   ciBytecodeStream s(method());
2312   s.reset_to_bci(bci);
2313   int prev_bci = bci;
2314   scope_data()->set_stream(&s);
2315   // iterate
2316   Bytecodes::Code code = Bytecodes::_illegal;
2317   bool push_exception = false;
2318 
2319   if (block()->is_set(BlockBegin::exception_entry_flag) && block()->next() == NULL) {
2320     // first thing in the exception entry block should be the exception object.
2321     push_exception = true;
2322   }
2323 
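  // Parse bytecodes until a bailout occurs, the end of the method is reached,
  // the last appended instruction ends the block, or the next bci starts a
  // different basic block (in which case a Goto is appended after the loop).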
2324   while (!bailed_out() && last()->as_BlockEnd() == NULL &&
2325          (code = stream()->next()) != ciBytecodeStream::EOBC() &&
2326          (block_at(s.cur_bci()) == NULL || block_at(s.cur_bci()) == block())) {
2327     assert(state()->kind() == ValueStack::Parsing, "invalid state kind");
2328 
2329     // Check for active jsr during OSR compilation
2330     if (compilation()->is_osr_compile()
2331         && scope()->is_top_scope()
2332         && parsing_jsr()
2333         && s.cur_bci() == compilation()->osr_bci()) {
2334       bailout("OSR not supported while a jsr is active");
2335     }
2336 
2337     if (push_exception) {
2338       apush(append(new ExceptionObject()));
2339       push_exception = false;
2340     }
2341 
2342     // handle bytecode
2343     switch (code) {
2344       case Bytecodes::_nop            : /* nothing to do */ break;
2345       case Bytecodes::_aconst_null    : apush(append(new Constant(objectNull            ))); break;
2346       case Bytecodes::_iconst_m1      : ipush(append(new Constant(new IntConstant   (-1)))); break;
2347       case Bytecodes::_iconst_0       : ipush(append(new Constant(intZero               ))); break;
2348       case Bytecodes::_iconst_1       : ipush(append(new Constant(intOne                ))); break;
2349       case Bytecodes::_iconst_2       : ipush(append(new Constant(new IntConstant   ( 2)))); break;
2350       case Bytecodes::_iconst_3       : ipush(append(new Constant(new IntConstant   ( 3)))); break;
2351       case Bytecodes::_iconst_4       : ipush(append(new Constant(new IntConstant   ( 4)))); break;
2352       case Bytecodes::_iconst_5       : ipush(append(new Constant(new IntConstant   ( 5)))); break;
2353       case Bytecodes::_lconst_0       : lpush(append(new Constant(new LongConstant  ( 0)))); break;
2354       case Bytecodes::_lconst_1       : lpush(append(new Constant(new LongConstant  ( 1)))); break;
2355       case Bytecodes::_fconst_0       : fpush(append(new Constant(new FloatConstant ( 0)))); break;
2356       case Bytecodes::_fconst_1       : fpush(append(new Constant(new FloatConstant ( 1)))); break;
2357       case Bytecodes::_fconst_2       : fpush(append(new Constant(new FloatConstant ( 2)))); break;
2358       case Bytecodes::_dconst_0       : dpush(append(new Constant(new DoubleConstant( 0)))); break;
2359       case Bytecodes::_dconst_1       : dpush(append(new Constant(new DoubleConstant( 1)))); break;
2360       case Bytecodes::_bipush         : ipush(append(new Constant(new IntConstant(((signed char*)s.cur_bcp())[1])))); break;
2361       case Bytecodes::_sipush         : ipush(append(new Constant(new IntConstant((short)Bytes::get_Java_u2(s.cur_bcp()+1))))); break;
2362       case Bytecodes::_ldc            : // fall through
2363       case Bytecodes::_ldc_w          : // fall through
2364       case Bytecodes::_ldc2_w         : load_constant(); break;
2365       case Bytecodes::_iload          : load_local(intType     , s.get_index()); break;
2366       case Bytecodes::_lload          : load_local(longType    , s.get_index()); break;
2367       case Bytecodes::_fload          : load_local(floatType   , s.get_index()); break;
2368       case Bytecodes::_dload          : load_local(doubleType  , s.get_index()); break;
2369       case Bytecodes::_aload          : load_local(instanceType, s.get_index()); break;
2370       case Bytecodes::_iload_0        : load_local(intType   , 0); break;
2371       case Bytecodes::_iload_1        : load_local(intType   , 1); break;
2372       case Bytecodes::_iload_2        : load_local(intType   , 2); break;
2373       case Bytecodes::_iload_3        : load_local(intType   , 3); break;
2374       case Bytecodes::_lload_0        : load_local(longType  , 0); break;
2375       case Bytecodes::_lload_1        : load_local(longType  , 1); break;
2376       case Bytecodes::_lload_2        : load_local(longType  , 2); break;
2377       case Bytecodes::_lload_3        : load_local(longType  , 3); break;
2378       case Bytecodes::_fload_0        : load_local(floatType , 0); break;
2379       case Bytecodes::_fload_1        : load_local(floatType , 1); break;
2380       case Bytecodes::_fload_2        : load_local(floatType , 2); break;
2381       case Bytecodes::_fload_3        : load_local(floatType , 3); break;
2382       case Bytecodes::_dload_0        : load_local(doubleType, 0); break;
2383       case Bytecodes::_dload_1        : load_local(doubleType, 1); break;
2384       case Bytecodes::_dload_2        : load_local(doubleType, 2); break;
2385       case Bytecodes::_dload_3        : load_local(doubleType, 3); break;
2386       case Bytecodes::_aload_0        : load_local(objectType, 0); break;
2387       case Bytecodes::_aload_1        : load_local(objectType, 1); break;
2388       case Bytecodes::_aload_2        : load_local(objectType, 2); break;
2389       case Bytecodes::_aload_3        : load_local(objectType, 3); break;
2390       case Bytecodes::_iaload         : load_indexed(T_INT   ); break;
2391       case Bytecodes::_laload         : load_indexed(T_LONG  ); break;
2392       case Bytecodes::_faload         : load_indexed(T_FLOAT ); break;
2393       case Bytecodes::_daload         : load_indexed(T_DOUBLE); break;
2394       case Bytecodes::_aaload         : load_indexed(T_OBJECT); break;
2395       case Bytecodes::_baload         : load_indexed(T_BYTE  ); break;
2396       case Bytecodes::_caload         : load_indexed(T_CHAR  ); break;
2397       case Bytecodes::_saload         : load_indexed(T_SHORT ); break;
2398       case Bytecodes::_istore         : store_local(intType   , s.get_index()); break;
2399       case Bytecodes::_lstore         : store_local(longType  , s.get_index()); break;
2400       case Bytecodes::_fstore         : store_local(floatType , s.get_index()); break;
2401       case Bytecodes::_dstore         : store_local(doubleType, s.get_index()); break;
2402       case Bytecodes::_astore         : store_local(objectType, s.get_index()); break;
2403       case Bytecodes::_istore_0       : store_local(intType   , 0); break;
2404       case Bytecodes::_istore_1       : store_local(intType   , 1); break;
2405       case Bytecodes::_istore_2       : store_local(intType   , 2); break;
2406       case Bytecodes::_istore_3       : store_local(intType   , 3); break;
2407       case Bytecodes::_lstore_0       : store_local(longType  , 0); break;
2408       case Bytecodes::_lstore_1       : store_local(longType  , 1); break;
2409       case Bytecodes::_lstore_2       : store_local(longType  , 2); break;
2410       case Bytecodes::_lstore_3       : store_local(longType  , 3); break;
2411       case Bytecodes::_fstore_0       : store_local(floatType , 0); break;
2412       case Bytecodes::_fstore_1       : store_local(floatType , 1); break;
2413       case Bytecodes::_fstore_2       : store_local(floatType , 2); break;
2414       case Bytecodes::_fstore_3       : store_local(floatType , 3); break;
2415       case Bytecodes::_dstore_0       : store_local(doubleType, 0); break;
2416       case Bytecodes::_dstore_1       : store_local(doubleType, 1); break;
2417       case Bytecodes::_dstore_2       : store_local(doubleType, 2); break;
2418       case Bytecodes::_dstore_3       : store_local(doubleType, 3); break;
2419       case Bytecodes::_astore_0       : store_local(objectType, 0); break;
2420       case Bytecodes::_astore_1       : store_local(objectType, 1); break;
2421       case Bytecodes::_astore_2       : store_local(objectType, 2); break;
2422       case Bytecodes::_astore_3       : store_local(objectType, 3); break;
2423       case Bytecodes::_iastore        : store_indexed(T_INT   ); break;
2424       case Bytecodes::_lastore        : store_indexed(T_LONG  ); break;
2425       case Bytecodes::_fastore        : store_indexed(T_FLOAT ); break;
2426       case Bytecodes::_dastore        : store_indexed(T_DOUBLE); break;
2427       case Bytecodes::_aastore        : store_indexed(T_OBJECT); break;
2428       case Bytecodes::_bastore        : store_indexed(T_BYTE  ); break;
2429       case Bytecodes::_castore        : store_indexed(T_CHAR  ); break;
2430       case Bytecodes::_sastore        : store_indexed(T_SHORT ); break;
2431       case Bytecodes::_pop            : // fall through
2432       case Bytecodes::_pop2           : // fall through
2433       case Bytecodes::_dup            : // fall through
2434       case Bytecodes::_dup_x1         : // fall through
2435       case Bytecodes::_dup_x2         : // fall through
2436       case Bytecodes::_dup2           : // fall through
2437       case Bytecodes::_dup2_x1        : // fall through
2438       case Bytecodes::_dup2_x2        : // fall through
2439       case Bytecodes::_swap           : stack_op(code); break;
2440       case Bytecodes::_iadd           : arithmetic_op(intType   , code); break;
2441       case Bytecodes::_ladd           : arithmetic_op(longType  , code); break;
2442       case Bytecodes::_fadd           : arithmetic_op(floatType , code); break;
2443       case Bytecodes::_dadd           : arithmetic_op(doubleType, code); break;
2444       case Bytecodes::_isub           : arithmetic_op(intType   , code); break;
2445       case Bytecodes::_lsub           : arithmetic_op(longType  , code); break;
2446       case Bytecodes::_fsub           : arithmetic_op(floatType , code); break;
2447       case Bytecodes::_dsub           : arithmetic_op(doubleType, code); break;
2448       case Bytecodes::_imul           : arithmetic_op(intType   , code); break;
2449       case Bytecodes::_lmul           : arithmetic_op(longType  , code); break;
2450       case Bytecodes::_fmul           : arithmetic_op(floatType , code); break;
2451       case Bytecodes::_dmul           : arithmetic_op(doubleType, code); break;
2452       case Bytecodes::_idiv           : arithmetic_op(intType   , code, copy_state_for_exception()); break;
2453       case Bytecodes::_ldiv           : arithmetic_op(longType  , code, copy_state_for_exception()); break;
2454       case Bytecodes::_fdiv           : arithmetic_op(floatType , code); break;
2455       case Bytecodes::_ddiv           : arithmetic_op(doubleType, code); break;
2456       case Bytecodes::_irem           : arithmetic_op(intType   , code, copy_state_for_exception()); break;
2457       case Bytecodes::_lrem           : arithmetic_op(longType  , code, copy_state_for_exception()); break;
2458       case Bytecodes::_frem           : arithmetic_op(floatType , code); break;
2459       case Bytecodes::_drem           : arithmetic_op(doubleType, code); break;
2460       case Bytecodes::_ineg           : negate_op(intType   ); break;
2461       case Bytecodes::_lneg           : negate_op(longType  ); break;
2462       case Bytecodes::_fneg           : negate_op(floatType ); break;
2463       case Bytecodes::_dneg           : negate_op(doubleType); break;
2464       case Bytecodes::_ishl           : shift_op(intType , code); break;
2465       case Bytecodes::_lshl           : shift_op(longType, code); break;
2466       case Bytecodes::_ishr           : shift_op(intType , code); break;
2467       case Bytecodes::_lshr           : shift_op(longType, code); break;
2468       case Bytecodes::_iushr          : shift_op(intType , code); break;
2469       case Bytecodes::_lushr          : shift_op(longType, code); break;
2470       case Bytecodes::_iand           : logic_op(intType , code); break;
2471       case Bytecodes::_land           : logic_op(longType, code); break;
2472       case Bytecodes::_ior            : logic_op(intType , code); break;
2473       case Bytecodes::_lor            : logic_op(longType, code); break;
2474       case Bytecodes::_ixor           : logic_op(intType , code); break;
2475       case Bytecodes::_lxor           : logic_op(longType, code); break;
2476       case Bytecodes::_iinc           : increment(); break;
2477       case Bytecodes::_i2l            : convert(code, T_INT   , T_LONG  ); break;
2478       case Bytecodes::_i2f            : convert(code, T_INT   , T_FLOAT ); break;
2479       case Bytecodes::_i2d            : convert(code, T_INT   , T_DOUBLE); break;
2480       case Bytecodes::_l2i            : convert(code, T_LONG  , T_INT   ); break;
2481       case Bytecodes::_l2f            : convert(code, T_LONG  , T_FLOAT ); break;
2482       case Bytecodes::_l2d            : convert(code, T_LONG  , T_DOUBLE); break;
2483       case Bytecodes::_f2i            : convert(code, T_FLOAT , T_INT   ); break;
2484       case Bytecodes::_f2l            : convert(code, T_FLOAT , T_LONG  ); break;
2485       case Bytecodes::_f2d            : convert(code, T_FLOAT , T_DOUBLE); break;
2486       case Bytecodes::_d2i            : convert(code, T_DOUBLE, T_INT   ); break;
2487       case Bytecodes::_d2l            : convert(code, T_DOUBLE, T_LONG  ); break;
2488       case Bytecodes::_d2f            : convert(code, T_DOUBLE, T_FLOAT ); break;
2489       case Bytecodes::_i2b            : convert(code, T_INT   , T_BYTE  ); break;
2490       case Bytecodes::_i2c            : convert(code, T_INT   , T_CHAR  ); break;
2491       case Bytecodes::_i2s            : convert(code, T_INT   , T_SHORT ); break;
2492       case Bytecodes::_lcmp           : compare_op(longType  , code); break;
2493       case Bytecodes::_fcmpl          : compare_op(floatType , code); break;
2494       case Bytecodes::_fcmpg          : compare_op(floatType , code); break;
2495       case Bytecodes::_dcmpl          : compare_op(doubleType, code); break;
2496       case Bytecodes::_dcmpg          : compare_op(doubleType, code); break;
2497       case Bytecodes::_ifeq           : if_zero(intType   , If::eql); break;
2498       case Bytecodes::_ifne           : if_zero(intType   , If::neq); break;
2499       case Bytecodes::_iflt           : if_zero(intType   , If::lss); break;
2500       case Bytecodes::_ifge           : if_zero(intType   , If::geq); break;
2501       case Bytecodes::_ifgt           : if_zero(intType   , If::gtr); break;
2502       case Bytecodes::_ifle           : if_zero(intType   , If::leq); break;
2503       case Bytecodes::_if_icmpeq      : if_same(intType   , If::eql); break;
2504       case Bytecodes::_if_icmpne      : if_same(intType   , If::neq); break;
2505       case Bytecodes::_if_icmplt      : if_same(intType   , If::lss); break;
2506       case Bytecodes::_if_icmpge      : if_same(intType   , If::geq); break;
2507       case Bytecodes::_if_icmpgt      : if_same(intType   , If::gtr); break;
2508       case Bytecodes::_if_icmple      : if_same(intType   , If::leq); break;
2509       case Bytecodes::_if_acmpeq      : if_same(objectType, If::eql); break;
2510       case Bytecodes::_if_acmpne      : if_same(objectType, If::neq); break;
2511       case Bytecodes::_goto           : _goto(s.cur_bci(), s.get_dest()); break;
2512       case Bytecodes::_jsr            : jsr(s.get_dest()); break;
2513       case Bytecodes::_ret            : ret(s.get_index()); break;
2514       case Bytecodes::_tableswitch    : table_switch(); break;
2515       case Bytecodes::_lookupswitch   : lookup_switch(); break;
2516       case Bytecodes::_ireturn        : method_return(ipop()); break;
2517       case Bytecodes::_lreturn        : method_return(lpop()); break;
2518       case Bytecodes::_freturn        : method_return(fpop()); break;
2519       case Bytecodes::_dreturn        : method_return(dpop()); break;
2520       case Bytecodes::_areturn        : method_return(apop()); break;
2521       case Bytecodes::_return         : method_return(NULL  ); break;
2522       case Bytecodes::_getstatic      : // fall through
2523       case Bytecodes::_putstatic      : // fall through
2524       case Bytecodes::_getfield       : // fall through
2525       case Bytecodes::_putfield       : access_field(code); break;
2526       case Bytecodes::_invokevirtual  : // fall through
2527       case Bytecodes::_invokespecial  : // fall through
2528       case Bytecodes::_invokestatic   : // fall through
2529       case Bytecodes::_invokedynamic  : // fall through
2530       case Bytecodes::_invokeinterface: invoke(code); break;
2531       case Bytecodes::_new            : new_instance(s.get_index_u2()); break;
2532       case Bytecodes::_newarray       : new_type_array(); break;
2533       case Bytecodes::_anewarray      : new_object_array(); break;
2534       case Bytecodes::_arraylength    : { ValueStack* state_before = copy_state_for_exception(); ipush(append(new ArrayLength(apop(), state_before))); break; }
2535       case Bytecodes::_athrow         : throw_op(s.cur_bci()); break;
2536       case Bytecodes::_checkcast      : check_cast(s.get_index_u2()); break;
2537       case Bytecodes::_instanceof     : instance_of(s.get_index_u2()); break;
2538       case Bytecodes::_monitorenter   : monitorenter(apop(), s.cur_bci()); break;
2539       case Bytecodes::_monitorexit    : monitorexit (apop(), s.cur_bci()); break;
2540       case Bytecodes::_wide           : ShouldNotReachHere(); break;
2541       case Bytecodes::_multianewarray : new_multi_array(s.cur_bcp()[3]); break;
2542       case Bytecodes::_ifnull         : if_null(objectType, If::eql); break;
2543       case Bytecodes::_ifnonnull      : if_null(objectType, If::neq); break;
2544       case Bytecodes::_goto_w         : _goto(s.cur_bci(), s.get_far_dest()); break;
2545       case Bytecodes::_jsr_w          : jsr(s.get_far_dest()); break;
2546       case Bytecodes::_breakpoint     : BAILOUT_("concurrent setting of breakpoint", NULL);
2547       default                         : ShouldNotReachHere(); break;
2548     }
2549     // save current bci to set up Goto at the end
2550     prev_bci = s.cur_bci();
2551   }
2552   CHECK_BAILOUT_(NULL);
2553   // stop processing of this block (see try_inline_full)
2554   if (_skip_block) {
2555     _skip_block = false;
2556     assert(_last && _last->as_BlockEnd(), "");
2557     return _last->as_BlockEnd();
2558   }
2559   // check whether the last instruction is a BlockEnd; if not, append a Goto below
2560   BlockEnd* end = last()->as_BlockEnd();
2561   if (end == NULL) {
2562     // all blocks must end with a BlockEnd instruction => add a Goto
2563     end = new Goto(block_at(s.cur_bci()), false);
2564     append(end);
2565   }
2566   assert(end == last()->as_BlockEnd(), "inconsistency");
2567 
2568   assert(end->state() != NULL, "state must already be present");
2569   assert(end->as_Return() == NULL || end->as_Throw() == NULL || end->state()->stack_size() == 0, "stack not needed for return and throw");
2570 
2571   // connect to begin & set state
2572   // NOTE that inlining may have changed the block we are parsing
2573   block()->set_end(end);
2574   // propagate state
2575   for (int i = end->number_of_sux() - 1; i >= 0; i--) {
2576     BlockBegin* sux = end->sux_at(i);
2577     assert(sux->is_predecessor(block()), "predecessor missing");
2578     // be careful, bailout if bytecodes are strange
2579     if (!sux->try_merge(end->state())) BAILOUT_("block join failed", NULL);
2580     scope_data()->add_to_work_list(end->sux_at(i));
2581   }
2582 
2583   scope_data()->set_stream(NULL);
2584 
2585   // done
2586   return end;
2587 }
2588 
2589 
2590 void GraphBuilder::iterate_all_blocks(bool start_in_current_block_for_inlining) {
2591   do {
2592     if (start_in_current_block_for_inlining && !bailed_out()) {
2593       iterate_bytecodes_for_block(0);
2594       start_in_current_block_for_inlining = false;
2595     } else {
2596       BlockBegin* b;
2597       while ((b = scope_data()->remove_from_work_list()) != NULL) {
2598         if (!b->is_set(BlockBegin::was_visited_flag)) {
2599           if (b->is_set(BlockBegin::osr_entry_flag)) {
2600             // we're about to parse the osr entry block, so make sure
2601             // we set up the OSR edge leading into this block so that
2602             // Phis get set up correctly.
2603             setup_osr_entry_block();
2604             // this is no longer the osr entry block, so clear it.
2605             b->clear(BlockBegin::osr_entry_flag);
2606           }
2607           b->set(BlockBegin::was_visited_flag);
2608           connect_to_end(b);
2609         }
2610       }
2611     }
2612   } while (!bailed_out() && !scope_data()->is_work_list_empty());
2613 }
2614 
2615 
2616 bool GraphBuilder::_can_trap      [Bytecodes::number_of_java_codes];
2617 
2618 void GraphBuilder::initialize() {
2619   // the following bytecodes are assumed to potentially
2620   // throw exceptions in compiled code - note that, e.g.,
2621   // monitorexit & the return bytecodes do not throw
2622   // exceptions here because successful monitor pairing
2623   // has already proven that they cannot fail
2624   Bytecodes::Code can_trap_list[] =
2625     { Bytecodes::_ldc
2626     , Bytecodes::_ldc_w
2627     , Bytecodes::_ldc2_w
2628     , Bytecodes::_iaload
2629     , Bytecodes::_laload
2630     , Bytecodes::_faload
2631     , Bytecodes::_daload
2632     , Bytecodes::_aaload
2633     , Bytecodes::_baload
2634     , Bytecodes::_caload
2635     , Bytecodes::_saload
2636     , Bytecodes::_iastore
2637     , Bytecodes::_lastore
2638     , Bytecodes::_fastore
2639     , Bytecodes::_dastore
2640     , Bytecodes::_aastore
2641     , Bytecodes::_bastore
2642     , Bytecodes::_castore
2643     , Bytecodes::_sastore
2644     , Bytecodes::_idiv
2645     , Bytecodes::_ldiv
2646     , Bytecodes::_irem
2647     , Bytecodes::_lrem
2648     , Bytecodes::_getstatic
2649     , Bytecodes::_putstatic
2650     , Bytecodes::_getfield
2651     , Bytecodes::_putfield
2652     , Bytecodes::_invokevirtual
2653     , Bytecodes::_invokespecial
2654     , Bytecodes::_invokestatic
2655     , Bytecodes::_invokedynamic
2656     , Bytecodes::_invokeinterface
2657     , Bytecodes::_new
2658     , Bytecodes::_newarray
2659     , Bytecodes::_anewarray
2660     , Bytecodes::_arraylength
2661     , Bytecodes::_athrow
2662     , Bytecodes::_checkcast
2663     , Bytecodes::_instanceof
2664     , Bytecodes::_monitorenter
2665     , Bytecodes::_multianewarray
2666     };
2667 
2668   // initialize trap tables
2669   for (int i = 0; i < Bytecodes::number_of_java_codes; i++) {
2670     _can_trap[i] = false;
2671   }
2672   // set standard trap info
2673   for (uint j = 0; j < ARRAY_SIZE(can_trap_list); j++) {
2674     _can_trap[can_trap_list[j]] = true;
2675   }
2676 }
2677 
2678 
2679 BlockBegin* GraphBuilder::header_block(BlockBegin* entry, BlockBegin::Flag f, ValueStack* state) {
2680   assert(entry->is_set(f), "entry/flag mismatch");
2681   // create header block
2682   BlockBegin* h = new BlockBegin(entry->bci());
2683   h->set_depth_first_number(0);
2684 
2685   Value l = h;
2686   BlockEnd* g = new Goto(entry, false);
2687   l->set_next(g, entry->bci());
2688   h->set_end(g);
2689   h->set(f);
2690   // setup header block end state
2691   ValueStack* s = state->copy(ValueStack::StateAfter, entry->bci()); // can use copy since stack is empty (=> no phis)
2692   assert(s->stack_is_empty(), "must have empty stack at entry point");
2693   g->set_state(s);
2694   return h;
2695 }
2696 
2697 
2698 
2699 BlockBegin* GraphBuilder::setup_start_block(int osr_bci, BlockBegin* std_entry, BlockBegin* osr_entry, ValueStack* state) {
2700   BlockBegin* start = new BlockBegin(0);
2701 
2702   // This code eliminates the empty start block at the beginning of
2703   // each method.  Previously, each method started with the
2704   // start-block created below, and this block was followed by the
2705   // header block that was always empty.  This header block is only
2706   // necessary if std_entry is also a backward branch target because
2707   // then phi functions may be necessary in the header block.  It's
2708   // also necessary when profiling so that there's a single block that
2709   // can increment the interpreter_invocation_count.
2710   BlockBegin* new_header_block;
2711   if (std_entry->number_of_preds() > 0 || count_invocations() || count_backedges()) {
2712     new_header_block = header_block(std_entry, BlockBegin::std_entry_flag, state);
2713   } else {
2714     new_header_block = std_entry;
2715   }
2716 
2717   // setup start block (root for the IR graph)
2718   Base* base =
2719     new Base(
2720       new_header_block,
2721       osr_entry
2722     );
2723   start->set_next(base, 0);
2724   start->set_end(base);
2725   // create & setup state for start block
2726   start->set_state(state->copy(ValueStack::StateAfter, std_entry->bci()));
2727   base->set_state(state->copy(ValueStack::StateAfter, std_entry->bci()));
2728 
2729   if (base->std_entry()->state() == NULL) {
2730     // setup states for header blocks
2731     base->std_entry()->merge(state);
2732   }
2733 
2734   assert(base->std_entry()->state() != NULL, "");
2735   return start;
2736 }
2737 
2738 
2739 void GraphBuilder::setup_osr_entry_block() {
2740   assert(compilation()->is_osr_compile(), "only for osrs");
2741 
2742   int osr_bci = compilation()->osr_bci();
2743   ciBytecodeStream s(method());
2744   s.reset_to_bci(osr_bci);
2745   s.next();
2746   scope_data()->set_stream(&s);
2747 
2748   // create a new block to be the osr setup code
2749   _osr_entry = new BlockBegin(osr_bci);
2750   _osr_entry->set(BlockBegin::osr_entry_flag);
2751   _osr_entry->set_depth_first_number(0);
2752   BlockBegin* target = bci2block()->at(osr_bci);
2753   assert(target != NULL && target->is_set(BlockBegin::osr_entry_flag), "must be there");
2754   // the osr entry has no values for locals
2755   ValueStack* state = target->state()->copy();
2756   _osr_entry->set_state(state);
2757 
2758   kill_all();
2759   _block = _osr_entry;
2760   _state = _osr_entry->state()->copy();
2761   assert(_state->bci() == osr_bci, "mismatch");
2762   _last  = _osr_entry;
2763   Value e = append(new OsrEntry());
2764   e->set_needs_null_check(false);
2765 
2766   // OSR buffer is
2767   //
2768   // locals[nlocals-1..0]
2769   // monitors[number_of_locks-1..0]
2770   //
2771   // locals is a direct copy of the interpreter frame, so in the osr buffer
2772   // the first slot in the local array is the last local from the interpreter
2773   // and the last slot is local[0] (the receiver) from the interpreter
2774   //
2775   // Similarly with locks: the first lock slot in the osr buffer is the nth lock
2776   // from the interpreter frame, and the nth lock slot in the osr buffer is the 0th lock
2777   // in the interpreter frame (the method lock if the method is synchronized)
2778 
2779   // Initialize monitors in the compiled activation.
2780 
2781   int index;
2782   Value local;
2783 
2784   // find all the locals that the interpreter thinks contain live oops
2785   const BitMap live_oops = method()->live_local_oops_at_bci(osr_bci);
2786 
2787   // compute the offset into the locals so that we can treat the buffer
2788   // as if the locals were still in the interpreter frame
2789   int locals_offset = BytesPerWord * (method()->max_locals() - 1);
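       // For illustration (hypothetical values): with max_locals == 3,
       // locals_offset is 2 * BytesPerWord, so the loop below recovers the
       // interpreter's local[0] from buffer offset 2 * BytesPerWord and
       // local[2] from offset 0, using
       //   offset = locals_offset - (index + size - 1) * BytesPerWord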
2790   for_each_local_value(state, index, local) {
2791     int offset = locals_offset - (index + local->type()->size() - 1) * BytesPerWord;
2792     Value get;
2793     if (local->type()->is_object_kind() && !live_oops.at(index)) {
2794       // The interpreter thinks this local is dead but the compiler
2795       // doesn't, so pretend that the interpreter passed in null.
2796       get = append(new Constant(objectNull));
2797     } else {
2798       get = append(new UnsafeGetRaw(as_BasicType(local->type()), e,
2799                                     append(new Constant(new IntConstant(offset))),
2800                                     0,
2801                                     true /*unaligned*/, true /*wide*/));
2802     }
2803     _state->store_local(index, get);
2804   }
2805 
2806   // the storage for the OSR buffer is freed manually in the LIRGenerator.
2807 
2808   assert(state->caller_state() == NULL, "should be top scope");
2809   state->clear_locals();
2810   Goto* g = new Goto(target, false);
2811   append(g);
2812   _osr_entry->set_end(g);
2813   target->merge(_osr_entry->end()->state());
2814 
2815   scope_data()->set_stream(NULL);
2816 }
2817 
2818 
2819 ValueStack* GraphBuilder::state_at_entry() {
2820   ValueStack* state = new ValueStack(scope(), NULL);
2821 
2822   // Set up locals for receiver
2823   int idx = 0;
2824   if (!method()->is_static()) {
2825     // we should always see the receiver
2826     state->store_local(idx, new Local(objectType, idx));
2827     idx = 1;
2828   }
2829 
2830   // Set up locals for incoming arguments
2831   ciSignature* sig = method()->signature();
2832   for (int i = 0; i < sig->count(); i++) {
2833     ciType* type = sig->type_at(i);
2834     BasicType basic_type = type->basic_type();
2835     // don't allow T_ARRAY to propagate into locals types
2836     if (basic_type == T_ARRAY) basic_type = T_OBJECT;
2837     ValueType* vt = as_ValueType(basic_type);
2838     state->store_local(idx, new Local(vt, idx));
2839     idx += type->size();
2840   }
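       // For example (hypothetical descriptor (JI)V on an instance method):
       // the receiver occupies local 0, the long argument locals 1 and 2
       // (its type->size() is 2), and the int argument local 3.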
2841 
2842   // lock synchronized method
2843   if (method()->is_synchronized()) {
2844     state->lock(NULL);
2845   }
2846 
2847   return state;
2848 }
2849 
2850 
2851 GraphBuilder::GraphBuilder(Compilation* compilation, IRScope* scope)
2852   : _scope_data(NULL)
2853   , _instruction_count(0)
2854   , _osr_entry(NULL)
2855   , _memory(new MemoryBuffer())
2856   , _compilation(compilation)
2857   , _inline_bailout_msg(NULL)
2858 {
2859   int osr_bci = compilation->osr_bci();
2860 
2861   // determine entry points and bci2block mapping
2862   BlockListBuilder blm(compilation, scope, osr_bci);
2863   CHECK_BAILOUT();
2864 
2865   BlockList* bci2block = blm.bci2block();
2866   BlockBegin* start_block = bci2block->at(0);
2867 
2868   push_root_scope(scope, bci2block, start_block);
2869 
2870   // setup state for std entry
2871   _initial_state = state_at_entry();
2872   start_block->merge(_initial_state);
2873 
2874   // complete graph
2875   _vmap        = new ValueMap();
2876   switch (scope->method()->intrinsic_id()) {
2877   case vmIntrinsics::_dabs          : // fall through
2878   case vmIntrinsics::_dsqrt         : // fall through
2879   case vmIntrinsics::_dsin          : // fall through
2880   case vmIntrinsics::_dcos          : // fall through
2881   case vmIntrinsics::_dtan          : // fall through
2882   case vmIntrinsics::_dlog          : // fall through
2883   case vmIntrinsics::_dlog10        : // fall through
2884     {
2885       // Compiles where the root method is an intrinsic need a special
2886       // compilation environment because the bytecodes for the method
2887       // shouldn't be parsed during the compilation; only the special
2888       // Intrinsic node should be emitted.  If this isn't done, the
2889       // code for the inlined version will be different from the root
2890       // compiled version, which could lead to monotonicity problems on
2891       // Intel.
2892 
2893       // Set up a stream so that appending instructions works properly.
2894       ciBytecodeStream s(scope->method());
2895       s.reset_to_bci(0);
2896       scope_data()->set_stream(&s);
2897       s.next();
2898 
2899       // setup the initial block state
2900       _block = start_block;
2901       _state = start_block->state()->copy_for_parsing();
2902       _last  = start_block;
2903       load_local(doubleType, 0);
2904 
2905       // Emit the intrinsic node.
2906       bool result = try_inline_intrinsics(scope->method());
2907       if (!result) BAILOUT("failed to inline intrinsic");
2908       method_return(dpop());
2909 
2910       // connect the begin and end blocks and we're all done.
2911       BlockEnd* end = last()->as_BlockEnd();
2912       block()->set_end(end);
2913       break;
2914     }
2915   default:
2916     scope_data()->add_to_work_list(start_block);
2917     iterate_all_blocks();
2918     break;
2919   }
2920   CHECK_BAILOUT();
2921 
2922   _start = setup_start_block(osr_bci, start_block, _osr_entry, _initial_state);
2923 
2924   eliminate_redundant_phis(_start);
2925 
2926   NOT_PRODUCT(if (PrintValueNumbering && Verbose) print_stats());
2927   // for osr compile, bailout if some requirements are not fulfilled
2928   if (osr_bci != -1) {
2929     BlockBegin* osr_block = blm.bci2block()->at(osr_bci);
2930     assert(osr_block->is_set(BlockBegin::was_visited_flag),"osr entry must have been visited for osr compile");
2931 
2932     // check if osr entry point has empty stack - we cannot handle non-empty stacks at osr entry points
2933     if (!osr_block->state()->stack_is_empty()) {
2934       BAILOUT("stack not empty at OSR entry point");
2935     }
2936   }
2937 #ifndef PRODUCT
2938   if (PrintCompilation && Verbose) tty->print_cr("Created %d Instructions", _instruction_count);
2939 #endif
2940 }
2941 
2942 
2943 ValueStack* GraphBuilder::copy_state_before() {
2944   return copy_state_before_with_bci(bci());
2945 }
2946 
2947 ValueStack* GraphBuilder::copy_state_exhandling() {
2948   return copy_state_exhandling_with_bci(bci());
2949 }
2950 
2951 ValueStack* GraphBuilder::copy_state_for_exception() {
2952   return copy_state_for_exception_with_bci(bci());
2953 }
2954 
2955 ValueStack* GraphBuilder::copy_state_before_with_bci(int bci) {
2956   return state()->copy(ValueStack::StateBefore, bci);
2957 }
2958 
2959 ValueStack* GraphBuilder::copy_state_exhandling_with_bci(int bci) {
2960   if (!has_handler()) return NULL;
2961   return state()->copy(ValueStack::StateBefore, bci);
2962 }
2963 
2964 ValueStack* GraphBuilder::copy_state_for_exception_with_bci(int bci) {
2965   ValueStack* s = copy_state_exhandling_with_bci(bci);
2966   if (s == NULL) {
2967     if (_compilation->env()->jvmti_can_access_local_variables()) {
2968       s = state()->copy(ValueStack::ExceptionState, bci);
2969     } else {
2970       s = state()->copy(ValueStack::EmptyExceptionState, bci);
2971     }
2972   }
2973   return s;
2974 }
2975 
2976 int GraphBuilder::recursive_inline_level(ciMethod* cur_callee) const {
2977   int recur_level = 0;
2978   for (IRScope* s = scope(); s != NULL; s = s->caller()) {
2979     if (s->method() == cur_callee) {
2980       ++recur_level;
2981     }
2982   }
2983   return recur_level;
2984 }
2985 
2986 
2987 bool GraphBuilder::try_inline(ciMethod* callee, bool holder_known) {
2988   // Clear out any existing inline bailout condition
2989   clear_inline_bailout();
2990 
2991   if (callee->should_exclude()) {
2992     // callee is excluded
2993     INLINE_BAILOUT("excluded by CompilerOracle")
2994   } else if (!callee->can_be_compiled()) {
2995     // callee is not compilable (prob. has breakpoints)
2996     INLINE_BAILOUT("not compilable")
2997   } else if (callee->intrinsic_id() != vmIntrinsics::_none && try_inline_intrinsics(callee)) {
2998     // intrinsics can be native or not
2999     return true;
3000   } else if (callee->is_native()) {
3001     // non-intrinsic natives cannot be inlined
3002     INLINE_BAILOUT("non-intrinsic native")
3003   } else if (callee->is_abstract()) {
3004     INLINE_BAILOUT("abstract")
3005   } else {
3006     return try_inline_full(callee, holder_known);
3007   }
3008 }
3009 
3010 
3011 bool GraphBuilder::try_inline_intrinsics(ciMethod* callee) {
3012   if (!InlineNatives           ) INLINE_BAILOUT("intrinsic method inlining disabled");
3013   if (callee->is_synchronized()) {
3014     // We don't currently support any synchronized intrinsics
3015     return false;
3016   }
3017 
3018   // callee seems like a good candidate
3019   // determine id
3020   bool preserves_state = false;
3021   bool cantrap = true;
3022   vmIntrinsics::ID id = callee->intrinsic_id();
3023   switch (id) {
3024     case vmIntrinsics::_arraycopy     :
3025       if (!InlineArrayCopy) return false;
3026       break;
3027 
3028     case vmIntrinsics::_currentTimeMillis:
3029     case vmIntrinsics::_nanoTime:
3030       preserves_state = true;
3031       cantrap = false;
3032       break;
3033 
3034     case vmIntrinsics::_floatToRawIntBits   :
3035     case vmIntrinsics::_intBitsToFloat      :
3036     case vmIntrinsics::_doubleToRawLongBits :
3037     case vmIntrinsics::_longBitsToDouble    :
3038       if (!InlineMathNatives) return false;
3039       preserves_state = true;
3040       cantrap = false;
3041       break;
3042 
3043     case vmIntrinsics::_getClass      :
3044       if (!InlineClassNatives) return false;
3045       preserves_state = true;
3046       break;
3047 
3048     case vmIntrinsics::_currentThread :
3049       if (!InlineThreadNatives) return false;
3050       preserves_state = true;
3051       cantrap = false;
3052       break;
3053 
3054     case vmIntrinsics::_dabs          : // fall through
3055     case vmIntrinsics::_dsqrt         : // fall through
3056     case vmIntrinsics::_dsin          : // fall through
3057     case vmIntrinsics::_dcos          : // fall through
3058     case vmIntrinsics::_dtan          : // fall through
3059     case vmIntrinsics::_dlog          : // fall through
3060     case vmIntrinsics::_dlog10        : // fall through
3061       if (!InlineMathNatives) return false;
3062       cantrap = false;
3063       preserves_state = true;
3064       break;
3065 
3066     // sun/misc/AtomicLong.attemptUpdate
3067     case vmIntrinsics::_attemptUpdate :
3068       if (!VM_Version::supports_cx8()) return false;
3069       if (!InlineAtomicLong) return false;
3070       preserves_state = true;
3071       break;
3072 
3073     // Use special nodes for Unsafe instructions so we can more easily
3074     // perform an address-mode optimization on the raw variants
3075     case vmIntrinsics::_getObject : return append_unsafe_get_obj(callee, T_OBJECT,  false);
3076     case vmIntrinsics::_getBoolean: return append_unsafe_get_obj(callee, T_BOOLEAN, false);
3077     case vmIntrinsics::_getByte   : return append_unsafe_get_obj(callee, T_BYTE,    false);
3078     case vmIntrinsics::_getShort  : return append_unsafe_get_obj(callee, T_SHORT,   false);
3079     case vmIntrinsics::_getChar   : return append_unsafe_get_obj(callee, T_CHAR,    false);
3080     case vmIntrinsics::_getInt    : return append_unsafe_get_obj(callee, T_INT,     false);
3081     case vmIntrinsics::_getLong   : return append_unsafe_get_obj(callee, T_LONG,    false);
3082     case vmIntrinsics::_getFloat  : return append_unsafe_get_obj(callee, T_FLOAT,   false);
3083     case vmIntrinsics::_getDouble : return append_unsafe_get_obj(callee, T_DOUBLE,  false);
3084 
3085     case vmIntrinsics::_putObject : return append_unsafe_put_obj(callee, T_OBJECT,  false);
3086     case vmIntrinsics::_putBoolean: return append_unsafe_put_obj(callee, T_BOOLEAN, false);
3087     case vmIntrinsics::_putByte   : return append_unsafe_put_obj(callee, T_BYTE,    false);
3088     case vmIntrinsics::_putShort  : return append_unsafe_put_obj(callee, T_SHORT,   false);
3089     case vmIntrinsics::_putChar   : return append_unsafe_put_obj(callee, T_CHAR,    false);
3090     case vmIntrinsics::_putInt    : return append_unsafe_put_obj(callee, T_INT,     false);
3091     case vmIntrinsics::_putLong   : return append_unsafe_put_obj(callee, T_LONG,    false);
3092     case vmIntrinsics::_putFloat  : return append_unsafe_put_obj(callee, T_FLOAT,   false);
3093     case vmIntrinsics::_putDouble : return append_unsafe_put_obj(callee, T_DOUBLE,  false);
3094 
3095     case vmIntrinsics::_getObjectVolatile : return append_unsafe_get_obj(callee, T_OBJECT,  true);
3096     case vmIntrinsics::_getBooleanVolatile: return append_unsafe_get_obj(callee, T_BOOLEAN, true);
3097     case vmIntrinsics::_getByteVolatile   : return append_unsafe_get_obj(callee, T_BYTE,    true);
3098     case vmIntrinsics::_getShortVolatile  : return append_unsafe_get_obj(callee, T_SHORT,   true);
3099     case vmIntrinsics::_getCharVolatile   : return append_unsafe_get_obj(callee, T_CHAR,    true);
3100     case vmIntrinsics::_getIntVolatile    : return append_unsafe_get_obj(callee, T_INT,     true);
3101     case vmIntrinsics::_getLongVolatile   : return append_unsafe_get_obj(callee, T_LONG,    true);
3102     case vmIntrinsics::_getFloatVolatile  : return append_unsafe_get_obj(callee, T_FLOAT,   true);
3103     case vmIntrinsics::_getDoubleVolatile : return append_unsafe_get_obj(callee, T_DOUBLE,  true);
3104 
3105     case vmIntrinsics::_putObjectVolatile : return append_unsafe_put_obj(callee, T_OBJECT,  true);
3106     case vmIntrinsics::_putBooleanVolatile: return append_unsafe_put_obj(callee, T_BOOLEAN, true);
3107     case vmIntrinsics::_putByteVolatile   : return append_unsafe_put_obj(callee, T_BYTE,    true);
3108     case vmIntrinsics::_putShortVolatile  : return append_unsafe_put_obj(callee, T_SHORT,   true);
3109     case vmIntrinsics::_putCharVolatile   : return append_unsafe_put_obj(callee, T_CHAR,    true);
3110     case vmIntrinsics::_putIntVolatile    : return append_unsafe_put_obj(callee, T_INT,     true);
3111     case vmIntrinsics::_putLongVolatile   : return append_unsafe_put_obj(callee, T_LONG,    true);
3112     case vmIntrinsics::_putFloatVolatile  : return append_unsafe_put_obj(callee, T_FLOAT,   true);
3113     case vmIntrinsics::_putDoubleVolatile : return append_unsafe_put_obj(callee, T_DOUBLE,  true);
3114 
3115     case vmIntrinsics::_getByte_raw   : return append_unsafe_get_raw(callee, T_BYTE);
3116     case vmIntrinsics::_getShort_raw  : return append_unsafe_get_raw(callee, T_SHORT);
3117     case vmIntrinsics::_getChar_raw   : return append_unsafe_get_raw(callee, T_CHAR);
3118     case vmIntrinsics::_getInt_raw    : return append_unsafe_get_raw(callee, T_INT);
3119     case vmIntrinsics::_getLong_raw   : return append_unsafe_get_raw(callee, T_LONG);
3120     case vmIntrinsics::_getFloat_raw  : return append_unsafe_get_raw(callee, T_FLOAT);
3121     case vmIntrinsics::_getDouble_raw : return append_unsafe_get_raw(callee, T_DOUBLE);
3122 
3123     case vmIntrinsics::_putByte_raw   : return append_unsafe_put_raw(callee, T_BYTE);
3124     case vmIntrinsics::_putShort_raw  : return append_unsafe_put_raw(callee, T_SHORT);
3125     case vmIntrinsics::_putChar_raw   : return append_unsafe_put_raw(callee, T_CHAR);
3126     case vmIntrinsics::_putInt_raw    : return append_unsafe_put_raw(callee, T_INT);
3127     case vmIntrinsics::_putLong_raw   : return append_unsafe_put_raw(callee, T_LONG);
3128     case vmIntrinsics::_putFloat_raw  : return append_unsafe_put_raw(callee, T_FLOAT);
3129     case vmIntrinsics::_putDouble_raw : return append_unsafe_put_raw(callee, T_DOUBLE);
3130 
3131     case vmIntrinsics::_prefetchRead        : return append_unsafe_prefetch(callee, false, false);
3132     case vmIntrinsics::_prefetchWrite       : return append_unsafe_prefetch(callee, false, true);
3133     case vmIntrinsics::_prefetchReadStatic  : return append_unsafe_prefetch(callee, true,  false);
3134     case vmIntrinsics::_prefetchWriteStatic : return append_unsafe_prefetch(callee, true,  true);
3135 
3136     case vmIntrinsics::_checkIndex    :
3137       if (!InlineNIOCheckIndex) return false;
3138       preserves_state = true;
3139       break;
3140     case vmIntrinsics::_putOrderedObject : return append_unsafe_put_obj(callee, T_OBJECT,  true);
3141     case vmIntrinsics::_putOrderedInt    : return append_unsafe_put_obj(callee, T_INT,     true);
3142     case vmIntrinsics::_putOrderedLong   : return append_unsafe_put_obj(callee, T_LONG,    true);
3143 
3144     case vmIntrinsics::_compareAndSwapLong:
3145       if (!VM_Version::supports_cx8()) return false;
3146       // fall through
3147     case vmIntrinsics::_compareAndSwapInt:
3148     case vmIntrinsics::_compareAndSwapObject:
3149       append_unsafe_CAS(callee);
3150       return true;
3151 
3152     default                       : return false; // do not inline
3153   }
3154   // create intrinsic node
3155   const bool has_receiver = !callee->is_static();
3156   ValueType* result_type = as_ValueType(callee->return_type());
3157   ValueStack* state_before = copy_state_for_exception();
3158 
3159   Values* args = state()->pop_arguments(callee->arg_size());
3160 
3161   if (is_profiling()) {
3162     // Don't profile in the special case where the root method
3163     // is the intrinsic
3164     if (callee != method()) {
3165       // Note that we'd collect profile data in this method if we wanted it.
3166       compilation()->set_would_profile(true);
3167       if (profile_calls()) {
3168         Value recv = NULL;
3169         if (has_receiver) {
3170           recv = args->at(0);
3171           null_check(recv);
3172         }
3173         profile_call(recv, NULL);
3174       }
3175     }
3176   }
3177 
3178   Intrinsic* result = new Intrinsic(result_type, id, args, has_receiver, state_before,
3179                                     preserves_state, cantrap);
3180   // append instruction & push result
3181   Value value = append_split(result);
3182   if (result_type != voidType) push(result_type, value);
3183 
3184 #ifndef PRODUCT
3185   // printing
3186   if (PrintInlining) {
3187     print_inline_result(callee, true);
3188   }
3189 #endif
3190 
3191   // done
3192   return true;
3193 }
3194 
3195 
3196 bool GraphBuilder::try_inline_jsr(int jsr_dest_bci) {
3197   // Introduce a new callee continuation point - all Ret instructions
3198   // will be replaced with Gotos to this point.
3199   BlockBegin* cont = block_at(next_bci());
3200   assert(cont != NULL, "continuation must exist (BlockListBuilder starts a new block after a jsr)");
3201 
3202   // Note: can not assign state to continuation yet, as we have to
3203   // pick up the state from the Ret instructions.
3204 
3205   // Push callee scope
3206   push_scope_for_jsr(cont, jsr_dest_bci);
3207 
3208   // Temporarily set up bytecode stream so we can append instructions
3209   // (only using the bci of this stream)
3210   scope_data()->set_stream(scope_data()->parent()->stream());
3211 
3212   BlockBegin* jsr_start_block = block_at(jsr_dest_bci);
3213   assert(jsr_start_block != NULL, "jsr start block must exist");
3214   assert(!jsr_start_block->is_set(BlockBegin::was_visited_flag), "should not have visited jsr yet");
3215   Goto* goto_sub = new Goto(jsr_start_block, false);
3216   // Must copy state to avoid wrong sharing when parsing bytecodes
3217   assert(jsr_start_block->state() == NULL, "should have fresh jsr starting block");
3218   jsr_start_block->set_state(copy_state_before_with_bci(jsr_dest_bci));
3219   append(goto_sub);
3220   _block->set_end(goto_sub);
3221   _last = _block = jsr_start_block;
3222 
3223   // Clear out bytecode stream
3224   scope_data()->set_stream(NULL);
3225 
3226   scope_data()->add_to_work_list(jsr_start_block);
3227 
3228   // Ready to resume parsing in subroutine
3229   iterate_all_blocks();
3230 
3231   // If we bailed out during parsing, return immediately (this is bad news)
3232   CHECK_BAILOUT_(false);
3233 
3234   // Detect whether the continuation can actually be reached. If not,
3235   // it has not had state set by the join() operations in
3236   // iterate_bytecodes_for_block()/ret() and we should not touch the
3237   // iteration state. The calling activation of
3238   // iterate_bytecodes_for_block will then complete normally.
3239   if (cont->state() != NULL) {
3240     if (!cont->is_set(BlockBegin::was_visited_flag)) {
3241       // add continuation to work list instead of parsing it immediately
3242       scope_data()->parent()->add_to_work_list(cont);
3243     }
3244   }
3245 
3246   assert(jsr_continuation() == cont, "continuation must not have changed");
3247   assert(!jsr_continuation()->is_set(BlockBegin::was_visited_flag) ||
3248          jsr_continuation()->is_set(BlockBegin::parser_loop_header_flag),
3249          "continuation can only be visited in case of backward branches");
3250   assert(_last && _last->as_BlockEnd(), "block must have end");
3251 
3252   // continuation is in work list, so end iteration of current block
3253   _skip_block = true;
3254   pop_scope_for_jsr();
3255 
3256   return true;
3257 }
3258 
3259 
3260 // Inline the entry of a synchronized method as a monitor enter and
3261 // register the exception handler which releases the monitor if an
3262 // exception is thrown within the callee. Note that the monitor enter
3263 // cannot throw an exception itself, because the receiver is
3264 // guaranteed to be non-null by the explicit null check at the
3265 // beginning of inlining.
3266 void GraphBuilder::inline_sync_entry(Value lock, BlockBegin* sync_handler) {
3267   assert(lock != NULL && sync_handler != NULL, "lock or handler missing");
3268 
3269   monitorenter(lock, SynchronizationEntryBCI);
3270   assert(_last->as_MonitorEnter() != NULL, "monitor enter expected");
3271   _last->set_needs_null_check(false);
3272 
3273   sync_handler->set(BlockBegin::exception_entry_flag);
3274   sync_handler->set(BlockBegin::is_on_work_list_flag);
3275 
3276   ciExceptionHandler* desc = new ciExceptionHandler(method()->holder(), 0, method()->code_size(), -1, 0);
3277   XHandler* h = new XHandler(desc);
3278   h->set_entry_block(sync_handler);
3279   scope_data()->xhandlers()->append(h);
3280   scope_data()->set_has_handler();
3281 }
3282 
3283 
3284 // If an exception is thrown and not handled within an inlined
3285 // synchronized method, the monitor must be released before the
3286 // exception is rethrown in the outer scope. Generate the appropriate
3287 // instructions here.
3288 void GraphBuilder::fill_sync_handler(Value lock, BlockBegin* sync_handler, bool default_handler) {
3289   BlockBegin* orig_block = _block;
3290   ValueStack* orig_state = _state;
3291   Instruction* orig_last = _last;
3292   _last = _block = sync_handler;
3293   _state = sync_handler->state()->copy();
3294 
3295   assert(sync_handler != NULL, "handler missing");
3296   assert(!sync_handler->is_set(BlockBegin::was_visited_flag), "sync handler should not have been visited yet");
3297 
3298   assert(lock != NULL || default_handler, "lock or handler missing");
3299 
3300   XHandler* h = scope_data()->xhandlers()->remove_last();
3301   assert(h->entry_block() == sync_handler, "corrupt list of handlers");
3302 
3303   block()->set(BlockBegin::was_visited_flag);
3304   Value exception = append_with_bci(new ExceptionObject(), SynchronizationEntryBCI);
3305   assert(exception->is_pinned(), "must be");
3306 
3307   int bci = SynchronizationEntryBCI;
3308   if (compilation()->env()->dtrace_method_probes()) {
3309     // Report exit from inline methods.  We don't have a stream here
3310     // so pass an explicit bci of SynchronizationEntryBCI.
3311     Values* args = new Values(1);
3312     args->push(append_with_bci(new Constant(new ObjectConstant(method())), bci));
3313     append_with_bci(new RuntimeCall(voidType, "dtrace_method_exit", CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), args), bci);
3314   }
3315 
3316   if (lock) {
3317     assert(state()->locks_size() > 0 && state()->lock_at(state()->locks_size() - 1) == lock, "lock is missing");
3318     if (!lock->is_linked()) {
3319       lock = append_with_bci(lock, bci);
3320     }
3321 
3322     // exit the monitor in the context of the synchronized method
3323     monitorexit(lock, bci);
3324 
3325     // exit the context of the synchronized method
3326     if (!default_handler) {
3327       pop_scope();
3328       bci = _state->caller_state()->bci();
3329       _state = _state->caller_state()->copy_for_parsing();
3330     }
3331   }
3332 
3333   // perform the throw as if at the call site
3334   apush(exception);
3335   throw_op(bci);
3336 
3337   BlockEnd* end = last()->as_BlockEnd();
3338   block()->set_end(end);
3339 
3340   _block = orig_block;
3341   _state = orig_state;
3342   _last = orig_last;
3343 }
3344 
3345 
3346 bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known) {
3347   assert(!callee->is_native(), "callee must not be native");
3348   if (count_backedges() && callee->has_loops()) {
3349     INLINE_BAILOUT("too complex for tiered");
3350   }
3351   // first perform tests of things it's not possible to inline
3352   if (callee->has_exception_handlers() &&
3353       !InlineMethodsWithExceptionHandlers) INLINE_BAILOUT("callee has exception handlers");
3354   if (callee->is_synchronized() &&
3355       !InlineSynchronizedMethods         ) INLINE_BAILOUT("callee is synchronized");
3356   if (!callee->holder()->is_initialized()) INLINE_BAILOUT("callee's klass not initialized yet");
3357   if (!callee->has_balanced_monitors())    INLINE_BAILOUT("callee's monitors do not match");
3358 
3359   // Proper inlining of methods with jsrs requires a little more work.
3360   if (callee->has_jsrs()                 ) INLINE_BAILOUT("jsrs not handled properly by inliner yet");
3361 
3362   // now perform tests that are based on flag settings
3363   if (inline_level() > MaxInlineLevel                         ) INLINE_BAILOUT("too-deep inlining");
3364   if (recursive_inline_level(callee) > MaxRecursiveInlineLevel) INLINE_BAILOUT("too-deep recursive inlining");
3365   if (callee->code_size() > max_inline_size()                 ) INLINE_BAILOUT("callee is too large");
3366 
3367   // don't inline throwable methods unless the inlining tree is rooted in a throwable class
3368   if (callee->name() == ciSymbol::object_initializer_name() &&
3369       callee->holder()->is_subclass_of(ciEnv::current()->Throwable_klass())) {
3370     // Throwable constructor call
3371     IRScope* top = scope();
3372     while (top->caller() != NULL) {
3373       top = top->caller();
3374     }
3375     if (!top->method()->holder()->is_subclass_of(ciEnv::current()->Throwable_klass())) {
3376       INLINE_BAILOUT("don't inline Throwable constructors");
3377     }
3378   }
3379 
3380   // When SSE2 is used on Intel, no special handling is needed
3381   // for strictfp. Because the enum-constant is fixed at compile time,
3382   // the runtime check of UseSSE is needed here.
3383   if (strict_fp_requires_explicit_rounding && UseSSE < 2 && method()->is_strict() != callee->is_strict()) {
3384     INLINE_BAILOUT("caller and callee have different strict fp requirements");
3385   }
3386 
3387   if (compilation()->env()->num_inlined_bytecodes() > DesiredMethodLimit) {
3388     INLINE_BAILOUT("total inlining greater than DesiredMethodLimit");
3389   }
3390 
3391   if (is_profiling() && !callee->ensure_method_data()) {
3392     INLINE_BAILOUT("mdo allocation failed");
3393   }
3394 #ifndef PRODUCT
3395   // printing
3396   if (PrintInlining) {
3397     print_inline_result(callee, true);
3398   }
3399 #endif
3400 
3401   // NOTE: Bailouts from this point on, which occur at the
3402   // GraphBuilder level, do not cause bailout just of the inlining but
3403   // in fact of the entire compilation.
3404 
3405   BlockBegin* orig_block = block();
3406 
3407   const int args_base = state()->stack_size() - callee->arg_size();
3408   assert(args_base >= 0, "stack underflow during inlining");
3409 
3410   // Insert null check if necessary
3411   Value recv = NULL;
3412   if (code() != Bytecodes::_invokestatic) {
3413     // note: null check must happen even if first instruction of callee does
3414     //       an implicit null check since the callee is in a different scope
3415     //       and we must make sure exception handling does the right thing
3416     assert(!callee->is_static(), "callee must not be static");
3417     assert(callee->arg_size() > 0, "must have at least a receiver");
3418     recv = state()->stack_at(args_base);
3419     null_check(recv);
3420   }
3421 
3422   if (is_profiling()) {
3423     // Note that we'd collect profile data in this method if we wanted it.
3424     // this may be redundant here...
3425     compilation()->set_would_profile(true);
3426 
3427     if (profile_calls()) {
3428       profile_call(recv, holder_known ? callee->holder() : NULL);
3429     }
3430     if (profile_inlined_calls()) {
3431       profile_invocation(callee, copy_state_before());
3432     }
3433   }
3434 
3435   // Introduce a new callee continuation point - if the callee has
3436   // more than one return instruction or the return does not allow
3437   // fall-through of control flow, all return instructions of the
3438   // callee will need to be replaced by Goto's pointing to this
3439   // continuation point.
3440   BlockBegin* cont = block_at(next_bci());
3441   bool continuation_existed = true;
3442   if (cont == NULL) {
3443     cont = new BlockBegin(next_bci());
3444     // low number so that continuation gets parsed as early as possible
3445     cont->set_depth_first_number(0);
3446 #ifndef PRODUCT
3447     if (PrintInitialBlockList) {
3448       tty->print_cr("CFG: created block %d (bci %d) as continuation for inline at bci %d",
3449                     cont->block_id(), cont->bci(), bci());
3450     }
3451 #endif
3452     continuation_existed = false;
3453   }
3454   // Record number of predecessors of continuation block before
3455   // inlining, to detect if inlined method has edges to its
3456   // continuation after inlining.
3457   int continuation_preds = cont->number_of_preds();
3458 
3459   // Push callee scope
3460   push_scope(callee, cont);
3461 
3462   // the BlockListBuilder for the callee could have bailed out
3463   CHECK_BAILOUT_(false);
3464 
3465   // Temporarily set up bytecode stream so we can append instructions
3466   // (only using the bci of this stream)
3467   scope_data()->set_stream(scope_data()->parent()->stream());
3468 
3469   // Pass parameters into callee state: add assignments
3470   // note: this will also ensure that all arguments are computed before being passed
3471   ValueStack* callee_state = state();
3472   ValueStack* caller_state = state()->caller_state();
3473   { int i = args_base;
3474     while (i < caller_state->stack_size()) {
3475       const int par_no = i - args_base;
3476       Value  arg = caller_state->stack_at_inc(i);
3477       // NOTE: take base() of arg->type() to avoid problems storing
3478       // constants
3479       store_local(callee_state, arg, arg->type()->base(), par_no);
3480     }
3481   }
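       // (For example, a hypothetical static callee taking two one-slot arguments
       //  sees the values at caller stack slots args_base and args_base + 1
       //  stored into its locals 0 and 1, respectively.)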
3482 
3483   // Remove args from stack.
3484   // Note that we preserve locals state in case we can use it later
3485   // (see use of pop_scope() below)
3486   caller_state->truncate_stack(args_base);
3487   assert(callee_state->stack_size() == 0, "callee stack must be empty");
3488 
3489   Value lock;
3490   BlockBegin* sync_handler;
3491 
3492   // Inline the locking of the receiver if the callee is synchronized
3493   if (callee->is_synchronized()) {
3494     lock = callee->is_static() ? append(new Constant(new InstanceConstant(callee->holder()->java_mirror())))
3495                                : state()->local_at(0);
3496     sync_handler = new BlockBegin(SynchronizationEntryBCI);
3497     inline_sync_entry(lock, sync_handler);
3498   }
3499 
3500   if (compilation()->env()->dtrace_method_probes()) {
3501     Values* args = new Values(1);
3502     args->push(append(new Constant(new ObjectConstant(method()))));
3503     append(new RuntimeCall(voidType, "dtrace_method_entry", CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), args));
3504   }
3505 
3506   BlockBegin* callee_start_block = block_at(0);
3507   if (callee_start_block != NULL) {
3508     assert(callee_start_block->is_set(BlockBegin::parser_loop_header_flag), "must be loop header");
3509     Goto* goto_callee = new Goto(callee_start_block, false);
3510     // The state for this goto is in the scope of the callee, so use
3511     // the entry bci for the callee instead of the call site bci.
3512     append_with_bci(goto_callee, 0);
3513     _block->set_end(goto_callee);
3514     callee_start_block->merge(callee_state);
3515 
3516     _last = _block = callee_start_block;
3517 
3518     scope_data()->add_to_work_list(callee_start_block);
3519   }
3520 
3521   // Clear out bytecode stream
3522   scope_data()->set_stream(NULL);
3523 
3524   // Ready to resume parsing in callee (either in the same block we
3525   // were in before or in the callee's start block)
3526   iterate_all_blocks(callee_start_block == NULL);
3527 
3528   // If we bailed out during parsing, return immediately (this is bad news)
3529   if (bailed_out()) return false;
3530 
3531   // iterate_all_blocks theoretically traverses in random order; in
3532   // practice, we have only traversed the continuation if we are
3533   // inlining into a subroutine
3534   assert(continuation_existed ||
3535          !continuation()->is_set(BlockBegin::was_visited_flag),
3536          "continuation should not have been parsed yet if we created it");
3537 
3538   // If we bailed out during parsing, return immediately (this is bad news)
3539   CHECK_BAILOUT_(false);
3540 
3541   // At this point we are almost ready to return and resume parsing of
3542   // the caller back in the GraphBuilder. The only thing we want to do
3543   // first is an optimization: during parsing of the callee we
3544   // generated at least one Goto to the continuation block. If we
3545   // generated exactly one, and if the inlined method spanned exactly
3546   // one block (and we didn't have to Goto its entry), then we snip
3547   // off the Goto to the continuation, allowing control to fall
3548   // through back into the caller block and effectively performing
3549   // block merging. This allows load elimination and CSE to take place
3550   // across multiple callee scopes if they are relatively simple, and
3551   // is currently essential to making inlining profitable.
3552   if (   num_returns() == 1
3553       && block() == orig_block
3554       && block() == inline_cleanup_block()) {
3555     _last = inline_cleanup_return_prev();
3556     _state = inline_cleanup_state();
3557   } else if (continuation_preds == cont->number_of_preds()) {
3558     // Inlining caused the instructions after the invoke in the
3559     // caller to become unreachable, so skip filling this block
3560     // with instructions!
3561     assert (cont == continuation(), "");
3562     assert(_last && _last->as_BlockEnd(), "");
3563     _skip_block = true;
3564   } else {
3565     // Resume parsing in continuation block unless it was already parsed.
3566     // Note that if we don't change _last here, iteration in
3567     // iterate_bytecodes_for_block will stop when we return.
3568     if (!continuation()->is_set(BlockBegin::was_visited_flag)) {
3569       // add continuation to work list instead of parsing it immediately
3570       assert(_last && _last->as_BlockEnd(), "");
3571       scope_data()->parent()->add_to_work_list(continuation());
3572       _skip_block = true;
3573     }
3574   }
3575 
3576   // Fill the exception handler for synchronized methods with instructions
3577   if (callee->is_synchronized() && sync_handler->state() != NULL) {
3578     fill_sync_handler(lock, sync_handler);
3579   } else {
3580     pop_scope();
3581   }
3582 
3583   compilation()->notice_inlined_method(callee);
3584 
3585   return true;
3586 }
3587 
3588 
3589 void GraphBuilder::inline_bailout(const char* msg) {
3590   assert(msg != NULL, "inline bailout msg must exist");
3591   _inline_bailout_msg = msg;
3592 }
3593 
3594 
3595 void GraphBuilder::clear_inline_bailout() {
3596   _inline_bailout_msg = NULL;
3597 }
3598 
3599 
3600 void GraphBuilder::push_root_scope(IRScope* scope, BlockList* bci2block, BlockBegin* start) {
3601   ScopeData* data = new ScopeData(NULL);
3602   data->set_scope(scope);
3603   data->set_bci2block(bci2block);
3604   _scope_data = data;
3605   _block = start;
3606 }
3607 
3608 
3609 void GraphBuilder::push_scope(ciMethod* callee, BlockBegin* continuation) {
3610   IRScope* callee_scope = new IRScope(compilation(), scope(), bci(), callee, -1, false);
3611   scope()->add_callee(callee_scope);
3612 
3613   BlockListBuilder blb(compilation(), callee_scope, -1);
3614   CHECK_BAILOUT();
3615 
3616   if (!blb.bci2block()->at(0)->is_set(BlockBegin::parser_loop_header_flag)) {
3617     // this scope can be inlined directly into the caller so remove
3618     // the block at bci 0.
3619     blb.bci2block()->at_put(0, NULL);
3620   }
3621 
3622   set_state(new ValueStack(callee_scope, state()->copy(ValueStack::CallerState, bci())));
3623 
3624   ScopeData* data = new ScopeData(scope_data());
3625   data->set_scope(callee_scope);
3626   data->set_bci2block(blb.bci2block());
3627   data->set_continuation(continuation);
3628   _scope_data = data;
3629 }
3630 
3631 
3632 void GraphBuilder::push_scope_for_jsr(BlockBegin* jsr_continuation, int jsr_dest_bci) {
3633   ScopeData* data = new ScopeData(scope_data());
3634   data->set_parsing_jsr();
3635   data->set_jsr_entry_bci(jsr_dest_bci);
3636   data->set_jsr_return_address_local(-1);
3637   // Must clone bci2block list as we will be mutating it in order to
3638   // properly clone all blocks in jsr region as well as exception
3639   // handlers containing rets
3640   BlockList* new_bci2block = new BlockList(bci2block()->length());
3641   new_bci2block->push_all(bci2block());
3642   data->set_bci2block(new_bci2block);
3643   data->set_scope(scope());
3644   data->setup_jsr_xhandlers();
3645   data->set_continuation(continuation());
3646   data->set_jsr_continuation(jsr_continuation);
3647   _scope_data = data;
3648 }
3649 
3650 
3651 void GraphBuilder::pop_scope() {
3652   int number_of_locks = scope()->number_of_locks();
3653   _scope_data = scope_data()->parent();
3654   // accumulate minimum number of monitor slots to be reserved
3655   scope()->set_min_number_of_locks(number_of_locks);
3656 }
3657 
3658 
3659 void GraphBuilder::pop_scope_for_jsr() {
3660   _scope_data = scope_data()->parent();
3661 }
3662 
3663 bool GraphBuilder::append_unsafe_get_obj(ciMethod* callee, BasicType t, bool is_volatile) {
3664   if (InlineUnsafeOps) {
3665     Values* args = state()->pop_arguments(callee->arg_size());
3666     null_check(args->at(0));
3667     Instruction* offset = args->at(2);
3668 #ifndef _LP64
3669     offset = append(new Convert(Bytecodes::_l2i, offset, as_ValueType(T_INT)));
3670 #endif
3671     Instruction* op = append(new UnsafeGetObject(t, args->at(1), offset, is_volatile));
3672     push(op->type(), op);
3673     compilation()->set_has_unsafe_access(true);
3674   }
3675   return InlineUnsafeOps;
3676 }
3677 
3678 
3679 bool GraphBuilder::append_unsafe_put_obj(ciMethod* callee, BasicType t, bool is_volatile) {
3680   if (InlineUnsafeOps) {
3681     Values* args = state()->pop_arguments(callee->arg_size());
3682     null_check(args->at(0));
3683     Instruction* offset = args->at(2);
3684 #ifndef _LP64
3685     offset = append(new Convert(Bytecodes::_l2i, offset, as_ValueType(T_INT)));
3686 #endif
3687     Instruction* op = append(new UnsafePutObject(t, args->at(1), offset, args->at(3), is_volatile));
3688     compilation()->set_has_unsafe_access(true);
3689     kill_all();
3690   }
3691   return InlineUnsafeOps;
3692 }
3693 
3694 
3695 bool GraphBuilder::append_unsafe_get_raw(ciMethod* callee, BasicType t) {
3696   if (InlineUnsafeOps) {
3697     Values* args = state()->pop_arguments(callee->arg_size());
3698     null_check(args->at(0));
3699     Instruction* op = append(new UnsafeGetRaw(t, args->at(1), false));
3700     push(op->type(), op);
3701     compilation()->set_has_unsafe_access(true);
3702   }
3703   return InlineUnsafeOps;
3704 }
3705 
3706 
3707 bool GraphBuilder::append_unsafe_put_raw(ciMethod* callee, BasicType t) {
3708   if (InlineUnsafeOps) {
3709     Values* args = state()->pop_arguments(callee->arg_size());
3710     null_check(args->at(0));
3711     Instruction* op = append(new UnsafePutRaw(t, args->at(1), args->at(2)));
3712     compilation()->set_has_unsafe_access(true);
3713   }
3714   return InlineUnsafeOps;
3715 }
3716 
3717 
3718 bool GraphBuilder::append_unsafe_prefetch(ciMethod* callee, bool is_static, bool is_store) {
3719   if (InlineUnsafeOps) {
3720     Values* args = state()->pop_arguments(callee->arg_size());
3721     int obj_arg_index = 1; // Assume non-static case
3722     if (is_static) {
3723       obj_arg_index = 0;
3724     } else {
3725       null_check(args->at(0));
3726     }
3727     Instruction* offset = args->at(obj_arg_index + 1);
3728 #ifndef _LP64
3729     offset = append(new Convert(Bytecodes::_l2i, offset, as_ValueType(T_INT)));
3730 #endif
3731     Instruction* op = is_store ? append(new UnsafePrefetchWrite(args->at(obj_arg_index), offset))
3732                                : append(new UnsafePrefetchRead (args->at(obj_arg_index), offset));
3733     compilation()->set_has_unsafe_access(true);
3734   }
3735   return InlineUnsafeOps;
3736 }
3737 
3738 
3739 void GraphBuilder::append_unsafe_CAS(ciMethod* callee) {
3740   ValueStack* state_before = copy_state_for_exception();
3741   ValueType* result_type = as_ValueType(callee->return_type());
3742   assert(result_type->is_int(), "int result");
3743   Values* args = state()->pop_arguments(callee->arg_size());
3744 
3745   // Pop off some args to specially handle, then push them back
3746   Value newval = args->pop();
3747   Value cmpval = args->pop();
3748   Value offset = args->pop();
3749   Value src = args->pop();
3750   Value unsafe_obj = args->pop();
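       // (For the hypothetical call unsafe.compareAndSwapInt(src, offset, cmpval, newval),
       //  unsafe_obj is the Unsafe receiver itself; it is only null checked below and
       //  not passed on to the Intrinsic node.)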
3751 
3752   // Separately handle the unsafe arg. It is not needed for code
3753   // generation, but must be null checked
3754   null_check(unsafe_obj);
3755 
3756 #ifndef _LP64
3757   offset = append(new Convert(Bytecodes::_l2i, offset, as_ValueType(T_INT)));
3758 #endif
3759 
3760   args->push(src);
3761   args->push(offset);
3762   args->push(cmpval);
3763   args->push(newval);
3764 
3765   // An unsafe CAS can alias with other field accesses, but we don't
3766   // know which ones, so mark the state as not preserved.  This will
3767   // cause CSE to invalidate memory across it.
3768   bool preserves_state = false;
3769   Intrinsic* result = new Intrinsic(result_type, callee->intrinsic_id(), args, false, state_before, preserves_state);
3770   append_split(result);
3771   push(result_type, result);
3772   compilation()->set_has_unsafe_access(true);
3773 }
3774 
3775 
3776 #ifndef PRODUCT
3777 void GraphBuilder::print_inline_result(ciMethod* callee, bool res) {
3778   const char sync_char      = callee->is_synchronized()        ? 's' : ' ';
3779   const char exception_char = callee->has_exception_handlers() ? '!' : ' ';
3780   const char monitors_char  = callee->has_monitor_bytecodes()  ? 'm' : ' ';
3781   tty->print("     %c%c%c ", sync_char, exception_char, monitors_char);
3782   for (int i = 0; i < scope()->level(); i++) tty->print("  ");
3783   if (res) {
3784     tty->print("  ");
3785   } else {
3786     tty->print("- ");
3787   }
3788   tty->print("@ %d  ", bci());
3789   callee->print_short_name();
3790   tty->print(" (%d bytes)", callee->code_size());
3791   if (_inline_bailout_msg) {
3792     tty->print("  %s", _inline_bailout_msg);
3793   }
3794   tty->cr();
3795 
3796   if (res && CIPrintMethodCodes) {
3797     callee->print_codes();
3798   }
3799 }
3800 
3801 
3802 void GraphBuilder::print_stats() {
3803   vmap()->print();
3804 }
3805 #endif // PRODUCT
3806 
3807 void GraphBuilder::profile_call(Value recv, ciKlass* known_holder) {
3808   append(new ProfileCall(method(), bci(), recv, known_holder));
3809 }
3810 
3811 void GraphBuilder::profile_invocation(ciMethod* callee, ValueStack* state) {
3812   append(new ProfileInvoke(callee, state));
3813 }