1 /*
   2  * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "ci/ciMethodData.hpp"
  27 #include "classfile/systemDictionary.hpp"
  28 #include "classfile/vmSymbols.hpp"
  29 #include "compiler/compileLog.hpp"
  30 #include "interpreter/linkResolver.hpp"
  31 #include "memory/universe.inline.hpp"
  32 #include "oops/oop.inline.hpp"
  33 #include "opto/addnode.hpp"
  34 #include "opto/castnode.hpp"
  35 #include "opto/convertnode.hpp"
  36 #include "opto/divnode.hpp"
  37 #include "opto/idealGraphPrinter.hpp"
  38 #include "opto/matcher.hpp"
  39 #include "opto/memnode.hpp"
  40 #include "opto/mulnode.hpp"
  41 #include "opto/opaquenode.hpp"
  42 #include "opto/parse.hpp"
  43 #include "opto/runtime.hpp"
  44 #include "runtime/deoptimization.hpp"
  45 #include "runtime/sharedRuntime.hpp"
  46 
  47 extern int explicit_null_checks_inserted,
  48            explicit_null_checks_elided;
  49 
  50 //---------------------------------array_load----------------------------------
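     // array_addressing only peeks at the array and index, so a deopt from its
     // null or range check still sees the original expression stack; the two
     // operands are popped (dec_sp) only after the checks have succeeded.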
  51 void Parse::array_load(BasicType elem_type) {
  52   const Type* elem = Type::TOP;
  53   Node* adr = array_addressing(elem_type, 0, &elem);
  54   if (stopped())  return;     // guaranteed null or range check
  55   dec_sp(2);                  // Pop array and index
  56   const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(elem_type);
  57   Node* ld = make_load(control(), adr, elem, elem_type, adr_type, MemNode::unordered);
  58   push(ld);
  59 }
  60 
  61 
  62 //--------------------------------array_store----------------------------------
  63 void Parse::array_store(BasicType elem_type) {
  64   Node* adr = array_addressing(elem_type, 1);
  65   if (stopped())  return;     // guaranteed null or range check
  66   Node* val = pop();
  67   dec_sp(2);                  // Pop array and index
  68   const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(elem_type);
  69   store_to_memory(control(), adr, val, elem_type, adr_type, StoreNode::release_if_reference(elem_type));
  70 }
  71 
  72 
  73 //------------------------------array_addressing-------------------------------
  74 // Pull array and index from the stack.  Compute pointer-to-element.
  75 Node* Parse::array_addressing(BasicType type, int vals, const Type* *result2) {
  76   Node *idx   = peek(0+vals);   // Get from stack without popping
  77   Node *ary   = peek(1+vals);   // in case of exception
  78 
  79   // Null check the array base, with correct stack contents
  80   ary = null_check(ary, T_ARRAY);
  81   // Did we detect a guaranteed null exception at compile time?
  82   if (stopped())  return top();
  83 
  84   const TypeAryPtr* arytype  = _gvn.type(ary)->is_aryptr();
  85   const TypeInt*    sizetype = arytype->size();
  86   const Type*       elemtype = arytype->elem();
  87 
  88   if (UseUniqueSubclasses && result2 != NULL) {
  89     const Type* el = elemtype->make_ptr();
  90     if (el && el->isa_instptr()) {
  91       const TypeInstPtr* toop = el->is_instptr();
  92       if (toop->klass()->as_instance_klass()->unique_concrete_subklass()) {
  93         // If we load from "AbstractClass[]" we must see "ConcreteSubClass".
  94         const Type* subklass = Type::get_const_type(toop->klass());
  95         elemtype = subklass->join_speculative(el);
  96       }
  97     }
  98   }
  99 
 100   // Check for big class initializers with all constant offsets
 101   // feeding into a known-size array.
 102   const TypeInt* idxtype = _gvn.type(idx)->is_int();
 103   // See if the highest idx value is less than the lowest array bound,
 104   // and if the idx value cannot be negative:
 105   bool need_range_check = true;
 106   if (idxtype->_hi < sizetype->_lo && idxtype->_lo >= 0) {
 107     need_range_check = false;
 108     if (C->log() != NULL)   C->log()->elem("observe that='!need_range_check'");
 109   }
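       // Example: if idx is known to lie in [0,7] and the array length is known
       // to be at least 8 (sizetype->_lo == 8), both conditions above hold and
       // the range check is elided.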
 110 
 111   ciKlass * arytype_klass = arytype->klass();
 112   if ((arytype_klass != NULL) && (!arytype_klass->is_loaded())) {
 113     // Only fails for some -Xcomp runs
 114     // The class is unloaded.  We have to run this bytecode in the interpreter.
 115     uncommon_trap(Deoptimization::Reason_unloaded,
 116                   Deoptimization::Action_reinterpret,
 117                   arytype->klass(), "!loaded array");
 118     return top();
 119   }
 120 
 121   // Do the range check
 122   if (GenerateRangeChecks && need_range_check) {
 123     Node* tst;
 124     if (sizetype->_hi <= 0) {
 125       // The greatest array bound is negative, so we can conclude that we're
 126       // compiling unreachable code, but the unsigned compare trick used below
 127       // only works with non-negative lengths.  Instead, hack "tst" to be zero so
 128       // the uncommon_trap path will always be taken.
 129       tst = _gvn.intcon(0);
 130     } else {
 131       // Range is constant in array-oop, so we can use the original state of mem
 132       Node* len = load_array_length(ary);
 133 
 134       // Test length vs index (standard trick using unsigned compare)
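           // A single unsigned compare covers both failure modes: a negative idx
           // reinterpreted as unsigned becomes a huge value, so (juint)idx <
           // (juint)len is false both when idx < 0 and when idx >= len.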
 135       Node* chk = _gvn.transform( new CmpUNode(idx, len) );
 136       BoolTest::mask btest = BoolTest::lt;
 137       tst = _gvn.transform( new BoolNode(chk, btest) );
 138     }
 139     // Branch to failure if out of bounds
 140     { BuildCutout unless(this, tst, PROB_MAX);
 141       if (C->allow_range_check_smearing()) {
 142         // Do not use builtin_throw, since range checks are sometimes
 143         // made more stringent by an optimistic transformation.
 144         // This creates "tentative" range checks at this point,
 145         // which are not guaranteed to throw exceptions.
 146         // See IfNode::Ideal, is_range_check, adjust_check.
 147         uncommon_trap(Deoptimization::Reason_range_check,
 148                       Deoptimization::Action_make_not_entrant,
 149                       NULL, "range_check");
 150       } else {
 151         // If we have already recompiled with the range-check-widening
 152         // heroic optimization turned off, then we must really be throwing
 153         // range check exceptions.
 154         builtin_throw(Deoptimization::Reason_range_check, idx);
 155       }
 156     }
 157   }
 158   // Check whether we statically know we are always throwing a range-check exception
 159   if (stopped())  return top();
 160 
 161   Node* ptr = array_element_address(ary, idx, type, sizetype);
 162 
 163   if (result2 != NULL)  *result2 = elemtype;
 164 
 165   assert(ptr != top(), "top should go hand-in-hand with stopped");
 166 
 167   return ptr;
 168 }
 169 
 170 
 171 // returns IfNode
 172 IfNode* Parse::jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask) {
 173   Node   *cmp = _gvn.transform( new CmpINode( a, b)); // two cases: shiftcount > 32 and shiftcount <= 32
 174   Node   *tst = _gvn.transform( new BoolNode( cmp, mask));
 175   IfNode *iff = create_and_map_if( control(), tst, ((mask == BoolTest::eq) ? PROB_STATIC_INFREQUENT : PROB_FAIR), COUNT_UNKNOWN );
 176   return iff;
 177 }
 178 
 179 // return Region node
 180 Node* Parse::jump_if_join(Node* iffalse, Node* iftrue) {
 181   Node *region  = new RegionNode(3); // 2 results
 182   record_for_igvn(region);
 183   region->init_req(1, iffalse);
 184   region->init_req(2, iftrue );
 185   _gvn.set_type(region, Type::CONTROL);
 186   region = _gvn.transform(region);
 187   set_control (region);
 188   return region;
 189 }
 190 
 191 
 192 //------------------------------helper for tableswitch-------------------------
 193 void Parse::jump_if_true_fork(IfNode *iff, int dest_bci_if_true, int prof_table_index) {
 194   // True branch, use existing map info
 195   { PreserveJVMState pjvms(this);
 196     Node *iftrue  = _gvn.transform( new IfTrueNode (iff) );
 197     set_control( iftrue );
 198     profile_switch_case(prof_table_index);
 199     merge_new_path(dest_bci_if_true);
 200   }
 201 
 202   // False branch
 203   Node *iffalse = _gvn.transform( new IfFalseNode(iff) );
 204   set_control( iffalse );
 205 }
 206 
 207 void Parse::jump_if_false_fork(IfNode *iff, int dest_bci_if_true, int prof_table_index) {
 208   // False branch (jump to dest), use existing map info
 209   { PreserveJVMState pjvms(this);
 210     Node *iffalse  = _gvn.transform( new IfFalseNode (iff) );
 211     set_control( iffalse );
 212     profile_switch_case(prof_table_index);
 213     merge_new_path(dest_bci_if_true);
 214   }
 215 
 216   // True branch (fall through)
 217   Node *iftrue = _gvn.transform( new IfTrueNode(iff) );
 218   set_control( iftrue );
 219 }
 220 
 221 void Parse::jump_if_always_fork(int dest_bci, int prof_table_index) {
 222   // Always taken; use existing map and control()
 223   profile_switch_case(prof_table_index);
 224   merge_new_path(dest_bci);
 225 }
 226 
 227 
 228 extern "C" {
 229   static int jint_cmp(const void *i, const void *j) {
 230     int a = *(jint *)i;
 231     int b = *(jint *)j;
 232     return a > b ? 1 : a < b ? -1 : 0;
 233   }
 234 }
 235 
 236 
 237 // Default value for methodData switch indexing. Must be a negative value to avoid
 238 // conflict with any legal switch index.
 239 #define NullTableIndex -1
 240 
 241 class SwitchRange : public StackObj {
 242   // a range of integers coupled with a bci destination
 243   jint _lo;                     // inclusive lower limit
 244   jint _hi;                     // inclusive upper limit
 245   int _dest;
 246   int _table_index;             // index into method data table
 247 
 248 public:
 249   jint lo() const              { return _lo;   }
 250   jint hi() const              { return _hi;   }
 251   int  dest() const            { return _dest; }
 252   int  table_index() const     { return _table_index; }
 253   bool is_singleton() const    { return _lo == _hi; }
 254 
 255   void setRange(jint lo, jint hi, int dest, int table_index) {
 256     assert(lo <= hi, "must be a non-empty range");
 257     _lo = lo; _hi = hi; _dest = dest; _table_index = table_index;
 258   }
 259   bool adjoinRange(jint lo, jint hi, int dest, int table_index) {
 260     assert(lo <= hi, "must be a non-empty range");
 261     if (lo == _hi+1 && dest == _dest && table_index == _table_index) {
 262       _hi = hi;
 263       return true;
 264     }
 265     return false;
 266   }
 267 
 268   void set (jint value, int dest, int table_index) {
 269     setRange(value, value, dest, table_index);
 270   }
 271   bool adjoin(jint value, int dest, int table_index) {
 272     return adjoinRange(value, value, dest, table_index);
 273   }
 274 
 275   void print() {
 276     if (is_singleton())
 277       tty->print(" {%d}=>%d", lo(), dest());
 278     else if (lo() == min_jint)
 279       tty->print(" {..%d}=>%d", hi(), dest());
 280     else if (hi() == max_jint)
 281       tty->print(" {%d..}=>%d", lo(), dest());
 282     else
 283       tty->print(" {%d..%d}=>%d", lo(), hi(), dest());
 284   }
 285 };
 286 
 287 
 288 //-------------------------------do_tableswitch--------------------------------
 289 void Parse::do_tableswitch() {
 290   Node* lookup = pop();
 291 
 292   // Get information about tableswitch
 293   int default_dest = iter().get_dest_table(0);
 294   int lo_index     = iter().get_int_table(1);
 295   int hi_index     = iter().get_int_table(2);
 296   int len          = hi_index - lo_index + 1;
 297 
 298   if (len < 1) {
 299     // If this is a backward branch, add safepoint
 300     maybe_add_safepoint(default_dest);
 301     merge(default_dest);
 302     return;
 303   }
 304 
 305   // generate decision tree, using trichotomy when possible
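       // Example (no profiling): "tableswitch 0..2 -> L1, 3 -> L2, default -> L0"
       // becomes the ranges {min_jint..-1}=>L0, {0..2}=>L1, {3}=>L2,
       // {4..max_jint}=>L0, which together cover every jint exactly once.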
 306   int rnum = len+2;
 307   bool makes_backward_branch = false;
 308   SwitchRange* ranges = NEW_RESOURCE_ARRAY(SwitchRange, rnum);
 309   int rp = -1;
 310   if (lo_index != min_jint) {
 311     ranges[++rp].setRange(min_jint, lo_index-1, default_dest, NullTableIndex);
 312   }
 313   for (int j = 0; j < len; j++) {
 314     jint match_int = lo_index+j;
 315     int  dest      = iter().get_dest_table(j+3);
 316     makes_backward_branch |= (dest <= bci());
 317     int  table_index = method_data_update() ? j : NullTableIndex;
 318     if (rp < 0 || !ranges[rp].adjoin(match_int, dest, table_index)) {
 319       ranges[++rp].set(match_int, dest, table_index);
 320     }
 321   }
 322   jint highest = lo_index+(len-1);
 323   assert(ranges[rp].hi() == highest, "");
 324   if (highest != max_jint
 325       && !ranges[rp].adjoinRange(highest+1, max_jint, default_dest, NullTableIndex)) {
 326     ranges[++rp].setRange(highest+1, max_jint, default_dest, NullTableIndex);
 327   }
 328   assert(rp < len+2, "not too many ranges");
 329 
 330   // Safepoint in case a backward branch was observed
 331   if( makes_backward_branch && UseLoopSafepoints )
 332     add_safepoint();
 333 
 334   jump_switch_ranges(lookup, &ranges[0], &ranges[rp]);
 335 }
 336 
 337 
 338 //------------------------------do_lookupswitch--------------------------------
 339 void Parse::do_lookupswitch() {
 340   Node *lookup = pop();         // lookup value
 341   // Get information about lookupswitch
 342   int default_dest = iter().get_dest_table(0);
 343   int len          = iter().get_int_table(1);
 344 
 345   if (len < 1) {    // If this is a backward branch, add safepoint
 346     maybe_add_safepoint(default_dest);
 347     merge(default_dest);
 348     return;
 349   }
 350 
 351   // generate decision tree, using trichotomy when possible
 352   jint* table = NEW_RESOURCE_ARRAY(jint, len*2);
 353   {
 354     for( int j = 0; j < len; j++ ) {
 355       table[j+j+0] = iter().get_int_table(2+j+j);
 356       table[j+j+1] = iter().get_dest_table(2+j+j+1);
 357     }
 358     qsort( table, len, 2*sizeof(table[0]), jint_cmp );
 359   }
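       // 'table' now holds (match_value, dest_bci) pairs sorted by match_value:
       // qsort treats each pair as one 2*sizeof(jint) element and jint_cmp
       // compares only the leading match_value of each pair.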
 360 
 361   int rnum = len*2+1;
 362   bool makes_backward_branch = false;
 363   SwitchRange* ranges = NEW_RESOURCE_ARRAY(SwitchRange, rnum);
 364   int rp = -1;
 365   for( int j = 0; j < len; j++ ) {
 366     jint match_int   = table[j+j+0];
 367     int  dest        = table[j+j+1];
 368     int  next_lo     = rp < 0 ? min_jint : ranges[rp].hi()+1;
 369     int  table_index = method_data_update() ? j : NullTableIndex;
 370     makes_backward_branch |= (dest <= bci());
 371     if( match_int != next_lo ) {
 372       ranges[++rp].setRange(next_lo, match_int-1, default_dest, NullTableIndex);
 373     }
 374     if( rp < 0 || !ranges[rp].adjoin(match_int, dest, table_index) ) {
 375       ranges[++rp].set(match_int, dest, table_index);
 376     }
 377   }
 378   jint highest = table[2*(len-1)];
 379   assert(ranges[rp].hi() == highest, "");
 380   if( highest != max_jint
 381       && !ranges[rp].adjoinRange(highest+1, max_jint, default_dest, NullTableIndex) ) {
 382     ranges[++rp].setRange(highest+1, max_jint, default_dest, NullTableIndex);
 383   }
 384   assert(rp < rnum, "not too many ranges");
 385 
 386   // Safepoint in case a backward branch was observed
 387   if( makes_backward_branch && UseLoopSafepoints )
 388     add_safepoint();
 389 
 390   jump_switch_ranges(lookup, &ranges[0], &ranges[rp]);
 391 }
 392 
 393 //----------------------------create_jump_tables-------------------------------
 394 bool Parse::create_jump_tables(Node* key_val, SwitchRange* lo, SwitchRange* hi) {
 395   // Are jumptables enabled
 396   if (!UseJumpTables)  return false;
 397 
 398   // Are jumptables supported
 399   if (!Matcher::has_match_rule(Op_Jump))  return false;
 400 
 401   // Don't make jump table if profiling
 402   if (method_data_update())  return false;
 403 
 404   // Decide if a guard is needed to lop off big ranges at either (or
 405   // both) end(s) of the input set. We'll call this the default target
 406   // even though we can't be sure that it is the true "default".
 407 
 408   bool needs_guard = false;
 409   int default_dest;
 410   int64_t total_outlier_size = 0;
 411   int64_t hi_size = ((int64_t)hi->hi()) - ((int64_t)hi->lo()) + 1;
 412   int64_t lo_size = ((int64_t)lo->hi()) - ((int64_t)lo->lo()) + 1;
 413 
 414   if (lo->dest() == hi->dest()) {
 415     total_outlier_size = hi_size + lo_size;
 416     default_dest = lo->dest();
 417   } else if (lo_size > hi_size) {
 418     total_outlier_size = lo_size;
 419     default_dest = lo->dest();
 420   } else {
 421     total_outlier_size = hi_size;
 422     default_dest = hi->dest();
 423   }
 424 
 425   // If a guard test will eliminate very sparse end ranges, then
 426   // it is worth the cost of an extra jump.
 427   if (total_outlier_size > (MaxJumpTableSparseness * 4)) {
 428     needs_guard = true;
 429     if (default_dest == lo->dest()) lo++;
 430     if (default_dest == hi->dest()) hi--;
 431   }
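       // Example: with end ranges {min_jint..-1}=>default and {10..max_jint}=>default
       // around cases 0..9, both huge outlier ranges are trimmed here, and the
       // single unsigned guard emitted below routes any key outside 0..9 to the
       // default target.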
 432 
 433   // Find the total number of cases and ranges
 434   int64_t num_cases = ((int64_t)hi->hi()) - ((int64_t)lo->lo()) + 1;
 435   int num_range = hi - lo + 1;
 436 
 437   // Don't create table if: too large, too small, or too sparse.
 438   if (num_cases < MinJumpTableSize || num_cases > MaxJumpTableSize)
 439     return false;
 440   if (num_cases > (MaxJumpTableSparseness * num_range))
 441     return false;
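       // Example: 10 ranges spanning keys 0..99 give num_cases == 100 and
       // num_range == 10; the table is built only if 100 lies within
       // [MinJumpTableSize, MaxJumpTableSize] and 100 <= MaxJumpTableSparseness * 10.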
 442 
 443   // Normalize table lookups to zero
 444   int lowval = lo->lo();
 445   key_val = _gvn.transform( new SubINode(key_val, _gvn.intcon(lowval)) );
 446 
 447   // Generate a guard to protect against input keyvals that aren't
 448   // in the switch domain.
 449   if (needs_guard) {
 450     Node*   size = _gvn.intcon(num_cases);
 451     Node*   cmp = _gvn.transform( new CmpUNode(key_val, size) );
 452     Node*   tst = _gvn.transform( new BoolNode(cmp, BoolTest::ge) );
 453     IfNode* iff = create_and_map_if( control(), tst, PROB_FAIR, COUNT_UNKNOWN);
 454     jump_if_true_fork(iff, default_dest, NullTableIndex);
 455   }
 456 
 457   // Create an ideal node JumpTable that has projections
 458   // of all possible ranges for a switch statement
 459   // The key_val input must be converted to a pointer offset and scaled.
 460   // Compare Parse::array_addressing above.
 461 #ifdef _LP64
 462   // Clean the 32-bit int into a real 64-bit offset.
 463   // Otherwise, the jint value 0 might turn into an offset of 0x0800000000.
 464   const TypeLong* lkeytype = TypeLong::make(CONST64(0), num_cases-1, Type::WidenMin);
 465   key_val       = _gvn.transform( new ConvI2LNode(key_val, lkeytype) );
 466 #endif
 467   // Scale the value by wordSize so we have a byte offset into the table,
 468   // rather than a raw switch value
 469   Node *shiftWord = _gvn.MakeConX(wordSize);
 470   key_val = _gvn.transform( new MulXNode( key_val, shiftWord));
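       // key_val is now the byte offset (key - lowval) * wordSize that the Jump
       // node below uses to index its table of branch targets.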
 471 
 472   // Create the JumpNode
 473   Node* jtn = _gvn.transform( new JumpNode(control(), key_val, num_cases) );
 474 
 475   // These are the switch destinations hanging off the jumpnode
 476   int i = 0;
 477   for (SwitchRange* r = lo; r <= hi; r++) {
 478     for (int64_t j = r->lo(); j <= r->hi(); j++, i++) {
 479       Node* input = _gvn.transform(new JumpProjNode(jtn, i, r->dest(), (int)(j - lowval)));
 480       {
 481         PreserveJVMState pjvms(this);
 482         set_control(input);
 483         jump_if_always_fork(r->dest(), r->table_index());
 484       }
 485     }
 486   }
 487   assert(i == num_cases, "miscount of cases");
 488   stop_and_kill_map();  // no more uses for this JVMS
 489   return true;
 490 }
 491 
 492 //----------------------------jump_switch_ranges-------------------------------
 493 void Parse::jump_switch_ranges(Node* key_val, SwitchRange *lo, SwitchRange *hi, int switch_depth) {
 494   Block* switch_block = block();
 495 
 496   if (switch_depth == 0) {
 497     // Do special processing for the top-level call.
 498     assert(lo->lo() == min_jint, "initial range must exhaust Type::INT");
 499     assert(hi->hi() == max_jint, "initial range must exhaust Type::INT");
 500 
 501     // Decrement pred-numbers for the unique set of nodes.
 502 #ifdef ASSERT
 503     // Ensure that the block's successors are a (duplicate-free) set.
 504     int successors_counted = 0;  // block occurrences in [hi..lo]
 505     int unique_successors = switch_block->num_successors();
 506     for (int i = 0; i < unique_successors; i++) {
 507       Block* target = switch_block->successor_at(i);
 508 
 509       // Check that the set of successors is the same in both places.
 510       int successors_found = 0;
 511       for (SwitchRange* p = lo; p <= hi; p++) {
 512         if (p->dest() == target->start())  successors_found++;
 513       }
 514       assert(successors_found > 0, "successor must be known");
 515       successors_counted += successors_found;
 516     }
 517     assert(successors_counted == (hi-lo)+1, "no unexpected successors");
 518 #endif
 519 
 520     // Maybe prune the inputs, based on the type of key_val.
 521     jint min_val = min_jint;
 522     jint max_val = max_jint;
 523     const TypeInt* ti = key_val->bottom_type()->isa_int();
 524     if (ti != NULL) {
 525       min_val = ti->_lo;
 526       max_val = ti->_hi;
 527       assert(min_val <= max_val, "invalid int type");
 528     }
 529     while (lo->hi() < min_val)  lo++;
 530     if (lo->lo() < min_val)  lo->setRange(min_val, lo->hi(), lo->dest(), lo->table_index());
 531     while (hi->lo() > max_val)  hi--;
 532     if (hi->hi() > max_val)  hi->setRange(hi->lo(), max_val, hi->dest(), hi->table_index());
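         // e.g. if key_val's type is known to be [0, 50], ranges lying entirely
         // below 0 or above 50 are skipped, and the two boundary ranges are
         // clipped to that interval.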
 533   }
 534 
 535 #ifndef PRODUCT
 536   if (switch_depth == 0) {
 537     _max_switch_depth = 0;
 538     _est_switch_depth = log2_intptr((hi-lo+1)-1)+1;
 539   }
 540 #endif
 541 
 542   assert(lo <= hi, "must be a non-empty set of ranges");
 543   if (lo == hi) {
 544     jump_if_always_fork(lo->dest(), lo->table_index());
 545   } else {
 546     assert(lo->hi() == (lo+1)->lo()-1, "contiguous ranges");
 547     assert(hi->lo() == (hi-1)->hi()+1, "contiguous ranges");
 548 
 549     if (create_jump_tables(key_val, lo, hi)) return;
 550 
 551     int nr = hi - lo + 1;
 552 
 553     SwitchRange* mid = lo + nr/2;
 554     // if there is an easy choice, pivot at a singleton:
 555     if (nr > 3 && !mid->is_singleton() && (mid-1)->is_singleton())  mid--;
 556 
 557     assert(lo < mid && mid <= hi, "good pivot choice");
 558     assert(nr != 2 || mid == hi,   "should pick higher of 2");
 559     assert(nr != 3 || mid == hi-1, "should pick middle of 3");
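         // This emits a binary search over the ranges: compare key_val against
         // mid->lo(), dispatch the upper part (or the singleton mid) on one
         // projection, and recurse into [lo, mid-1] on the other.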
 560 
 561     Node *test_val = _gvn.intcon(mid->lo());
 562 
 563     if (mid->is_singleton()) {
 564       IfNode *iff_ne = jump_if_fork_int(key_val, test_val, BoolTest::ne);
 565       jump_if_false_fork(iff_ne, mid->dest(), mid->table_index());
 566 
 567       // Special Case:  If there are exactly three ranges, and the high
 568       // and low range each go to the same place, omit the "gt" test,
 569       // since it will not discriminate anything.
 570       bool eq_test_only = (hi == lo+2 && hi->dest() == lo->dest());
 571       if (eq_test_only) {
 572         assert(mid == hi-1, "");
 573       }
 574 
 575       // if there is a higher range, test for it and process it:
 576       if (mid < hi && !eq_test_only) {
 577         // two comparisons of same values--should enable 1 test for 2 branches
 578         // Use BoolTest::le instead of BoolTest::gt
 579         IfNode *iff_le  = jump_if_fork_int(key_val, test_val, BoolTest::le);
 580         Node   *iftrue  = _gvn.transform( new IfTrueNode(iff_le) );
 581         Node   *iffalse = _gvn.transform( new IfFalseNode(iff_le) );
 582         { PreserveJVMState pjvms(this);
 583           set_control(iffalse);
 584           jump_switch_ranges(key_val, mid+1, hi, switch_depth+1);
 585         }
 586         set_control(iftrue);
 587       }
 588 
 589     } else {
 590       // mid is a range, not a singleton, so treat mid..hi as a unit
 591       IfNode *iff_ge = jump_if_fork_int(key_val, test_val, BoolTest::ge);
 592 
 593       // if there is a higher range, test for it and process it:
 594       if (mid == hi) {
 595         jump_if_true_fork(iff_ge, mid->dest(), mid->table_index());
 596       } else {
 597         Node *iftrue  = _gvn.transform( new IfTrueNode(iff_ge) );
 598         Node *iffalse = _gvn.transform( new IfFalseNode(iff_ge) );
 599         { PreserveJVMState pjvms(this);
 600           set_control(iftrue);
 601           jump_switch_ranges(key_val, mid, hi, switch_depth+1);
 602         }
 603         set_control(iffalse);
 604       }
 605     }
 606 
 607     // in any case, process the lower range
 608     jump_switch_ranges(key_val, lo, mid-1, switch_depth+1);
 609   }
 610 
 611   // Decrease pred_count for each successor after all is done.
 612   if (switch_depth == 0) {
 613     int unique_successors = switch_block->num_successors();
 614     for (int i = 0; i < unique_successors; i++) {
 615       Block* target = switch_block->successor_at(i);
 616       // Throw away the pre-allocated path for each unique successor.
 617       target->next_path_num();
 618     }
 619   }
 620 
 621 #ifndef PRODUCT
 622   _max_switch_depth = MAX2(switch_depth, _max_switch_depth);
 623   if (TraceOptoParse && Verbose && WizardMode && switch_depth == 0) {
 624     SwitchRange* r;
 625     int nsing = 0;
 626     for( r = lo; r <= hi; r++ ) {
 627       if( r->is_singleton() )  nsing++;
 628     }
 629     tty->print(">>> ");
 630     _method->print_short_name();
 631     tty->print_cr(" switch decision tree");
 632     tty->print_cr("    %d ranges (%d singletons), max_depth=%d, est_depth=%d",
 633                   (int) (hi-lo+1), nsing, _max_switch_depth, _est_switch_depth);
 634     if (_max_switch_depth > _est_switch_depth) {
 635       tty->print_cr("******** BAD SWITCH DEPTH ********");
 636     }
 637     tty->print("   ");
 638     for( r = lo; r <= hi; r++ ) {
 639       r->print();
 640     }
 641     tty->cr();
 642   }
 643 #endif
 644 }
 645 
 646 void Parse::modf() {
 647   Node *f2 = pop();
 648   Node *f1 = pop();
 649   Node* c = make_runtime_call(RC_LEAF, OptoRuntime::modf_Type(),
 650                               CAST_FROM_FN_PTR(address, SharedRuntime::frem),
 651                               "frem", NULL, //no memory effects
 652                               f1, f2);
 653   Node* res = _gvn.transform(new ProjNode(c, TypeFunc::Parms + 0));
 654 
 655   push(res);
 656 }
 657 
 658 void Parse::modd() {
 659   Node *d2 = pop_pair();
 660   Node *d1 = pop_pair();
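       // A double occupies two argument slots, so the call below passes top()
       // for each unused second half; the matching second result projection is
       // checked to be top() in the ASSERT block.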
 661   Node* c = make_runtime_call(RC_LEAF, OptoRuntime::Math_DD_D_Type(),
 662                               CAST_FROM_FN_PTR(address, SharedRuntime::drem),
 663                               "drem", NULL, //no memory effects
 664                               d1, top(), d2, top());
 665   Node* res_d   = _gvn.transform(new ProjNode(c, TypeFunc::Parms + 0));
 666 
 667 #ifdef ASSERT
 668   Node* res_top = _gvn.transform(new ProjNode(c, TypeFunc::Parms + 1));
 669   assert(res_top == top(), "second value must be top");
 670 #endif
 671 
 672   push_pair(res_d);
 673 }
 674 
 675 void Parse::l2f() {
 676   Node* f2 = pop();
 677   Node* f1 = pop();
 678   Node* c = make_runtime_call(RC_LEAF, OptoRuntime::l2f_Type(),
 679                               CAST_FROM_FN_PTR(address, SharedRuntime::l2f),
 680                               "l2f", NULL, //no memory effects
 681                               f1, f2);
 682   Node* res = _gvn.transform(new ProjNode(c, TypeFunc::Parms + 0));
 683 
 684   push(res);
 685 }
 686 
 687 void Parse::do_irem() {
 688   // Must keep both values on the expression-stack during null-check
 689   zero_check_int(peek());
 690   // Did the zero check prove a guaranteed exception at compile time?
 691   if (stopped())  return;
 692 
 693   Node* b = pop();
 694   Node* a = pop();
 695 
 696   const Type *t = _gvn.type(b);
 697   if (t != Type::TOP) {
 698     const TypeInt *ti = t->is_int();
 699     if (ti->is_con()) {
 700       int divisor = ti->get_con();
 701       // check for positive power of 2
 702       if (divisor > 0 &&
 703           (divisor & ~(divisor-1)) == divisor) {
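             // (divisor & ~(divisor-1)) isolates the lowest set bit of the divisor,
             // so the equality holds exactly when the divisor is a power of two.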
 704         // yes !
 705         Node *mask = _gvn.intcon((divisor - 1));
 706         // Sigh, must handle negative dividends
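             // Java's irem keeps the sign of the dividend, e.g. for divisor 8 (mask 7):
             //    13 % 8 ==   13 & 7  ==  5    (fast positive path)
             //   -13 % 8 == -(13 & 7) == -5    (negate, mask, negate back)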
 707         Node *zero = _gvn.intcon(0);
 708         IfNode *ifff = jump_if_fork_int(a, zero, BoolTest::lt);
 709         Node *iff = _gvn.transform( new IfFalseNode(ifff) );
 710         Node *ift = _gvn.transform( new IfTrueNode (ifff) );
 711         Node *reg = jump_if_join(ift, iff);
 712         Node *phi = PhiNode::make(reg, NULL, TypeInt::INT);
 713         // Negative path; negate/and/negate
 714         Node *neg = _gvn.transform( new SubINode(zero, a) );
 715         Node *andn= _gvn.transform( new AndINode(neg, mask) );
 716         Node *negn= _gvn.transform( new SubINode(zero, andn) );
 717         phi->init_req(1, negn);
 718         // Fast positive case
 719         Node *andx = _gvn.transform( new AndINode(a, mask) );
 720         phi->init_req(2, andx);
 721         // Push the merge
 722         push( _gvn.transform(phi) );
 723         return;
 724       }
 725     }
 726   }
 727   // Default case
 728   push( _gvn.transform( new ModINode(control(),a,b) ) );
 729 }
 730 
 731 // Handle jsr and jsr_w bytecode
 732 void Parse::do_jsr() {
 733   assert(bc() == Bytecodes::_jsr || bc() == Bytecodes::_jsr_w, "wrong bytecode");
 734 
 735   // Store information about current state, tagged with new _jsr_bci
 736   int return_bci = iter().next_bci();
 737   int jsr_bci    = (bc() == Bytecodes::_jsr) ? iter().get_dest() : iter().get_far_dest();
 738 
 739   // Update method data
 740   profile_taken_branch(jsr_bci);
 741 
 742   // The way we do things now, there is only one successor block
 743   // for the jsr, because the target code is cloned by ciTypeFlow.
 744   Block* target = successor_for_bci(jsr_bci);
 745 
 746   // What got pushed?
 747   const Type* ret_addr = target->peek();
 748   assert(ret_addr->singleton(), "must be a constant (cloned jsr body)");
 749 
 750   // Effect of jsr on the stack
 751   push(_gvn.makecon(ret_addr));
 752 
 753   // Flow to the jsr.
 754   merge(jsr_bci);
 755 }
 756 
 757 // Handle ret bytecode
 758 void Parse::do_ret() {
 759   // Find to whom we return.
 760   assert(block()->num_successors() == 1, "a ret can only go one place now");
 761   Block* target = block()->successor_at(0);
 762   assert(!target->is_ready(), "our arrival must be expected");
 763   profile_ret(target->flow()->start());
 764   int pnum = target->next_path_num();
 765   merge_common(target, pnum);
 766 }
 767 
 768 static bool has_injected_profile(BoolTest::mask btest, Node* test, int& taken, int& not_taken) {
 769   if (btest != BoolTest::eq && btest != BoolTest::ne) {
 770     // Only ::eq and ::ne are supported for profile injection.
 771     return false;
 772   }
 773   if (test->is_Cmp() &&
 774       test->in(1)->Opcode() == Op_ProfileBoolean) {
 775     ProfileBooleanNode* profile = (ProfileBooleanNode*)test->in(1);
 776     int false_cnt = profile->false_count();
 777     int  true_cnt = profile->true_count();
 778 
 779     // Which count maps to taken vs. not_taken depends on the actual test operation (::eq or ::ne).
 780     // No need to scale the counts because profile injection was designed
 781     // to feed exact counts into VM.
 782     taken     = (btest == BoolTest::eq) ? false_cnt :  true_cnt;
 783     not_taken = (btest == BoolTest::eq) ?  true_cnt : false_cnt;
 784 
 785     profile->consume();
 786     return true;
 787   }
 788   return false;
 789 }
 790 //--------------------------dynamic_branch_prediction--------------------------
 791 // Try to gather dynamic branch prediction behavior.  Returns a probability
 792 // of the branch being taken and sets the "cnt" field.  Returns PROB_UNKNOWN
 793 // (-1.0) if we need to fall back to static prediction for some reason.
 794 float Parse::dynamic_branch_prediction(float &cnt, BoolTest::mask btest, Node* test) {
 795   ResourceMark rm;
 796 
 797   cnt  = COUNT_UNKNOWN;
 798 
 799   int     taken = 0;
 800   int not_taken = 0;
 801 
 802   bool use_mdo = !has_injected_profile(btest, test, taken, not_taken);
 803 
 804   if (use_mdo) {
 805     // Use MethodData information if it is available
 806     // FIXME: free the ProfileData structure
 807     ciMethodData* methodData = method()->method_data();
 808     if (!methodData->is_mature())  return PROB_UNKNOWN;
 809     ciProfileData* data = methodData->bci_to_data(bci());
 810     if (!data->is_JumpData())  return PROB_UNKNOWN;
 811 
 812     // get taken and not taken values
 813     taken = data->as_JumpData()->taken();
 814     not_taken = 0;
 815     if (data->is_BranchData()) {
 816       not_taken = data->as_BranchData()->not_taken();
 817     }
 818 
 819     // scale the counts to be commensurate with invocation counts:
 820     taken = method()->scale_count(taken);
 821     not_taken = method()->scale_count(not_taken);
 822   }
 823 
 824   // Give up if there are too few counts to be meaningful, or too many, in which case the sum will overflow.
 825   // Also require each counter to be non-negative first: an already-overflowed (negative) counter could otherwise still yield a plausible-looking positive sum.
 826   if (taken < 0 || not_taken < 0 || taken + not_taken < 40) {
 827     if (C->log() != NULL) {
 828       C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d'", iter().get_dest(), taken, not_taken);
 829     }
 830     return PROB_UNKNOWN;
 831   }
 832 
 833   // Compute frequency that we arrive here
 834   float sum = taken + not_taken;
 835   // Adjust, if this block is a cloned private block but the
 836   // Jump counts are shared.  Take the private counts for
 837   // just this path instead of the shared counts.
 838   if( block()->count() > 0 )
 839     sum = block()->count();
 840   cnt = sum / FreqCountInvocations;
 841 
 842   // Pin probability to sane limits
 843   float prob;
 844   if( !taken )
 845     prob = (0+PROB_MIN) / 2;
 846   else if( !not_taken )
 847     prob = (1+PROB_MAX) / 2;
 848   else {                         // Compute probability of true path
 849     prob = (float)taken / (float)(taken + not_taken);
 850     if (prob > PROB_MAX)  prob = PROB_MAX;
 851     if (prob < PROB_MIN)   prob = PROB_MIN;
 852   }
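       // Example: taken == 990, not_taken == 10 yields prob == 0.99, and
       // cnt == 1000 / FreqCountInvocations unless the block's private count
       // replaced the shared sum above.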
 853 
 854   assert((cnt > 0.0f) && (prob > 0.0f),
 855          "Bad frequency assignment in if");
 856 
 857   if (C->log() != NULL) {
 858     const char* prob_str = NULL;
 859     if (prob >= PROB_MAX)  prob_str = (prob == PROB_MAX) ? "max" : "always";
 860     if (prob <= PROB_MIN)  prob_str = (prob == PROB_MIN) ? "min" : "never";
 861     char prob_str_buf[30];
 862     if (prob_str == NULL) {
 863       sprintf(prob_str_buf, "%g", prob);
 864       prob_str = prob_str_buf;
 865     }
 866     C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d' cnt='%f' prob='%s'",
 867                    iter().get_dest(), taken, not_taken, cnt, prob_str);
 868   }
 869   return prob;
 870 }
 871 
 872 //-----------------------------branch_prediction-------------------------------
 873 float Parse::branch_prediction(float& cnt,
 874                                BoolTest::mask btest,
 875                                int target_bci,
 876                                Node* test) {
 877   float prob = dynamic_branch_prediction(cnt, btest, test);
 878   // If prob is unknown, switch to static prediction
 879   if (prob != PROB_UNKNOWN)  return prob;
 880 
 881   prob = PROB_FAIR;                   // Set default value
 882   if (btest == BoolTest::eq)          // Exactly equal test?
 883     prob = PROB_STATIC_INFREQUENT;    // Assume it's relatively infrequent
 884   else if (btest == BoolTest::ne)
 885     prob = PROB_STATIC_FREQUENT;      // Assume it's relatively frequent
 886 
 887   // If this is a conditional test guarding a backwards branch,
 888   // assume it's a loop-back edge.  Make it a likely taken branch.
 889   if (target_bci < bci()) {
 890     if (is_osr_parse()) {    // Could be a hot OSR'd loop; force deopt
 891       // Since it's an OSR, we probably have profile data, but since
 892       // branch_prediction returned PROB_UNKNOWN, the counts are too small.
 893       // Let's make a special check here for completely zero counts.
 894       ciMethodData* methodData = method()->method_data();
 895       if (!methodData->is_empty()) {
 896         ciProfileData* data = methodData->bci_to_data(bci());
 897         // Only stop for truly zero counts, which mean an unknown part
 898         // of the OSR-ed method, and we want to deopt to gather more stats.
 899         // If you have ANY counts, then this loop is simply 'cold' relative
 900         // to the OSR loop.
 901         if (data->as_BranchData()->taken() +
 902             data->as_BranchData()->not_taken() == 0 ) {
 903           // This is the only way to return PROB_UNKNOWN:
 904           return PROB_UNKNOWN;
 905         }
 906       }
 907     }
 908     prob = PROB_STATIC_FREQUENT;     // Likely to take backwards branch
 909   }
 910 
 911   assert(prob != PROB_UNKNOWN, "must have some guess at this point");
 912   return prob;
 913 }
 914 
 915 // The magic constants are chosen so as to match the output of
 916 // branch_prediction() when the profile reports a zero taken count.
 917 // It is important to distinguish zero counts unambiguously, because
 918 // some branches (e.g., _213_javac.Assembler.eliminate) validly produce
 919 // very small but nonzero probabilities, which if confused with zero
 920 // counts would keep the program recompiling indefinitely.
 921 bool Parse::seems_never_taken(float prob) const {
 922   return prob < PROB_MIN;
 923 }
 924 
 925 // True if the comparison seems to be the kind that will not change its
 926 // statistics from true to false.  See comments in adjust_map_after_if.
 927 // This question is only asked along paths which are already
 928 // classified as untaken (by seems_never_taken), so really,
 929 // if a path is never taken, its controlling comparison is
 930 // already acting in a stable fashion.  If the comparison
 931 // seems stable, we will put an expensive uncommon trap
 932 // on the untaken path.
 933 bool Parse::seems_stable_comparison() const {
 934   if (C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if)) {
 935     return false;
 936   }
 937   return true;
 938 }
 939 
 940 //-------------------------------repush_if_args--------------------------------
 941 // Push arguments of an "if" bytecode back onto the stack by adjusting _sp.
 942 inline int Parse::repush_if_args() {
 943 #ifndef PRODUCT
 944   if (PrintOpto && WizardMode) {
 945     tty->print("defending against excessive implicit null exceptions on %s @%d in ",
 946                Bytecodes::name(iter().cur_bc()), iter().cur_bci());
 947     method()->print_name(); tty->cr();
 948   }
 949 #endif
 950   int bc_depth = - Bytecodes::depth(iter().cur_bc());
 951   assert(bc_depth == 1 || bc_depth == 2, "only two kinds of branches");
 952   DEBUG_ONLY(sync_jvms());   // argument(n) requires a synced jvms
 953   assert(argument(0) != NULL, "must exist");
 954   assert(bc_depth == 1 || argument(1) != NULL, "two must exist");
 955   inc_sp(bc_depth);
 956   return bc_depth;
 957 }
 958 
 959 //----------------------------------do_ifnull----------------------------------
 960 void Parse::do_ifnull(BoolTest::mask btest, Node *c) {
 961   int target_bci = iter().get_dest();
 962 
 963   Block* branch_block = successor_for_bci(target_bci);
 964   Block* next_block   = successor_for_bci(iter().next_bci());
 965 
 966   float cnt;
 967   float prob = branch_prediction(cnt, btest, target_bci, c);
 968   if (prob == PROB_UNKNOWN) {
 969     // (An earlier version of do_ifnull omitted this trap for OSR methods.)
 970 #ifndef PRODUCT
 971     if (PrintOpto && Verbose)
 972       tty->print_cr("Never-taken edge stops compilation at bci %d",bci());
 973 #endif
 974     repush_if_args(); // to gather stats on loop
 975     // We need to mark this branch as taken so that if we recompile we will
 976     // see that it is possible. In the tiered system the interpreter doesn't
 977     // do profiling and by the time we get to the lower tier from the interpreter
 978     // the path may be cold again. Make sure it doesn't look untaken
 979     profile_taken_branch(target_bci, !ProfileInterpreter);
 980     uncommon_trap(Deoptimization::Reason_unreached,
 981                   Deoptimization::Action_reinterpret,
 982                   NULL, "cold");
 983     if (C->eliminate_boxing()) {
 984       // Mark the successor blocks as parsed
 985       branch_block->next_path_num();
 986       next_block->next_path_num();
 987     }
 988     return;
 989   }
 990 
 991   explicit_null_checks_inserted++;
 992 
 993   // Generate real control flow
 994   Node   *tst = _gvn.transform( new BoolNode( c, btest ) );
 995 
 996   // Sanity check the probability value
 997   assert(prob > 0.0f,"Bad probability in Parser");
 998  // Need xform to put node in hash table
 999   IfNode *iff = create_and_xform_if( control(), tst, prob, cnt );
1000   assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser");
1001   // True branch
1002   { PreserveJVMState pjvms(this);
1003     Node* iftrue  = _gvn.transform( new IfTrueNode (iff) );
1004     set_control(iftrue);
1005 
1006     if (stopped()) {            // Path is dead?
1007       explicit_null_checks_elided++;
1008       if (C->eliminate_boxing()) {
1009         // Mark the successor block as parsed
1010         branch_block->next_path_num();
1011       }
1012     } else {                    // Path is live.
1013       // Update method data
1014       profile_taken_branch(target_bci);
1015       adjust_map_after_if(btest, c, prob, branch_block, next_block);
1016       if (!stopped()) {
1017         merge(target_bci);
1018       }
1019     }
1020   }
1021 
1022   // False branch
1023   Node* iffalse = _gvn.transform( new IfFalseNode(iff) );
1024   set_control(iffalse);
1025 
1026   if (stopped()) {              // Path is dead?
1027     explicit_null_checks_elided++;
1028     if (C->eliminate_boxing()) {
1029       // Mark the successor block as parsed
1030       next_block->next_path_num();
1031     }
1032   } else  {                     // Path is live.
1033     // Update method data
1034     profile_not_taken_branch();
1035     adjust_map_after_if(BoolTest(btest).negate(), c, 1.0-prob,
1036                         next_block, branch_block);
1037   }
1038 }
1039 
1040 //------------------------------------do_if------------------------------------
1041 void Parse::do_if(BoolTest::mask btest, Node* c) {
1042   int target_bci = iter().get_dest();
1043 
1044   Block* branch_block = successor_for_bci(target_bci);
1045   Block* next_block   = successor_for_bci(iter().next_bci());
1046 
1047   float cnt;
1048   float prob = branch_prediction(cnt, btest, target_bci, c);
1049   float untaken_prob = 1.0 - prob;
1050 
1051   if (prob == PROB_UNKNOWN) {
1052 #ifndef PRODUCT
1053     if (PrintOpto && Verbose)
1054       tty->print_cr("Never-taken edge stops compilation at bci %d",bci());
1055 #endif
1056     repush_if_args(); // to gather stats on loop
1057     // We need to mark this branch as taken so that if we recompile we will
1058     // see that it is possible. In the tiered system the interpreter doesn't
1059     // do profiling and by the time we get to the lower tier from the interpreter
1060     // the path may be cold again. Make sure it doesn't look untaken
1061     profile_taken_branch(target_bci, !ProfileInterpreter);
1062     uncommon_trap(Deoptimization::Reason_unreached,
1063                   Deoptimization::Action_reinterpret,
1064                   NULL, "cold");
1065     if (C->eliminate_boxing()) {
1066       // Mark the successor blocks as parsed
1067       branch_block->next_path_num();
1068       next_block->next_path_num();
1069     }
1070     return;
1071   }
1072 
1073   // Sanity check the probability value
1074   assert(0.0f < prob && prob < 1.0f,"Bad probability in Parser");
1075 
1076   bool taken_if_true = true;
1077   // Convert BoolTest to canonical form:
1078   if (!BoolTest(btest).is_canonical()) {
1079     btest         = BoolTest(btest).negate();
1080     taken_if_true = false;
1081     // prob is NOT updated here; it remains the probability of the taken
1082     // path (as opposed to the prob of the path guarded by an 'IfTrueNode').
1083   }
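       // e.g. if_icmpgt arrives here as ::gt, which is not canonical: it is
       // negated to ::le and taken_if_true becomes false, so the bytecode's
       // taken path will hang off the IfFalse projection built below.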
1084   assert(btest != BoolTest::eq, "!= is the only canonical exact test");
1085 
1086   Node* tst0 = new BoolNode(c, btest);
1087   Node* tst = _gvn.transform(tst0);
1088   BoolTest::mask taken_btest   = BoolTest::illegal;
1089   BoolTest::mask untaken_btest = BoolTest::illegal;
1090 
1091   if (tst->is_Bool()) {
1092     // Refresh c from the transformed bool node, since it may be
1093     // simpler than the original c.  Also re-canonicalize btest.
1094     // This wins when (Bool ne (Conv2B p) 0) => (Bool ne (CmpP p NULL)).
1095     // That can arise from statements like: if (x instanceof C) ...
1096     if (tst != tst0) {
1097       // Canonicalize one more time since transform can change it.
1098       btest = tst->as_Bool()->_test._test;
1099       if (!BoolTest(btest).is_canonical()) {
1100         // Reverse edges one more time...
1101         tst   = _gvn.transform( tst->as_Bool()->negate(&_gvn) );
1102         btest = tst->as_Bool()->_test._test;
1103         assert(BoolTest(btest).is_canonical(), "sanity");
1104         taken_if_true = !taken_if_true;
1105       }
1106       c = tst->in(1);
1107     }
1108     BoolTest::mask neg_btest = BoolTest(btest).negate();
1109     taken_btest   = taken_if_true ?     btest : neg_btest;
1110     untaken_btest = taken_if_true ? neg_btest :     btest;
1111   }
1112 
1113   // Generate real control flow
1114   float true_prob = (taken_if_true ? prob : untaken_prob);
1115   IfNode* iff = create_and_map_if(control(), tst, true_prob, cnt);
1116   assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser");
1117   Node* taken_branch   = new IfTrueNode(iff);
1118   Node* untaken_branch = new IfFalseNode(iff);
1119   if (!taken_if_true) {  // Finish conversion to canonical form
1120     Node* tmp      = taken_branch;
1121     taken_branch   = untaken_branch;
1122     untaken_branch = tmp;
1123   }
1124 
1125   // Branch is taken:
1126   { PreserveJVMState pjvms(this);
1127     taken_branch = _gvn.transform(taken_branch);
1128     set_control(taken_branch);
1129 
1130     if (stopped()) {
1131       if (C->eliminate_boxing()) {
1132         // Mark the successor block as parsed
1133         branch_block->next_path_num();
1134       }
1135     } else {
1136       // Update method data
1137       profile_taken_branch(target_bci);
1138       adjust_map_after_if(taken_btest, c, prob, branch_block, next_block);
1139       if (!stopped()) {
1140         merge(target_bci);
1141       }
1142     }
1143   }
1144 
1145   untaken_branch = _gvn.transform(untaken_branch);
1146   set_control(untaken_branch);
1147 
1148   // Branch not taken.
1149   if (stopped()) {
1150     if (C->eliminate_boxing()) {
1151       // Mark the successor block as parsed
1152       next_block->next_path_num();
1153     }
1154   } else {
1155     // Update method data
1156     profile_not_taken_branch();
1157     adjust_map_after_if(untaken_btest, c, untaken_prob,
1158                         next_block, branch_block);
1159   }
1160 }
1161 
1162 bool Parse::path_is_suitable_for_uncommon_trap(float prob) const {
1163   // Don't want to speculate on uncommon traps when running with -Xcomp
1164   if (!UseInterpreter) {
1165     return false;
1166   }
1167   return (seems_never_taken(prob) && seems_stable_comparison());
1168 }
1169 
1170 //----------------------------adjust_map_after_if------------------------------
1171 // Adjust the JVM state to reflect the result of taking this path.
1172 // Basically, it means inspecting the CmpNode controlling this
1173 // branch, seeing how it constrains a tested value, and then
1174 // deciding if it's worth our while to encode this constraint
1175 // as graph nodes in the current abstract interpretation map.
1176 void Parse::adjust_map_after_if(BoolTest::mask btest, Node* c, float prob,
1177                                 Block* path, Block* other_path) {
1178   if (stopped() || !c->is_Cmp() || btest == BoolTest::illegal)
1179     return;                             // nothing to do
1180 
1181   bool is_fallthrough = (path == successor_for_bci(iter().next_bci()));
1182 
1183   if (path_is_suitable_for_uncommon_trap(prob)) {
1184     repush_if_args();
1185     uncommon_trap(Deoptimization::Reason_unstable_if,
1186                   Deoptimization::Action_reinterpret,
1187                   NULL,
1188                   (is_fallthrough ? "taken always" : "taken never"));
1189     return;
1190   }
1191 
1192   Node* val = c->in(1);
1193   Node* con = c->in(2);
1194   const Type* tcon = _gvn.type(con);
1195   const Type* tval = _gvn.type(val);
1196   bool have_con = tcon->singleton();
1197   if (tval->singleton()) {
1198     if (!have_con) {
1199       // Swap, so constant is in con.
1200       con  = val;
1201       tcon = tval;
1202       val  = c->in(2);
1203       tval = _gvn.type(val);
1204       btest = BoolTest(btest).commute();
1205       have_con = true;
1206     } else {
1207       // Do we have two constants?  Then leave well enough alone.
1208       have_con = false;
1209     }
1210   }
1211   if (!have_con)                        // remaining adjustments need a con
1212     return;
1213 
1214   sharpen_type_after_if(btest, con, tcon, val, tval);
1215 }
1216 
1217 
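     // Recognize the shape obj -> AddP(obj, klass_offset) -> LoadKlass (possibly
     // wrapped in DecodeNKlass) and return the underlying Java object, or NULL
     // if 'n' is not such a klass load.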
1218 static Node* extract_obj_from_klass_load(PhaseGVN* gvn, Node* n) {
1219   Node* ldk;
1220   if (n->is_DecodeNKlass()) {
1221     if (n->in(1)->Opcode() != Op_LoadNKlass) {
1222       return NULL;
1223     } else {
1224       ldk = n->in(1);
1225     }
1226   } else if (n->Opcode() != Op_LoadKlass) {
1227     return NULL;
1228   } else {
1229     ldk = n;
1230   }
1231   assert(ldk != NULL && ldk->is_Load(), "should have found a LoadKlass or LoadNKlass node");
1232 
1233   Node* adr = ldk->in(MemNode::Address);
1234   intptr_t off = 0;
1235   Node* obj = AddPNode::Ideal_base_and_offset(adr, gvn, off);
1236   if (obj == NULL || off != oopDesc::klass_offset_in_bytes()) // loading oopDesc::_klass?
1237     return NULL;
1238   const TypePtr* tp = gvn->type(obj)->is_ptr();
1239   if (tp == NULL || !(tp->isa_instptr() || tp->isa_aryptr())) // is obj a Java object ptr?
1240     return NULL;
1241 
1242   return obj;
1243 }
1244 
1245 void Parse::sharpen_type_after_if(BoolTest::mask btest,
1246                                   Node* con, const Type* tcon,
1247                                   Node* val, const Type* tval) {
1248   // Look for opportunities to sharpen the type of a node
1249   // whose klass is compared with a constant klass.
1250   if (btest == BoolTest::eq && tcon->isa_klassptr()) {
1251     Node* obj = extract_obj_from_klass_load(&_gvn, val);
1252     const TypeOopPtr* con_type = tcon->isa_klassptr()->as_instance_type();
1253     if (obj != NULL && (con_type->isa_instptr() || con_type->isa_aryptr())) {
1254        // Found:
1255        //   Bool(CmpP(LoadKlass(obj._klass), ConP(Foo.klass)), [eq])
1256        // or the narrowOop equivalent.
1257        const Type* obj_type = _gvn.type(obj);
1258        const TypeOopPtr* tboth = obj_type->join_speculative(con_type)->isa_oopptr();
1259        if (tboth != NULL && tboth->klass_is_exact() && tboth != obj_type &&
1260            tboth->higher_equal(obj_type)) {
1261           // obj has to be of the exact type Foo if the CmpP succeeds.
1262           int obj_in_map = map()->find_edge(obj);
1263           JVMState* jvms = this->jvms();
1264           if (obj_in_map >= 0 &&
1265               (jvms->is_loc(obj_in_map) || jvms->is_stk(obj_in_map))) {
1266             TypeNode* ccast = new CheckCastPPNode(control(), obj, tboth);
1267             const Type* tcc = ccast->as_Type()->type();
1268             assert(tcc != obj_type && tcc->higher_equal(obj_type), "must improve");
1269             // Delay transform() call to allow recovery of pre-cast value
1270             // at the control merge.
1271             _gvn.set_type_bottom(ccast);
1272             record_for_igvn(ccast);
1273             // Here's the payoff.
1274             replace_in_map(obj, ccast);
1275           }
1276        }
1277     }
1278   }
1279 
1280   int val_in_map = map()->find_edge(val);
1281   if (val_in_map < 0)  return;          // replace_in_map would be useless
1282   {
1283     JVMState* jvms = this->jvms();
1284     if (!(jvms->is_loc(val_in_map) ||
1285           jvms->is_stk(val_in_map)))
1286       return;                           // again, it would be useless
1287   }
1288 
1289   // Check for a comparison to a constant, and "know" that the compared
1290   // value is constrained on this path.
1291   assert(tcon->singleton(), "");
1292   ConstraintCastNode* ccast = NULL;
1293   Node* cast = NULL;
1294 
1295   switch (btest) {
1296   case BoolTest::eq:                    // Constant test?
1297     {
1298       const Type* tboth = tcon->join_speculative(tval);
1299       if (tboth == tval)  break;        // Nothing to gain.
1300       if (tcon->isa_int()) {
1301         ccast = new CastIINode(val, tboth);
1302       } else if (tcon == TypePtr::NULL_PTR) {
1303         // Cast to null, but keep the pointer identity temporarily live.
1304         ccast = new CastPPNode(val, tboth);
1305       } else {
1306         const TypeF* tf = tcon->isa_float_constant();
1307         const TypeD* td = tcon->isa_double_constant();
1308         // Exclude tests vs float/double 0 as these could be
1309         // either +0 or -0.  Just because you are equal to +0
1310         // doesn't mean you ARE +0!
1311         // Note, following code also replaces Long and Oop values.
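             // (For instance, 0.0f == -0.0f is true in Java although the two
             // values have different bit patterns, so substituting the constant
             // could change 1.0f/val or Float.floatToRawIntBits(val).)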
1312         if ((!tf || tf->_f != 0.0) &&
1313             (!td || td->_d != 0.0))
1314           cast = con;                   // Replace non-constant val by con.
1315       }
1316     }
1317     break;
1318 
1319   case BoolTest::ne:
1320     if (tcon == TypePtr::NULL_PTR) {
1321       cast = cast_not_null(val, false);
1322     }
1323     break;
1324 
1325   default:
1326     // (At this point we could record int range types with CastII.)
1327     break;
1328   }
1329 
1330   if (ccast != NULL) {
1331     const Type* tcc = ccast->as_Type()->type();
1332     assert(tcc != tval && tcc->higher_equal(tval), "must improve");
1333     // Delay transform() call to allow recovery of pre-cast value
1334     // at the control merge.
1335     ccast->set_req(0, control());
1336     _gvn.set_type_bottom(ccast);
1337     record_for_igvn(ccast);
1338     cast = ccast;
1339   }
1340 
1341   if (cast != NULL) {                   // Here's the payoff.
1342     replace_in_map(val, cast);
1343   }
1344 }
1345 
1346 /**
1347  * Use speculative type to optimize CmpP node: if comparison is
1348  * against the low level class, cast the object to the speculative
1349  * type if any. CmpP should then go away.
1350  *
1351  * @param c  expected CmpP node
1352  * @return   result of CmpP on object casted to speculative type
1353  *
1354  */
1355 Node* Parse::optimize_cmp_with_klass(Node* c) {
1356   // If this is transformed by the _gvn to a comparison with the low
1357   // level klass then we may be able to use speculation
1358   if (c->Opcode() == Op_CmpP &&
1359       (c->in(1)->Opcode() == Op_LoadKlass || c->in(1)->Opcode() == Op_DecodeNKlass) &&
1360       c->in(2)->is_Con()) {
1361     Node* load_klass = NULL;
1362     Node* decode = NULL;
1363     if (c->in(1)->Opcode() == Op_DecodeNKlass) {
1364       decode = c->in(1);
1365       load_klass = c->in(1)->in(1);
1366     } else {
1367       load_klass = c->in(1);
1368     }
1369     if (load_klass->in(2)->is_AddP()) {
1370       Node* addp = load_klass->in(2);
1371       Node* obj = addp->in(AddPNode::Address);
1372       const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
1373       if (obj_type->speculative_type_not_null() != NULL) {
1374         ciKlass* k = obj_type->speculative_type();
1375         inc_sp(2);
1376         obj = maybe_cast_profiled_obj(obj, k);
1377         dec_sp(2);
1378         // Make the CmpP use the casted obj
1379         addp = basic_plus_adr(obj, addp->in(AddPNode::Offset));
1380         load_klass = load_klass->clone();
1381         load_klass->set_req(2, addp);
1382         load_klass = _gvn.transform(load_klass);
1383         if (decode != NULL) {
1384           decode = decode->clone();
1385           decode->set_req(1, load_klass);
1386           load_klass = _gvn.transform(decode);
1387         }
1388         c = c->clone();
1389         c->set_req(1, load_klass);
1390         c = _gvn.transform(c);
1391       }
1392     }
1393   }
1394   return c;
1395 }
1396 
1397 //------------------------------do_one_bytecode--------------------------------
1398 // Parse this bytecode, and alter the Parser's JVM->Node mapping
1399 void Parse::do_one_bytecode() {
1400   Node *a, *b, *c, *d;          // Handy temps
1401   BoolTest::mask btest;
1402   int i;
1403 
1404   assert(!has_exceptions(), "bytecode entry state must be clear of throws");
1405 
1406   if (C->check_node_count(NodeLimitFudgeFactor * 5,
1407                           "out of nodes parsing method")) {
1408     return;
1409   }
1410 
1411 #ifdef ASSERT
1412   // for setting breakpoints
1413   if (TraceOptoParse) {
1414     tty->print(" @");
1415     dump_bci(bci());
1416     tty->cr();
1417   }
1418 #endif
1419 
1420   switch (bc()) {
1421   case Bytecodes::_nop:
1422     // do nothing
1423     break;
1424   case Bytecodes::_lconst_0:
1425     push_pair(longcon(0));
1426     break;
1427 
1428   case Bytecodes::_lconst_1:
1429     push_pair(longcon(1));
1430     break;
1431 
1432   case Bytecodes::_fconst_0:
1433     push(zerocon(T_FLOAT));
1434     break;
1435 
1436   case Bytecodes::_fconst_1:
1437     push(makecon(TypeF::ONE));
1438     break;
1439 
1440   case Bytecodes::_fconst_2:
1441     push(makecon(TypeF::make(2.0f)));
1442     break;
1443 
1444   case Bytecodes::_dconst_0:
1445     push_pair(zerocon(T_DOUBLE));
1446     break;
1447 
1448   case Bytecodes::_dconst_1:
1449     push_pair(makecon(TypeD::ONE));
1450     break;
1451 
1452   case Bytecodes::_iconst_m1: push(intcon(-1)); break;
1453   case Bytecodes::_iconst_0: push(intcon( 0)); break;
1454   case Bytecodes::_iconst_1: push(intcon( 1)); break;
1455   case Bytecodes::_iconst_2: push(intcon( 2)); break;
1456   case Bytecodes::_iconst_3: push(intcon( 3)); break;
1457   case Bytecodes::_iconst_4: push(intcon( 4)); break;
1458   case Bytecodes::_iconst_5: push(intcon( 5)); break;
1459   case Bytecodes::_bipush:   push(intcon(iter().get_constant_u1())); break;
1460   case Bytecodes::_sipush:   push(intcon(iter().get_constant_u2())); break;
1461   case Bytecodes::_aconst_null: push(null());  break;
1462   case Bytecodes::_ldc:
1463   case Bytecodes::_ldc_w:
1464   case Bytecodes::_ldc2_w:
1465     // If the constant is unresolved, run this BC once in the interpreter.
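         // The uncommon trap below uses Action_reinterpret, so execution
         // resumes in the interpreter, which resolves the constant; a later
         // recompile then sees it as loaded.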
1466     {
1467       ciConstant constant = iter().get_constant();
1468       if (constant.basic_type() == T_OBJECT &&
1469           !constant.as_object()->is_loaded()) {
1470         int index = iter().get_constant_pool_index();
1471         constantTag tag = iter().get_constant_pool_tag(index);
1472         uncommon_trap(Deoptimization::make_trap_request
1473                       (Deoptimization::Reason_unloaded,
1474                        Deoptimization::Action_reinterpret,
1475                        index),
1476                       NULL, tag.internal_name());
1477         break;
1478       }
1479       assert(constant.basic_type() != T_OBJECT || constant.as_object()->is_instance(),
1480              "must be java_mirror of klass");
1481       const Type* con_type = Type::make_from_constant(constant);
1482       if (con_type != NULL) {
1483         push_node(con_type->basic_type(), makecon(con_type));
1484       }
1485     }
1486 
1487     break;
1488 
1489   case Bytecodes::_aload_0:
1490     push( local(0) );
1491     break;
1492   case Bytecodes::_aload_1:
1493     push( local(1) );
1494     break;
1495   case Bytecodes::_aload_2:
1496     push( local(2) );
1497     break;
1498   case Bytecodes::_aload_3:
1499     push( local(3) );
1500     break;
1501   case Bytecodes::_aload:
1502     push( local(iter().get_index()) );
1503     break;
1504 
1505   case Bytecodes::_fload_0:
1506   case Bytecodes::_iload_0:
1507     push( local(0) );
1508     break;
1509   case Bytecodes::_fload_1:
1510   case Bytecodes::_iload_1:
1511     push( local(1) );
1512     break;
1513   case Bytecodes::_fload_2:
1514   case Bytecodes::_iload_2:
1515     push( local(2) );
1516     break;
1517   case Bytecodes::_fload_3:
1518   case Bytecodes::_iload_3:
1519     push( local(3) );
1520     break;
1521   case Bytecodes::_fload:
1522   case Bytecodes::_iload:
1523     push( local(iter().get_index()) );
1524     break;
1525   case Bytecodes::_lload_0:
1526     push_pair_local( 0 );
1527     break;
1528   case Bytecodes::_lload_1:
1529     push_pair_local( 1 );
1530     break;
1531   case Bytecodes::_lload_2:
1532     push_pair_local( 2 );
1533     break;
1534   case Bytecodes::_lload_3:
1535     push_pair_local( 3 );
1536     break;
1537   case Bytecodes::_lload:
1538     push_pair_local( iter().get_index() );
1539     break;
1540 
1541   case Bytecodes::_dload_0:
1542     push_pair_local(0);
1543     break;
1544   case Bytecodes::_dload_1:
1545     push_pair_local(1);
1546     break;
1547   case Bytecodes::_dload_2:
1548     push_pair_local(2);
1549     break;
1550   case Bytecodes::_dload_3:
1551     push_pair_local(3);
1552     break;
1553   case Bytecodes::_dload:
1554     push_pair_local(iter().get_index());
1555     break;
1556   case Bytecodes::_fstore_0:
1557   case Bytecodes::_istore_0:
1558   case Bytecodes::_astore_0:
1559     set_local( 0, pop() );
1560     break;
1561   case Bytecodes::_fstore_1:
1562   case Bytecodes::_istore_1:
1563   case Bytecodes::_astore_1:
1564     set_local( 1, pop() );
1565     break;
1566   case Bytecodes::_fstore_2:
1567   case Bytecodes::_istore_2:
1568   case Bytecodes::_astore_2:
1569     set_local( 2, pop() );
1570     break;
1571   case Bytecodes::_fstore_3:
1572   case Bytecodes::_istore_3:
1573   case Bytecodes::_astore_3:
1574     set_local( 3, pop() );
1575     break;
1576   case Bytecodes::_fstore:
1577   case Bytecodes::_istore:
1578   case Bytecodes::_astore:
1579     set_local( iter().get_index(), pop() );
1580     break;
1581   // long stores
1582   case Bytecodes::_lstore_0:
1583     set_pair_local( 0, pop_pair() );
1584     break;
1585   case Bytecodes::_lstore_1:
1586     set_pair_local( 1, pop_pair() );
1587     break;
1588   case Bytecodes::_lstore_2:
1589     set_pair_local( 2, pop_pair() );
1590     break;
1591   case Bytecodes::_lstore_3:
1592     set_pair_local( 3, pop_pair() );
1593     break;
1594   case Bytecodes::_lstore:
1595     set_pair_local( iter().get_index(), pop_pair() );
1596     break;
1597 
1598   // double stores
1599   case Bytecodes::_dstore_0:
1600     set_pair_local( 0, dstore_rounding(pop_pair()) );
1601     break;
1602   case Bytecodes::_dstore_1:
1603     set_pair_local( 1, dstore_rounding(pop_pair()) );
1604     break;
1605   case Bytecodes::_dstore_2:
1606     set_pair_local( 2, dstore_rounding(pop_pair()) );
1607     break;
1608   case Bytecodes::_dstore_3:
1609     set_pair_local( 3, dstore_rounding(pop_pair()) );
1610     break;
1611   case Bytecodes::_dstore:
1612     set_pair_local( iter().get_index(), dstore_rounding(pop_pair()) );
1613     break;
1614 
1615   case Bytecodes::_pop:  dec_sp(1);   break;
1616   case Bytecodes::_pop2: dec_sp(2);   break;
1617   case Bytecodes::_swap:
1618     a = pop();
1619     b = pop();
1620     push(a);
1621     push(b);
1622     break;
1623   case Bytecodes::_dup:
1624     a = pop();
1625     push(a);
1626     push(a);
1627     break;
1628   case Bytecodes::_dup_x1:
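         // before: .. b, a
         // after:  .. a, b, a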
1629     a = pop();
1630     b = pop();
1631     push( a );
1632     push( b );
1633     push( a );
1634     break;
1635   case Bytecodes::_dup_x2:
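         // before: .. c, b, a
         // after:  .. a, c, b, a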
1636     a = pop();
1637     b = pop();
1638     c = pop();
1639     push( a );
1640     push( c );
1641     push( b );
1642     push( a );
1643     break;
1644   case Bytecodes::_dup2:
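         // before: .. b, a
         // after:  .. b, a, b, a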
1645     a = pop();
1646     b = pop();
1647     push( b );
1648     push( a );
1649     push( b );
1650     push( a );
1651     break;
1652 
1653   case Bytecodes::_dup2_x1:
1654     // before: .. c, b, a
1655     // after:  .. b, a, c, b, a
1656     // not tested
1657     a = pop();
1658     b = pop();
1659     c = pop();
1660     push( b );
1661     push( a );
1662     push( c );
1663     push( b );
1664     push( a );
1665     break;
1666   case Bytecodes::_dup2_x2:
1667     // before: .. d, c, b, a
1668     // after:  .. b, a, d, c, b, a
1669     // not tested
1670     a = pop();
1671     b = pop();
1672     c = pop();
1673     d = pop();
1674     push( b );
1675     push( a );
1676     push( d );
1677     push( c );
1678     push( b );
1679     push( a );
1680     break;
1681 
1682   case Bytecodes::_arraylength: {
1683     // Must do null-check with value on expression stack
1684     Node *ary = null_check(peek(), T_ARRAY);
1685     // Compile-time detect of null-exception?
1686     if (stopped())  return;
1687     a = pop();
1688     push(load_array_length(a));
1689     break;
1690   }
1691 
1692   case Bytecodes::_baload: array_load(T_BYTE);   break;
1693   case Bytecodes::_caload: array_load(T_CHAR);   break;
1694   case Bytecodes::_iaload: array_load(T_INT);    break;
1695   case Bytecodes::_saload: array_load(T_SHORT);  break;
1696   case Bytecodes::_faload: array_load(T_FLOAT);  break;
1697   case Bytecodes::_aaload: array_load(T_OBJECT); break;
1698   case Bytecodes::_laload: {
1699     a = array_addressing(T_LONG, 0);
1700     if (stopped())  return;     // guaranteed null or range check
1701     dec_sp(2);                  // Pop array and index
1702     push_pair(make_load(control(), a, TypeLong::LONG, T_LONG, TypeAryPtr::LONGS, MemNode::unordered));
1703     break;
1704   }
1705   case Bytecodes::_daload: {
1706     a = array_addressing(T_DOUBLE, 0);
1707     if (stopped())  return;     // guaranteed null or range check
1708     dec_sp(2);                  // Pop array and index
1709     push_pair(make_load(control(), a, Type::DOUBLE, T_DOUBLE, TypeAryPtr::DOUBLES, MemNode::unordered));
1710     break;
1711   }
1712   case Bytecodes::_bastore: array_store(T_BYTE);  break;
1713   case Bytecodes::_castore: array_store(T_CHAR);  break;
1714   case Bytecodes::_iastore: array_store(T_INT);   break;
1715   case Bytecodes::_sastore: array_store(T_SHORT); break;
1716   case Bytecodes::_fastore: array_store(T_FLOAT); break;
1717   case Bytecodes::_aastore: {
1718     d = array_addressing(T_OBJECT, 1);
1719     if (stopped())  return;     // guaranteed null or range check
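         // array_store_check() checks the stored oop against the array's
         // element klass (the failing path handles ArrayStoreException).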
1720     array_store_check();
1721     c = pop();                  // Oop to store
1722     b = pop();                  // index (already used)
1723     a = pop();                  // the array itself
1724     const TypeOopPtr* elemtype  = _gvn.type(a)->is_aryptr()->elem()->make_oopptr();
1725     const TypeAryPtr* adr_type = TypeAryPtr::OOPS;
1726     Node* store = store_oop_to_array(control(), a, d, adr_type, c, elemtype, T_OBJECT,
1727                                      StoreNode::release_if_reference(T_OBJECT));
1728     break;
1729   }
1730   case Bytecodes::_lastore: {
1731     a = array_addressing(T_LONG, 2);
1732     if (stopped())  return;     // guaranteed null or range check
1733     c = pop_pair();
1734     dec_sp(2);                  // Pop array and index
1735     store_to_memory(control(), a, c, T_LONG, TypeAryPtr::LONGS, MemNode::unordered);
1736     break;
1737   }
1738   case Bytecodes::_dastore: {
1739     a = array_addressing(T_DOUBLE, 2);
1740     if (stopped())  return;     // guaranteed null or range check
1741     c = pop_pair();
1742     dec_sp(2);                  // Pop array and index
1743     c = dstore_rounding(c);
1744     store_to_memory(control(), a, c, T_DOUBLE, TypeAryPtr::DOUBLES, MemNode::unordered);
1745     break;
1746   }
1747   case Bytecodes::_getfield:
1748     do_getfield();
1749     break;
1750 
1751   case Bytecodes::_getstatic:
1752     do_getstatic();
1753     break;
1754 
1755   case Bytecodes::_putfield:
1756     do_putfield();
1757     break;
1758 
1759   case Bytecodes::_putstatic:
1760     do_putstatic();
1761     break;
1762 
1763   case Bytecodes::_irem:
1764     do_irem();
1765     break;
1766   case Bytecodes::_idiv:
1767     // Must keep both values on the expression-stack during the zero-divisor check
1768     zero_check_int(peek());
1769     // Zero divisor detected at compile time?
1770     if (stopped())  return;
1771     b = pop();
1772     a = pop();
1773     push( _gvn.transform( new DivINode(control(),a,b) ) );
1774     break;
1775   case Bytecodes::_imul:
1776     b = pop(); a = pop();
1777     push( _gvn.transform( new MulINode(a,b) ) );
1778     break;
1779   case Bytecodes::_iadd:
1780     b = pop(); a = pop();
1781     push( _gvn.transform( new AddINode(a,b) ) );
1782     break;
1783   case Bytecodes::_ineg:
1784     a = pop();
1785     push( _gvn.transform( new SubINode(_gvn.intcon(0),a)) );
1786     break;
1787   case Bytecodes::_isub:
1788     b = pop(); a = pop();
1789     push( _gvn.transform( new SubINode(a,b) ) );
1790     break;
1791   case Bytecodes::_iand:
1792     b = pop(); a = pop();
1793     push( _gvn.transform( new AndINode(a,b) ) );
1794     break;
1795   case Bytecodes::_ior:
1796     b = pop(); a = pop();
1797     push( _gvn.transform( new OrINode(a,b) ) );
1798     break;
1799   case Bytecodes::_ixor:
1800     b = pop(); a = pop();
1801     push( _gvn.transform( new XorINode(a,b) ) );
1802     break;
1803   case Bytecodes::_ishl:
1804     b = pop(); a = pop();
1805     push( _gvn.transform( new LShiftINode(a,b) ) );
1806     break;
1807   case Bytecodes::_ishr:
1808     b = pop(); a = pop();
1809     push( _gvn.transform( new RShiftINode(a,b) ) );
1810     break;
1811   case Bytecodes::_iushr:
1812     b = pop(); a = pop();
1813     push( _gvn.transform( new URShiftINode(a,b) ) );
1814     break;
1815 
1816   case Bytecodes::_fneg:
1817     a = pop();
1818     b = _gvn.transform(new NegFNode (a));
1819     push(b);
1820     break;
1821 
1822   case Bytecodes::_fsub:
1823     b = pop();
1824     a = pop();
1825     c = _gvn.transform( new SubFNode(a,b) );
1826     d = precision_rounding(c);
1827     push( d );
1828     break;
1829 
1830   case Bytecodes::_fadd:
1831     b = pop();
1832     a = pop();
1833     c = _gvn.transform( new AddFNode(a,b) );
1834     d = precision_rounding(c);
1835     push( d );
1836     break;
1837 
1838   case Bytecodes::_fmul:
1839     b = pop();
1840     a = pop();
1841     c = _gvn.transform( new MulFNode(a,b) );
1842     d = precision_rounding(c);
1843     push( d );
1844     break;
1845 
1846   case Bytecodes::_fdiv:
1847     b = pop();
1848     a = pop();
1849     c = _gvn.transform( new DivFNode(0,a,b) );
1850     d = precision_rounding(c);
1851     push( d );
1852     break;
1853 
1854   case Bytecodes::_frem:
1855     if (Matcher::has_match_rule(Op_ModF)) {
1856       // Generate a ModF node.
1857       b = pop();
1858       a = pop();
1859       c = _gvn.transform( new ModFNode(0,a,b) );
1860       d = precision_rounding(c);
1861       push( d );
1862     }
1863     else {
1864       // Generate a call.
1865       modf();
1866     }
1867     break;
1868 
1869   case Bytecodes::_fcmpl:
1870     b = pop();
1871     a = pop();
1872     c = _gvn.transform( new CmpF3Node( a, b));
1873     push(c);
1874     break;
1875   case Bytecodes::_fcmpg:
1876     b = pop();
1877     a = pop();
1878 
1879     // Same as fcmpl but need to flip the unordered case.  Swap the inputs,
1880     // which negates the result sign except for unordered.  Flip the unordered
1881     // as well by using CmpF3 which implements unordered-lesser instead of
1882     // unordered-greater semantics.  Finally, commute the result bits.  Result
1883     // is same as using a CmpF3Greater except we did it with CmpF3 alone.
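         //   fcmpl(a,b): a<b => -1, a==b => 0, a>b => +1, unordered => -1
         //   fcmpg(a,b): same, except unordered => +1; 0 - CmpF3(b,a) is exactly that.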
1884     c = _gvn.transform( new CmpF3Node( b, a));
1885     c = _gvn.transform( new SubINode(_gvn.intcon(0),c) );
1886     push(c);
1887     break;
1888 
1889   case Bytecodes::_f2i:
1890     a = pop();
1891     push(_gvn.transform(new ConvF2INode(a)));
1892     break;
1893 
1894   case Bytecodes::_d2i:
1895     a = pop_pair();
1896     b = _gvn.transform(new ConvD2INode(a));
1897     push( b );
1898     break;
1899 
1900   case Bytecodes::_f2d:
1901     a = pop();
1902     b = _gvn.transform( new ConvF2DNode(a));
1903     push_pair( b );
1904     break;
1905 
1906   case Bytecodes::_d2f:
1907     a = pop_pair();
1908     b = _gvn.transform( new ConvD2FNode(a));
1909     // This breaks _227_mtrt (speed & correctness) and _222_mpegaudio (speed)
1910     //b = _gvn.transform(new RoundFloatNode(0, b) );
1911     push( b );
1912     break;
1913 
1914   case Bytecodes::_l2f:
1915     if (Matcher::convL2FSupported()) {
1916       a = pop_pair();
1917       b = _gvn.transform( new ConvL2FNode(a));
1918       // For i486.ad, FILD doesn't restrict precision to 24 or 53 bits.
1919       // Rather than storing the result into an FP register then pushing
1920       // out to memory to round, the machine instruction that implements
1921       // ConvL2F is responsible for rounding.
1922       // c = precision_rounding(b);
1923       c = _gvn.transform(b);
1924       push(c);
1925     } else {
1926       l2f();
1927     }
1928     break;
1929 
1930   case Bytecodes::_l2d:
1931     a = pop_pair();
1932     b = _gvn.transform( new ConvL2DNode(a));
1933     // For i486.ad, rounding is always necessary (see _l2f above).
1934     // c = dprecision_rounding(b);
1935     c = _gvn.transform(b);
1936     push_pair(c);
1937     break;
1938 
1939   case Bytecodes::_f2l:
1940     a = pop();
1941     b = _gvn.transform( new ConvF2LNode(a));
1942     push_pair(b);
1943     break;
1944 
1945   case Bytecodes::_d2l:
1946     a = pop_pair();
1947     b = _gvn.transform( new ConvD2LNode(a));
1948     push_pair(b);
1949     break;
1950 
1951   case Bytecodes::_dsub:
1952     b = pop_pair();
1953     a = pop_pair();
1954     c = _gvn.transform( new SubDNode(a,b) );
1955     d = dprecision_rounding(c);
1956     push_pair( d );
1957     break;
1958 
1959   case Bytecodes::_dadd:
1960     b = pop_pair();
1961     a = pop_pair();
1962     c = _gvn.transform( new AddDNode(a,b) );
1963     d = dprecision_rounding(c);
1964     push_pair( d );
1965     break;
1966 
1967   case Bytecodes::_dmul:
1968     b = pop_pair();
1969     a = pop_pair();
1970     c = _gvn.transform( new MulDNode(a,b) );
1971     d = dprecision_rounding(c);
1972     push_pair( d );
1973     break;
1974 
1975   case Bytecodes::_ddiv:
1976     b = pop_pair();
1977     a = pop_pair();
1978     c = _gvn.transform( new DivDNode(0,a,b) );
1979     d = dprecision_rounding(c);
1980     push_pair( d );
1981     break;
1982 
1983   case Bytecodes::_dneg:
1984     a = pop_pair();
1985     b = _gvn.transform(new NegDNode (a));
1986     push_pair(b);
1987     break;
1988 
1989   case Bytecodes::_drem:
1990     if (Matcher::has_match_rule(Op_ModD)) {
1991       // Generate a ModD node.
1992       b = pop_pair();
1993       a = pop_pair();
1994       // a % b
1995 
1996       c = _gvn.transform( new ModDNode(0,a,b) );
1997       d = dprecision_rounding(c);
1998       push_pair( d );
1999     }
2000     else {
2001       // Generate a call.
2002       modd();
2003     }
2004     break;
2005 
2006   case Bytecodes::_dcmpl:
2007     b = pop_pair();
2008     a = pop_pair();
2009     c = _gvn.transform( new CmpD3Node( a, b));
2010     push(c);
2011     break;
2012 
2013   case Bytecodes::_dcmpg:
2014     b = pop_pair();
2015     a = pop_pair();
2016     // Same as dcmpl but need to flip the unordered case.
2017     // Commute the inputs, which negates the result sign except for unordered.
2018     // Flip the unordered as well by using CmpD3 which implements
2019     // unordered-lesser instead of unordered-greater semantics.
2020     // Finally, negate the result bits.  Result is same as using a
2021     // CmpD3Greater except we did it with CmpD3 alone.
2022     c = _gvn.transform( new CmpD3Node( b, a));
2023     c = _gvn.transform( new SubINode(_gvn.intcon(0),c) );
2024     push(c);
2025     break;
2026 
2027 
2028     // Note for longs: lo word is on TOS, hi word is on TOS - 1
2029   case Bytecodes::_land:
2030     b = pop_pair();
2031     a = pop_pair();
2032     c = _gvn.transform( new AndLNode(a,b) );
2033     push_pair(c);
2034     break;
2035   case Bytecodes::_lor:
2036     b = pop_pair();
2037     a = pop_pair();
2038     c = _gvn.transform( new OrLNode(a,b) );
2039     push_pair(c);
2040     break;
2041   case Bytecodes::_lxor:
2042     b = pop_pair();
2043     a = pop_pair();
2044     c = _gvn.transform( new XorLNode(a,b) );
2045     push_pair(c);
2046     break;
2047 
2048   case Bytecodes::_lshl:
2049     b = pop();                  // the shift count
2050     a = pop_pair();             // value to be shifted
2051     c = _gvn.transform( new LShiftLNode(a,b) );
2052     push_pair(c);
2053     break;
2054   case Bytecodes::_lshr:
2055     b = pop();                  // the shift count
2056     a = pop_pair();             // value to be shifted
2057     c = _gvn.transform( new RShiftLNode(a,b) );
2058     push_pair(c);
2059     break;
2060   case Bytecodes::_lushr:
2061     b = pop();                  // the shift count
2062     a = pop_pair();             // value to be shifted
2063     c = _gvn.transform( new URShiftLNode(a,b) );
2064     push_pair(c);
2065     break;
2066   case Bytecodes::_lmul:
2067     b = pop_pair();
2068     a = pop_pair();
2069     c = _gvn.transform( new MulLNode(a,b) );
2070     push_pair(c);
2071     break;
2072 
2073   case Bytecodes::_lrem:
2074     // Must keep both values on the expression-stack during the zero-divisor check
2075     assert(peek(0) == top(), "long word order");
2076     zero_check_long(peek(1));
2077     // Zero divisor detected at compile time?
2078     if (stopped())  return;
2079     b = pop_pair();
2080     a = pop_pair();
2081     c = _gvn.transform( new ModLNode(control(),a,b) );
2082     push_pair(c);
2083     break;
2084 
2085   case Bytecodes::_ldiv:
2086     // Must keep both values on the expression-stack during the zero-divisor check
2087     assert(peek(0) == top(), "long word order");
2088     zero_check_long(peek(1));
2089     // Zero divisor detected at compile time?
2090     if (stopped())  return;
2091     b = pop_pair();
2092     a = pop_pair();
2093     c = _gvn.transform( new DivLNode(control(),a,b) );
2094     push_pair(c);
2095     break;
2096 
2097   case Bytecodes::_ladd:
2098     b = pop_pair();
2099     a = pop_pair();
2100     c = _gvn.transform( new AddLNode(a,b) );
2101     push_pair(c);
2102     break;
2103   case Bytecodes::_lsub:
2104     b = pop_pair();
2105     a = pop_pair();
2106     c = _gvn.transform( new SubLNode(a,b) );
2107     push_pair(c);
2108     break;
2109   case Bytecodes::_lcmp:
2110     // Safepoints are now inserted _before_ branches.  The long-compare
2111     // bytecode painfully produces a 3-way value (-1,0,+1) which requires a
2112     // slew of control flow.  These are usually followed by a CmpI vs zero and
2113     // a branch; this pattern then optimizes to the obvious long-compare and
2114     // branch.  However, if the branch is backwards there's a Safepoint
2115     // inserted.  The inserted Safepoint captures the JVM state at the
2116     // pre-branch point, i.e. it captures the 3-way value.  Thus if a
2117     // long-compare is used to control a loop the debug info will force
2118     // computation of the 3-way value, even though the generated code uses a
2119     // long-compare and branch.  We try to rectify the situation by inserting
2120     // a SafePoint here and have it dominate and kill the safepoint added at a
2121     // following backwards branch.  At this point the JVM state merely holds 2
2122     // longs but not the 3-way value.
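         // Typical shape: "lcmp; iflt <backwards target>".  Peeking at the next
         // bytecode below lets the safepoint be added here, while the JVM state
         // still holds the two longs rather than the 3-way compare result.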
2123     if( UseLoopSafepoints ) {
2124       switch( iter().next_bc() ) {
2125       case Bytecodes::_ifgt:
2126       case Bytecodes::_iflt:
2127       case Bytecodes::_ifge:
2128       case Bytecodes::_ifle:
2129       case Bytecodes::_ifne:
2130       case Bytecodes::_ifeq:
2131         // If this is a backwards branch in the bytecodes, add Safepoint
2132         maybe_add_safepoint(iter().next_get_dest());
2133       }
2134     }
2135     b = pop_pair();
2136     a = pop_pair();
2137     c = _gvn.transform( new CmpL3Node( a, b ));
2138     push(c);
2139     break;
2140 
2141   case Bytecodes::_lneg:
2142     a = pop_pair();
2143     b = _gvn.transform( new SubLNode(longcon(0),a));
2144     push_pair(b);
2145     break;
2146   case Bytecodes::_l2i:
2147     a = pop_pair();
2148     push( _gvn.transform( new ConvL2INode(a)));
2149     break;
2150   case Bytecodes::_i2l:
2151     a = pop();
2152     b = _gvn.transform( new ConvI2LNode(a));
2153     push_pair(b);
2154     break;
2155   case Bytecodes::_i2b:
2156     // Sign extend
2157     a = pop();
2158     a = _gvn.transform( new LShiftINode(a,_gvn.intcon(24)) );
2159     a = _gvn.transform( new RShiftINode(a,_gvn.intcon(24)) );
2160     push( a );
2161     break;
2162   case Bytecodes::_i2s:
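         // Sign extend from 16 bits (shift left then arithmetic shift right)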
2163     a = pop();
2164     a = _gvn.transform( new LShiftINode(a,_gvn.intcon(16)) );
2165     a = _gvn.transform( new RShiftINode(a,_gvn.intcon(16)) );
2166     push( a );
2167     break;
2168   case Bytecodes::_i2c:
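         // Zero-extend to 16 bits: char is unsigned, so just mask with 0xFFFF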
2169     a = pop();
2170     push( _gvn.transform( new AndINode(a,_gvn.intcon(0xFFFF)) ) );
2171     break;
2172 
2173   case Bytecodes::_i2f:
2174     a = pop();
2175     b = _gvn.transform( new ConvI2FNode(a) ) ;
2176     c = precision_rounding(b);
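         // Note: the rounded value c is unused and the unrounded b is pushed;
         // presumably the matched ConvI2F instruction already produces a
         // correctly rounded float (an assumption, not verified here).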
2177     push (b);
2178     break;
2179 
2180   case Bytecodes::_i2d:
2181     a = pop();
2182     b = _gvn.transform( new ConvI2DNode(a));
2183     push_pair(b);
2184     break;
2185 
2186   case Bytecodes::_iinc:        // Increment local
2187     i = iter().get_index();     // Get local index
2188     set_local( i, _gvn.transform( new AddINode( _gvn.intcon(iter().get_iinc_con()), local(i) ) ) );
2189     break;
2190 
2191   // Exit points of synchronized methods must have an unlock node
2192   case Bytecodes::_return:
2193     return_current(NULL);
2194     break;
2195 
2196   case Bytecodes::_ireturn:
2197   case Bytecodes::_areturn:
2198   case Bytecodes::_freturn:
2199     return_current(pop());
2200     break;
2201   case Bytecodes::_lreturn:
2202     return_current(pop_pair());
2203     break;
2204   case Bytecodes::_dreturn:
2205     return_current(pop_pair());
2206     break;
2207 
2208   case Bytecodes::_athrow:
2209     // A null exception oop results in a NullPointerException
2210     null_check(peek());
2211     if (stopped())  return;
2212     // Hook the thrown exception directly to subsequent handlers.
2213     if (BailoutToInterpreterForThrows) {
2214       // Keep method interpreted from now on.
2215       uncommon_trap(Deoptimization::Reason_unhandled,
2216                     Deoptimization::Action_make_not_compilable);
2217       return;
2218     }
2219     if (env()->jvmti_can_post_on_exceptions()) {
2220       // check if we must post exception events, take uncommon trap if so (with must_throw = false)
2221       uncommon_trap_if_should_post_on_exceptions(Deoptimization::Reason_unhandled, false);
2222     }
2223     // Here if either can_post_on_exceptions or should_post_on_exceptions is false
2224     add_exception_state(make_exception_state(peek()));
2225     break;
2226 
2227   case Bytecodes::_goto:   // fall through
2228   case Bytecodes::_goto_w: {
2229     int target_bci = (bc() == Bytecodes::_goto) ? iter().get_dest() : iter().get_far_dest();
2230 
2231     // If this is a backwards branch in the bytecodes, add Safepoint
2232     maybe_add_safepoint(target_bci);
2233 
2234     // Update method data
2235     profile_taken_branch(target_bci);
2236 
2237     // Merge the current control into the target basic block
2238     merge(target_bci);
2239 
2240     // See if we can get some profile data and hand it off to the next block
2241     Block *target_block = block()->successor_for_bci(target_bci);
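         // Only worthwhile when this goto is the target's sole predecessor;
         // otherwise the taken count would not describe the whole target block.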
2242     if (target_block->pred_count() != 1)  break;
2243     ciMethodData* methodData = method()->method_data();
2244     if (!methodData->is_mature())  break;
2245     ciProfileData* data = methodData->bci_to_data(bci());
2246     assert( data->is_JumpData(), "" );
2247     int taken = ((ciJumpData*)data)->taken();
2248     taken = method()->scale_count(taken);
2249     target_block->set_count(taken);
2250     break;
2251   }
2252 
2253   case Bytecodes::_ifnull:    btest = BoolTest::eq; goto handle_if_null;
2254   case Bytecodes::_ifnonnull: btest = BoolTest::ne; goto handle_if_null;
2255   handle_if_null:
2256     // If this is a backwards branch in the bytecodes, add Safepoint
2257     maybe_add_safepoint(iter().get_dest());
2258     a = null();
2259     b = pop();
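         // If profiling claims this value is never null and that speculation
         // has not trapped too often, emit a speculative non-null check so the
         // CmpP against null below constant-folds on the non-trapping path.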
2260     if (!_gvn.type(b)->speculative_maybe_null() &&
2261         !too_many_traps(Deoptimization::Reason_speculate_null_check)) {
2262       inc_sp(1);
2263       Node* null_ctl = top();
2264       b = null_check_oop(b, &null_ctl, true, true, true);
2265       assert(null_ctl->is_top(), "no null control here");
2266       dec_sp(1);
2267     }
2268     c = _gvn.transform( new CmpPNode(b, a) );
2269     do_ifnull(btest, c);
2270     break;
2271 
2272   case Bytecodes::_if_acmpeq: btest = BoolTest::eq; goto handle_if_acmp;
2273   case Bytecodes::_if_acmpne: btest = BoolTest::ne; goto handle_if_acmp;
2274   handle_if_acmp:
2275     // If this is a backwards branch in the bytecodes, add Safepoint
2276     maybe_add_safepoint(iter().get_dest());
2277     a = pop();
2278     b = pop();
2279     c = _gvn.transform( new CmpPNode(b, a) );
2280     c = optimize_cmp_with_klass(c);
2281     do_if(btest, c);
2282     break;
2283 
2284   case Bytecodes::_ifeq: btest = BoolTest::eq; goto handle_ifxx;
2285   case Bytecodes::_ifne: btest = BoolTest::ne; goto handle_ifxx;
2286   case Bytecodes::_iflt: btest = BoolTest::lt; goto handle_ifxx;
2287   case Bytecodes::_ifle: btest = BoolTest::le; goto handle_ifxx;
2288   case Bytecodes::_ifgt: btest = BoolTest::gt; goto handle_ifxx;
2289   case Bytecodes::_ifge: btest = BoolTest::ge; goto handle_ifxx;
2290   handle_ifxx:
2291     // If this is a backwards branch in the bytecodes, add Safepoint
2292     maybe_add_safepoint(iter().get_dest());
2293     a = _gvn.intcon(0);
2294     b = pop();
2295     c = _gvn.transform( new CmpINode(b, a) );
2296     do_if(btest, c);
2297     break;
2298 
2299   case Bytecodes::_if_icmpeq: btest = BoolTest::eq; goto handle_if_icmp;
2300   case Bytecodes::_if_icmpne: btest = BoolTest::ne; goto handle_if_icmp;
2301   case Bytecodes::_if_icmplt: btest = BoolTest::lt; goto handle_if_icmp;
2302   case Bytecodes::_if_icmple: btest = BoolTest::le; goto handle_if_icmp;
2303   case Bytecodes::_if_icmpgt: btest = BoolTest::gt; goto handle_if_icmp;
2304   case Bytecodes::_if_icmpge: btest = BoolTest::ge; goto handle_if_icmp;
2305   handle_if_icmp:
2306     // If this is a backwards branch in the bytecodes, add Safepoint
2307     maybe_add_safepoint(iter().get_dest());
2308     a = pop();
2309     b = pop();
2310     c = _gvn.transform( new CmpINode( b, a ) );
2311     do_if(btest, c);
2312     break;
2313 
2314   case Bytecodes::_tableswitch:
2315     do_tableswitch();
2316     break;
2317 
2318   case Bytecodes::_lookupswitch:
2319     do_lookupswitch();
2320     break;
2321 
2322   case Bytecodes::_invokestatic:
2323   case Bytecodes::_invokedynamic:
2324   case Bytecodes::_invokespecial:
2325   case Bytecodes::_invokevirtual:
2326   case Bytecodes::_invokeinterface:
2327     do_call();
2328     break;
2329   case Bytecodes::_checkcast:
2330     do_checkcast();
2331     break;
2332   case Bytecodes::_instanceof:
2333     do_instanceof();
2334     break;
2335   case Bytecodes::_anewarray:
2336     do_anewarray();
2337     break;
2338   case Bytecodes::_newarray:
2339     do_newarray((BasicType)iter().get_index());
2340     break;
2341   case Bytecodes::_multianewarray:
2342     do_multianewarray();
2343     break;
2344   case Bytecodes::_new:
2345     do_new();
2346     break;
2347 
2348   case Bytecodes::_jsr:
2349   case Bytecodes::_jsr_w:
2350     do_jsr();
2351     break;
2352 
2353   case Bytecodes::_ret:
2354     do_ret();
2355     break;
2356 
2357 
2358   case Bytecodes::_monitorenter:
2359     do_monitor_enter();
2360     break;
2361 
2362   case Bytecodes::_monitorexit:
2363     do_monitor_exit();
2364     break;
2365 
2366   case Bytecodes::_breakpoint:
2367     // A breakpoint was set concurrently with this compile
2368     // %%% use an uncommon trap?
2369     C->record_failure("breakpoint in method");
2370     return;
2371 
2372   default:
2373 #ifndef PRODUCT
2374     map()->dump(99);
2375 #endif
2376     tty->print("\nUnhandled bytecode %s\n", Bytecodes::name(bc()) );
2377     ShouldNotReachHere();
2378   }
2379 
2380 #ifndef PRODUCT
2381   IdealGraphPrinter *printer = C->printer();
2382   if (printer && printer->should_print(1)) {
2383     char buffer[256];
2384     sprintf(buffer, "Bytecode %d: %s", bci(), Bytecodes::name(bc()));
2385     bool old = printer->traverse_outs();
2386     printer->set_traverse_outs(true);
2387     printer->print_method(buffer, 4);
2388     printer->set_traverse_outs(old);
2389   }
2390 #endif
2391 }