1 /*
   2  * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
  20  * CA 95054 USA or visit www.sun.com if you need additional information or
  21  * have any questions.
  22  *
  23  */
  24 
  25 #include "incls/_precompiled.incl"
  26 #include "incls/_graphKit.cpp.incl"
  27 
  28 //----------------------------GraphKit-----------------------------------------
  29 // Main utility constructor.
  30 GraphKit::GraphKit(JVMState* jvms)
  31   : Phase(Phase::Parser),
  32     _env(C->env()),
  33     _gvn(*C->initial_gvn())
  34 {
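  // Steal any exception states already chained onto the incoming map, so
  // they are not disturbed while this kit builds new graph shape; they are
  // reattached later via transfer_exceptions_into_jvms().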
  35   _exceptions = jvms->map()->next_exception();
  36   if (_exceptions != NULL)  jvms->map()->set_next_exception(NULL);
  37   set_jvms(jvms);
  38 }
  39 
  40 // Private constructor for parser.
  41 GraphKit::GraphKit()
  42   : Phase(Phase::Parser),
  43     _env(C->env()),
  44     _gvn(*C->initial_gvn())
  45 {
  46   _exceptions = NULL;
  47   set_map(NULL);
  48   debug_only(_sp = -99);
  49   debug_only(set_bci(-99));
  50 }
  51 
  52 
  53 
  54 //---------------------------clean_stack---------------------------------------
  55 // Clear away rubbish from the stack area of the JVM state.
  56 // This destroys any arguments that may be waiting on the stack.
  57 void GraphKit::clean_stack(int from_sp) {
  58   SafePointNode* map      = this->map();
  59   JVMState*      jvms     = this->jvms();
  60   int            stk_size = jvms->stk_size();
  61   int            stkoff   = jvms->stkoff();
  62   Node*          top      = this->top();
  63   for (int i = from_sp; i < stk_size; i++) {
  64     if (map->in(stkoff + i) != top) {
  65       map->set_req(stkoff + i, top);
  66     }
  67   }
  68 }
  69 
  70 
  71 //--------------------------------sync_jvms-----------------------------------
  72 // Make sure our current jvms agrees with our parse state.
  73 JVMState* GraphKit::sync_jvms() const {
  74   JVMState* jvms = this->jvms();
  75   jvms->set_bci(bci());       // Record the new bci in the JVMState
  76   jvms->set_sp(sp());         // Record the new sp in the JVMState
  77   assert(jvms_in_sync(), "jvms is now in sync");
  78   return jvms;
  79 }
  80 
  81 #ifdef ASSERT
  82 bool GraphKit::jvms_in_sync() const {
  83   Parse* parse = is_Parse();
  84   if (parse == NULL) {
  85     if (bci() !=      jvms()->bci())          return false;
  86     if (sp()  != (int)jvms()->sp())           return false;
  87     return true;
  88   }
  89   if (jvms()->method() != parse->method())    return false;
  90   if (jvms()->bci()    != parse->bci())       return false;
  91   int jvms_sp = jvms()->sp();
  92   if (jvms_sp          != parse->sp())        return false;
  93   int jvms_depth = jvms()->depth();
  94   if (jvms_depth       != parse->depth())     return false;
  95   return true;
  96 }
  97 
  98 // Local helper checks for special internal merge points
  99 // used to accumulate and merge exception states.
// They are marked by the region's in(0) edge being the root node.
 101 // Such merge points must never "escape" into the parser at large,
 102 // until they have been handed to gvn.transform.
 103 static bool is_hidden_merge(Node* reg) {
 104   if (reg == NULL)  return false;
 105   if (reg->is_Phi()) {
 106     reg = reg->in(0);
 107     if (reg == NULL)  return false;
 108   }
 109   return reg->is_Region() && reg->in(0) != NULL && reg->in(0)->is_Root();
 110 }
 111 
 112 void GraphKit::verify_map() const {
 113   if (map() == NULL)  return;  // null map is OK
 114   assert(map()->req() <= jvms()->endoff(), "no extra garbage on map");
 115   assert(!map()->has_exceptions(),    "call add_exception_states_from 1st");
 116   assert(!is_hidden_merge(control()), "call use_exception_state, not set_map");
 117 }
 118 
 119 void GraphKit::verify_exception_state(SafePointNode* ex_map) {
 120   assert(ex_map->next_exception() == NULL, "not already part of a chain");
 121   assert(has_saved_ex_oop(ex_map), "every exception state has an ex_oop");
 122 }
 123 #endif
 124 
 125 //---------------------------stop_and_kill_map---------------------------------
 126 // Set _map to NULL, signalling a stop to further bytecode execution.
 127 // First smash the current map's control to a constant, to mark it dead.
 128 void GraphKit::stop_and_kill_map() {
 129   SafePointNode* dead_map = stop();
 130   if (dead_map != NULL) {
 131     dead_map->disconnect_inputs(NULL); // Mark the map as killed.
 132     assert(dead_map->is_killed(), "must be so marked");
 133   }
 134 }
 135 
 136 
 137 //--------------------------------stopped--------------------------------------
 138 // Tell if _map is NULL, or control is top.
 139 bool GraphKit::stopped() {
 140   if (map() == NULL)           return true;
 141   else if (control() == top()) return true;
 142   else                         return false;
 143 }
 144 
 145 
 146 //-----------------------------has_ex_handler----------------------------------
 147 // Tell if this method or any caller method has exception handlers.
 148 bool GraphKit::has_ex_handler() {
 149   for (JVMState* jvmsp = jvms(); jvmsp != NULL; jvmsp = jvmsp->caller()) {
 150     if (jvmsp->has_method() && jvmsp->method()->has_exception_handlers()) {
 151       return true;
 152     }
 153   }
 154   return false;
 155 }
 156 
//---------------------------set_saved_ex_oop----------------------------------
 158 // Save an exception without blowing stack contents or other JVM state.
 159 void GraphKit::set_saved_ex_oop(SafePointNode* ex_map, Node* ex_oop) {
 160   assert(!has_saved_ex_oop(ex_map), "clear ex-oop before setting again");
 161   ex_map->add_req(ex_oop);
 162   debug_only(verify_exception_state(ex_map));
 163 }
 164 
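// The saved exception oop is kept as one extra required edge appended past
// the JVMS's endoff, which is why has_saved_ex_oop() below simply checks
// for req() == endoff()+1.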
 165 inline static Node* common_saved_ex_oop(SafePointNode* ex_map, bool clear_it) {
 166   assert(GraphKit::has_saved_ex_oop(ex_map), "ex_oop must be there");
 167   Node* ex_oop = ex_map->in(ex_map->req()-1);
 168   if (clear_it)  ex_map->del_req(ex_map->req()-1);
 169   return ex_oop;
 170 }
 171 
 172 //-----------------------------saved_ex_oop------------------------------------
 173 // Recover a saved exception from its map.
 174 Node* GraphKit::saved_ex_oop(SafePointNode* ex_map) {
 175   return common_saved_ex_oop(ex_map, false);
 176 }
 177 
 178 //--------------------------clear_saved_ex_oop---------------------------------
 179 // Erase a previously saved exception from its map.
 180 Node* GraphKit::clear_saved_ex_oop(SafePointNode* ex_map) {
 181   return common_saved_ex_oop(ex_map, true);
 182 }
 183 
 184 #ifdef ASSERT
 185 //---------------------------has_saved_ex_oop----------------------------------
// Tell whether an exception oop has been saved in the given map.
 187 bool GraphKit::has_saved_ex_oop(SafePointNode* ex_map) {
 188   return ex_map->req() == ex_map->jvms()->endoff()+1;
 189 }
 190 #endif
 191 
 192 //-------------------------make_exception_state--------------------------------
 193 // Turn the current JVM state into an exception state, appending the ex_oop.
 194 SafePointNode* GraphKit::make_exception_state(Node* ex_oop) {
 195   sync_jvms();
 196   SafePointNode* ex_map = stop();  // do not manipulate this map any more
 197   set_saved_ex_oop(ex_map, ex_oop);
 198   return ex_map;
 199 }
 200 
 201 
 202 //--------------------------add_exception_state--------------------------------
 203 // Add an exception to my list of exceptions.
 204 void GraphKit::add_exception_state(SafePointNode* ex_map) {
 205   if (ex_map == NULL || ex_map->control() == top()) {
 206     return;
 207   }
 208 #ifdef ASSERT
 209   verify_exception_state(ex_map);
 210   if (has_exceptions()) {
 211     assert(ex_map->jvms()->same_calls_as(_exceptions->jvms()), "all collected exceptions must come from the same place");
 212   }
 213 #endif
 214 
 215   // If there is already an exception of exactly this type, merge with it.
 216   // In particular, null-checks and other low-level exceptions common up here.
 217   Node*       ex_oop  = saved_ex_oop(ex_map);
 218   const Type* ex_type = _gvn.type(ex_oop);
 219   if (ex_oop == top()) {
 220     // No action needed.
 221     return;
 222   }
 223   assert(ex_type->isa_instptr(), "exception must be an instance");
 224   for (SafePointNode* e2 = _exceptions; e2 != NULL; e2 = e2->next_exception()) {
 225     const Type* ex_type2 = _gvn.type(saved_ex_oop(e2));
 226     // We check sp also because call bytecodes can generate exceptions
 227     // both before and after arguments are popped!
 228     if (ex_type2 == ex_type
 229         && e2->_jvms->sp() == ex_map->_jvms->sp()) {
 230       combine_exception_states(ex_map, e2);
 231       return;
 232     }
 233   }
 234 
 235   // No pre-existing exception of the same type.  Chain it on the list.
 236   push_exception_state(ex_map);
 237 }
 238 
 239 //-----------------------add_exception_states_from-----------------------------
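// Drain every exception state chained onto the given JVMS's map into this
// kit's own exception list, one at a time, so that states of the same type
// can be merged by add_exception_state().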
 240 void GraphKit::add_exception_states_from(JVMState* jvms) {
 241   SafePointNode* ex_map = jvms->map()->next_exception();
 242   if (ex_map != NULL) {
 243     jvms->map()->set_next_exception(NULL);
 244     for (SafePointNode* next_map; ex_map != NULL; ex_map = next_map) {
 245       next_map = ex_map->next_exception();
 246       ex_map->set_next_exception(NULL);
 247       add_exception_state(ex_map);
 248     }
 249   }
 250 }
 251 
 252 //-----------------------transfer_exceptions_into_jvms-------------------------
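// Hand the accumulated exception states back to a caller by chaining them
// onto the map of a synced (or, if the map is gone, freshly created) JVMS.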
 253 JVMState* GraphKit::transfer_exceptions_into_jvms() {
 254   if (map() == NULL) {
 255     // We need a JVMS to carry the exceptions, but the map has gone away.
 256     // Create a scratch JVMS, cloned from any of the exception states...
 257     if (has_exceptions()) {
 258       _map = _exceptions;
 259       _map = clone_map();
 260       _map->set_next_exception(NULL);
 261       clear_saved_ex_oop(_map);
 262       debug_only(verify_map());
 263     } else {
 264       // ...or created from scratch
 265       JVMState* jvms = new (C) JVMState(_method, NULL);
 266       jvms->set_bci(_bci);
 267       jvms->set_sp(_sp);
 268       jvms->set_map(new (C, TypeFunc::Parms) SafePointNode(TypeFunc::Parms, jvms));
 269       set_jvms(jvms);
 270       for (uint i = 0; i < map()->req(); i++)  map()->init_req(i, top());
 271       set_all_memory(top());
 272       while (map()->req() < jvms->endoff())  map()->add_req(top());
 273     }
 274     // (This is a kludge, in case you didn't notice.)
 275     set_control(top());
 276   }
 277   JVMState* jvms = sync_jvms();
 278   assert(!jvms->map()->has_exceptions(), "no exceptions on this map yet");
 279   jvms->map()->set_next_exception(_exceptions);
 280   _exceptions = NULL;   // done with this set of exceptions
 281   return jvms;
 282 }
 283 
 284 static inline void add_n_reqs(Node* dstphi, Node* srcphi) {
 285   assert(is_hidden_merge(dstphi), "must be a special merge node");
 286   assert(is_hidden_merge(srcphi), "must be a special merge node");
 287   uint limit = srcphi->req();
 288   for (uint i = PhiNode::Input; i < limit; i++) {
 289     dstphi->add_req(srcphi->in(i));
 290   }
 291 }
 292 static inline void add_one_req(Node* dstphi, Node* src) {
 293   assert(is_hidden_merge(dstphi), "must be a special merge node");
 294   assert(!is_hidden_merge(src), "must not be a special merge node");
 295   dstphi->add_req(src);
 296 }
 297 
 298 //-----------------------combine_exception_states------------------------------
 299 // This helper function combines exception states by building phis on a
 300 // specially marked state-merging region.  These regions and phis are
 301 // untransformed, and can build up gradually.  The region is marked by
// having the root node as its control input, rather than NULL.  Such
 303 // regions do not appear except in this function, and in use_exception_state.
 304 void GraphKit::combine_exception_states(SafePointNode* ex_map, SafePointNode* phi_map) {
 305   if (failing())  return;  // dying anyway...
 306   JVMState* ex_jvms = ex_map->_jvms;
 307   assert(ex_jvms->same_calls_as(phi_map->_jvms), "consistent call chains");
 308   assert(ex_jvms->stkoff() == phi_map->_jvms->stkoff(), "matching locals");
 309   assert(ex_jvms->sp() == phi_map->_jvms->sp(), "matching stack sizes");
 310   assert(ex_jvms->monoff() == phi_map->_jvms->monoff(), "matching JVMS");
 311   assert(ex_map->req() == phi_map->req(), "matching maps");
 312   uint tos = ex_jvms->stkoff() + ex_jvms->sp();
 313   Node*         hidden_merge_mark = root();
 314   Node*         region  = phi_map->control();
 315   MergeMemNode* phi_mem = phi_map->merged_memory();
 316   MergeMemNode* ex_mem  = ex_map->merged_memory();
 317   if (region->in(0) != hidden_merge_mark) {
 318     // The control input is not (yet) a specially-marked region in phi_map.
 319     // Make it so, and build some phis.
 320     region = new (C, 2) RegionNode(2);
 321     _gvn.set_type(region, Type::CONTROL);
 322     region->set_req(0, hidden_merge_mark);  // marks an internal ex-state
 323     region->init_req(1, phi_map->control());
 324     phi_map->set_control(region);
 325     Node* io_phi = PhiNode::make(region, phi_map->i_o(), Type::ABIO);
 326     record_for_igvn(io_phi);
 327     _gvn.set_type(io_phi, Type::ABIO);
 328     phi_map->set_i_o(io_phi);
 329     for (MergeMemStream mms(phi_mem); mms.next_non_empty(); ) {
 330       Node* m = mms.memory();
 331       Node* m_phi = PhiNode::make(region, m, Type::MEMORY, mms.adr_type(C));
 332       record_for_igvn(m_phi);
 333       _gvn.set_type(m_phi, Type::MEMORY);
 334       mms.set_memory(m_phi);
 335     }
 336   }
 337 
 338   // Either or both of phi_map and ex_map might already be converted into phis.
 339   Node* ex_control = ex_map->control();
 340   // if there is special marking on ex_map also, we add multiple edges from src
 341   bool add_multiple = (ex_control->in(0) == hidden_merge_mark);
 342   // how wide was the destination phi_map, originally?
 343   uint orig_width = region->req();
 344 
 345   if (add_multiple) {
 346     add_n_reqs(region, ex_control);
 347     add_n_reqs(phi_map->i_o(), ex_map->i_o());
 348   } else {
 349     // ex_map has no merges, so we just add single edges everywhere
 350     add_one_req(region, ex_control);
 351     add_one_req(phi_map->i_o(), ex_map->i_o());
 352   }
 353   for (MergeMemStream mms(phi_mem, ex_mem); mms.next_non_empty2(); ) {
 354     if (mms.is_empty()) {
 355       // get a copy of the base memory, and patch some inputs into it
 356       const TypePtr* adr_type = mms.adr_type(C);
 357       Node* phi = mms.force_memory()->as_Phi()->slice_memory(adr_type);
 358       assert(phi->as_Phi()->region() == mms.base_memory()->in(0), "");
 359       mms.set_memory(phi);
 360       // Prepare to append interesting stuff onto the newly sliced phi:
 361       while (phi->req() > orig_width)  phi->del_req(phi->req()-1);
 362     }
 363     // Append stuff from ex_map:
 364     if (add_multiple) {
 365       add_n_reqs(mms.memory(), mms.memory2());
 366     } else {
 367       add_one_req(mms.memory(), mms.memory2());
 368     }
 369   }
 370   uint limit = ex_map->req();
 371   for (uint i = TypeFunc::Parms; i < limit; i++) {
 372     // Skip everything in the JVMS after tos.  (The ex_oop follows.)
 373     if (i == tos)  i = ex_jvms->monoff();
 374     Node* src = ex_map->in(i);
 375     Node* dst = phi_map->in(i);
 376     if (src != dst) {
 377       PhiNode* phi;
 378       if (dst->in(0) != region) {
 379         dst = phi = PhiNode::make(region, dst, _gvn.type(dst));
 380         record_for_igvn(phi);
 381         _gvn.set_type(phi, phi->type());
 382         phi_map->set_req(i, dst);
 383         // Prepare to append interesting stuff onto the new phi:
 384         while (dst->req() > orig_width)  dst->del_req(dst->req()-1);
 385       } else {
 386         assert(dst->is_Phi(), "nobody else uses a hidden region");
 387         phi = (PhiNode*)dst;
 388       }
 389       if (add_multiple && src->in(0) == ex_control) {
 390         // Both are phis.
 391         add_n_reqs(dst, src);
 392       } else {
 393         while (dst->req() < region->req())  add_one_req(dst, src);
 394       }
 395       const Type* srctype = _gvn.type(src);
 396       if (phi->type() != srctype) {
 397         const Type* dsttype = phi->type()->meet(srctype);
 398         if (phi->type() != dsttype) {
 399           phi->set_type(dsttype);
 400           _gvn.set_type(phi, dsttype);
 401         }
 402       }
 403     }
 404   }
 405 }
 406 
 407 //--------------------------use_exception_state--------------------------------
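// Adopt the given exception state as the current map.  If its control is one
// of the specially marked internal merges built by combine_exception_states,
// the region and its phis are transformed here first.  Returns the ex_oop.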
 408 Node* GraphKit::use_exception_state(SafePointNode* phi_map) {
 409   if (failing()) { stop(); return top(); }
 410   Node* region = phi_map->control();
 411   Node* hidden_merge_mark = root();
 412   assert(phi_map->jvms()->map() == phi_map, "sanity: 1-1 relation");
 413   Node* ex_oop = clear_saved_ex_oop(phi_map);
 414   if (region->in(0) == hidden_merge_mark) {
 415     // Special marking for internal ex-states.  Process the phis now.
 416     region->set_req(0, region);  // now it's an ordinary region
 417     set_jvms(phi_map->jvms());   // ...so now we can use it as a map
 418     // Note: Setting the jvms also sets the bci and sp.
 419     set_control(_gvn.transform(region));
 420     uint tos = jvms()->stkoff() + sp();
 421     for (uint i = 1; i < tos; i++) {
 422       Node* x = phi_map->in(i);
 423       if (x->in(0) == region) {
 424         assert(x->is_Phi(), "expected a special phi");
 425         phi_map->set_req(i, _gvn.transform(x));
 426       }
 427     }
 428     for (MergeMemStream mms(merged_memory()); mms.next_non_empty(); ) {
 429       Node* x = mms.memory();
 430       if (x->in(0) == region) {
 431         assert(x->is_Phi(), "nobody else uses a hidden region");
 432         mms.set_memory(_gvn.transform(x));
 433       }
 434     }
 435     if (ex_oop->in(0) == region) {
 436       assert(ex_oop->is_Phi(), "expected a special phi");
 437       ex_oop = _gvn.transform(ex_oop);
 438     }
 439   } else {
 440     set_jvms(phi_map->jvms());
 441   }
 442 
 443   assert(!is_hidden_merge(phi_map->control()), "hidden ex. states cleared");
 444   assert(!is_hidden_merge(phi_map->i_o()), "hidden ex. states cleared");
 445   return ex_oop;
 446 }
 447 
 448 //---------------------------------java_bc-------------------------------------
 449 Bytecodes::Code GraphKit::java_bc() const {
 450   ciMethod* method = this->method();
 451   int       bci    = this->bci();
 452   if (method != NULL && bci != InvocationEntryBci)
 453     return method->java_code_at_bci(bci);
 454   else
 455     return Bytecodes::_illegal;
 456 }
 457 
 458 //------------------------------builtin_throw----------------------------------
 459 void GraphKit::builtin_throw(Deoptimization::DeoptReason reason, Node* arg) {
 460   bool must_throw = true;
 461 
 462   if (JvmtiExport::can_post_exceptions()) {
 463     // Do not try anything fancy if we're notifying the VM on every throw.
 464     // Cf. case Bytecodes::_athrow in parse2.cpp.
 465     uncommon_trap(reason, Deoptimization::Action_none,
 466                   (ciKlass*)NULL, (char*)NULL, must_throw);
 467     return;
 468   }
 469 
 470   // If this particular condition has not yet happened at this
 471   // bytecode, then use the uncommon trap mechanism, and allow for
 472   // a future recompilation if several traps occur here.
 473   // If the throw is hot, try to use a more complicated inline mechanism
 474   // which keeps execution inside the compiled code.
 475   bool treat_throw_as_hot = false;
 476   ciMethodData* md = method()->method_data();
 477 
 478   if (ProfileTraps) {
 479     if (too_many_traps(reason)) {
 480       treat_throw_as_hot = true;
 481     }
 482     // (If there is no MDO at all, assume it is early in
 483     // execution, and that any deopts are part of the
 484     // startup transient, and don't need to be remembered.)
 485 
 486     // Also, if there is a local exception handler, treat all throws
 487     // as hot if there has been at least one in this method.
 488     if (C->trap_count(reason) != 0
 489         && method()->method_data()->trap_count(reason) != 0
 490         && has_ex_handler()) {
 491         treat_throw_as_hot = true;
 492     }
 493   }
 494 
 495   // If this throw happens frequently, an uncommon trap might cause
 496   // a performance pothole.  If there is a local exception handler,
 497   // and if this particular bytecode appears to be deoptimizing often,
 498   // let us handle the throw inline, with a preconstructed instance.
 499   // Note:   If the deopt count has blown up, the uncommon trap
// runtime is going to flush this nmethod, no matter what.
 501   if (treat_throw_as_hot
 502       && (!StackTraceInThrowable || OmitStackTraceInFastThrow)) {
 503     // If the throw is local, we use a pre-existing instance and
 504     // punt on the backtrace.  This would lead to a missing backtrace
 505     // (a repeat of 4292742) if the backtrace object is ever asked
 506     // for its backtrace.
 507     // Fixing this remaining case of 4292742 requires some flavor of
 508     // escape analysis.  Leave that for the future.
 509     ciInstance* ex_obj = NULL;
 510     switch (reason) {
 511     case Deoptimization::Reason_null_check:
 512       ex_obj = env()->NullPointerException_instance();
 513       break;
 514     case Deoptimization::Reason_div0_check:
 515       ex_obj = env()->ArithmeticException_instance();
 516       break;
 517     case Deoptimization::Reason_range_check:
 518       ex_obj = env()->ArrayIndexOutOfBoundsException_instance();
 519       break;
 520     case Deoptimization::Reason_class_check:
 521       if (java_bc() == Bytecodes::_aastore) {
 522         ex_obj = env()->ArrayStoreException_instance();
 523       } else {
 524         ex_obj = env()->ClassCastException_instance();
 525       }
 526       break;
 527     }
 528     if (failing()) { stop(); return; }  // exception allocation might fail
 529     if (ex_obj != NULL) {
 530       // Cheat with a preallocated exception object.
 531       if (C->log() != NULL)
 532         C->log()->elem("hot_throw preallocated='1' reason='%s'",
 533                        Deoptimization::trap_reason_name(reason));
 534       const TypeInstPtr* ex_con  = TypeInstPtr::make(ex_obj);
 535       Node*              ex_node = _gvn.transform( ConNode::make(C, ex_con) );
 536 
 537       // Clear the detail message of the preallocated exception object.
 538       // Weblogic sometimes mutates the detail message of exceptions
 539       // using reflection.
 540       int offset = java_lang_Throwable::get_detailMessage_offset();
 541       const TypePtr* adr_typ = ex_con->add_offset(offset);
 542 
 543       Node *adr = basic_plus_adr(ex_node, ex_node, offset);
 544       Node *store = store_oop_to_object(control(), ex_node, adr, adr_typ, null(), ex_con, T_OBJECT);
 545 
 546       add_exception_state(make_exception_state(ex_node));
 547       return;
 548     }
 549   }
 550 
 551   // %%% Maybe add entry to OptoRuntime which directly throws the exc.?
 552   // It won't be much cheaper than bailing to the interp., since we'll
 553   // have to pass up all the debug-info, and the runtime will have to
 554   // create the stack trace.
 555 
 556   // Usual case:  Bail to interpreter.
 557   // Reserve the right to recompile if we haven't seen anything yet.
 558 
 559   Deoptimization::DeoptAction action = Deoptimization::Action_maybe_recompile;
 560   if (treat_throw_as_hot
 561       && (method()->method_data()->trap_recompiled_at(bci())
 562           || C->too_many_traps(reason))) {
 563     // We cannot afford to take more traps here.  Suffer in the interpreter.
 564     if (C->log() != NULL)
 565       C->log()->elem("hot_throw preallocated='0' reason='%s' mcount='%d'",
 566                      Deoptimization::trap_reason_name(reason),
 567                      C->trap_count(reason));
 568     action = Deoptimization::Action_none;
 569   }
 570 
 571   // "must_throw" prunes the JVM state to include only the stack, if there
 572   // are no local exception handlers.  This should cut down on register
 573   // allocation time and code size, by drastically reducing the number
 574   // of in-edges on the call to the uncommon trap.
 575 
 576   uncommon_trap(reason, action, (ciKlass*)NULL, (char*)NULL, must_throw);
 577 }
 578 
 579 
 580 //----------------------------PreserveJVMState---------------------------------
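// Illustrative usage (a sketch of the typical call-site pattern, not code
// taken from this file): the saved map and sp are restored automatically
// when the guard goes out of scope.
//
//   { PreserveJVMState pjvms(this);
//     // ... speculative graph building that may clobber the map ...
//   } // <- original map and sp restored here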
 581 PreserveJVMState::PreserveJVMState(GraphKit* kit, bool clone_map) {
 582   debug_only(kit->verify_map());
 583   _kit    = kit;
 584   _map    = kit->map();   // preserve the map
 585   _sp     = kit->sp();
 586   kit->set_map(clone_map ? kit->clone_map() : NULL);
 587 #ifdef ASSERT
 588   _bci    = kit->bci();
 589   Parse* parser = kit->is_Parse();
 590   int block = (parser == NULL || parser->block() == NULL) ? -1 : parser->block()->rpo();
 591   _block  = block;
 592 #endif
 593 }
 594 PreserveJVMState::~PreserveJVMState() {
 595   GraphKit* kit = _kit;
 596 #ifdef ASSERT
 597   assert(kit->bci() == _bci, "bci must not shift");
 598   Parse* parser = kit->is_Parse();
 599   int block = (parser == NULL || parser->block() == NULL) ? -1 : parser->block()->rpo();
 600   assert(block == _block,    "block must not shift");
 601 #endif
 602   kit->set_map(_map);
 603   kit->set_sp(_sp);
 604 }
 605 
 606 
 607 //-----------------------------BuildCutout-------------------------------------
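// Split control along the test 'p': the preserved (outer) map continues on
// the IfTrue projection, while this kit is left on the IfFalse path, which
// must end in a throw, trap, or return (see the assert in the destructor).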
 608 BuildCutout::BuildCutout(GraphKit* kit, Node* p, float prob, float cnt)
 609   : PreserveJVMState(kit)
 610 {
 611   assert(p->is_Con() || p->is_Bool(), "test must be a bool");
 612   SafePointNode* outer_map = _map;   // preserved map is caller's
 613   SafePointNode* inner_map = kit->map();
 614   IfNode* iff = kit->create_and_map_if(outer_map->control(), p, prob, cnt);
 615   outer_map->set_control(kit->gvn().transform( new (kit->C, 1) IfTrueNode(iff) ));
 616   inner_map->set_control(kit->gvn().transform( new (kit->C, 1) IfFalseNode(iff) ));
 617 }
 618 BuildCutout::~BuildCutout() {
 619   GraphKit* kit = _kit;
 620   assert(kit->stopped(), "cutout code must stop, throw, return, etc.");
 621 }
 622 
 623 
 624 //------------------------------clone_map--------------------------------------
 625 // Implementation of PreserveJVMState
 626 //
// Only clone_map(...) lives here.  If this function ends up being used
// only by the PreserveJVMState class, we may want to get rid of this
// extra function eventually and do the cloning there.
 630 
 631 SafePointNode* GraphKit::clone_map() {
 632   if (map() == NULL)  return NULL;
 633 
 634   // Clone the memory edge first
 635   Node* mem = MergeMemNode::make(C, map()->memory());
 636   gvn().set_type_bottom(mem);
 637 
 638   SafePointNode *clonemap = (SafePointNode*)map()->clone();
 639   JVMState* jvms = this->jvms();
 640   JVMState* clonejvms = jvms->clone_shallow(C);
 641   clonemap->set_memory(mem);
 642   clonemap->set_jvms(clonejvms);
 643   clonejvms->set_map(clonemap);
 644   record_for_igvn(clonemap);
 645   gvn().set_type_bottom(clonemap);
 646   return clonemap;
 647 }
 648 
 649 
 650 //-----------------------------set_map_clone-----------------------------------
 651 void GraphKit::set_map_clone(SafePointNode* m) {
 652   _map = m;
 653   _map = clone_map();
 654   _map->set_next_exception(NULL);
 655   debug_only(verify_map());
 656 }
 657 
 658 
 659 //----------------------------kill_dead_locals---------------------------------
 660 // Detect any locals which are known to be dead, and force them to top.
 661 void GraphKit::kill_dead_locals() {
 662   // Consult the liveness information for the locals.  If any
 663   // of them are unused, then they can be replaced by top().  This
 664   // should help register allocation time and cut down on the size
 665   // of the deoptimization information.
 666 
 667   // This call is made from many of the bytecode handling
 668   // subroutines called from the Big Switch in do_one_bytecode.
 669   // Every bytecode which might include a slow path is responsible
 670   // for killing its dead locals.  The more consistent we
 671   // are about killing deads, the fewer useless phis will be
 672   // constructed for them at various merge points.
 673 
 674   // bci can be -1 (InvocationEntryBci).  We return the entry
 675   // liveness for the method.
 676 
 677   if (method() == NULL || method()->code_size() == 0) {
 678     // We are building a graph for a call to a native method.
 679     // All locals are live.
 680     return;
 681   }
 682 
 683   ResourceMark rm;
 684 
 689   MethodLivenessResult live_locals = method()->liveness_at_bci(bci());
 690 
 691   int len = (int)live_locals.size();
 692   assert(len <= jvms()->loc_size(), "too many live locals");
 693   for (int local = 0; local < len; local++) {
 694     if (!live_locals.at(local)) {
 695       set_local(local, top());
 696     }
 697   }
 698 }
 699 
 700 #ifdef ASSERT
 701 //-------------------------dead_locals_are_killed------------------------------
 702 // Return true if all dead locals are set to top in the map.
 703 // Used to assert "clean" debug info at various points.
 704 bool GraphKit::dead_locals_are_killed() {
 705   if (method() == NULL || method()->code_size() == 0) {
 706     // No locals need to be dead, so all is as it should be.
 707     return true;
 708   }
 709 
 710   // Make sure somebody called kill_dead_locals upstream.
 711   ResourceMark rm;
 712   for (JVMState* jvms = this->jvms(); jvms != NULL; jvms = jvms->caller()) {
 713     if (jvms->loc_size() == 0)  continue;  // no locals to consult
 714     SafePointNode* map = jvms->map();
 715     ciMethod* method = jvms->method();
 716     int       bci    = jvms->bci();
 717     if (jvms == this->jvms()) {
 718       bci = this->bci();  // it might not yet be synched
 719     }
 720     MethodLivenessResult live_locals = method->liveness_at_bci(bci);
 721     int len = (int)live_locals.size();
 722     if (!live_locals.is_valid() || len == 0)
 723       // This method is trivial, or is poisoned by a breakpoint.
 724       return true;
 725     assert(len == jvms->loc_size(), "live map consistent with locals map");
 726     for (int local = 0; local < len; local++) {
 727       if (!live_locals.at(local) && map->local(jvms, local) != top()) {
 728         if (PrintMiscellaneous && (Verbose || WizardMode)) {
 729           tty->print_cr("Zombie local %d: ", local);
 730           jvms->dump();
 731         }
 732         return false;
 733       }
 734     }
 735   }
 736   return true;
 737 }
 738 
 739 #endif //ASSERT
 740 
 741 // Helper function for adding JVMState and debug information to node
 742 void GraphKit::add_safepoint_edges(SafePointNode* call, bool must_throw) {
 743   // Add the safepoint edges to the call (or other safepoint).
 744 
 745   // Make sure dead locals are set to top.  This
 746   // should help register allocation time and cut down on the size
 747   // of the deoptimization information.
 748   assert(dead_locals_are_killed(), "garbage in debug info before safepoint");
 749 
  // Walk the inline list to fill in the correct set of JVMStates.
 751   // Also fill in the associated edges for each JVMState.
 752 
 753   JVMState* youngest_jvms = sync_jvms();
 754 
 755   // Do we need debug info here?  If it is a SafePoint and this method
 756   // cannot de-opt, then we do NOT need any debug info.
 757   bool full_info = (C->deopt_happens() || call->Opcode() != Op_SafePoint);
 758 
 759   // If we are guaranteed to throw, we can prune everything but the
 760   // input to the current bytecode.
 761   bool can_prune_locals = false;
 762   uint stack_slots_not_pruned = 0;
 763   int inputs = 0, depth = 0;
 764   if (must_throw) {
 765     assert(method() == youngest_jvms->method(), "sanity");
 766     if (compute_stack_effects(inputs, depth)) {
 767       can_prune_locals = true;
 768       stack_slots_not_pruned = inputs;
 769     }
 770   }
 771 
 772   if (JvmtiExport::can_examine_or_deopt_anywhere()) {
 773     // At any safepoint, this method can get breakpointed, which would
 774     // then require an immediate deoptimization.
 775     full_info = true;
 776     can_prune_locals = false;  // do not prune locals
 777     stack_slots_not_pruned = 0;
 778   }
 779 
 780   // do not scribble on the input jvms
 781   JVMState* out_jvms = youngest_jvms->clone_deep(C);
 782   call->set_jvms(out_jvms); // Start jvms list for call node
 783 
 784   // Presize the call:
 785   debug_only(uint non_debug_edges = call->req());
 786   call->add_req_batch(top(), youngest_jvms->debug_depth());
 787   assert(call->req() == non_debug_edges + youngest_jvms->debug_depth(), "");
 788 
 789   // Set up edges so that the call looks like this:
 790   //  Call [state:] ctl io mem fptr retadr
 791   //       [parms:] parm0 ... parmN
 792   //       [root:]  loc0 ... locN stk0 ... stkSP mon0 obj0 ... monN objN
 793   //    [...mid:]   loc0 ... locN stk0 ... stkSP mon0 obj0 ... monN objN [...]
 794   //       [young:] loc0 ... locN stk0 ... stkSP mon0 obj0 ... monN objN
 795   // Note that caller debug info precedes callee debug info.
 796 
 797   // Fill pointer walks backwards from "young:" to "root:" in the diagram above:
 798   uint debug_ptr = call->req();
 799 
 800   // Loop over the map input edges associated with jvms, add them
 801   // to the call node, & reset all offsets to match call node array.
 802   for (JVMState* in_jvms = youngest_jvms; in_jvms != NULL; ) {
 803     uint debug_end   = debug_ptr;
 804     uint debug_start = debug_ptr - in_jvms->debug_size();
 805     debug_ptr = debug_start;  // back up the ptr
 806 
 807     uint p = debug_start;  // walks forward in [debug_start, debug_end)
 808     uint j, k, l;
 809     SafePointNode* in_map = in_jvms->map();
 810     out_jvms->set_map(call);
 811 
 812     if (can_prune_locals) {
 813       assert(in_jvms->method() == out_jvms->method(), "sanity");
 814       // If the current throw can reach an exception handler in this JVMS,
 815       // then we must keep everything live that can reach that handler.
 816       // As a quick and dirty approximation, we look for any handlers at all.
 817       if (in_jvms->method()->has_exception_handlers()) {
 818         can_prune_locals = false;
 819       }
 820     }
 821 
 822     // Add the Locals
 823     k = in_jvms->locoff();
 824     l = in_jvms->loc_size();
 825     out_jvms->set_locoff(p);
 826     if (full_info && !can_prune_locals) {
 827       for (j = 0; j < l; j++)
 828         call->set_req(p++, in_map->in(k+j));
 829     } else {
 830       p += l;  // already set to top above by add_req_batch
 831     }
 832 
 833     // Add the Expression Stack
 834     k = in_jvms->stkoff();
 835     l = in_jvms->sp();
 836     out_jvms->set_stkoff(p);
 837     if (full_info && !can_prune_locals) {
 838       for (j = 0; j < l; j++)
 839         call->set_req(p++, in_map->in(k+j));
 840     } else if (can_prune_locals && stack_slots_not_pruned != 0) {
      // Divide the stack into two chunks {S0, S1}: the bottom S0 slots stay
      // top (pruned); only the top S1 = stack_slots_not_pruned slots are kept.
 842       uint s1 = stack_slots_not_pruned;
 843       stack_slots_not_pruned = 0;  // for next iteration
 844       if (s1 > l)  s1 = l;
 845       uint s0 = l - s1;
 846       p += s0;  // skip the tops preinstalled by add_req_batch
 847       for (j = s0; j < l; j++)
 848         call->set_req(p++, in_map->in(k+j));
 849     } else {
 850       p += l;  // already set to top above by add_req_batch
 851     }
 852 
 853     // Add the Monitors
 854     k = in_jvms->monoff();
 855     l = in_jvms->mon_size();
 856     out_jvms->set_monoff(p);
 857     for (j = 0; j < l; j++)
 858       call->set_req(p++, in_map->in(k+j));
 859 
 860     // Copy any scalar object fields.
 861     k = in_jvms->scloff();
 862     l = in_jvms->scl_size();
 863     out_jvms->set_scloff(p);
 864     for (j = 0; j < l; j++)
 865       call->set_req(p++, in_map->in(k+j));
 866 
 867     // Finish the new jvms.
 868     out_jvms->set_endoff(p);
 869 
 870     assert(out_jvms->endoff()     == debug_end,             "fill ptr must match");
 871     assert(out_jvms->depth()      == in_jvms->depth(),      "depth must match");
 872     assert(out_jvms->loc_size()   == in_jvms->loc_size(),   "size must match");
 873     assert(out_jvms->mon_size()   == in_jvms->mon_size(),   "size must match");
 874     assert(out_jvms->scl_size()   == in_jvms->scl_size(),   "size must match");
 875     assert(out_jvms->debug_size() == in_jvms->debug_size(), "size must match");
 876 
 877     // Update the two tail pointers in parallel.
 878     out_jvms = out_jvms->caller();
 879     in_jvms  = in_jvms->caller();
 880   }
 881 
 882   assert(debug_ptr == non_debug_edges, "debug info must fit exactly");
 883 
 884   // Test the correctness of JVMState::debug_xxx accessors:
 885   assert(call->jvms()->debug_start() == non_debug_edges, "");
 886   assert(call->jvms()->debug_end()   == call->req(), "");
 887   assert(call->jvms()->debug_depth() == call->req() - non_debug_edges, "");
 888 }
 889 
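// Compute how many stack slots the current bytecode consumes ('inputs') and
// its net effect on the stack depth ('depth'); returns false if the bytecode
// is not known here.  For example, getfield of a long field pops the receiver
// (inputs = 1) and pushes a two-slot value, so depth = 2 - 1 = +1.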
 890 bool GraphKit::compute_stack_effects(int& inputs, int& depth) {
 891   Bytecodes::Code code = java_bc();
 892   if (code == Bytecodes::_wide) {
 893     code = method()->java_code_at_bci(bci() + 1);
 894   }
 895 
 896   BasicType rtype = T_ILLEGAL;
 897   int       rsize = 0;
 898 
 899   if (code != Bytecodes::_illegal) {
 900     depth = Bytecodes::depth(code); // checkcast=0, athrow=-1
 901     rtype = Bytecodes::result_type(code); // checkcast=P, athrow=V
 902     if (rtype < T_CONFLICT)
 903       rsize = type2size[rtype];
 904   }
 905 
 906   switch (code) {
 907   case Bytecodes::_illegal:
 908     return false;
 909 
 910   case Bytecodes::_ldc:
 911   case Bytecodes::_ldc_w:
 912   case Bytecodes::_ldc2_w:
 913     inputs = 0;
 914     break;
 915 
 916   case Bytecodes::_dup:         inputs = 1;  break;
 917   case Bytecodes::_dup_x1:      inputs = 2;  break;
 918   case Bytecodes::_dup_x2:      inputs = 3;  break;
 919   case Bytecodes::_dup2:        inputs = 2;  break;
 920   case Bytecodes::_dup2_x1:     inputs = 3;  break;
 921   case Bytecodes::_dup2_x2:     inputs = 4;  break;
 922   case Bytecodes::_swap:        inputs = 2;  break;
 923   case Bytecodes::_arraylength: inputs = 1;  break;
 924 
 925   case Bytecodes::_getstatic:
 926   case Bytecodes::_putstatic:
 927   case Bytecodes::_getfield:
 928   case Bytecodes::_putfield:
 929     {
 930       bool is_get = (depth >= 0), is_static = (depth & 1);
 931       bool ignore;
 932       ciBytecodeStream iter(method());
 933       iter.reset_to_bci(bci());
 934       iter.next();
 935       ciField* field = iter.get_field(ignore);
 936       int      size  = field->type()->size();
 937       inputs  = (is_static ? 0 : 1);
 938       if (is_get) {
 939         depth = size - inputs;
 940       } else {
 941         inputs += size;        // putxxx pops the value from the stack
 942         depth = - inputs;
 943       }
 944     }
 945     break;
 946 
 947   case Bytecodes::_invokevirtual:
 948   case Bytecodes::_invokespecial:
 949   case Bytecodes::_invokestatic:
 950   case Bytecodes::_invokeinterface:
 951     {
 952       bool is_static = (depth == 0);
 953       bool ignore;
 954       ciBytecodeStream iter(method());
 955       iter.reset_to_bci(bci());
 956       iter.next();
 957       ciMethod* method = iter.get_method(ignore);
 958       inputs = method->arg_size_no_receiver();
 959       if (!is_static)  inputs += 1;
 960       int size = method->return_type()->size();
 961       depth = size - inputs;
 962     }
 963     break;
 964 
 965   case Bytecodes::_multianewarray:
 966     {
 967       ciBytecodeStream iter(method());
 968       iter.reset_to_bci(bci());
 969       iter.next();
 970       inputs = iter.get_dimensions();
 971       assert(rsize == 1, "");
 972       depth = rsize - inputs;
 973     }
 974     break;
 975 
 976   case Bytecodes::_ireturn:
 977   case Bytecodes::_lreturn:
 978   case Bytecodes::_freturn:
 979   case Bytecodes::_dreturn:
 980   case Bytecodes::_areturn:
    assert(rsize == -depth, "");
 982     inputs = rsize;
 983     break;
 984 
 985   case Bytecodes::_jsr:
 986   case Bytecodes::_jsr_w:
 987     inputs = 0;
 988     depth  = 1;                  // S.B. depth=1, not zero
 989     break;
 990 
 991   default:
 992     // bytecode produces a typed result
 993     inputs = rsize - depth;
 994     assert(inputs >= 0, "");
 995     break;
 996   }
 997 
 998 #ifdef ASSERT
 999   // spot check
1000   int outputs = depth + inputs;
1001   assert(outputs >= 0, "sanity");
1002   switch (code) {
1003   case Bytecodes::_checkcast: assert(inputs == 1 && outputs == 1, ""); break;
1004   case Bytecodes::_athrow:    assert(inputs == 1 && outputs == 0, ""); break;
1005   case Bytecodes::_aload_0:   assert(inputs == 0 && outputs == 1, ""); break;
1006   case Bytecodes::_return:    assert(inputs == 0 && outputs == 0, ""); break;
1007   case Bytecodes::_drem:      assert(inputs == 4 && outputs == 2, ""); break;
1008   }
1009 #endif //ASSERT
1010 
1011   return true;
1012 }
1013 
1014 
1015 
1016 //------------------------------basic_plus_adr---------------------------------
1017 Node* GraphKit::basic_plus_adr(Node* base, Node* ptr, Node* offset) {
1018   // short-circuit a common case
1019   if (offset == intcon(0))  return ptr;
1020   return _gvn.transform( new (C, 4) AddPNode(base, ptr, offset) );
1021 }
1022 
1023 Node* GraphKit::ConvI2L(Node* offset) {
1024   // short-circuit a common case
1025   jint offset_con = find_int_con(offset, Type::OffsetBot);
1026   if (offset_con != Type::OffsetBot) {
1027     return longcon((long) offset_con);
1028   }
1029   return _gvn.transform( new (C, 2) ConvI2LNode(offset));
1030 }
1031 Node* GraphKit::ConvL2I(Node* offset) {
1032   // short-circuit a common case
1033   jlong offset_con = find_long_con(offset, (jlong)Type::OffsetBot);
1034   if (offset_con != (jlong)Type::OffsetBot) {
1035     return intcon((int) offset_con);
1036   }
1037   return _gvn.transform( new (C, 2) ConvL2INode(offset));
1038 }
1039 
1040 //-------------------------load_object_klass-----------------------------------
1041 Node* GraphKit::load_object_klass(Node* obj) {
1042   // Special-case a fresh allocation to avoid building nodes:
1043   Node* akls = AllocateNode::Ideal_klass(obj, &_gvn);
1044   if (akls != NULL)  return akls;
1045   Node* k_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes());
1046   return _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), k_adr, TypeInstPtr::KLASS) );
1047 }
1048 
1049 //-------------------------load_array_length-----------------------------------
1050 Node* GraphKit::load_array_length(Node* array) {
1051   // Special-case a fresh allocation to avoid building nodes:
1052   AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(array, &_gvn);
1053   Node *alen;
1054   if (alloc == NULL) {
1055     Node *r_adr = basic_plus_adr(array, arrayOopDesc::length_offset_in_bytes());
1056     alen = _gvn.transform( new (C, 3) LoadRangeNode(0, immutable_memory(), r_adr, TypeInt::POS));
1057   } else {
1058     alen = alloc->Ideal_length();
1059     Node* ccast = alloc->make_ideal_length(_gvn.type(array)->is_aryptr(), &_gvn);
1060     if (ccast != alen) {
1061       alen = _gvn.transform(ccast);
1062     }
1063   }
1064   return alen;
1065 }
1066 
1067 //------------------------------do_null_check----------------------------------
1068 // Helper function to do a NULL pointer check.  Returned value is
// the incoming address with NULL cast away.  You are allowed to use the
1070 // not-null value only if you are control dependent on the test.
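// With 'assert_null' set, the value is instead required to be null: the null
// case falls through and the non-null case traps.  If 'null_control' is
// supplied, no trap is generated for the null case; instead the IfFalse
// (value-is-null) control projection is returned through *null_control.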
1071 extern int explicit_null_checks_inserted,
1072            explicit_null_checks_elided;
1073 Node* GraphKit::null_check_common(Node* value, BasicType type,
1074                                   // optional arguments for variations:
1075                                   bool assert_null,
1076                                   Node* *null_control) {
1077   assert(!assert_null || null_control == NULL, "not both at once");
1078   if (stopped())  return top();
1079   if (!GenerateCompilerNullChecks && !assert_null && null_control == NULL) {
1080     // For some performance testing, we may wish to suppress null checking.
1081     value = cast_not_null(value);   // Make it appear to be non-null (4962416).
1082     return value;
1083   }
1084   explicit_null_checks_inserted++;
1085 
1086   // Construct NULL check
1087   Node *chk = NULL;
1088   switch(type) {
1089     case T_LONG   : chk = new (C, 3) CmpLNode(value, _gvn.zerocon(T_LONG)); break;
1090     case T_INT    : chk = new (C, 3) CmpINode( value, _gvn.intcon(0)); break;
1091     case T_ARRAY  : // fall through
1092       type = T_OBJECT;  // simplify further tests
1093     case T_OBJECT : {
1094       const Type *t = _gvn.type( value );
1095 
1096       const TypeInstPtr* tp = t->isa_instptr();
1097       if (tp != NULL && !tp->klass()->is_loaded()
1098           // Only for do_null_check, not any of its siblings:
1099           && !assert_null && null_control == NULL) {
1100         // Usually, any field access or invocation on an unloaded oop type
1101         // will simply fail to link, since the statically linked class is
1102         // likely also to be unloaded.  However, in -Xcomp mode, sometimes
1103         // the static class is loaded but the sharper oop type is not.
1104         // Rather than checking for this obscure case in lots of places,
1105         // we simply observe that a null check on an unloaded class
1106         // will always be followed by a nonsense operation, so we
1107         // can just issue the uncommon trap here.
1108         // Our access to the unloaded class will only be correct
1109         // after it has been loaded and initialized, which requires
1110         // a trip through the interpreter.
1111 #ifndef PRODUCT
1112         if (WizardMode) { tty->print("Null check of unloaded "); tp->klass()->print(); tty->cr(); }
1113 #endif
1114         uncommon_trap(Deoptimization::Reason_unloaded,
1115                       Deoptimization::Action_reinterpret,
1116                       tp->klass(), "!loaded");
1117         return top();
1118       }
1119 
1120       if (assert_null) {
1121         // See if the type is contained in NULL_PTR.
1122         // If so, then the value is already null.
1123         if (t->higher_equal(TypePtr::NULL_PTR)) {
1124           explicit_null_checks_elided++;
1125           return value;           // Elided null assert quickly!
1126         }
1127       } else {
1128         // See if mixing in the NULL pointer changes type.
1129         // If so, then the NULL pointer was not allowed in the original
1130         // type.  In other words, "value" was not-null.
1131         if (t->meet(TypePtr::NULL_PTR) != t) {
1132           // same as: if (!TypePtr::NULL_PTR->higher_equal(t)) ...
1133           explicit_null_checks_elided++;
1134           return value;           // Elided null check quickly!
1135         }
1136       }
1137       chk = new (C, 3) CmpPNode( value, null() );
1138       break;
1139     }
1140 
1141     default      : ShouldNotReachHere();
1142   }
1143   assert(chk != NULL, "sanity check");
1144   chk = _gvn.transform(chk);
1145 
1146   BoolTest::mask btest = assert_null ? BoolTest::eq : BoolTest::ne;
1147   BoolNode *btst = new (C, 2) BoolNode( chk, btest);
1148   Node   *tst = _gvn.transform( btst );
1149 
1150   //-----------
  // If peephole optimizations occurred, a prior test may already exist.
  // If that prior test dominates this point, we can avoid repeating it here.
1153   if (tst != btst && type == T_OBJECT) {
1154     // At this point we want to scan up the CFG to see if we can
1155     // find an identical test (and so avoid this test altogether).
1156     Node *cfg = control();
1157     int depth = 0;
1158     while( depth < 16 ) {       // Limit search depth for speed
1159       if( cfg->Opcode() == Op_IfTrue &&
1160           cfg->in(0)->in(1) == tst ) {
        // Found a prior test.  Use "cast_not_null" to construct a CastPP
        // identical to (and hence hashing to the same node as) the one that
        // already exists for the prior test, and return that cast value.
1164         if (assert_null) {
1165           replace_in_map(value, null());
1166           return null();  // do not issue the redundant test
1167         }
1168         Node *oldcontrol = control();
1169         set_control(cfg);
1170         Node *res = cast_not_null(value);
1171         set_control(oldcontrol);
1172         explicit_null_checks_elided++;
1173         return res;
1174       }
1175       cfg = IfNode::up_one_dom(cfg, /*linear_only=*/ true);
1176       if (cfg == NULL)  break;  // Quit at region nodes
1177       depth++;
1178     }
1179   }
1180 
1181   //-----------
1182   // Branch to failure if null
1183   float ok_prob = PROB_MAX;  // a priori estimate:  nulls never happen
1184   Deoptimization::DeoptReason reason;
1185   if (assert_null)
1186     reason = Deoptimization::Reason_null_assert;
1187   else if (type == T_OBJECT)
1188     reason = Deoptimization::Reason_null_check;
1189   else
1190     reason = Deoptimization::Reason_div0_check;
1191 
1192   // %%% Since Reason_unhandled is not recorded on a per-bytecode basis,
1193   // ciMethodData::has_trap_at will return a conservative -1 if any
1194   // must-be-null assertion has failed.  This could cause performance
1195   // problems for a method after its first do_null_assert failure.
1196   // Consider using 'Reason_class_check' instead?
1197 
1198   // To cause an implicit null check, we set the not-null probability
1199   // to the maximum (PROB_MAX).  For an explicit check the probability
1200   // is set to a smaller value.
1201   if (null_control != NULL || too_many_traps(reason)) {
1202     // probability is less likely
1203     ok_prob =  PROB_LIKELY_MAG(3);
1204   } else if (!assert_null &&
1205              (ImplicitNullCheckThreshold > 0) &&
1206              method() != NULL &&
1207              (method()->method_data()->trap_count(reason)
1208               >= (uint)ImplicitNullCheckThreshold)) {
1209     ok_prob =  PROB_LIKELY_MAG(3);
1210   }
1211 
1212   if (null_control != NULL) {
1213     IfNode* iff = create_and_map_if(control(), tst, ok_prob, COUNT_UNKNOWN);
1214     Node* null_true = _gvn.transform( new (C, 1) IfFalseNode(iff));
1215     set_control(      _gvn.transform( new (C, 1) IfTrueNode(iff)));
1216     if (null_true == top())
1217       explicit_null_checks_elided++;
1218     (*null_control) = null_true;
1219   } else {
1220     BuildCutout unless(this, tst, ok_prob);
1221     // Check for optimizer eliding test at parse time
1222     if (stopped()) {
1223       // Failure not possible; do not bother making uncommon trap.
1224       explicit_null_checks_elided++;
1225     } else if (assert_null) {
1226       uncommon_trap(reason,
1227                     Deoptimization::Action_make_not_entrant,
1228                     NULL, "assert_null");
1229     } else {
1230       replace_in_map(value, zerocon(type));
1231       builtin_throw(reason);
1232     }
1233   }
1234 
1235   // Must throw exception, fall-thru not possible?
1236   if (stopped()) {
1237     return top();               // No result
1238   }
1239 
1240   if (assert_null) {
1241     // Cast obj to null on this path.
1242     replace_in_map(value, zerocon(type));
1243     return zerocon(type);
1244   }
1245 
1246   // Cast obj to not-null on this path, if there is no null_control.
1247   // (If there is a null_control, a non-null value may come back to haunt us.)
1248   if (type == T_OBJECT) {
1249     Node* cast = cast_not_null(value, false);
1250     if (null_control == NULL || (*null_control) == top())
1251       replace_in_map(value, cast);
1252     value = cast;
1253   }
1254 
1255   return value;
1256 }
1257 
1258 
1259 //------------------------------cast_not_null----------------------------------
1260 // Cast obj to not-null on this path
1261 Node* GraphKit::cast_not_null(Node* obj, bool do_replace_in_map) {
1262   const Type *t = _gvn.type(obj);
1263   const Type *t_not_null = t->join(TypePtr::NOTNULL);
1264   // Object is already not-null?
1265   if( t == t_not_null ) return obj;
1266 
1267   Node *cast = new (C, 2) CastPPNode(obj,t_not_null);
1268   cast->init_req(0, control());
1269   cast = _gvn.transform( cast );
1270 
1271   // Scan for instances of 'obj' in the current JVM mapping.
1272   // These instances are known to be not-null after the test.
1273   if (do_replace_in_map)
1274     replace_in_map(obj, cast);
1275 
1276   return cast;                  // Return casted value
1277 }
1278 
1279 
1280 //--------------------------replace_in_map-------------------------------------
1281 void GraphKit::replace_in_map(Node* old, Node* neww) {
1282   this->map()->replace_edge(old, neww);
1283 
1284   // Note: This operation potentially replaces any edge
1285   // on the map.  This includes locals, stack, and monitors
1286   // of the current (innermost) JVM state.
1287 
1288   // We can consider replacing in caller maps.
1289   // The idea would be that an inlined function's null checks
1290   // can be shared with the entire inlining tree.
1291   // The expense of doing this is that the PreserveJVMState class
1292   // would have to preserve caller states too, with a deep copy.
1293 }
1294 
1295 
1296 
1297 //=============================================================================
1298 //--------------------------------memory---------------------------------------
1299 Node* GraphKit::memory(uint alias_idx) {
1300   MergeMemNode* mem = merged_memory();
1301   Node* p = mem->memory_at(alias_idx);
1302   _gvn.set_type(p, Type::MEMORY);  // must be mapped
1303   return p;
1304 }
1305 
1306 //-----------------------------reset_memory------------------------------------
1307 Node* GraphKit::reset_memory() {
1308   Node* mem = map()->memory();
1309   // do not use this node for any more parsing!
1310   debug_only( map()->set_memory((Node*)NULL) );
1311   return _gvn.transform( mem );
1312 }
1313 
1314 //------------------------------set_all_memory---------------------------------
1315 void GraphKit::set_all_memory(Node* newmem) {
1316   Node* mergemem = MergeMemNode::make(C, newmem);
1317   gvn().set_type_bottom(mergemem);
1318   map()->set_memory(mergemem);
1319 }
1320 
1321 //------------------------------set_all_memory_call----------------------------
1322 void GraphKit::set_all_memory_call(Node* call) {
1323   Node* newmem = _gvn.transform( new (C, 1) ProjNode(call, TypeFunc::Memory) );
1324   set_all_memory(newmem);
1325 }
1326 
1327 //=============================================================================
1328 //
1329 // parser factory methods for MemNodes
1330 //
1331 // These are layered on top of the factory methods in LoadNode and StoreNode,
1332 // and integrate with the parser's memory state and _gvn engine.
1333 //
1334 
1335 // factory methods in "int adr_idx"
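// 'adr_idx' is a Compile alias index (see Compile::get_alias_index);
// require_atomic_access=true on a T_LONG value selects the atomic
// LoadL/StoreL variants instead of the ordinary long load/store.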
1336 Node* GraphKit::make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
1337                           int adr_idx,
1338                           bool require_atomic_access) {
1339   assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory" );
1340   const TypePtr* adr_type = NULL; // debug-mode-only argument
1341   debug_only(adr_type = C->get_adr_type(adr_idx));
1342   Node* mem = memory(adr_idx);
1343   Node* ld;
1344   if (require_atomic_access && bt == T_LONG) {
1345     ld = LoadLNode::make_atomic(C, ctl, mem, adr, adr_type, t);
1346   } else {
1347     ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt);
1348   }
1349   return _gvn.transform(ld);
1350 }
1351 
1352 Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node *val, BasicType bt,
1353                                 int adr_idx,
1354                                 bool require_atomic_access) {
1355   assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
1356   const TypePtr* adr_type = NULL;
1357   debug_only(adr_type = C->get_adr_type(adr_idx));
1358   Node *mem = memory(adr_idx);
1359   Node* st;
1360   if (require_atomic_access && bt == T_LONG) {
1361     st = StoreLNode::make_atomic(C, ctl, mem, adr, adr_type, val);
1362   } else {
1363     st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt);
1364   }
1365   st = _gvn.transform(st);
1366   set_memory(st, adr_idx);
1367   // Back-to-back stores can eliminate the intermediate store only with DU (def-use)
1368   // info, so push this store on the worklist for the optimizer.
1369   if (mem->req() > MemNode::Address && adr == mem->in(MemNode::Address))
1370     record_for_igvn(st);
1371 
1372   return st;
1373 }
1374 
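     // Dispatch to the GC-specific pre-write barrier (if any), selected by the active barrier set.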
1375 void GraphKit::pre_barrier(Node* ctl,
1376                            Node* obj,
1377                            Node* adr,
1378                            uint adr_idx,
1379                            Node *val,
1380                            const Type* val_type,
1381                            BasicType bt) {
1382   BarrierSet* bs = Universe::heap()->barrier_set();
1383   set_control(ctl);
1384   switch (bs->kind()) {
1385     case BarrierSet::G1SATBCT:
1386     case BarrierSet::G1SATBCTLogging:
1387         g1_write_barrier_pre(obj, adr, adr_idx, val, val_type, bt);
1388       break;
1389 
1390     case BarrierSet::CardTableModRef:
1391     case BarrierSet::CardTableExtension:
1392     case BarrierSet::ModRef:
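           // No pre-barrier is emitted for these card-marking (or plain ModRef) barrier sets.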
1393       break;
1394 
1395     case BarrierSet::Other:
1396     default:
1397       ShouldNotReachHere();
1398 
1399   }
1400 }
1401 
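     // Dispatch to the GC-specific post-write (card-mark) barrier, selected by the active barrier set.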
1402 void GraphKit::post_barrier(Node* ctl,
1403                             Node* store,
1404                             Node* obj,
1405                             Node* adr,
1406                             uint adr_idx,
1407                             Node *val,
1408                             BasicType bt,
1409                             bool use_precise) {
1410   BarrierSet* bs = Universe::heap()->barrier_set();
1411   set_control(ctl);
1412   switch (bs->kind()) {
1413     case BarrierSet::G1SATBCT:
1414     case BarrierSet::G1SATBCTLogging:
1415         g1_write_barrier_post(store, obj, adr, adr_idx, val, bt, use_precise);
1416       break;
1417 
1418     case BarrierSet::CardTableModRef:
1419     case BarrierSet::CardTableExtension:
1420       write_barrier_post(store, obj, adr, val, use_precise);
1421       break;
1422 
1423     case BarrierSet::ModRef:
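           // Plain ModRef: no compiled post-barrier is emitted here.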
1424       break;
1425 
1426     case BarrierSet::Other:
1427     default:
1428       ShouldNotReachHere();
1429 
1430   }
1431 }
1432 
1433 Node* GraphKit::store_oop_to_object(Node* ctl,
1434                                     Node* obj,
1435                                     Node* adr,
1436                                     const TypePtr* adr_type,
1437                                     Node *val,
1438                                     const Type* val_type,
1439                                     BasicType bt) {
1440   uint adr_idx = C->get_alias_index(adr_type);
1441   Node* store;
1442   pre_barrier(ctl, obj, adr, adr_idx, val, val_type, bt);
1443   store = store_to_memory(control(), adr, val, bt, adr_idx);
1444   post_barrier(control(), store, obj, adr, adr_idx, val, bt, false);
1445   return store;
1446 }
1447 
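     // Like store_oop_to_object, but passes use_precise == true to the post-barrier,
     // so the card covering the exact element address is marked rather than the card
     // covering the array header.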
1448 Node* GraphKit::store_oop_to_array(Node* ctl,
1449                                    Node* obj,
1450                                    Node* adr,
1451                                    const TypePtr* adr_type,
1452                                    Node *val,
1453                                    const Type* val_type,
1454                                    BasicType bt) {
1455   uint adr_idx = C->get_alias_index(adr_type);
1456   Node* store;
1457   pre_barrier(ctl, obj, adr, adr_idx, val, val_type, bt);
1458   store = store_to_memory(control(), adr, val, bt, adr_idx);
1459   post_barrier(control(), store, obj, adr, adr_idx, val, bt, true);
1460   return store;
1461 }
1462 
1463 Node* GraphKit::store_oop_to_unknown(Node* ctl,
1464                                      Node* obj,
1465                                      Node* adr,
1466                                      const TypePtr* adr_type,
1467                                      Node *val,
1468                                      const Type* val_type,
1469                                      BasicType bt) {
1470   uint adr_idx = C->get_alias_index(adr_type);
1471   Node* store;
1472   pre_barrier(ctl, obj, adr, adr_idx, val, val_type, bt);
1473   store = store_to_memory(control(), adr, val, bt, adr_idx);
1474   post_barrier(control(), store, obj, adr, adr_idx, val, bt, true);
1475   return store;
1476 }
1477 
1478 
1479 //-------------------------array_element_address-------------------------
1480 Node* GraphKit::array_element_address(Node* ary, Node* idx, BasicType elembt,
1481                                       const TypeInt* sizetype) {
1482   uint shift  = exact_log2(type2aelembytes(elembt));
1483   uint header = arrayOopDesc::base_offset_in_bytes(elembt);
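       // For example, for a T_INT array shift == 2, so element i lives at byte offset
       // header + (i << 2) from the array base (header depends on element type and platform).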
1484 
1485   // short-circuit a common case (saves lots of confusing waste motion)
1486   jint idx_con = find_int_con(idx, -1);
1487   if (idx_con >= 0) {
1488     intptr_t offset = header + ((intptr_t)idx_con << shift);
1489     return basic_plus_adr(ary, offset);
1490   }
1491 
1492   // must be correct type for alignment purposes
1493   Node* base  = basic_plus_adr(ary, header);
1494 #ifdef _LP64
1495   // The scaled index operand to AddP must be a clean 64-bit value.
1496   // Java allows a 32-bit int to be incremented to a negative
1497   // value, which appears in a 64-bit register as a large
1498   // positive number.  Using that large positive number as an
1499   // operand in pointer arithmetic has bad consequences.
1500   // On the other hand, 32-bit overflow is rare, and the possibility
1501   // can often be excluded, if we annotate the ConvI2L node with
1502   // a type assertion that its value is known to be a small positive
1503   // number.  (The prior range check has ensured this.)
1504   // This assertion is used by ConvI2LNode::Ideal.
1505   int index_max = max_jint - 1;  // array size is max_jint, index is one less
1506   if (sizetype != NULL)  index_max = sizetype->_hi - 1;
1507   const TypeLong* lidxtype = TypeLong::make(CONST64(0), index_max, Type::WidenMax);
1508   idx = _gvn.transform( new (C, 2) ConvI2LNode(idx, lidxtype) );
1509 #endif
1510   Node* scale = _gvn.transform( new (C, 3) LShiftXNode(idx, intcon(shift)) );
1511   return basic_plus_adr(ary, base, scale);
1512 }
1513 
1514 //-------------------------load_array_element-------------------------
1515 Node* GraphKit::load_array_element(Node* ctl, Node* ary, Node* idx, const TypeAryPtr* arytype) {
1516   const Type* elemtype = arytype->elem();
1517   BasicType elembt = elemtype->array_element_basic_type();
1518   Node* adr = array_element_address(ary, idx, elembt, arytype->size());
1519   Node* ld = make_load(ctl, adr, elemtype, elembt, arytype);
1520   return ld;
1521 }
1522 
1523 //-------------------------set_arguments_for_java_call-------------------------
1524 // Arguments (pre-popped from the stack) are taken from the JVMS.
1525 void GraphKit::set_arguments_for_java_call(CallJavaNode* call) {
1526   // Add the call arguments:
1527   uint nargs = call->method()->arg_size();
1528   for (uint i = 0; i < nargs; i++) {
1529     Node* arg = argument(i);
1530     call->init_req(i + TypeFunc::Parms, arg);
1531   }
1532 }
1533 
1534 //---------------------------set_edges_for_java_call---------------------------
1535 // Connect a newly created call into the current JVMS.
1536 // The return value (if any) is produced separately, by set_results_for_java_call.
1537 void GraphKit::set_edges_for_java_call(CallJavaNode* call, bool must_throw) {
1538 
1539   // Add the predefined inputs:
1540   call->init_req( TypeFunc::Control, control() );
1541   call->init_req( TypeFunc::I_O    , i_o() );
1542   call->init_req( TypeFunc::Memory , reset_memory() );
1543   call->init_req( TypeFunc::FramePtr, frameptr() );
1544   call->init_req( TypeFunc::ReturnAdr, top() );
1545 
1546   add_safepoint_edges(call, must_throw);
1547 
1548   Node* xcall = _gvn.transform(call);
1549 
1550   if (xcall == top()) {
1551     set_control(top());
1552     return;
1553   }
1554   assert(xcall == call, "call identity is stable");
1555 
1556   // Re-use the current map to produce the result.
1557 
1558   set_control(_gvn.transform(new (C, 1) ProjNode(call, TypeFunc::Control)));
1559   set_i_o(    _gvn.transform(new (C, 1) ProjNode(call, TypeFunc::I_O    )));
1560   set_all_memory_call(xcall);
1561 
1562   //return xcall;   // no need, caller already has it
1563 }
1564 
1565 Node* GraphKit::set_results_for_java_call(CallJavaNode* call) {
1566   if (stopped())  return top();  // maybe the call folded up?
1567 
1568   // Capture the return value, if any.
1569   Node* ret;
1570   if (call->method() == NULL ||
1571       call->method()->return_type()->basic_type() == T_VOID)
1572         ret = top();
1573   else  ret = _gvn.transform(new (C, 1) ProjNode(call, TypeFunc::Parms));
1574 
1575   // Note:  Since any out-of-line call can produce an exception,
1576   // we always insert an I_O projection from the call into the result.
1577 
1578   make_slow_call_ex(call, env()->Throwable_klass(), false);
1579 
1580   return ret;
1581 }
1582 
1583 //--------------------set_predefined_input_for_runtime_call--------------------
1584 // Reading and setting the memory state is way conservative here.
1585 // The real problem is that I am not doing real Type analysis on memory,
1586 // so I cannot distinguish card mark stores from other stores.  Across a GC
1587 // point the Store Barrier and the card mark memory has to agree.  I cannot
1588 // have a card mark store and its barrier split across the GC point from
1589 // either above or below.  Here I get that to happen by reading ALL of memory.
1590 // A better answer would be to separate out card marks from other memory.
1591 // For now, return the input memory state, so that it can be reused
1592 // after the call, if this call has restricted memory effects.
1593 Node* GraphKit::set_predefined_input_for_runtime_call(SafePointNode* call) {
1594   // Set fixed predefined input arguments
1595   Node* memory = reset_memory();
1596   call->init_req( TypeFunc::Control,   control()  );
1597   call->init_req( TypeFunc::I_O,       top()      ); // does no i/o
1598   call->init_req( TypeFunc::Memory,    memory     ); // may gc ptrs
1599   call->init_req( TypeFunc::FramePtr,  frameptr() );
1600   call->init_req( TypeFunc::ReturnAdr, top()      );
1601   return memory;
1602 }
1603 
1604 //-------------------set_predefined_output_for_runtime_call--------------------
1605 // Set control and memory (not i_o) from the call.
1606 // If keep_mem is not NULL, use it for the output memory state, except for the
1607 // slice named by hook_mem (e.g. TypeRawPtr::BOTTOM), which is taken from the call.
1608 // If hook_mem is NULL, this call produces no memory effects at all.
1609 // If hook_mem is a Java-visible memory slice (such as arraycopy operands),
1610 // then only that memory slice is taken from the call.
1611 // In the last case, we must put an appropriate memory barrier before
1612 // the call, so as to create the correct anti-dependencies on loads
1613 // preceding the call.
1614 void GraphKit::set_predefined_output_for_runtime_call(Node* call,
1615                                                       Node* keep_mem,
1616                                                       const TypePtr* hook_mem) {
1617   // no i/o
1618   set_control(_gvn.transform( new (C, 1) ProjNode(call,TypeFunc::Control) ));
1619   if (keep_mem) {
1620     // First clone the existing memory state
1621     set_all_memory(keep_mem);
1622     if (hook_mem != NULL) {
1623       // Make memory for the call
1624       Node* mem = _gvn.transform( new (C, 1) ProjNode(call, TypeFunc::Memory) );
1625       // Set the RawPtr memory state only.  This covers all the heap top/GC stuff
1626       // We also use hook_mem to extract specific effects from arraycopy stubs.
1627       set_memory(mem, hook_mem);
1628     }
1629     // ...else the call has NO memory effects.
1630 
1631     // Make sure the call advertises its memory effects precisely.
1632     // This lets us build accurate anti-dependences in gcm.cpp.
1633     assert(C->alias_type(call->adr_type()) == C->alias_type(hook_mem),
1634            "call node must be constructed correctly");
1635   } else {
1636     assert(hook_mem == NULL, "");
1637     // This is not a "slow path" call; all memory comes from the call.
1638     set_all_memory_call(call);
1639   }
1640 }
1641 
1642 //------------------------------increment_counter------------------------------
1643 // for statistics: increment a VM counter by 1
1644 
1645 void GraphKit::increment_counter(address counter_addr) {
1646   Node* adr1 = makecon(TypeRawPtr::make(counter_addr));
1647   increment_counter(adr1);
1648 }
1649 
1650 void GraphKit::increment_counter(Node* counter_addr) {
1651   int adr_type = Compile::AliasIdxRaw;
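       // Plain load/add/store sequence with no atomic update; a racing increment may be
       // lost, which is acceptable for a statistics counter.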
1652   Node* cnt  = make_load(NULL, counter_addr, TypeInt::INT, T_INT, adr_type);
1653   Node* incr = _gvn.transform(new (C, 3) AddINode(cnt, _gvn.intcon(1)));
1654   store_to_memory( NULL, counter_addr, incr, T_INT, adr_type );
1655 }
1656 
1657 
1658 //------------------------------uncommon_trap----------------------------------
1659 // Bail out to the interpreter in mid-method.  Implemented by calling the
1660 // uncommon_trap blob.  This helper function inserts a runtime call with the
1661 // right debug info.
1662 void GraphKit::uncommon_trap(int trap_request,
1663                              ciKlass* klass, const char* comment,
1664                              bool must_throw,
1665                              bool keep_exact_action) {
1666   if (failing())  stop();
1667   if (stopped())  return; // trap reachable?
1668 
1669   // Note:  If ProfileTraps is true, and if a deopt. actually
1670   // occurs here, the runtime will make sure an MDO exists.  There is
1671   // no need to call method()->build_method_data() at this point.
1672 
1673 #ifdef ASSERT
1674   if (!must_throw) {
1675     // Make sure the stack has at least enough depth to execute
1676     // the current bytecode.
1677     int inputs, ignore;
1678     if (compute_stack_effects(inputs, ignore)) {
1679       assert(sp() >= inputs, "must have enough JVMS stack to execute");
1680       // It is a frequent error in library_call.cpp to issue an
1681       // uncommon trap with the _sp value already popped.
1682     }
1683   }
1684 #endif
1685 
1686   Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(trap_request);
1687   Deoptimization::DeoptAction action = Deoptimization::trap_request_action(trap_request);
1688 
1689   switch (action) {
1690   case Deoptimization::Action_maybe_recompile:
1691   case Deoptimization::Action_reinterpret:
1692     // Temporary fix for 6529811 to allow virtual calls to be sure they
1693     // get the chance to go from mono->bi->mega
1694     if (!keep_exact_action &&
1695         Deoptimization::trap_request_index(trap_request) < 0 &&
1696         too_many_recompiles(reason)) {
1697       // This BCI is causing too many recompilations.
1698       action = Deoptimization::Action_none;
1699       trap_request = Deoptimization::make_trap_request(reason, action);
1700     } else {
1701       C->set_trap_can_recompile(true);
1702     }
1703     break;
1704   case Deoptimization::Action_make_not_entrant:
1705     C->set_trap_can_recompile(true);
1706     break;
1707 #ifdef ASSERT
1708   case Deoptimization::Action_none:
1709   case Deoptimization::Action_make_not_compilable:
1710     break;
1711   default:
1712     assert(false, "bad action");
1713 #endif
1714   }
1715 
1716   if (TraceOptoParse) {
1717     char buf[100];
1718     tty->print_cr("Uncommon trap %s at bci:%d",
1719                   Deoptimization::format_trap_request(buf, sizeof(buf),
1720                                                       trap_request), bci());
1721   }
1722 
1723   CompileLog* log = C->log();
1724   if (log != NULL) {
1725     int kid = (klass == NULL)? -1: log->identify(klass);
1726     log->begin_elem("uncommon_trap bci='%d'", bci());
1727     char buf[100];
1728     log->print(" %s", Deoptimization::format_trap_request(buf, sizeof(buf),
1729                                                           trap_request));
1730     if (kid >= 0)         log->print(" klass='%d'", kid);
1731     if (comment != NULL)  log->print(" comment='%s'", comment);
1732     log->end_elem();
1733   }
1734 
1735   // Make sure any guarding test views this path as very unlikely
1736   Node *i0 = control()->in(0);
1737   if (i0 != NULL && i0->is_If()) {        // Found a guarding if test?
1738     IfNode *iff = i0->as_If();
1739     float f = iff->_prob;   // Get prob
1740     if (control()->Opcode() == Op_IfTrue) {
1741       if (f > PROB_UNLIKELY_MAG(4))
1742         iff->_prob = PROB_MIN;
1743     } else {
1744       if (f < PROB_LIKELY_MAG(4))
1745         iff->_prob = PROB_MAX;
1746     }
1747   }
1748 
1749   // Clear out dead values from the debug info.
1750   kill_dead_locals();
1751 
1752   // Now insert the uncommon trap subroutine call
1753   address call_addr = SharedRuntime::uncommon_trap_blob()->instructions_begin();
1754   const TypePtr* no_memory_effects = NULL;
1755   // Pass the index of the class to be loaded
1756   Node* call = make_runtime_call(RC_NO_LEAF | RC_UNCOMMON |
1757                                  (must_throw ? RC_MUST_THROW : 0),
1758                                  OptoRuntime::uncommon_trap_Type(),
1759                                  call_addr, "uncommon_trap", no_memory_effects,
1760                                  intcon(trap_request));
1761   assert(call->as_CallStaticJava()->uncommon_trap_request() == trap_request,
1762          "must extract request correctly from the graph");
1763   assert(trap_request != 0, "zero value reserved by uncommon_trap_request");
1764 
1765   call->set_req(TypeFunc::ReturnAdr, returnadr());
1766   // The debug info is the only real input to this call.
1767 
1768   // Halt-and-catch fire here.  The above call should never return!
1769   HaltNode* halt = new(C, TypeFunc::Parms) HaltNode(control(), frameptr());
1770   _gvn.set_type_bottom(halt);
1771   root()->add_req(halt);
1772 
1773   stop_and_kill_map();
1774 }
1775 
1776 
1777 //--------------------------just_allocated_object------------------------------
1778 // Report the object that was just allocated.
1779 // It must be the case that there are no intervening safepoints.
1780 // We use this to determine if an object is so "fresh" that
1781 // it does not require card marks.
1782 Node* GraphKit::just_allocated_object(Node* current_control) {
1783   if (C->recent_alloc_ctl() == current_control)
1784     return C->recent_alloc_obj();
1785   return NULL;
1786 }
1787 
1788 
1789 //---------------------------write_barrier_post--------------------------------
1790 // Insert a write-barrier store.  This is to let generational GC work; we have
1791 // to flag all oop-stores before the next GC point.
1792 void GraphKit::write_barrier_post(Node* oop_store, Node* obj, Node* adr,
1793                                   Node* val, bool use_precise) {
1794   // No store check needed if we're storing a NULL or an old object
1795   // (the latter case is probably a string constant). The concurrent
1796   // mark-sweep garbage collector, however, needs to have all non-null
1797   // oop updates flagged via card marks.
1798   if (val != NULL && val->is_Con()) {
1799     // must be either an oop or NULL
1800     const Type* t = val->bottom_type();
1801     if (t == TypePtr::NULL_PTR || t == Type::TOP)
1802       // stores of null never (?) need barriers
1803       return;
1804     ciObject* con = t->is_oopptr()->const_oop();
1805     if (con != NULL
1806         && con->is_perm()
1807         && Universe::heap()->can_elide_permanent_oop_store_barriers())
1808       // no store barrier needed, because no old-to-new ref created
1809       return;
1810   }
1811 
1812   if (use_ReduceInitialCardMarks()
1813       && obj == just_allocated_object(control())) {
1814     // We can skip marks on a freshly-allocated object.
1815     // Keep this code in sync with do_eager_card_mark in runtime.cpp.
1816     // That routine eagerly marks the occasional object which is produced
1817     // by the slow path, so that we don't have to do it here.
1818     return;
1819   }
1820 
1821   if (!use_precise) {
1822     // All card marks for a (non-array) instance are in one place:
1823     adr = obj;
1824   }
1825   // (Else it's an array (or unknown), and we want more precise card marks.)
1826   assert(adr != NULL, "");
1827 
1828   // Get the alias_index for raw card-mark memory
1829   int adr_type = Compile::AliasIdxRaw;
1830   // Convert the pointer to an int prior to doing math on it
1831   Node* cast = _gvn.transform(new (C, 2) CastP2XNode(control(), adr));
1832   // Divide by card size
1833   assert(Universe::heap()->barrier_set()->kind() == BarrierSet::CardTableModRef,
1834          "Only one we handle so far.");
1835   CardTableModRefBS* ct =
1836     (CardTableModRefBS*)(Universe::heap()->barrier_set());
1837   Node *b = _gvn.transform(new (C, 3) URShiftXNode( cast, _gvn.intcon(CardTableModRefBS::card_shift) ));
1838   // We store into a byte array, so do not bother to left-shift by zero
1839   Node *c = byte_map_base_node();
1840   // Combine
1841   Node *sb_ctl = control();
1842   Node *sb_adr = _gvn.transform(new (C, 4) AddPNode( top()/*no base ptr*/, c, b ));
1843   Node *sb_val = _gvn.intcon(0);
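       // The card being updated lives at byte_map_base + (adr >> card_shift); the zero
       // store below marks that card (zero is the card table's dirty value).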
1844   // Smash zero into card
1845   if( !UseConcMarkSweepGC ) {
1846     BasicType bt = T_BYTE;
1847     store_to_memory(sb_ctl, sb_adr, sb_val, bt, adr_type);
1848   } else {
1849     // Specialized path for CMS store barrier
1850     cms_card_mark( sb_ctl, sb_adr, sb_val, oop_store);
1851   }
1852 }
1853 
1854 // Specialized path for CMS store barrier
1855 void GraphKit::cms_card_mark(Node* ctl, Node* adr, Node* val, Node *oop_store) {
1856   BasicType bt = T_BYTE;
1857   int adr_idx = Compile::AliasIdxRaw;
1858   Node* mem = memory(adr_idx);
1859 
1860   // The type input is NULL in PRODUCT builds
1861   const TypePtr* type = NULL;
1862   debug_only(type = C->get_adr_type(adr_idx));
1863 
1864   // Add required edge to oop_store, optimizer does not support precedence edges.
1865   // Convert required edge to precedence edge before allocation.
1866   Node *store = _gvn.transform( new (C, 5) StoreCMNode(ctl, mem, adr, type, val, oop_store) );
1867   set_memory(store, adr_idx);
1868 
1869   // For CMS, back-to-back card-marks can only remove the first one
1870   // and this requires DU info.  Push on worklist for optimizer.
1871   if (mem->req() > MemNode::Address && adr == mem->in(MemNode::Address))
1872     record_for_igvn(store);
1873 }
1874 
1875 
1876 void GraphKit::round_double_arguments(ciMethod* dest_method) {
1877   // (Note:  TypeFunc::make has a cache that makes this fast.)
1878   const TypeFunc* tf    = TypeFunc::make(dest_method);
1879   int             nargs = tf->_domain->_cnt - TypeFunc::Parms;
1880   for (int j = 0; j < nargs; j++) {
1881     const Type *targ = tf->_domain->field_at(j + TypeFunc::Parms);
1882     if( targ->basic_type() == T_DOUBLE ) {
1883       // If any parameters are doubles, they must be rounded before
1884       // the call; dstore_rounding() does the gvn.transform.
1885       Node *arg = argument(j);
1886       arg = dstore_rounding(arg);
1887       set_argument(j, arg);
1888     }
1889   }
1890 }
1891 
1892 void GraphKit::round_double_result(ciMethod* dest_method) {
1893   // A non-strict method may return a double value which has an extended
1894   // exponent, but this must not be visible in a caller which is 'strict'.
1895   // If a strict caller invokes a non-strict callee, round the double result.
1896 
1897   BasicType result_type = dest_method->return_type()->basic_type();
1898   assert( method() != NULL, "must have caller context");
1899   if( result_type == T_DOUBLE && method()->is_strict() && !dest_method->is_strict() ) {
1900     // Destination method's return value is on top of stack
1901     // dstore_rounding() does gvn.transform
1902     Node *result = pop_pair();
1903     result = dstore_rounding(result);
1904     push_pair(result);
1905   }
1906 }
1907 
1908 // rounding for strict float precision conformance
1909 Node* GraphKit::precision_rounding(Node* n) {
1910   return UseStrictFP && _method->flags().is_strict()
1911     && UseSSE == 0 && Matcher::strict_fp_requires_explicit_rounding
1912     ? _gvn.transform( new (C, 2) RoundFloatNode(0, n) )
1913     : n;
1914 }
1915 
1916 // rounding for strict double precision conformance
1917 Node* GraphKit::dprecision_rounding(Node *n) {
1918   return UseStrictFP && _method->flags().is_strict()
1919     && UseSSE <= 1 && Matcher::strict_fp_requires_explicit_rounding
1920     ? _gvn.transform( new (C, 2) RoundDoubleNode(0, n) )
1921     : n;
1922 }
1923 
1924 // rounding for non-strict double stores
1925 Node* GraphKit::dstore_rounding(Node* n) {
1926   return Matcher::strict_fp_requires_explicit_rounding
1927     && UseSSE <= 1
1928     ? _gvn.transform( new (C, 2) RoundDoubleNode(0, n) )
1929     : n;
1930 }
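     // Note: all three rounding helpers return the input unchanged unless the matcher
     // reports that strict FP requires explicit rounding and SSE is not handling the
     // operation; the first two additionally require a strictfp method.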
1931 
1932 //=============================================================================
1933 // Generate a fast path/slow path idiom.  Graph looks like:
1934 // [foo] indicates that 'foo' is a parameter
1935 //
1936 //              [in]     NULL
1937 //                 \    /
1938 //                  CmpP
1939 //                  Bool ne
1940 //                   If
1941 //                  /  \
1942 //              True    False-<2>
1943 //              / |
1944 //             /  cast_not_null
1945 //           Load  |    |   ^
1946 //        [fast_test]   |   |
1947 // gvn to   opt_test    |   |
1948 //          /    \      |  <1>
1949 //      True     False  |
1950 //        |         \\  |
1951 //   [slow_call]     \[fast_result]
1952 //    Ctl   Val       \      \
1953 //     |               \      \
1954 //    Catch       <1>   \      \
1955 //   /    \        ^     \      \
1956 //  Ex    No_Ex    |      \      \
1957 //  |       \   \  |       \ <2>  \
1958 //  ...      \  [slow_res] |  |    \   [null_result]
1959 //            \         \--+--+---  |  |
1960 //             \           | /    \ | /
1961 //              --------Region     Phi
1962 //
1963 //=============================================================================
1964 // Code is structured as a series of driver functions all called 'do_XXX' that
1965 // call a set of helper functions.  Helper functions first, then drivers.
1966 
1967 //------------------------------null_check_oop---------------------------------
1968 // Null check oop.  Set null-path control into Region in slot 3.
1969 // The cast-not-null result uses the not-null control path.  Return the cast.
1970 Node* GraphKit::null_check_oop(Node* value, Node* *null_control,
1971                                bool never_see_null) {
1972   // Initial NULL check taken path
1973   (*null_control) = top();
1974   Node* cast = null_check_common(value, T_OBJECT, false, null_control);
1975 
1976   // Generate uncommon_trap:
1977   if (never_see_null && (*null_control) != top()) {
1978     // If we see an unexpected null at a check-cast we record it and force a
1979     // recompile; the offending check-cast will be compiled to handle NULLs.
1980     // If we see more than one offending BCI, then all checkcasts in the
1981     // method will be compiled to handle NULLs.
1982     PreserveJVMState pjvms(this);
1983     set_control(*null_control);
1984     replace_in_map(value, null());
1985     uncommon_trap(Deoptimization::Reason_null_check,
1986                   Deoptimization::Action_make_not_entrant);
1987     (*null_control) = top();    // NULL path is dead
1988   }
1989 
1990   // Cast away null-ness on the result
1991   return cast;
1992 }
1993 
1994 //------------------------------opt_iff----------------------------------------
1995 // Optimize the fast-check IfNode.  Set the fast-path region slot 2.
1996 // Return slow-path control.
1997 Node* GraphKit::opt_iff(Node* region, Node* iff) {
1998   IfNode *opt_iff = _gvn.transform(iff)->as_If();
1999 
2000   // Fast path taken; set region slot 2
2001   Node *fast_taken = _gvn.transform( new (C, 1) IfFalseNode(opt_iff) );
2002   region->init_req(2,fast_taken); // Capture fast-control
2003 
2004   // Fast path not-taken, i.e. slow path
2005   Node *slow_taken = _gvn.transform( new (C, 1) IfTrueNode(opt_iff) );
2006   return slow_taken;
2007 }
2008 
2009 //-----------------------------make_runtime_call-------------------------------
2010 Node* GraphKit::make_runtime_call(int flags,
2011                                   const TypeFunc* call_type, address call_addr,
2012                                   const char* call_name,
2013                                   const TypePtr* adr_type,
2014                                   // The following parms are all optional.
2015                                   // The first NULL ends the list.
2016                                   Node* parm0, Node* parm1,
2017                                   Node* parm2, Node* parm3,
2018                                   Node* parm4, Node* parm5,
2019                                   Node* parm6, Node* parm7) {
2020   // Slow-path call
2021   int size = call_type->domain()->cnt();
2022   bool is_leaf = !(flags & RC_NO_LEAF);
2023   bool has_io  = (!is_leaf && !(flags & RC_NO_IO));
2024   if (call_name == NULL) {
2025     assert(!is_leaf, "must supply name for leaf");
2026     call_name = OptoRuntime::stub_name(call_addr);
2027   }
2028   CallNode* call;
2029   if (!is_leaf) {
2030     call = new(C, size) CallStaticJavaNode(call_type, call_addr, call_name,
2031                                            bci(), adr_type);
2032   } else if (flags & RC_NO_FP) {
2033     call = new(C, size) CallLeafNoFPNode(call_type, call_addr, call_name, adr_type);
2034   } else {
2035     call = new(C, size) CallLeafNode(call_type, call_addr, call_name, adr_type);
2036   }
2037 
2038   // The following is similar to set_edges_for_java_call,
2039   // except that the memory effects of the call are restricted to AliasIdxRaw.
2040 
2041   // Slow path call has no side-effects, uses few values
2042   bool wide_in  = !(flags & RC_NARROW_MEM);
2043   bool wide_out = (C->get_alias_index(adr_type) == Compile::AliasIdxBot);
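       // wide_in: the call consumes the entire memory state; wide_out: it may also
       // clobber all of memory, not just the slice named by adr_type.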
2044 
2045   Node* prev_mem = NULL;
2046   if (wide_in) {
2047     prev_mem = set_predefined_input_for_runtime_call(call);
2048   } else {
2049     assert(!wide_out, "narrow in => narrow out");
2050     Node* narrow_mem = memory(adr_type);
2051     prev_mem = reset_memory();
2052     map()->set_memory(narrow_mem);
2053     set_predefined_input_for_runtime_call(call);
2054   }
2055 
2056   // Hook each parm in order.  Stop looking at the first NULL.
2057   if (parm0 != NULL) { call->init_req(TypeFunc::Parms+0, parm0);
2058   if (parm1 != NULL) { call->init_req(TypeFunc::Parms+1, parm1);
2059   if (parm2 != NULL) { call->init_req(TypeFunc::Parms+2, parm2);
2060   if (parm3 != NULL) { call->init_req(TypeFunc::Parms+3, parm3);
2061   if (parm4 != NULL) { call->init_req(TypeFunc::Parms+4, parm4);
2062   if (parm5 != NULL) { call->init_req(TypeFunc::Parms+5, parm5);
2063   if (parm6 != NULL) { call->init_req(TypeFunc::Parms+6, parm6);
2064   if (parm7 != NULL) { call->init_req(TypeFunc::Parms+7, parm7);
2065     /* close each nested if ===> */  } } } } } } } }
2066   assert(call->in(call->req()-1) != NULL, "must initialize all parms");
2067 
2068   if (!is_leaf) {
2069     // Non-leaves can block and take safepoints:
2070     add_safepoint_edges(call, ((flags & RC_MUST_THROW) != 0));
2071   }
2072   // Non-leaves can throw exceptions:
2073   if (has_io) {
2074     call->set_req(TypeFunc::I_O, i_o());
2075   }
2076 
2077   if (flags & RC_UNCOMMON) {
2078     // Set the count to a tiny probability.  Cf. Estimate_Block_Frequency.
2079     // (An "if" probability corresponds roughly to an unconditional count.
2080     // Sort of.)
2081     call->set_cnt(PROB_UNLIKELY_MAG(4));
2082   }
2083 
2084   Node* c = _gvn.transform(call);
2085   assert(c == call, "cannot disappear");
2086 
2087   if (wide_out) {
2088     // Slow path call has full side-effects.
2089     set_predefined_output_for_runtime_call(call);
2090   } else {
2091     // Slow path call has few side-effects, and/or sets few values.
2092     set_predefined_output_for_runtime_call(call, prev_mem, adr_type);
2093   }
2094 
2095   if (has_io) {
2096     set_i_o(_gvn.transform(new (C, 1) ProjNode(call, TypeFunc::I_O)));
2097   }
2098   return call;
2099 
2100 }
2101 
2102 //------------------------------merge_memory-----------------------------------
2103 // Merge memory from one path into the current memory state.
2104 void GraphKit::merge_memory(Node* new_mem, Node* region, int new_path) {
2105   for (MergeMemStream mms(merged_memory(), new_mem->as_MergeMem()); mms.next_non_empty2(); ) {
2106     Node* old_slice = mms.force_memory();
2107     Node* new_slice = mms.memory2();
2108     if (old_slice != new_slice) {
2109       PhiNode* phi;
2110       if (new_slice->is_Phi() && new_slice->as_Phi()->region() == region) {
2111         phi = new_slice->as_Phi();
2112         #ifdef ASSERT
2113         if (old_slice->is_Phi() && old_slice->as_Phi()->region() == region)
2114           old_slice = old_slice->in(new_path);
2115         // Caller is responsible for ensuring that any pre-existing
2116         // phis are already aware of old memory.
2117         int old_path = (new_path > 1) ? 1 : 2;  // choose old_path != new_path
2118         assert(phi->in(old_path) == old_slice, "pre-existing phis OK");
2119         #endif
2120         mms.set_memory(phi);
2121       } else {
2122         phi = PhiNode::make(region, old_slice, Type::MEMORY, mms.adr_type(C));
2123         _gvn.set_type(phi, Type::MEMORY);
2124         phi->set_req(new_path, new_slice);
2125         mms.set_memory(_gvn.transform(phi));  // assume it is complete
2126       }
2127     }
2128   }
2129 }
2130 
2131 //------------------------------make_slow_call_ex------------------------------
2132 // Make the exception handler hookups for the slow call
2133 void GraphKit::make_slow_call_ex(Node* call, ciInstanceKlass* ex_klass, bool separate_io_proj) {
2134   if (stopped())  return;
2135 
2136   // Make a catch node with just two handlers:  fall-through and catch-all
2137   Node* i_o  = _gvn.transform( new (C, 1) ProjNode(call, TypeFunc::I_O, separate_io_proj) );
2138   Node* catc = _gvn.transform( new (C, 2) CatchNode(control(), i_o, 2) );
2139   Node* norm = _gvn.transform( new (C, 1) CatchProjNode(catc, CatchProjNode::fall_through_index, CatchProjNode::no_handler_bci) );
2140   Node* excp = _gvn.transform( new (C, 1) CatchProjNode(catc, CatchProjNode::catch_all_index,    CatchProjNode::no_handler_bci) );
2141 
2142   { PreserveJVMState pjvms(this);
2143     set_control(excp);
2144     set_i_o(i_o);
2145 
2146     if (excp != top()) {
2147       // Create an exception state also.
2148       // Use an exact type if the caller has specified a specific exception.
2149       const Type* ex_type = TypeOopPtr::make_from_klass_unique(ex_klass)->cast_to_ptr_type(TypePtr::NotNull);
2150       Node*       ex_oop  = new (C, 2) CreateExNode(ex_type, control(), i_o);
2151       add_exception_state(make_exception_state(_gvn.transform(ex_oop)));
2152     }
2153   }
2154 
2155   // Get the no-exception control from the CatchNode.
2156   set_control(norm);
2157 }
2158 
2159 
2160 //-------------------------------gen_subtype_check-----------------------------
2161 // Generate a subtyping check.  Takes as input the subtype and supertype.
2162 // Returns 2 values: sets the default control() to the true path and returns
2163 // the false path.  Only reads invariant memory; sets no (visible) memory.
2164 // The PartialSubtypeCheckNode sets the hidden 1-word cache in the encoding
2165 // but that's not exposed to the optimizer.  This call also doesn't take in an
2166 // Object; if you wish to check an Object you need to load the Object's class
2167 // prior to coming here.
2168 Node* GraphKit::gen_subtype_check(Node* subklass, Node* superklass) {
2169   // Fast check for identical types, perhaps identical constants.
2170   // The types can even be identical non-constants, in cases
2171   // involving Array.newInstance, Object.clone, etc.
2172   if (subklass == superklass)
2173     return top();             // false path is dead; no test needed.
2174 
2175   if (_gvn.type(superklass)->singleton()) {
2176     ciKlass* superk = _gvn.type(superklass)->is_klassptr()->klass();
2177     ciKlass* subk   = _gvn.type(subklass)->is_klassptr()->klass();
2178 
2179     // In the common case of an exact superklass, try to fold up the
2180     // test before generating code.  You may ask, why not just generate
2181     // the code and then let it fold up?  The answer is that the generated
2182     // code will necessarily include null checks, which do not always
2183     // completely fold away.  If they are also needless, then they turn
2184     // into a performance loss.  Example:
2185     //    Foo[] fa = blah(); Foo x = fa[0]; fa[1] = x;
2186     // Here, the type of 'fa' is often exact, so the store check
2187     // of fa[1]=x will fold up, without testing the nullness of x.
2188     switch (static_subtype_check(superk, subk)) {
2189     case SSC_always_false:
2190       {
2191         Node* always_fail = control();
2192         set_control(top());
2193         return always_fail;
2194       }
2195     case SSC_always_true:
2196       return top();
2197     case SSC_easy_test:
2198       {
2199         // Just do a direct pointer compare and be done.
2200         Node* cmp = _gvn.transform( new(C, 3) CmpPNode(subklass, superklass) );
2201         Node* bol = _gvn.transform( new(C, 2) BoolNode(cmp, BoolTest::eq) );
2202         IfNode* iff = create_and_xform_if(control(), bol, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
2203         set_control( _gvn.transform( new(C, 1) IfTrueNode (iff) ) );
2204         return       _gvn.transform( new(C, 1) IfFalseNode(iff) );
2205       }
2206     case SSC_full_test:
2207       break;
2208     default:
2209       ShouldNotReachHere();
2210     }
2211   }
2212 
2213   // %%% Possible further optimization:  Even if the superklass is not exact,
2214   // if the subklass is the unique subtype of the superklass, the check
2215   // will always succeed.  We could leave a dependency behind to ensure this.
2216 
2217   // First load the super-klass's check-offset
2218   Node *p1 = basic_plus_adr( superklass, superklass, sizeof(oopDesc) + Klass::super_check_offset_offset_in_bytes() );
2219   Node *chk_off = _gvn.transform( new (C, 3) LoadINode( NULL, memory(p1), p1, _gvn.type(p1)->is_ptr() ) );
2220   int cacheoff_con = sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes();
2221   bool might_be_cache = (find_int_con(chk_off, cacheoff_con) == cacheoff_con);
2222 
2223   // Load from the sub-klass's super-class display list, or a 1-word cache of
2224   // the secondary superclass list, or a failing value with a sentinel offset
2225   // if the super-klass is an interface or exceptionally deep in the Java
2226   // hierarchy and we have to scan the secondary superclass list the hard way.
2227   // Worst-case type is a little odd: NULL is allowed as a result (usually
2228   // klass loads can never produce a NULL).
2229   Node *chk_off_X = ConvI2X(chk_off);
2230   Node *p2 = _gvn.transform( new (C, 4) AddPNode(subklass,subklass,chk_off_X) );
2231   // For some types like interfaces the following loadKlass is from a 1-word
2232   // cache which is mutable so can't use immutable memory.  Other
2233   // types load from the super-class display table which is immutable.
2234   Node *kmem = might_be_cache ? memory(p2) : immutable_memory();
2235   Node *nkls = _gvn.transform( LoadKlassNode::make( _gvn, kmem, p2, _gvn.type(p2)->is_ptr(), TypeKlassPtr::OBJECT_OR_NULL ) );
2236 
2237   // Compile speed common case: ARE a subtype and we canNOT fail
2238   if( superklass == nkls )
2239     return top();             // false path is dead; no test needed.
2240 
2241   // See if we get an immediate positive hit.  Happens roughly 83% of the
2242   // time.  Test to see if the value loaded just previously from the subklass
2243   // is exactly the superklass.
2244   Node *cmp1 = _gvn.transform( new (C, 3) CmpPNode( superklass, nkls ) );
2245   Node *bol1 = _gvn.transform( new (C, 2) BoolNode( cmp1, BoolTest::eq ) );
2246   IfNode *iff1 = create_and_xform_if( control(), bol1, PROB_LIKELY(0.83f), COUNT_UNKNOWN );
2247   Node *iftrue1 = _gvn.transform( new (C, 1) IfTrueNode ( iff1 ) );
2248   set_control(    _gvn.transform( new (C, 1) IfFalseNode( iff1 ) ) );
2249 
2250   // Compile speed common case: Check for being deterministic right now.  If
2251   // chk_off is a constant and not equal to cacheoff then we are NOT a
2252   // subklass.  In this case we need exactly the 1 test above and we can
2253   // return those results immediately.
2254   if (!might_be_cache) {
2255     Node* not_subtype_ctrl = control();
2256     set_control(iftrue1); // We need exactly the 1 test above
2257     return not_subtype_ctrl;
2258   }
2259 
2260   // Gather the various success & failures here
2261   RegionNode *r_ok_subtype = new (C, 4) RegionNode(4);
2262   record_for_igvn(r_ok_subtype);
2263   RegionNode *r_not_subtype = new (C, 3) RegionNode(3);
2264   record_for_igvn(r_not_subtype);
2265 
2266   r_ok_subtype->init_req(1, iftrue1);
2267 
2268   // Check for immediate negative hit.  Happens roughly 11% of the time (which
2269   // is roughly 63% of the remaining cases).  Test to see if the loaded
2270   // check-offset points into the subklass display list or the 1-element
2271   // cache.  If it points to the display (and NOT the cache) and the display
2272   // missed then it's not a subtype.
2273   Node *cacheoff = _gvn.intcon(cacheoff_con);
2274   Node *cmp2 = _gvn.transform( new (C, 3) CmpINode( chk_off, cacheoff ) );
2275   Node *bol2 = _gvn.transform( new (C, 2) BoolNode( cmp2, BoolTest::ne ) );
2276   IfNode *iff2 = create_and_xform_if( control(), bol2, PROB_LIKELY(0.63f), COUNT_UNKNOWN );
2277   r_not_subtype->init_req(1, _gvn.transform( new (C, 1) IfTrueNode (iff2) ) );
2278   set_control(                _gvn.transform( new (C, 1) IfFalseNode(iff2) ) );
2279 
2280   // Check for self.  Very rare to get here, but it is taken 1/3 the time.
2281   // No performance impact (too rare) but allows sharing of secondary arrays
2282   // which has some footprint reduction.
2283   Node *cmp3 = _gvn.transform( new (C, 3) CmpPNode( subklass, superklass ) );
2284   Node *bol3 = _gvn.transform( new (C, 2) BoolNode( cmp3, BoolTest::eq ) );
2285   IfNode *iff3 = create_and_xform_if( control(), bol3, PROB_LIKELY(0.36f), COUNT_UNKNOWN );
2286   r_ok_subtype->init_req(2, _gvn.transform( new (C, 1) IfTrueNode ( iff3 ) ) );
2287   set_control(               _gvn.transform( new (C, 1) IfFalseNode( iff3 ) ) );
2288 
2289   // -- Roads not taken here: --
2290   // We could also have chosen to perform the self-check at the beginning
2291   // of this code sequence, as the assembler does.  This would not pay off
2292   // the same way, since the optimizer, unlike the assembler, can perform
2293   // static type analysis to fold away many successful self-checks.
2294   // Non-foldable self checks work better here in second position, because
2295   // the initial primary superclass check subsumes a self-check for most
2296   // types.  An exception would be a secondary type like array-of-interface,
2297   // which does not appear in its own primary supertype display.
2298   // Finally, we could have chosen to move the self-check into the
2299   // PartialSubtypeCheckNode, and from there out-of-line in a platform
2300   // dependent manner.  But it is worthwhile to have the check here,
2301 // where it can perhaps be optimized.  The cost in code space is
2302   // small (register compare, branch).
2303 
2304   // Now do a linear scan of the secondary super-klass array.  Again, no real
2305   // performance impact (too rare) but it's gotta be done.
2306   // Since the code is rarely used, there is no penalty for moving it
2307   // out of line, and it can only improve I-cache density.
2308   // The decision to inline or out-of-line this final check is platform
2309   // dependent, and is found in the AD file definition of PartialSubtypeCheck.
2310   Node* psc = _gvn.transform(
2311     new (C, 3) PartialSubtypeCheckNode(control(), subklass, superklass) );
2312 
2313   Node *cmp4 = _gvn.transform( new (C, 3) CmpPNode( psc, null() ) );
2314   Node *bol4 = _gvn.transform( new (C, 2) BoolNode( cmp4, BoolTest::ne ) );
2315   IfNode *iff4 = create_and_xform_if( control(), bol4, PROB_FAIR, COUNT_UNKNOWN );
2316   r_not_subtype->init_req(2, _gvn.transform( new (C, 1) IfTrueNode (iff4) ) );
2317   r_ok_subtype ->init_req(3, _gvn.transform( new (C, 1) IfFalseNode(iff4) ) );
2318 
2319   // Return false path; set default control to true path.
2320   set_control( _gvn.transform(r_ok_subtype) );
2321   return _gvn.transform(r_not_subtype);
2322 }
2323 
2324 //----------------------------static_subtype_check-----------------------------
2325 // Shortcut important common cases when superklass is exact:
2326 // (0) superklass is java.lang.Object (can occur in reflective code)
2327 // (1) subklass is already limited to a subtype of superklass => always ok
2328 // (2) subklass does not overlap with superklass => always fail
2329 // (3) superklass has NO subtypes and we can check with a simple compare.
2330 int GraphKit::static_subtype_check(ciKlass* superk, ciKlass* subk) {
2331   if (StressReflectiveCode) {
2332     return SSC_full_test;       // Let caller generate the general case.
2333   }
2334 
2335   if (superk == env()->Object_klass()) {
2336     return SSC_always_true;     // (0) this test cannot fail
2337   }
2338 
2339   ciType* superelem = superk;
2340   if (superelem->is_array_klass())
2341     superelem = superelem->as_array_klass()->base_element_type();
2342 
2343   if (!subk->is_interface()) {  // cannot trust static interface types yet
2344     if (subk->is_subtype_of(superk)) {
2345       return SSC_always_true;   // (1) false path dead; no dynamic test needed
2346     }
2347     if (!(superelem->is_klass() && superelem->as_klass()->is_interface()) &&
2348         !superk->is_subtype_of(subk)) {
2349       return SSC_always_false;
2350     }
2351   }
2352 
2353   // If casting to an instance klass, it must have no subtypes
2354   if (superk->is_interface()) {
2355     // Cannot trust interfaces yet.
2356     // %%% S.B. superk->nof_implementors() == 1
2357   } else if (superelem->is_instance_klass()) {
2358     ciInstanceKlass* ik = superelem->as_instance_klass();
2359     if (!ik->has_subklass() && !ik->is_interface()) {
2360       if (!ik->is_final()) {
2361         // Add a dependency if there is a chance of a later subclass.
2362         C->dependencies()->assert_leaf_type(ik);
2363       }
2364       return SSC_easy_test;     // (3) caller can do a simple ptr comparison
2365     }
2366   } else {
2367     // A primitive array type has no subtypes.
2368     return SSC_easy_test;       // (3) caller can do a simple ptr comparison
2369   }
2370 
2371   return SSC_full_test;
2372 }
2373 
2374 // Profile-driven exact type check:
2375 Node* GraphKit::type_check_receiver(Node* receiver, ciKlass* klass,
2376                                     float prob,
2377                                     Node* *casted_receiver) {
2378   const TypeKlassPtr* tklass = TypeKlassPtr::make(klass);
2379   Node* recv_klass = load_object_klass(receiver);
2380   Node* want_klass = makecon(tklass);
2381   Node* cmp = _gvn.transform( new(C, 3) CmpPNode(recv_klass, want_klass) );
2382   Node* bol = _gvn.transform( new(C, 2) BoolNode(cmp, BoolTest::eq) );
2383   IfNode* iff = create_and_xform_if(control(), bol, prob, COUNT_UNKNOWN);
2384   set_control( _gvn.transform( new(C, 1) IfTrueNode (iff) ));
2385   Node* fail = _gvn.transform( new(C, 1) IfFalseNode(iff) );
2386 
2387   const TypeOopPtr* recv_xtype = tklass->as_instance_type();
2388   assert(recv_xtype->klass_is_exact(), "");
2389 
2390   // Subsume downstream occurrences of receiver with a cast to
2391   // recv_xtype, since now we know what the type will be.
2392   Node* cast = new(C, 2) CheckCastPPNode(control(), receiver, recv_xtype);
2393   (*casted_receiver) = _gvn.transform(cast);
2394   // (User must make the replace_in_map call.)
2395 
2396   return fail;
2397 }
2398 
2399 
2400 //-------------------------------gen_instanceof--------------------------------
2401 // Generate an instance-of idiom.  Used by both the instance-of bytecode
2402 // and the reflective instance-of call.
2403 Node* GraphKit::gen_instanceof( Node *subobj, Node* superklass ) {
2404   C->set_has_split_ifs(true); // Has chance for split-if optimization
2405   assert( !stopped(), "dead parse path should be checked in callers" );
2406   assert(!TypePtr::NULL_PTR->higher_equal(_gvn.type(superklass)->is_klassptr()),
2407          "must check for not-null not-dead klass in callers");
2408 
2409   // Make the merge point
2410   enum { _obj_path = 1, _fail_path, _null_path, PATH_LIMIT };
2411   RegionNode* region = new(C, PATH_LIMIT) RegionNode(PATH_LIMIT);
2412   Node*       phi    = new(C, PATH_LIMIT) PhiNode(region, TypeInt::BOOL);
2413   C->set_has_split_ifs(true); // Has chance for split-if optimization
2414 
2415   // Null check; get casted pointer; set region slot 3
2416   Node* null_ctl = top();
2417   Node* not_null_obj = null_check_oop(subobj, &null_ctl);
2418 
2419   // If not_null_obj is dead, only null-path is taken
2420   if (stopped()) {              // Doing instance-of on a NULL?
2421     set_control(null_ctl);
2422     return intcon(0);
2423   }
2424   region->init_req(_null_path, null_ctl);
2425   phi   ->init_req(_null_path, intcon(0)); // Set null path value
2426 
2427   // Load the object's klass
2428   Node* obj_klass = load_object_klass(not_null_obj);
2429 
2430   // Generate the subtype check
2431   Node* not_subtype_ctrl = gen_subtype_check(obj_klass, superklass);
2432 
2433   // Plug in the success path to the general merge in slot 1.
2434   region->init_req(_obj_path, control());
2435   phi   ->init_req(_obj_path, intcon(1));
2436 
2437   // Plug in the failing path to the general merge in slot 2.
2438   region->init_req(_fail_path, not_subtype_ctrl);
2439   phi   ->init_req(_fail_path, intcon(0));
2440 
2441   // Return final merged results
2442   set_control( _gvn.transform(region) );
2443   record_for_igvn(region);
2444   return _gvn.transform(phi);
2445 }
2446 
2447 //-------------------------------gen_checkcast---------------------------------
2448 // Generate a checkcast idiom.  Used by both the checkcast bytecode and the
2449 // array store bytecode.  The stack must look as it did BEFORE the bytecode executed,
2450 // so that the uncommon-trap paths work.  Adjust the stack after this call.
2451 // If failure_control is supplied and not null, it is filled in with
2452 // the control edge for the cast failure.  Otherwise, an appropriate
2453 // uncommon trap or exception is thrown.
2454 Node* GraphKit::gen_checkcast(Node *obj, Node* superklass,
2455                               Node* *failure_control) {
2456   kill_dead_locals();           // Benefit all the uncommon traps
2457   const TypeKlassPtr *tk = _gvn.type(superklass)->is_klassptr();
2458   const Type *toop = TypeOopPtr::make_from_klass(tk->klass());
2459 
2460   // Fast cutout:  Check the case that the cast is vacuously true.
2461   // This detects the common cases where the test will short-circuit
2462   // away completely.  We do this before we perform the null check,
2463   // because if the test is going to turn into zero code, we don't
2464   // want a residual null check left around.  (Causes a slowdown,
2465   // for example, in some objArray manipulations, such as a[i]=a[j].)
2466   if (tk->singleton()) {
2467     const TypeOopPtr* objtp = _gvn.type(obj)->isa_oopptr();
2468     if (objtp != NULL && objtp->klass() != NULL) {
2469       switch (static_subtype_check(tk->klass(), objtp->klass())) {
2470       case SSC_always_true:
2471         return obj;
2472       case SSC_always_false:
2473         // It needs a null check because a null will *pass* the cast check.
2474         // A non-null value will always produce an exception.
2475         return do_null_assert(obj, T_OBJECT);
2476       }
2477     }
2478   }
2479 
2480   ciProfileData* data = NULL;
2481   if (failure_control == NULL) {        // use MDO in regular case only
2482     assert(java_bc() == Bytecodes::_aastore ||
2483            java_bc() == Bytecodes::_checkcast,
2484            "interpreter profiles type checks only for these BCs");
2485     data = method()->method_data()->bci_to_data(bci());
2486   }
2487 
2488   // Make the merge point
2489   enum { _obj_path = 1, _null_path, PATH_LIMIT };
2490   RegionNode* region = new (C, PATH_LIMIT) RegionNode(PATH_LIMIT);
2491   Node*       phi    = new (C, PATH_LIMIT) PhiNode(region, toop);
2492   C->set_has_split_ifs(true); // Has chance for split-if optimization
2493 
2494   // Use null-cast information if it is available
2495   bool never_see_null = false;
2496   // If we see an unexpected null at a check-cast we record it and force a
2497   // recompile; the offending check-cast will be compiled to handle NULLs.
2498   // If we see several offending BCIs, then all checkcasts in the
2499   // method will be compiled to handle NULLs.
2500   if (UncommonNullCast            // Cutout for this technique
2501       && failure_control == NULL  // regular case
2502       && obj != null()            // And not the -Xcomp stupid case?
2503       && !too_many_traps(Deoptimization::Reason_null_check)) {
2504     // Finally, check the "null_seen" bit from the interpreter.
2505     if (data == NULL || !data->as_BitData()->null_seen()) {
2506       never_see_null = true;
2507     }
2508   }
2509 
2510   // Null check; get casted pointer; set region slot 2
2511   Node* null_ctl = top();
2512   Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null);
2513 
2514   // If not_null_obj is dead, only null-path is taken
2515   if (stopped()) {              // Doing checkcast on a NULL?
2516     set_control(null_ctl);
2517     return null();
2518   }
2519   region->init_req(_null_path, null_ctl);
2520   phi   ->init_req(_null_path, null());  // Set null path value
2521 
2522   Node* cast_obj = NULL;        // the casted version of the object
2523 
2524   // If the profile has seen exactly one type, narrow to that type.
2525   // (The subsequent subtype check will always fold up.)
2526   if (UseTypeProfile && TypeProfileCasts && data != NULL &&
2527       // Counter has never been decremented (due to cast failure).
2528       // ...This is a reasonable thing to expect.  It is true of
2529       // all casts inserted by javac to implement generic types.
2530       data->as_CounterData()->count() >= 0 &&
2531       !too_many_traps(Deoptimization::Reason_class_check)) {
2532     // (No, this isn't a call, but it's enough like a virtual call
2533     // to use the same ciMethod accessor to get the profile info...)
2534     ciCallProfile profile = method()->call_profile_at_bci(bci());
2535     if (profile.count() >= 0 &&         // no cast failures here
2536         profile.has_receiver(0) &&
2537         profile.morphism() == 1) {
2538       ciKlass* exact_kls = profile.receiver(0);
2539       int ssc = static_subtype_check(tk->klass(), exact_kls);
2540       if (ssc == SSC_always_true) {
2541         // If we narrow the type to match what the type profile sees,
2542         // we can then remove the rest of the cast.
2543         // This is a win, even if the exact_kls is very specific,
2544         // because downstream operations, such as method calls,
2545         // will often benefit from the sharper type.
2546         Node* exact_obj = not_null_obj; // will get updated in place...
2547         Node* slow_ctl  = type_check_receiver(exact_obj, exact_kls, 1.0,
2548                                               &exact_obj);
2549         { PreserveJVMState pjvms(this);
2550           set_control(slow_ctl);
2551           uncommon_trap(Deoptimization::Reason_class_check,
2552                         Deoptimization::Action_maybe_recompile);
2553         }
2554         if (failure_control != NULL) // failure is now impossible
2555           (*failure_control) = top();
2556         replace_in_map(not_null_obj, exact_obj);
2557         // adjust the type of the phi to the exact klass:
2558         phi->raise_bottom_type(_gvn.type(exact_obj)->meet(TypePtr::NULL_PTR));
2559         cast_obj = exact_obj;
2560       }
2561       // assert(cast_obj != NULL)... except maybe the profile lied to us.
2562     }
2563   }
2564 
2565   if (cast_obj == NULL) {
2566     // Load the object's klass
2567     Node* obj_klass = load_object_klass(not_null_obj);
2568 
2569     // Generate the subtype check
2570     Node* not_subtype_ctrl = gen_subtype_check( obj_klass, superklass );
2571 
2572     // Plug in success path into the merge
2573     cast_obj = _gvn.transform(new (C, 2) CheckCastPPNode(control(),
2574                                                          not_null_obj, toop));
2575     // Failure path ends in uncommon trap (or may be dead - failure impossible)
2576     if (failure_control == NULL) {
2577       if (not_subtype_ctrl != top()) { // If failure is possible
2578         PreserveJVMState pjvms(this);
2579         set_control(not_subtype_ctrl);
2580         builtin_throw(Deoptimization::Reason_class_check, obj_klass);
2581       }
2582     } else {
2583       (*failure_control) = not_subtype_ctrl;
2584     }
2585   }
2586 
2587   region->init_req(_obj_path, control());
2588   phi   ->init_req(_obj_path, cast_obj);
2589 
2590   // A merge of NULL or Casted-NotNull obj
2591   Node* res = _gvn.transform(phi);
2592 
2593   // Note I do NOT always 'replace_in_map(obj,result)' here.
2594   //  if( tk->klass()->can_be_primary_super()  )
2595     // This means that if I successfully store an Object into an array-of-String
2596     // I 'forget' that the Object is really now known to be a String.  I have to
2597     // do this because we don't have true union types for interfaces - if I store
2598     // a Baz into an array-of-Interface and then tell the optimizer it's an
2599     // Interface, I forget that it's also a Baz and cannot do Baz-like field
2600     // references to it.  FIX THIS WHEN UNION TYPES APPEAR!
2601   //  replace_in_map( obj, res );
2602 
2603   // Return final merged results
2604   set_control( _gvn.transform(region) );
2605   record_for_igvn(region);
2606   return res;
2607 }
2608 
2609 //------------------------------next_monitor-----------------------------------
2610 // What number should be given to the next monitor?
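     // For example, with one sync stack slot per monitor the first monitor in a
     // frame gets slot 0, the next nested monitor gets slot 1, and so on;
     // fixed_slots() tracks the high-water mark.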
2611 int GraphKit::next_monitor() {
2612   int current = jvms()->monitor_depth()* C->sync_stack_slots();
2613   int next = current + C->sync_stack_slots();
2614   // Keep the toplevel high water mark current:
2615   if (C->fixed_slots() < next)  C->set_fixed_slots(next);
2616   return current;
2617 }
2618 
2619 //------------------------------insert_mem_bar---------------------------------
2620 // Memory barrier to avoid floating things around
2621 // The membar serves as a pinch point between both control and all memory slices.
2622 Node* GraphKit::insert_mem_bar(int opcode, Node* precedent) {
2623   MemBarNode* mb = MemBarNode::make(C, opcode, Compile::AliasIdxBot, precedent);
2624   mb->init_req(TypeFunc::Control, control());
2625   mb->init_req(TypeFunc::Memory,  reset_memory());
2626   Node* membar = _gvn.transform(mb);
2627   set_control(_gvn.transform(new (C, 1) ProjNode(membar,TypeFunc::Control) ));
2628   set_all_memory_call(membar);
2629   return membar;
2630 }
2631 
2632 //-------------------------insert_mem_bar_volatile----------------------------
2633 // Memory barrier to avoid floating things around
2634 // The membar serves as a pinch point between both control and memory(alias_idx).
2635 // If you want to make a pinch point on all memory slices, do not use this
2636 // function (even with AliasIdxBot); use insert_mem_bar() instead.
2637 Node* GraphKit::insert_mem_bar_volatile(int opcode, int alias_idx, Node* precedent) {
2638   // When Parse::do_put_xxx updates a volatile field, it appends a series
2639   // of MemBarVolatile nodes, one for *each* volatile field alias category.
2640   // The first membar is on the same memory slice as the field store opcode.
2641   // This forces the membar to follow the store.  (Bug 6500685 broke this.)
2642   // All the other membars (for other volatile slices, including AliasIdxBot,
2643   // which stands for all unknown volatile slices) are control-dependent
2644   // on the first membar.  This prevents later volatile loads or stores
2645   // from sliding up past the just-emitted store.
2646 
2647   MemBarNode* mb = MemBarNode::make(C, opcode, alias_idx, precedent);
2648   mb->set_req(TypeFunc::Control,control());
2649   if (alias_idx == Compile::AliasIdxBot) {
2650     mb->set_req(TypeFunc::Memory, merged_memory()->base_memory());
2651   } else {
2652     assert(!(opcode == Op_Initialize && alias_idx != Compile::AliasIdxRaw), "fix caller");
2653     mb->set_req(TypeFunc::Memory, memory(alias_idx));
2654   }
2655   Node* membar = _gvn.transform(mb);
2656   set_control(_gvn.transform(new (C, 1) ProjNode(membar, TypeFunc::Control)));
2657   if (alias_idx == Compile::AliasIdxBot) {
2658     merged_memory()->set_base_memory(_gvn.transform(new (C, 1) ProjNode(membar, TypeFunc::Memory)));
2659   } else {
2660     set_memory(_gvn.transform(new (C, 1) ProjNode(membar, TypeFunc::Memory)),alias_idx);
2661   }
2662   return membar;
2663 }
2664 
2665 //------------------------------shared_lock------------------------------------
2666 // Emit locking code.
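     // The fast path uses a BoxLockNode (the stack slot) plus a FastLockNode; the
     // LockNode carries the call-like interface and safepoint edges for the slow
     // path, and a trailing MemBarAcquire keeps the body of the locked region
     // from floating above the lock.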
2667 FastLockNode* GraphKit::shared_lock(Node* obj) {
2668   // bci is either a monitorenter bc or InvocationEntryBci
2669   // %%% SynchronizationEntryBCI is redundant; use InvocationEntryBci in interfaces
2670   assert(SynchronizationEntryBCI == InvocationEntryBci, "");
2671 
2672   if( !GenerateSynchronizationCode )
2673     return NULL;                // Not locking things?
2674   if (stopped())                // Dead monitor?
2675     return NULL;
2676 
2677   assert(dead_locals_are_killed(), "should kill locals before sync. point");
2678 
2679   // Box the stack location
2680   Node* box = _gvn.transform(new (C, 1) BoxLockNode(next_monitor()));
2681   Node* mem = reset_memory();
2682 
2683   FastLockNode * flock = _gvn.transform(new (C, 3) FastLockNode(0, obj, box) )->as_FastLock();
2684   if (PrintPreciseBiasedLockingStatistics) {
2685     // Create the counters for this fast lock.
2686     flock->create_lock_counter(sync_jvms()); // sync_jvms used to get current bci
2687   }
2688   // Add monitor to debug info for the slow path.  If we block inside the
2689   // slow path and de-opt, we need the monitor hanging around
2690   map()->push_monitor( flock );
2691 
2692   const TypeFunc *tf = LockNode::lock_type();
2693   LockNode *lock = new (C, tf->domain()->cnt()) LockNode(C, tf);
2694 
2695   lock->init_req( TypeFunc::Control, control() );
2696   lock->init_req( TypeFunc::Memory , mem );
2697   lock->init_req( TypeFunc::I_O    , top() )     ;   // does no i/o
2698   lock->init_req( TypeFunc::FramePtr, frameptr() );
2699   lock->init_req( TypeFunc::ReturnAdr, top() );
2700 
2701   lock->init_req(TypeFunc::Parms + 0, obj);
2702   lock->init_req(TypeFunc::Parms + 1, box);
2703   lock->init_req(TypeFunc::Parms + 2, flock);
2704   add_safepoint_edges(lock);
2705 
2706   lock = _gvn.transform( lock )->as_Lock();
2707 
2708   // lock has no side-effects, sets few values
2709   set_predefined_output_for_runtime_call(lock, mem, TypeRawPtr::BOTTOM);
2710 
2711   insert_mem_bar(Op_MemBarAcquire);
2712 
2713   // Add this to the worklist so that the lock can be eliminated
2714   record_for_igvn(lock);
2715 
2716 #ifndef PRODUCT
2717   if (PrintLockStatistics) {
2718     // Update the counter for this lock.  Don't bother using an atomic
2719     // operation since we don't require absolute accuracy.
2720     lock->create_lock_counter(map()->jvms());
2721     int adr_type = Compile::AliasIdxRaw;
2722     Node* counter_addr = makecon(TypeRawPtr::make(lock->counter()->addr()));
2723     Node* cnt  = make_load(NULL, counter_addr, TypeInt::INT, T_INT, adr_type);
2724     Node* incr = _gvn.transform(new (C, 3) AddINode(cnt, _gvn.intcon(1)));
2725     store_to_memory(control(), counter_addr, incr, T_INT, adr_type);
2726   }
2727 #endif
2728 
2729   return flock;
2730 }
2731 
2732 
2733 //------------------------------shared_unlock----------------------------------
2734 // Emit unlocking code.
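     // A leading MemBarRelease keeps the locked region from floating below the
     // unlock; the UnlockNode covers both fast and slow paths, and the monitor
     // is then popped from the debug info.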
2735 void GraphKit::shared_unlock(Node* box, Node* obj) {
2736   // bci is either a monitorexit bc or InvocationEntryBci
2737   // %%% SynchronizationEntryBCI is redundant; use InvocationEntryBci in interfaces
2738   assert(SynchronizationEntryBCI == InvocationEntryBci, "");
2739 
2740   if( !GenerateSynchronizationCode )
2741     return;
2742   if (stopped()) {               // Dead monitor?
2743     map()->pop_monitor();        // Kill monitor from debug info
2744     return;
2745   }
2746 
2747   // Memory barrier to avoid floating things down past the locked region
2748   insert_mem_bar(Op_MemBarRelease);
2749 
2750   const TypeFunc *tf = OptoRuntime::complete_monitor_exit_Type();
2751   UnlockNode *unlock = new (C, tf->domain()->cnt()) UnlockNode(C, tf);
2752   uint raw_idx = Compile::AliasIdxRaw;
2753   unlock->init_req( TypeFunc::Control, control() );
2754   unlock->init_req( TypeFunc::Memory , memory(raw_idx) );
2755   unlock->init_req( TypeFunc::I_O    , top() )     ;   // does no i/o
2756   unlock->init_req( TypeFunc::FramePtr, frameptr() );
2757   unlock->init_req( TypeFunc::ReturnAdr, top() );
2758 
2759   unlock->init_req(TypeFunc::Parms + 0, obj);
2760   unlock->init_req(TypeFunc::Parms + 1, box);
2761   unlock = _gvn.transform(unlock)->as_Unlock();
2762 
2763   Node* mem = reset_memory();
2764 
2765   // unlock has no side-effects, sets few values
2766   set_predefined_output_for_runtime_call(unlock, mem, TypeRawPtr::BOTTOM);
2767 
2768   // Kill monitor from debug info
2769   map()->pop_monitor( );
2770 }
2771 
2772 //-------------------------------get_layout_helper-----------------------------
2773 // If the given klass is a constant or known to be an array,
2774 // fetch the constant layout helper value into constant_value
2775 // and return (Node*)NULL.  Otherwise, load the non-constant
2776 // layout helper value, and return the node which represents it.
2777 // This two-faced routine is useful because allocation sites
2778 // almost always feature constant types.
2779 Node* GraphKit::get_layout_helper(Node* klass_node, jint& constant_value) {
2780   const TypeKlassPtr* inst_klass = _gvn.type(klass_node)->isa_klassptr();
2781   if (!StressReflectiveCode && inst_klass != NULL) {
2782     ciKlass* klass = inst_klass->klass();
2783     bool    xklass = inst_klass->klass_is_exact();
2784     if (xklass || klass->is_array_klass()) {
2785       jint lhelper = klass->layout_helper();
2786       if (lhelper != Klass::_lh_neutral_value) {
2787         constant_value = lhelper;
2788         return (Node*) NULL;
2789       }
2790     }
2791   }
2792   constant_value = Klass::_lh_neutral_value;  // put in a known value
2793   Node* lhp = basic_plus_adr(klass_node, klass_node, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc));
2794   return make_load(NULL, lhp, TypeInt::INT, T_INT);
2795 }
2796 
2797 // We just put in an allocate/initialize with a big raw-memory effect.
2798 // Hook selected additional alias categories on the initialization.
2799 static void hook_memory_on_init(GraphKit& kit, int alias_idx,
2800                                 MergeMemNode* init_in_merge,
2801                                 Node* init_out_raw) {
2802   DEBUG_ONLY(Node* init_in_raw = init_in_merge->base_memory());
2803   assert(init_in_merge->memory_at(alias_idx) == init_in_raw, "");
2804 
2805   Node* prevmem = kit.memory(alias_idx);
2806   init_in_merge->set_memory_at(alias_idx, prevmem);
2807   kit.set_memory(init_out_raw, alias_idx);
2808 }
2809 
2810 //---------------------------set_output_for_allocation-------------------------
2811 Node* GraphKit::set_output_for_allocation(AllocateNode* alloc,
2812                                           const TypeOopPtr* oop_type,
2813                                           bool raw_mem_only) {
2814   int rawidx = Compile::AliasIdxRaw;
2815   alloc->set_req( TypeFunc::FramePtr, frameptr() );
2816   add_safepoint_edges(alloc);
2817   Node* allocx = _gvn.transform(alloc);
2818   set_control( _gvn.transform(new (C, 1) ProjNode(allocx, TypeFunc::Control) ) );
2819   // create memory projection for i_o
2820   set_memory ( _gvn.transform( new (C, 1) ProjNode(allocx, TypeFunc::Memory, true) ), rawidx );
2821   make_slow_call_ex(allocx, env()->OutOfMemoryError_klass(), true);
2822 
2823   // create a memory projection as for the normal control path
2824   Node* malloc = _gvn.transform(new (C, 1) ProjNode(allocx, TypeFunc::Memory));
2825   set_memory(malloc, rawidx);
2826 
2827   // a normal slow-call doesn't change i_o, but an allocation does
2828   // we create a separate i_o projection for the normal control path
2829   set_i_o(_gvn.transform( new (C, 1) ProjNode(allocx, TypeFunc::I_O, false) ) );
2830   Node* rawoop = _gvn.transform( new (C, 1) ProjNode(allocx, TypeFunc::Parms) );
2831 
2832   // put in an initialization barrier
2833   InitializeNode* init = insert_mem_bar_volatile(Op_Initialize, rawidx,
2834                                                  rawoop)->as_Initialize();
2835   assert(alloc->initialization() == init,  "2-way macro link must work");
2836   assert(init ->allocation()     == alloc, "2-way macro link must work");
2837   if (ReduceFieldZeroing && !raw_mem_only) {
2838     // Extract memory strands which may participate in the new object's
2839     // initialization, and source them from the new InitializeNode.
2840     // This will allow us to observe initializations when they occur,
2841     // and link them properly (as a group) to the InitializeNode.
2842     assert(init->in(InitializeNode::Memory) == malloc, "");
2843     MergeMemNode* minit_in = MergeMemNode::make(C, malloc);
2844     init->set_req(InitializeNode::Memory, minit_in);
2845     record_for_igvn(minit_in); // fold it up later, if possible
2846     Node* minit_out = memory(rawidx);
2847     assert(minit_out->is_Proj() && minit_out->in(0) == init, "");
2848     if (oop_type->isa_aryptr()) {
2849       const TypePtr* telemref = oop_type->add_offset(Type::OffsetBot);
2850       int            elemidx  = C->get_alias_index(telemref);
2851       hook_memory_on_init(*this, elemidx, minit_in, minit_out);
2852     } else if (oop_type->isa_instptr()) {
2853       ciInstanceKlass* ik = oop_type->klass()->as_instance_klass();
2854       for (int i = 0, len = ik->nof_nonstatic_fields(); i < len; i++) {
2855         ciField* field = ik->nonstatic_field_at(i);
2856         if (field->offset() >= TrackedInitializationLimit * HeapWordSize)
2857           continue;  // do not bother to track really large numbers of fields
2858         // Find (or create) the alias category for this field:
2859         int fieldidx = C->alias_type(field)->index();
2860         hook_memory_on_init(*this, fieldidx, minit_in, minit_out);
2861       }
2862     }
2863   }
2864 
2865   // Cast raw oop to the real thing...
2866   Node* javaoop = new (C, 2) CheckCastPPNode(control(), rawoop, oop_type);
2867   javaoop = _gvn.transform(javaoop);
2868   C->set_recent_alloc(control(), javaoop);
2869   assert(just_allocated_object(control()) == javaoop, "just allocated");
2870 
2871 #ifdef ASSERT
2872   { // Verify that the AllocateNode::Ideal_allocation recognizers work:
2873     assert(AllocateNode::Ideal_allocation(rawoop, &_gvn) == alloc,
2874            "Ideal_allocation works");
2875     assert(AllocateNode::Ideal_allocation(javaoop, &_gvn) == alloc,
2876            "Ideal_allocation works");
2877     if (alloc->is_AllocateArray()) {
2878       assert(AllocateArrayNode::Ideal_array_allocation(rawoop, &_gvn) == alloc->as_AllocateArray(),
2879              "Ideal_allocation works");
2880       assert(AllocateArrayNode::Ideal_array_allocation(javaoop, &_gvn) == alloc->as_AllocateArray(),
2881              "Ideal_allocation works");
2882     } else {
2883       assert(alloc->in(AllocateNode::ALength)->is_top(), "no length, please");
2884     }
2885   }
2886 #endif //ASSERT
2887 
2888   return javaoop;
2889 }
2890 
2891 //---------------------------new_instance--------------------------------------
2892 // This routine takes a klass_node which may be constant (for a static type)
2893 // or may be non-constant (for reflective code).  It will work equally well
2894 // for either, and the graph will fold nicely if the optimizer later reduces
2895 // the type to a constant.
2896 // The optional arguments are for specialized use by intrinsics:
2897 //  - If 'extra_slow_test' is non-null, it is an extra condition for the slow path.
2898 //  - If 'raw_mem_only', do not cast the result to an oop.
2899 //  - If 'return_size_val', report the total object size to the caller.
2900 Node* GraphKit::new_instance(Node* klass_node,
2901                              Node* extra_slow_test,
2902                              bool raw_mem_only, // affect only raw memory
2903                              Node* *return_size_val) {
2904   // Compute size in doublewords
2905   // The size is always an integral number of doublewords, represented
2906   // as a positive bytewise size stored in the klass's layout_helper.
2907   // The layout_helper also encodes (in a low bit) the need for a slow path.
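       // (Illustrative example, assuming the usual encoding: a class whose
       // instances occupy 24 bytes has layout_helper == 24; if it needs a slow
       // path, e.g. because it has a finalizer, the low
       // _lh_instance_slow_path_bit is also set, giving 25.)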
2908   jint  layout_con = Klass::_lh_neutral_value;
2909   Node* layout_val = get_layout_helper(klass_node, layout_con);
2910   int   layout_is_con = (layout_val == NULL);
2911 
2912   if (extra_slow_test == NULL)  extra_slow_test = intcon(0);
2913   // Generate the initial go-slow test.  It's either ALWAYS (return a
2914   // Node for 1) or NEVER (return a NULL) or perhaps (in the reflective
2915   // case) a computed value derived from the layout_helper.
2916   Node* initial_slow_test = NULL;
2917   if (layout_is_con) {
2918     assert(!StressReflectiveCode, "stress mode does not use these paths");
2919     bool must_go_slow = Klass::layout_helper_needs_slow_path(layout_con);
2920     initial_slow_test = must_go_slow? intcon(1): extra_slow_test;
2921 
2922   } else {   // reflective case
2923     // This reflective path is used by Unsafe.allocateInstance.
2924     // (It may be stress-tested by specifying StressReflectiveCode.)
2925     // Basically, we want to get into the VM if there's an illegal argument.
2926     Node* bit = intcon(Klass::_lh_instance_slow_path_bit);
2927     initial_slow_test = _gvn.transform( new (C, 3) AndINode(layout_val, bit) );
2928     if (extra_slow_test != intcon(0)) {
2929       initial_slow_test = _gvn.transform( new (C, 3) OrINode(initial_slow_test, extra_slow_test) );
2930     }
2931     // (Macro-expander will further convert this to a Bool, if necessary.)
2932   }
2933 
2934   // Find the size in bytes.  This is easy; it's the layout_helper.
2935   // The size value must be valid even if the slow path is taken.
2936   Node* size = NULL;
2937   if (layout_is_con) {
2938     size = MakeConX(Klass::layout_helper_size_in_bytes(layout_con));
2939   } else {   // reflective case
2940     // This reflective path is used by clone and Unsafe.allocateInstance.
2941     size = ConvI2X(layout_val);
2942 
2943     // Clear the low bits to extract layout_helper_size_in_bytes:
2944     assert((int)Klass::_lh_instance_slow_path_bit < BytesPerLong, "clear bit");
2945     Node* mask = MakeConX(~ (intptr_t)right_n_bits(LogBytesPerLong));
2946     size = _gvn.transform( new (C, 3) AndXNode(size, mask) );
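         // (Illustrative arithmetic: with layout_helper == 25, i.e. a 24-byte
         // instance plus the slow-path bit, 25 & ~7 == 24.)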
2947   }
2948   if (return_size_val != NULL) {
2949     (*return_size_val) = size;
2950   }
2951 
2952   // This is a precise notnull oop of the klass.
2953   // (Actually, it need not be precise if this is a reflective allocation.)
2954   // It's what we cast the result to.
2955   const TypeKlassPtr* tklass = _gvn.type(klass_node)->isa_klassptr();
2956   if (!tklass)  tklass = TypeKlassPtr::OBJECT;
2957   const TypeOopPtr* oop_type = tklass->as_instance_type();
2958 
2959   // Now generate allocation code
2960 
2961   // The entire memory state is needed for the slow path of the allocation
2962   // since GC and deoptimization can happen.
2963   Node *mem = reset_memory();
2964   set_all_memory(mem); // Create new memory state
2965 
2966   AllocateNode* alloc
2967     = new (C, AllocateNode::ParmLimit)
2968         AllocateNode(C, AllocateNode::alloc_type(),
2969                      control(), mem, i_o(),
2970                      size, klass_node,
2971                      initial_slow_test);
2972 
2973   return set_output_for_allocation(alloc, oop_type, raw_mem_only);
2974 }
2975 
2976 //-------------------------------new_array-------------------------------------
2977 // helper for both newarray and anewarray
2978 // The 'length' parameter is (obviously) the length of the array.
2979 // See comments on new_instance for the meaning of the other arguments.
2980 Node* GraphKit::new_array(Node* klass_node,     // array klass (maybe variable)
2981                           Node* length,         // number of array elements
2982                           bool raw_mem_only,    // affect only raw memory
2983                           Node* *return_size_val) {
2984   jint  layout_con = Klass::_lh_neutral_value;
2985   Node* layout_val = get_layout_helper(klass_node, layout_con);
2986   int   layout_is_con = (layout_val == NULL);
2987 
2988   if (!layout_is_con && !StressReflectiveCode &&
2989       !too_many_traps(Deoptimization::Reason_class_check)) {
2990     // This is a reflective array creation site.
2991     // Optimistically assume that it is a subtype of Object[],
2992     // so that we can fold up all the address arithmetic.
2993     layout_con = Klass::array_layout_helper(T_OBJECT);
2994     Node* cmp_lh = _gvn.transform( new(C, 3) CmpINode(layout_val, intcon(layout_con)) );
2995     Node* bol_lh = _gvn.transform( new(C, 2) BoolNode(cmp_lh, BoolTest::eq) );
2996     { BuildCutout unless(this, bol_lh, PROB_MAX);
2997       uncommon_trap(Deoptimization::Reason_class_check,
2998                     Deoptimization::Action_maybe_recompile);
2999     }
3000     layout_val = NULL;
3001     layout_is_con = true;
3002   }
3003 
3004   // Generate the initial go-slow test.  Make sure we do not overflow
3005   // if length is huge (near 2Gig) or negative!  We do not need
3006   // exact double-words here, just a close approximation of needed
3007   // double-words.  We can't add any offset or rounding bits, lest we
3008   // take a size of -1 bytes and make it positive.  Use an unsigned
3009   // compare, so negative sizes look hugely positive.
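       // (For example, a length of -1 reinterpreted as unsigned compares as a
       // huge positive value, so it always exceeds fast_size_limit and takes the
       // slow path.)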
3010   int fast_size_limit = FastAllocateSizeLimit;
3011   if (layout_is_con) {
3012     assert(!StressReflectiveCode, "stress mode does not use these paths");
3013     // Increase the size limit if we have exact knowledge of array type.
3014     int log2_esize = Klass::layout_helper_log2_element_size(layout_con);
3015     fast_size_limit <<= (LogBytesPerLong - log2_esize);
3016   }
3017 
3018   Node* initial_slow_cmp  = _gvn.transform( new (C, 3) CmpUNode( length, intcon( fast_size_limit ) ) );
3019   Node* initial_slow_test = _gvn.transform( new (C, 2) BoolNode( initial_slow_cmp, BoolTest::gt ) );
3020   if (initial_slow_test->is_Bool()) {
3021     // Hide it behind a CMoveI, or else PhaseIdealLoop::split_up will get sick.
3022     initial_slow_test = initial_slow_test->as_Bool()->as_int_value(&_gvn);
3023   }
3024 
3025   // --- Size Computation ---
3026   // array_size = round_to_heap(array_header + (length << elem_shift));
3027   // where round_to_heap(x) == round_to(x, MinObjAlignmentInBytes)
3028   // and round_to(x, y) == ((x + y-1) & ~(y-1))
3029   // The rounding mask is strength-reduced, if possible.
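       // (Worked example, assuming a 16-byte array header and an 8-byte
       // MinObjAlignmentInBytes: an int[11] needs 16 + (11 << 2) = 60 bytes,
       // which rounds up to 64.)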
3030   int round_mask = MinObjAlignmentInBytes - 1;
3031   Node* header_size = NULL;
3032   int   header_size_min  = arrayOopDesc::base_offset_in_bytes(T_BYTE);
3033   // (T_BYTE has the weakest alignment and size restrictions...)
3034   if (layout_is_con) {
3035     int       hsize  = Klass::layout_helper_header_size(layout_con);
3036     int       eshift = Klass::layout_helper_log2_element_size(layout_con);
3037     BasicType etype  = Klass::layout_helper_element_type(layout_con);
3038     if ((round_mask & ~right_n_bits(eshift)) == 0)
3039       round_mask = 0;  // strength-reduce it if it goes away completely
3040     assert((hsize & right_n_bits(eshift)) == 0, "hsize is pre-rounded");
3041     assert(header_size_min <= hsize, "generic minimum is smallest");
3042     header_size_min = hsize;
3043     header_size = intcon(hsize + round_mask);
3044   } else {
3045     Node* hss   = intcon(Klass::_lh_header_size_shift);
3046     Node* hsm   = intcon(Klass::_lh_header_size_mask);
3047     Node* hsize = _gvn.transform( new(C, 3) URShiftINode(layout_val, hss) );
3048     hsize       = _gvn.transform( new(C, 3) AndINode(hsize, hsm) );
3049     Node* mask  = intcon(round_mask);
3050     header_size = _gvn.transform( new(C, 3) AddINode(hsize, mask) );
3051   }
3052 
3053   Node* elem_shift = NULL;
3054   if (layout_is_con) {
3055     int eshift = Klass::layout_helper_log2_element_size(layout_con);
3056     if (eshift != 0)
3057       elem_shift = intcon(eshift);
3058   } else {
3059     // There is no need to mask or shift this value.
3060     // The semantics of LShiftINode include an implicit mask to 0x1F.
3061     assert(Klass::_lh_log2_element_size_shift == 0, "use shift in place");
3062     elem_shift = layout_val;
3063   }
3064 
3065   // Transition to native address size for all offset calculations:
3066   Node* lengthx = ConvI2X(length);
3067   Node* headerx = ConvI2X(header_size);
3068 #ifdef _LP64
3069   { const TypeLong* tllen = _gvn.find_long_type(lengthx);
3070     if (tllen != NULL && tllen->_lo < 0) {
3071       // Add a manual constraint to a positive range.  Cf. array_element_address.
3072       jlong size_max = arrayOopDesc::max_array_length(T_BYTE);
3073       if (size_max > tllen->_hi)  size_max = tllen->_hi;
3074       const TypeLong* tlcon = TypeLong::make(CONST64(0), size_max, Type::WidenMin);
3075       lengthx = _gvn.transform( new (C, 2) ConvI2LNode(length, tlcon));
3076     }
3077   }
3078 #endif
3079 
3080   // Combine header size (plus rounding) and body size.  Then round down.
3081   // This computation cannot overflow, because it is used only in two
3082   // places, one where the length is sharply limited, and the other
3083   // after a successful allocation.
3084   Node* abody = lengthx;
3085   if (elem_shift != NULL)
3086     abody     = _gvn.transform( new(C, 3) LShiftXNode(lengthx, elem_shift) );
3087   Node* size  = _gvn.transform( new(C, 3) AddXNode(headerx, abody) );
3088   if (round_mask != 0) {
3089     Node* mask = MakeConX(~round_mask);
3090     size       = _gvn.transform( new(C, 3) AndXNode(size, mask) );
3091   }
3092   // else if round_mask == 0, the size computation is self-rounding
3093 
3094   if (return_size_val != NULL) {
3095     // This is the size
3096     (*return_size_val) = size;
3097   }
3098 
3099   // Now generate allocation code
3100 
3101   // The entire memory state is needed for the slow path of the allocation
3102   // since GC and deoptimization can happen.
3103   Node *mem = reset_memory();
3104   set_all_memory(mem); // Create new memory state
3105 
3106   // Create the AllocateArrayNode and its result projections
3107   AllocateArrayNode* alloc
3108     = new (C, AllocateArrayNode::ParmLimit)
3109         AllocateArrayNode(C, AllocateArrayNode::alloc_type(),
3110                           control(), mem, i_o(),
3111                           size, klass_node,
3112                           initial_slow_test,
3113                           length);
3114 
3115   // Cast to correct type.  Note that the klass_node may be constant or not,
3116   // and in the latter case the actual array type will be inexact also.
3117   // (This happens via a non-constant argument to inline_native_newArray.)
3118   // In any case, the value of klass_node provides the desired array type.
3119   const TypeInt* length_type = _gvn.find_int_type(length);
3120   const TypeOopPtr* ary_type = _gvn.type(klass_node)->is_klassptr()->as_instance_type();
3121   if (ary_type->isa_aryptr() && length_type != NULL) {
3122     // Try to get a better type than POS for the size
3123     ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
3124   }
3125 
3126   Node* javaoop = set_output_for_allocation(alloc, ary_type, raw_mem_only);
3127 
3128   // Cast length on remaining path to be as narrow as possible
3129   if (map()->find_edge(length) >= 0) {
3130     Node* ccast = alloc->make_ideal_length(ary_type, &_gvn);
3131     if (ccast != length) {
3132       _gvn.set_type_bottom(ccast);
3133       record_for_igvn(ccast);
3134       replace_in_map(length, ccast);
3135     }
3136   }
3137 
3138   return javaoop;
3139 }
3140 
3141 // The following "Ideal_foo" functions are placed here because they recognize
3142 // the graph shapes created by the functions immediately above.
3143 
3144 //---------------------------Ideal_allocation----------------------------------
3145 // Given an oop pointer or raw pointer, see if it feeds from an AllocateNode.
3146 AllocateNode* AllocateNode::Ideal_allocation(Node* ptr, PhaseTransform* phase) {
3147   if (ptr == NULL) {     // reduce dumb test in callers
3148     return NULL;
3149   }
3150   if (ptr->is_CheckCastPP()) {  // strip a raw-to-oop cast
3151     ptr = ptr->in(1);
3152     if (ptr == NULL)  return NULL;
3153   }
3154   if (ptr->is_Proj()) {
3155     Node* allo = ptr->in(0);
3156     if (allo != NULL && allo->is_Allocate()) {
3157       return allo->as_Allocate();
3158     }
3159   }
3160   // Report failure to match.
3161   return NULL;
3162 }
3163 
3164 // Fancy version which also strips off an offset (and reports it to caller).
3165 AllocateNode* AllocateNode::Ideal_allocation(Node* ptr, PhaseTransform* phase,
3166                                              intptr_t& offset) {
3167   Node* base = AddPNode::Ideal_base_and_offset(ptr, phase, offset);
3168   if (base == NULL)  return NULL;
3169   return Ideal_allocation(base, phase);
3170 }
3171 
3172 // Trace Initialize <- Proj[Parm] <- Allocate
3173 AllocateNode* InitializeNode::allocation() {
3174   Node* rawoop = in(InitializeNode::RawAddress);
3175   if (rawoop->is_Proj()) {
3176     Node* alloc = rawoop->in(0);
3177     if (alloc->is_Allocate()) {
3178       return alloc->as_Allocate();
3179     }
3180   }
3181   return NULL;
3182 }
3183 
3184 // Trace Allocate -> Proj[Parm] -> Initialize
3185 InitializeNode* AllocateNode::initialization() {
3186   ProjNode* rawoop = proj_out(AllocateNode::RawAddress);
3187   if (rawoop == NULL)  return NULL;
3188   for (DUIterator_Fast imax, i = rawoop->fast_outs(imax); i < imax; i++) {
3189     Node* init = rawoop->fast_out(i);
3190     if (init->is_Initialize()) {
3191       assert(init->as_Initialize()->allocation() == this, "2-way link");
3192       return init->as_Initialize();
3193     }
3194   }
3195   return NULL;
3196 }
3197 
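     //--------------------------g1_write_barrier_pre--------------------------------
     // G1 SATB pre-barrier.  While concurrent marking is active, the previous value
     // of an oop field must be logged before it is overwritten so the marker can
     // still trace it.  A rough sketch of the graph emitted below (illustrative):
     //
     //   if (marking_active) {
     //     pre_val = *adr;                        // previous value of the field
     //     if (pre_val != NULL) {
     //       if (index != 0)                      // room left in the SATB buffer
     //         buffer[--index] = pre_val;         // log it locally
     //       else
     //         g1_wb_pre(pre_val, thread);        // buffer full: call the runtime
     //     }
     //   }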
3198 void GraphKit::g1_write_barrier_pre(Node* obj,
3199                                     Node* adr,
3200                                     uint alias_idx,
3201                                     Node* val,
3202                                     const Type* val_type,
3203                                     BasicType bt) {
3204   IdealKit ideal(gvn(), control(), merged_memory(), true);
3205 #define __ ideal.
3206   __ declares_done();
3207 
3208   Node* thread = __ thread();
3209 
3210   Node* no_ctrl = NULL;
3211   Node* no_base = __ top();
3212   Node* zero = __ ConI(0);
3213 
3214   float likely  = PROB_LIKELY(0.999);
3215   float unlikely  = PROB_UNLIKELY(0.999);
3216 
3217   BasicType active_type = in_bytes(PtrQueue::byte_width_of_active()) == 4 ? T_INT : T_BYTE;
3218   assert(in_bytes(PtrQueue::byte_width_of_active()) == 4 || in_bytes(PtrQueue::byte_width_of_active()) == 1, "flag width");
3219 
3220   // Offsets into the thread
3221   const int marking_offset = in_bytes(JavaThread::satb_mark_queue_offset() +  // 648
3222                                           PtrQueue::byte_offset_of_active());
3223   const int index_offset   = in_bytes(JavaThread::satb_mark_queue_offset() +  // 656
3224                                           PtrQueue::byte_offset_of_index());
3225   const int buffer_offset  = in_bytes(JavaThread::satb_mark_queue_offset() +  // 652
3226                                           PtrQueue::byte_offset_of_buf());
3227   // Now the actual pointers into the thread
3228 
3229   // set_control( ctl);
3230 
3231   Node* marking_adr = __ AddP(no_base, thread, __ ConX(marking_offset));
3232   Node* buffer_adr  = __ AddP(no_base, thread, __ ConX(buffer_offset));
3233   Node* index_adr   = __ AddP(no_base, thread, __ ConX(index_offset));
3234 
3235   // Now some of the values
3236 
3237   Node* marking = __ load(__ ctrl(), marking_adr, TypeInt::INT, active_type, Compile::AliasIdxRaw);
3238 
3239   // if (marking != 0), i.e. concurrent marking is active
3240   __ if_then(marking, BoolTest::ne, zero); {
3241     Node* index   = __ load(__ ctrl(), index_adr, TypeInt::INT, T_INT, Compile::AliasIdxRaw);
3242 
3243     const Type* t1 = adr->bottom_type();
3244     const Type* t2 = val->bottom_type();
3245 
3246     Node* orig = __ load(no_ctrl, adr, val_type, bt, alias_idx);
3247     // if (orig != NULL)
3248     __ if_then(orig, BoolTest::ne, null()); {
3249       Node* buffer  = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);
3250 
3251       // load original value
3252       // alias_idx correct??
3253 
3254       // Is there still room in this thread's SATB queue? (index == 0 means full.)
3255       __ if_then(index, BoolTest::ne, zero, likely); {
3256 
3257         // decrement the index
3258         Node* next_index = __ SubI(index,  __ ConI(sizeof(intptr_t)));
3259         Node* next_indexX = next_index;
3260 #ifdef _LP64
3261           // We could refine the type for what it's worth
3262           // const TypeLong* lidxtype = TypeLong::make(CONST64(0), get_size_from_queue);
3263           next_indexX = _gvn.transform( new (C, 2) ConvI2LNode(next_index, TypeLong::make(0, max_jlong, Type::WidenMax)) );
3264 #endif // _LP64
3265 
3266         // Now get the buffer location we will log the original value into and store it
3267 
3268         Node *log_addr = __ AddP(no_base, buffer, next_indexX);
3269         // __ store(__ ctrl(), log_addr, orig, T_OBJECT, C->get_alias_index(TypeOopPtr::BOTTOM));
3270         __ store(__ ctrl(), log_addr, orig, T_OBJECT, Compile::AliasIdxRaw);
3271 
3272 
3273         // update the index
3274         // __ store(__ ctrl(), index_adr, next_index, T_INT, Compile::AliasIdxRaw);
3275         // This is a hack to force this store to occur before the oop store that is coming up
3276         __ store(__ ctrl(), index_adr, next_index, T_INT, C->get_alias_index(TypeOopPtr::BOTTOM));
3277 
3278       } __ else_(); {
3279 
3280         // logging buffer is full, call the runtime
3281         const TypeFunc *tf = OptoRuntime::g1_wb_pre_Type();
3282         // __ make_leaf_call(tf, OptoRuntime::g1_wb_pre_Java(), "g1_wb_pre", orig, thread);
3283         __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), "g1_wb_pre", orig, thread);
3284       } __ end_if();
3285     } __ end_if();
3286   } __ end_if();
3287 
3288   __ drain_delay_transform();
3289   set_control( __ ctrl());
3290   set_all_memory( __ merged_memory());
3291 
3292 #undef __
3293 }
3294 
3295 //
3296 // Update the card table and add card address to the queue
3297 //
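     // Roughly (illustrative): dirty the card by storing zero, ordered after the
     // oop store; then, if the thread's dirty-card queue still has room, enqueue
     // the card address locally, otherwise hand it to the g1_wb_post runtime stub.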
3298 void GraphKit::g1_mark_card(IdealKit* ideal, Node* card_adr, Node* store,  Node* index, Node* index_adr, Node* buffer, const TypeFunc* tf) {
3299 #define __ ideal->
3300   Node* zero = __ ConI(0);
3301   Node* no_base = __ top();
3302   BasicType card_bt = T_BYTE;
3303   // Smash zero into card. MUST BE ORDERED WRT THE OOP STORE
3304   __ storeCM(__ ctrl(), card_adr, zero, store, card_bt, Compile::AliasIdxRaw);
3305 
3306   //  Now do the queue work
3307   __ if_then(index, BoolTest::ne, zero); {
3308 
3309     Node* next_index = __ SubI(index,  __ ConI(sizeof(intptr_t)));
3310     Node* next_indexX = next_index;
3311 #ifdef _LP64
3312     // We could refine the type for what it's worth
3313     // const TypeLong* lidxtype = TypeLong::make(CONST64(0), get_size_from_queue);
3314     next_indexX = _gvn.transform( new (C, 2) ConvI2LNode(next_index, TypeLong::make(0, max_jlong, Type::WidenMax)) );
3315 #endif // _LP64
3316     Node* log_addr = __ AddP(no_base, buffer, next_indexX);
3317 
3318     __ store(__ ctrl(), log_addr, card_adr, T_ADDRESS, Compile::AliasIdxRaw);
3319     __ store(__ ctrl(), index_adr, next_index, T_INT, Compile::AliasIdxRaw);
3320 
3321   } __ else_(); {
3322     __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), "g1_wb_post", card_adr, __ thread());
3323   } __ end_if();
3324 #undef __
3325 }
3326 
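     //--------------------------g1_write_barrier_post-------------------------------
     // G1 post-barrier.  After an oop store, dirty the card for the updated location
     // and enqueue it, but only when the store might create a cross-region reference
     // and the stored value is not NULL.  A rough sketch (illustrative):
     //
     //   if (((adr ^ val) >> LogOfHRGrainBytes) != 0    // different heap regions
     //       && val != NULL
     //       && card[adr >> card_shift] != 0) {         // card not yet dirty (0 == dirty)
     //     g1_mark_card(...);                           // dirty the card, enqueue it
     //   }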
3327 void GraphKit::g1_write_barrier_post(Node* store,
3328                                      Node* obj,
3329                                      Node* adr,
3330                                      uint alias_idx,
3331                                      Node* val,
3332                                      BasicType bt,
3333                                      bool use_precise) {
3334   // If we are writing a NULL then we need no post barrier
3335 
3336   if (val != NULL && val->is_Con() && val->bottom_type() == TypePtr::NULL_PTR) {
3337     // Must be NULL
3338     const Type* t = val->bottom_type();
3339     assert(t == Type::TOP || t == TypePtr::NULL_PTR, "must be NULL");
3340     // No post barrier if writing NULL
3341     return;
3342   }
3343 
3344   if (!use_precise) {
3345     // All card marks for a (non-array) instance are in one place:
3346     adr = obj;
3347   }
3348   // (Else it's an array (or unknown), and we want more precise card marks.)
3349   assert(adr != NULL, "");
3350 
3351   IdealKit ideal(gvn(), control(), merged_memory(), true);
3352 #define __ ideal.
3353   __ declares_done();
3354 
3355   Node* thread = __ thread();
3356 
3357   Node* no_ctrl = NULL;
3358   Node* no_base = __ top();
3359   float likely  = PROB_LIKELY(0.999);
3360   float unlikely  = PROB_UNLIKELY(0.999);
3361   Node* zero = __ ConI(0);
3362   Node* zeroX = __ ConX(0);
3363 
3364   // Get the alias_index for raw card-mark memory
3365   const TypePtr* card_type = TypeRawPtr::BOTTOM;
3366 
3367   const TypeFunc *tf = OptoRuntime::g1_wb_post_Type();
3368 
3369   // Offsets into the thread
3370   const int index_offset  = in_bytes(JavaThread::dirty_card_queue_offset() +
3371                                      PtrQueue::byte_offset_of_index());
3372   const int buffer_offset = in_bytes(JavaThread::dirty_card_queue_offset() +
3373                                      PtrQueue::byte_offset_of_buf());
3374 
3375   // Pointers into the thread
3376 
3377   Node* buffer_adr = __ AddP(no_base, thread, __ ConX(buffer_offset));
3378   Node* index_adr =  __ AddP(no_base, thread, __ ConX(index_offset));
3379 
3380   // Now some values
3381 
3382   Node* index  = __ load(no_ctrl, index_adr, TypeInt::INT, T_INT, Compile::AliasIdxRaw);
3383   Node* buffer = __ load(no_ctrl, buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);
3384 
3385 
3386   // Convert the store obj pointer to an int prior to doing math on it.
3387   // Using addr rather than obj yields accurate card marks.
3388 
3389   // Node* cast = __ CastPX(no_ctrl, adr /* obj */);
3390 
3391   // Must use ctrl to prevent "integerized oop" existing across safepoint
3392   Node* cast =  __ CastPX(__ ctrl(), ( use_precise ? adr : obj ));
3393 
3394   // Divide pointer by card size
3395   Node* card_offset = __ URShiftX( cast, __ ConI(CardTableModRefBS::card_shift) );
3396 
3397   // Combine card table base and card offset
3398   Node *card_adr = __ AddP(no_base, byte_map_base_node(), card_offset );
3399 
3400   // If we know the value being stored, check whether the store crosses regions.
3401 
3402   if (val != NULL) {
3403     // Does the store cause us to cross regions?
3404 
3405     // We should be able to do an unsigned compare against region_size instead
3406     // of an extra shift, if an unsigned compare is available.
3407     // Node* region_size = __ ConI(1 << HeapRegion::LogOfHRGrainBytes);
3408     Node* xor_res =  __ URShiftX ( __ XorX( cast,  __ CastPX(__ ctrl(), val)), __ ConI(HeapRegion::LogOfHRGrainBytes));
3409 
3410     // if (xor_res == 0) same region so skip
3411     __ if_then(xor_res, BoolTest::ne, zeroX); {
3412 
3413       // No barrier if we are storing a NULL
3414       __ if_then(val, BoolTest::ne, null(), unlikely); {
3415 
3416         // Ok must mark the card if not already dirty
3417 
3418         // load the original value of the card
3419         Node* card_val = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);
3420 
3421         __ if_then(card_val, BoolTest::ne, zero); {
3422           g1_mark_card(&ideal, card_adr, store, index, index_adr, buffer, tf);
3423         } __ end_if();
3424       } __ end_if();
3425     } __ end_if();
3426   } else {
3427     g1_mark_card(&ideal, card_adr, store, index, index_adr, buffer, tf);
3428   }
3429 
3430 
3431   __ drain_delay_transform();
3432   set_control( __ ctrl());
3433   set_all_memory( __ merged_memory());
3434 #undef __
3435 
3436 }