/*
 * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compileLog.hpp"
#include "oops/objArrayKlass.hpp"
#include "opto/addnode.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/c2compiler.hpp"
#include "opto/callGenerator.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/convertnode.hpp"
#include "opto/countbitsnode.hpp"
#include "opto/intrinsicnode.hpp"
#include "opto/idealKit.hpp"
#include "opto/mathexactnode.hpp"
#include "opto/movenode.hpp"
#include "opto/mulnode.hpp"
#include "opto/narrowptrnode.hpp"
#include "opto/opaquenode.hpp"
#include "opto/parse.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "prims/nativeLookup.hpp"
#include "runtime/sharedRuntime.hpp"
#include "trace/traceMacros.hpp"

class LibraryIntrinsic : public InlineCallGenerator {
  // Extend the set of intrinsics known to the runtime:
 private:
  bool             _is_virtual;
  bool             _does_virtual_dispatch;
  int8_t           _predicates_count;  // Intrinsic is predicated by several conditions
  int8_t           _last_predicate;    // Last generated predicate
  vmIntrinsics::ID _intrinsic_id;

 public:
  LibraryIntrinsic(ciMethod* m, bool is_virtual, int predicates_count, bool does_virtual_dispatch, vmIntrinsics::ID id)
    : InlineCallGenerator(m),
      _is_virtual(is_virtual),
      _does_virtual_dispatch(does_virtual_dispatch),
      _predicates_count((int8_t)predicates_count),
      _last_predicate((int8_t)-1),
      _intrinsic_id(id)
  {
  }
  virtual bool is_intrinsic() const { return true; }
  virtual bool is_virtual()   const { return _is_virtual; }
  virtual bool is_predicated() const { return _predicates_count > 0; }
  virtual int  predicates_count() const { return _predicates_count; }
  virtual bool does_virtual_dispatch()   const { return _does_virtual_dispatch; }
  virtual JVMState* generate(JVMState* jvms);
  virtual Node* generate_predicate(JVMState* jvms, int predicate);
  vmIntrinsics::ID intrinsic_id() const { return _intrinsic_id; }
};
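
// Note on predicated intrinsics: an intrinsic with _predicates_count == N
// is expanded by requesting generate_predicate(jvms, k) for each predicate
// k in [0, N) and then generate() on each guarded path; _last_predicate
// remembers which predicate guarded the most recent expansion so that
// generate() can pass it down to try_to_inline(_last_predicate) below.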


// Local helper class for LibraryIntrinsic:
class LibraryCallKit : public GraphKit {
 private:
  LibraryIntrinsic* _intrinsic;     // the library intrinsic being called
  Node*             _result;        // the result node, if any
  int               _reexecute_sp;  // the stack pointer when bytecode needs to be reexecuted

  const TypeOopPtr* sharpen_unsafe_type(Compile::AliasType* alias_type, const TypePtr* adr_type, bool is_native_ptr = false);

 public:
  LibraryCallKit(JVMState* jvms, LibraryIntrinsic* intrinsic)
    : GraphKit(jvms),
      _intrinsic(intrinsic),
      _result(NULL)
  {
    // Check if this is a root compile.  In that case we don't have a caller.
    if (!jvms->has_method()) {
      _reexecute_sp = sp();
    } else {
      // Find out how many arguments the interpreter needs when deoptimizing
      // and save the stack pointer value so it can be used by uncommon_trap.
      // We find the argument count by looking at the declared signature.
      bool ignored_will_link;
      ciSignature* declared_signature = NULL;
      ciMethod* ignored_callee = caller()->get_method_at_bci(bci(), ignored_will_link, &declared_signature);
      const int nargs = declared_signature->arg_size_for_bc(caller()->java_code_at_bci(bci()));
      _reexecute_sp = sp() + nargs;  // "push" arguments back on stack
    }
  }
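
  // For example, with nargs == 2 the saved value is sp() + 2, so an
  // uncommon_trap() taken inside the intrinsic deoptimizes with both
  // arguments conceptually restored to the interpreter's expression stack
  // and the call bytecode re-executed (see reexecute_sp() below).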

  virtual LibraryCallKit* is_LibraryCallKit() const { return (LibraryCallKit*)this; }

  ciMethod*         caller()    const    { return jvms()->method(); }
  int               bci()       const    { return jvms()->bci(); }
  LibraryIntrinsic* intrinsic() const    { return _intrinsic; }
  vmIntrinsics::ID  intrinsic_id() const { return _intrinsic->intrinsic_id(); }
  ciMethod*         callee()    const    { return _intrinsic->method(); }

  bool  try_to_inline(int predicate);
  Node* try_to_predicate(int predicate);

  void push_result() {
    // Push the result onto the stack.
    if (!stopped() && result() != NULL) {
      BasicType bt = result()->bottom_type()->basic_type();
      push_node(bt, result());
    }
  }

 private:
  void fatal_unexpected_iid(vmIntrinsics::ID iid) {
    fatal("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid));
  }

  void  set_result(Node* n) { assert(_result == NULL, "only set once"); _result = n; }
  void  set_result(RegionNode* region, PhiNode* value);
  Node*     result() { return _result; }

  virtual int reexecute_sp() { return _reexecute_sp; }

  // Helper functions to inline natives
  Node* generate_guard(Node* test, RegionNode* region, float true_prob);
  Node* generate_slow_guard(Node* test, RegionNode* region);
  Node* generate_fair_guard(Node* test, RegionNode* region);
  Node* generate_negative_guard(Node* index, RegionNode* region,
                                // resulting CastII of index:
                                Node* *pos_index = NULL);
  Node* generate_limit_guard(Node* offset, Node* subseq_length,
                             Node* array_length,
                             RegionNode* region);
  Node* generate_current_thread(Node* &tls_output);
  Node* load_mirror_from_klass(Node* klass);
  Node* load_klass_from_mirror_common(Node* mirror, bool never_see_null,
                                      RegionNode* region, int null_path,
                                      int offset);
  Node* load_klass_from_mirror(Node* mirror, bool never_see_null,
                               RegionNode* region, int null_path) {
    int offset = java_lang_Class::klass_offset_in_bytes();
    return load_klass_from_mirror_common(mirror, never_see_null,
                                         region, null_path,
                                         offset);
  }
  Node* load_array_klass_from_mirror(Node* mirror, bool never_see_null,
                                     RegionNode* region, int null_path) {
    int offset = java_lang_Class::array_klass_offset_in_bytes();
    return load_klass_from_mirror_common(mirror, never_see_null,
                                         region, null_path,
                                         offset);
  }
  Node* generate_access_flags_guard(Node* kls,
                                    int modifier_mask, int modifier_bits,
                                    RegionNode* region);
  Node* generate_interface_guard(Node* kls, RegionNode* region);
  Node* generate_array_guard(Node* kls, RegionNode* region) {
    return generate_array_guard_common(kls, region, false, false);
  }
  Node* generate_non_array_guard(Node* kls, RegionNode* region) {
    return generate_array_guard_common(kls, region, false, true);
  }
  Node* generate_objArray_guard(Node* kls, RegionNode* region) {
    return generate_array_guard_common(kls, region, true, false);
  }
  Node* generate_non_objArray_guard(Node* kls, RegionNode* region) {
    return generate_array_guard_common(kls, region, true, true);
  }
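  // The four guard helpers above map onto generate_array_guard_common's
  // (obj_array, not_array) flags:
  //   (false, false) -> take the branch if kls is any array
  //   (false, true ) -> take the branch if kls is NOT an array
  //   (true,  false) -> take the branch if kls is an object array
  //   (true,  true ) -> take the branch if kls is NOT an object array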
  Node* generate_array_guard_common(Node* kls, RegionNode* region,
                                    bool obj_array, bool not_array);
  Node* generate_virtual_guard(Node* obj_klass, RegionNode* slow_region);
  CallJavaNode* generate_method_call(vmIntrinsics::ID method_id,
                                     bool is_virtual = false, bool is_static = false);
  CallJavaNode* generate_method_call_static(vmIntrinsics::ID method_id) {
    return generate_method_call(method_id, false, true);
  }
  CallJavaNode* generate_method_call_virtual(vmIntrinsics::ID method_id) {
    return generate_method_call(method_id, true, false);
  }
  Node* load_field_from_object(Node* fromObj, const char* fieldName, const char* fieldTypeString, bool is_exact, bool is_static, ciInstanceKlass* fromKls);

  Node* make_string_method_node(int opcode, Node* str1_start, Node* cnt1, Node* str2_start, Node* cnt2, StrIntrinsicNode::ArgEnc ae);
  bool inline_string_compareTo(StrIntrinsicNode::ArgEnc ae);
  bool inline_string_indexOf(StrIntrinsicNode::ArgEnc ae);
  bool inline_string_indexOfI(StrIntrinsicNode::ArgEnc ae);
  bool inline_string_indexOfChar();
  bool inline_string_equals(StrIntrinsicNode::ArgEnc ae);
  bool inline_string_toBytesU();
  bool inline_string_getCharsU();
  bool inline_string_copy(bool compress);
  bool inline_string_char_access(bool is_store);
  Node* round_double_node(Node* n);
  bool runtime_math(const TypeFunc* call_type, address funcAddr, const char* funcName);
  bool inline_math_native(vmIntrinsics::ID id);
  bool inline_trig(vmIntrinsics::ID id);
  bool inline_math(vmIntrinsics::ID id);
  template <typename OverflowOp>
  bool inline_math_overflow(Node* arg1, Node* arg2);
  void inline_math_mathExact(Node* math, Node* test);
  bool inline_math_addExactI(bool is_increment);
  bool inline_math_addExactL(bool is_increment);
  bool inline_math_multiplyExactI();
  bool inline_math_multiplyExactL();
  bool inline_math_negateExactI();
  bool inline_math_negateExactL();
  bool inline_math_subtractExactI(bool is_decrement);
  bool inline_math_subtractExactL(bool is_decrement);
  bool inline_pow();
  Node* finish_pow_exp(Node* result, Node* x, Node* y, const TypeFunc* call_type, address funcAddr, const char* funcName);
  bool inline_min_max(vmIntrinsics::ID id);
  bool inline_notify(vmIntrinsics::ID id);
  Node* generate_min_max(vmIntrinsics::ID id, Node* x, Node* y);
  // This returns Type::AnyPtr, RawPtr, or OopPtr.
  int classify_unsafe_addr(Node* &base, Node* &offset);
  Node* make_unsafe_address(Node* base, Node* offset);
  // Helper for inline_unsafe_access.
  // Generates the guards that check whether the result of
  // Unsafe.getObject should be recorded in an SATB log buffer.
  void insert_pre_barrier(Node* base_oop, Node* offset, Node* pre_val, bool need_mem_bar);
  bool inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, bool is_volatile);
  static bool klass_needs_init_guard(Node* kls);
  bool inline_unsafe_allocate();
  bool inline_unsafe_copyMemory();
  bool inline_native_currentThread();
#ifdef TRACE_HAVE_INTRINSICS
  bool inline_native_classID();
  bool inline_native_threadID();
#endif
  bool inline_native_time_funcs(address method, const char* funcName);
  bool inline_native_isInterrupted();
  bool inline_native_Class_query(vmIntrinsics::ID id);
  bool inline_native_subtype_check();

  bool inline_native_newArray();
  bool inline_native_getLength();
  bool inline_array_copyOf(bool is_copyOfRange);
  bool inline_array_equals(StrIntrinsicNode::ArgEnc ae);
  void copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array, bool card_mark);
  bool inline_native_clone(bool is_virtual);
  bool inline_native_Reflection_getCallerClass();
  // Helper function for inlining native object hash method
  bool inline_native_hashcode(bool is_virtual, bool is_static);
  bool inline_native_getClass();

  // Helper functions for inlining arraycopy
  bool inline_arraycopy();
  AllocateArrayNode* tightly_coupled_allocation(Node* ptr,
                                                RegionNode* slow_region);
  JVMState* arraycopy_restore_alloc_state(AllocateArrayNode* alloc, int& saved_reexecute_sp);
  void arraycopy_move_allocation_here(AllocateArrayNode* alloc, Node* dest, JVMState* saved_jvms, int saved_reexecute_sp);

  typedef enum { LS_xadd, LS_xchg, LS_cmpxchg } LoadStoreKind;
  bool inline_unsafe_load_store(BasicType type, LoadStoreKind kind);
  bool inline_unsafe_ordered_store(BasicType type);
  bool inline_unsafe_fence(vmIntrinsics::ID id);
  bool inline_fp_conversions(vmIntrinsics::ID id);
  bool inline_number_methods(vmIntrinsics::ID id);
  bool inline_reference_get();
  bool inline_Class_cast();
  bool inline_aescrypt_Block(vmIntrinsics::ID id);
  bool inline_cipherBlockChaining_AESCrypt(vmIntrinsics::ID id);
  Node* inline_cipherBlockChaining_AESCrypt_predicate(bool decrypting);
  Node* get_key_start_from_aescrypt_object(Node* aescrypt_object);
  Node* get_original_key_start_from_aescrypt_object(Node* aescrypt_object);
  bool inline_ghash_processBlocks();
  bool inline_sha_implCompress(vmIntrinsics::ID id);
  bool inline_digestBase_implCompressMB(int predicate);
  bool inline_sha_implCompressMB(Node* digestBaseObj, ciInstanceKlass* instklass_SHA,
                                 bool long_state, address stubAddr, const char* stubName,
                                 Node* src_start, Node* ofs, Node* limit);
  Node* get_state_from_sha_object(Node* sha_object);
  Node* get_state_from_sha5_object(Node* sha_object);
  Node* inline_digestBase_implCompressMB_predicate(int predicate);
  bool inline_encodeISOArray();
  bool inline_updateCRC32();
  bool inline_updateBytesCRC32();
  bool inline_updateByteBufferCRC32();
  Node* get_table_from_crc32c_class(ciInstanceKlass* crc32c_class);
  bool inline_updateBytesCRC32C();
  bool inline_updateDirectByteBufferCRC32C();
  bool inline_updateBytesAdler32();
  bool inline_updateByteBufferAdler32();
  bool inline_multiplyToLen();
  bool inline_hasNegatives();
  bool inline_squareToLen();
  bool inline_mulAdd();
  bool inline_montgomeryMultiply();
  bool inline_montgomerySquare();

  bool inline_profileBoolean();
  bool inline_isCompileConstant();
};

//---------------------------make_vm_intrinsic----------------------------
CallGenerator* Compile::make_vm_intrinsic(ciMethod* m, bool is_virtual) {
  vmIntrinsics::ID id = m->intrinsic_id();
  assert(id != vmIntrinsics::_none, "must be a VM intrinsic");

  if (!m->is_loaded()) {
    // Do not attempt to inline unloaded methods.
    return NULL;
  }

  C2Compiler* compiler = (C2Compiler*)CompileBroker::compiler(CompLevel_full_optimization);
  bool is_available = false;

  {
    // For calling is_intrinsic_supported, is_intrinsic_disabled and
    // is_disabled_by_flags the compiler must transition to the
    // '_thread_in_vm' state because all three methods access VM-internal data.
    VM_ENTRY_MARK;
    methodHandle mh(THREAD, m->get_Method());
    is_available = compiler->is_intrinsic_supported(mh, is_virtual) &&
                   !C->directive()->is_intrinsic_disabled(mh) &&
                   !vmIntrinsics::is_disabled_by_flags(mh);
  }

  if (is_available) {
    assert(id <= vmIntrinsics::LAST_COMPILER_INLINE, "caller responsibility");
    assert(id != vmIntrinsics::_Object_init && id != vmIntrinsics::_invoke, "enum out of order?");
    return new LibraryIntrinsic(m, is_virtual,
                                vmIntrinsics::predicates_needed(id),
                                vmIntrinsics::does_virtual_dispatch(id),
                                (vmIntrinsics::ID) id);
  } else {
    return NULL;
  }
}

//----------------------register_library_intrinsics-----------------------
// Initialize this file's data structures, for each Compile instance.
void Compile::register_library_intrinsics() {
  // Nothing to do here.
}

JVMState* LibraryIntrinsic::generate(JVMState* jvms) {
  LibraryCallKit kit(jvms, this);
  Compile* C = kit.C;
  int nodes = C->unique();
#ifndef PRODUCT
  if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
    char buf[1000];
    const char* str = vmIntrinsics::short_name_as_C_string(intrinsic_id(), buf, sizeof(buf));
    tty->print_cr("Intrinsic %s", str);
  }
#endif
  ciMethod* callee = kit.callee();
  const int bci    = kit.bci();

  // Try to inline the intrinsic.
  if ((CheckIntrinsics ? callee->intrinsic_candidate() : true) &&
      kit.try_to_inline(_last_predicate)) {
    if (C->print_intrinsics() || C->print_inlining()) {
      C->print_inlining(callee, jvms->depth() - 1, bci, is_virtual() ? "(intrinsic, virtual)" : "(intrinsic)");
    }
    C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked);
    if (C->log()) {
      C->log()->elem("intrinsic id='%s'%s nodes='%d'",
                     vmIntrinsics::name_at(intrinsic_id()),
                     (is_virtual() ? " virtual='1'" : ""),
                     C->unique() - nodes);
    }
    // Push the result from the inlined method onto the stack.
    kit.push_result();
    C->print_inlining_update(this);
    return kit.transfer_exceptions_into_jvms();
  }

  // The intrinsic bailed out.
  if (C->print_intrinsics() || C->print_inlining()) {
    if (jvms->has_method()) {
      // Not a root compile.
      const char* msg;
      if (callee->intrinsic_candidate()) {
        msg = is_virtual() ? "failed to inline (intrinsic, virtual)" : "failed to inline (intrinsic)";
      } else {
        msg = is_virtual() ? "failed to inline (intrinsic, virtual), method not annotated"
                           : "failed to inline (intrinsic), method not annotated";
      }
      C->print_inlining(callee, jvms->depth() - 1, bci, msg);
    } else {
      // Root compile.
      tty->print("Did not generate intrinsic %s%s at bci:%d in",
                 vmIntrinsics::name_at(intrinsic_id()),
                 (is_virtual() ? " (virtual)" : ""), bci);
    }
  }
  C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_failed);
  C->print_inlining_update(this);
  return NULL;
}

Node* LibraryIntrinsic::generate_predicate(JVMState* jvms, int predicate) {
  LibraryCallKit kit(jvms, this);
  Compile* C = kit.C;
  int nodes = C->unique();
  _last_predicate = predicate;
#ifndef PRODUCT
  assert(is_predicated() && predicate < predicates_count(), "sanity");
  if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
    char buf[1000];
    const char* str = vmIntrinsics::short_name_as_C_string(intrinsic_id(), buf, sizeof(buf));
    tty->print_cr("Predicate for intrinsic %s", str);
  }
#endif
  ciMethod* callee = kit.callee();
  const int bci    = kit.bci();

  Node* slow_ctl = kit.try_to_predicate(predicate);
  if (!kit.failing()) {
    if (C->print_intrinsics() || C->print_inlining()) {
      C->print_inlining(callee, jvms->depth() - 1, bci, is_virtual() ? "(intrinsic, virtual, predicate)" : "(intrinsic, predicate)");
    }
    C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked);
    if (C->log()) {
      C->log()->elem("predicate_intrinsic id='%s'%s nodes='%d'",
                     vmIntrinsics::name_at(intrinsic_id()),
                     (is_virtual() ? " virtual='1'" : ""),
                     C->unique() - nodes);
    }
    return slow_ctl; // Could be NULL if the check folds.
  }

  // The intrinsic bailed out.
  if (C->print_intrinsics() || C->print_inlining()) {
    if (jvms->has_method()) {
      // Not a root compile.
      const char* msg = "failed to generate predicate for intrinsic";
      C->print_inlining(kit.callee(), jvms->depth() - 1, bci, msg);
    } else {
      // Root compile.
      C->print_inlining_stream()->print("Did not generate predicate for intrinsic %s%s at bci:%d in",
                                        vmIntrinsics::name_at(intrinsic_id()),
                                        (is_virtual() ? " (virtual)" : ""), bci);
    }
  }
  C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_failed);
  return NULL;
}
 462 
 463 bool LibraryCallKit::try_to_inline(int predicate) {
 464   // Handle symbolic names for otherwise undistinguished boolean switches:
 465   const bool is_store       = true;
 466   const bool is_compress    = true;
 467   const bool is_native_ptr  = true;
 468   const bool is_static      = true;
 469   const bool is_volatile    = true;
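
  // These constants make the boolean arguments read as named flags at the
  // call sites below; e.g.
  //   inline_unsafe_access(!is_native_ptr, is_store, T_INT, !is_volatile)
  // reads as: heap (not native) pointer, a store, of an int, non-volatile.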

  if (!jvms()->has_method()) {
    // Root JVMState has a null method.
    assert(map()->memory()->Opcode() == Op_Parm, "");
    // Insert the memory aliasing node
    set_all_memory(reset_memory());
  }
  assert(merged_memory(), "");

  switch (intrinsic_id()) {
  case vmIntrinsics::_hashCode:                 return inline_native_hashcode(intrinsic()->is_virtual(), !is_static);
  case vmIntrinsics::_identityHashCode:         return inline_native_hashcode(/*!virtual*/ false,         is_static);
  case vmIntrinsics::_getClass:                 return inline_native_getClass();

  case vmIntrinsics::_dsin:
  case vmIntrinsics::_dcos:
  case vmIntrinsics::_dtan:
  case vmIntrinsics::_dabs:
  case vmIntrinsics::_datan2:
  case vmIntrinsics::_dsqrt:
  case vmIntrinsics::_dexp:
  case vmIntrinsics::_dlog:
  case vmIntrinsics::_dlog10:
  case vmIntrinsics::_dpow:                     return inline_math_native(intrinsic_id());

  case vmIntrinsics::_min:
  case vmIntrinsics::_max:                      return inline_min_max(intrinsic_id());

  case vmIntrinsics::_notify:
  case vmIntrinsics::_notifyAll:
    if (InlineNotify) {
      return inline_notify(intrinsic_id());
    }
    return false;

  case vmIntrinsics::_addExactI:                return inline_math_addExactI(false /* add */);
  case vmIntrinsics::_addExactL:                return inline_math_addExactL(false /* add */);
  case vmIntrinsics::_decrementExactI:          return inline_math_subtractExactI(true /* decrement */);
  case vmIntrinsics::_decrementExactL:          return inline_math_subtractExactL(true /* decrement */);
  case vmIntrinsics::_incrementExactI:          return inline_math_addExactI(true /* increment */);
  case vmIntrinsics::_incrementExactL:          return inline_math_addExactL(true /* increment */);
  case vmIntrinsics::_multiplyExactI:           return inline_math_multiplyExactI();
  case vmIntrinsics::_multiplyExactL:           return inline_math_multiplyExactL();
  case vmIntrinsics::_negateExactI:             return inline_math_negateExactI();
  case vmIntrinsics::_negateExactL:             return inline_math_negateExactL();
  case vmIntrinsics::_subtractExactI:           return inline_math_subtractExactI(false /* subtract */);
  case vmIntrinsics::_subtractExactL:           return inline_math_subtractExactL(false /* subtract */);

  case vmIntrinsics::_arraycopy:                return inline_arraycopy();

  case vmIntrinsics::_compareToL:               return inline_string_compareTo(StrIntrinsicNode::LL);
  case vmIntrinsics::_compareToU:               return inline_string_compareTo(StrIntrinsicNode::UU);
  case vmIntrinsics::_compareToLU:              return inline_string_compareTo(StrIntrinsicNode::LU);
  case vmIntrinsics::_compareToUL:              return inline_string_compareTo(StrIntrinsicNode::UL);

  case vmIntrinsics::_indexOfL:                 return inline_string_indexOf(StrIntrinsicNode::LL);
  case vmIntrinsics::_indexOfU:                 return inline_string_indexOf(StrIntrinsicNode::UU);
  case vmIntrinsics::_indexOfUL:                return inline_string_indexOf(StrIntrinsicNode::UL);
  case vmIntrinsics::_indexOfIL:                return inline_string_indexOfI(StrIntrinsicNode::LL);
  case vmIntrinsics::_indexOfIU:                return inline_string_indexOfI(StrIntrinsicNode::UU);
  case vmIntrinsics::_indexOfIUL:               return inline_string_indexOfI(StrIntrinsicNode::UL);
  case vmIntrinsics::_indexOfU_char:            return inline_string_indexOfChar();

  case vmIntrinsics::_equalsL:                  return inline_string_equals(StrIntrinsicNode::LL);
  case vmIntrinsics::_equalsU:                  return inline_string_equals(StrIntrinsicNode::UU);

  case vmIntrinsics::_toBytesStringU:           return inline_string_toBytesU();
  case vmIntrinsics::_getCharsStringU:          return inline_string_getCharsU();
  case vmIntrinsics::_getCharStringU:           return inline_string_char_access(!is_store);
  case vmIntrinsics::_putCharStringU:           return inline_string_char_access( is_store);

  case vmIntrinsics::_compressStringC:
  case vmIntrinsics::_compressStringB:          return inline_string_copy( is_compress);
  case vmIntrinsics::_inflateStringC:
  case vmIntrinsics::_inflateStringB:           return inline_string_copy(!is_compress);

  case vmIntrinsics::_getObject:                return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT,  !is_volatile);
  case vmIntrinsics::_getBoolean:               return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN, !is_volatile);
  case vmIntrinsics::_getByte:                  return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE,    !is_volatile);
  case vmIntrinsics::_getShort:                 return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT,   !is_volatile);
  case vmIntrinsics::_getChar:                  return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR,    !is_volatile);
  case vmIntrinsics::_getInt:                   return inline_unsafe_access(!is_native_ptr, !is_store, T_INT,     !is_volatile);
  case vmIntrinsics::_getLong:                  return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG,    !is_volatile);
  case vmIntrinsics::_getFloat:                 return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT,   !is_volatile);
  case vmIntrinsics::_getDouble:                return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE,  !is_volatile);
  case vmIntrinsics::_putObject:                return inline_unsafe_access(!is_native_ptr,  is_store, T_OBJECT,  !is_volatile);
  case vmIntrinsics::_putBoolean:               return inline_unsafe_access(!is_native_ptr,  is_store, T_BOOLEAN, !is_volatile);
  case vmIntrinsics::_putByte:                  return inline_unsafe_access(!is_native_ptr,  is_store, T_BYTE,    !is_volatile);
  case vmIntrinsics::_putShort:                 return inline_unsafe_access(!is_native_ptr,  is_store, T_SHORT,   !is_volatile);
  case vmIntrinsics::_putChar:                  return inline_unsafe_access(!is_native_ptr,  is_store, T_CHAR,    !is_volatile);
  case vmIntrinsics::_putInt:                   return inline_unsafe_access(!is_native_ptr,  is_store, T_INT,     !is_volatile);
  case vmIntrinsics::_putLong:                  return inline_unsafe_access(!is_native_ptr,  is_store, T_LONG,    !is_volatile);
  case vmIntrinsics::_putFloat:                 return inline_unsafe_access(!is_native_ptr,  is_store, T_FLOAT,   !is_volatile);
  case vmIntrinsics::_putDouble:                return inline_unsafe_access(!is_native_ptr,  is_store, T_DOUBLE,  !is_volatile);

  case vmIntrinsics::_getByte_raw:              return inline_unsafe_access( is_native_ptr, !is_store, T_BYTE,    !is_volatile);
  case vmIntrinsics::_getShort_raw:             return inline_unsafe_access( is_native_ptr, !is_store, T_SHORT,   !is_volatile);
  case vmIntrinsics::_getChar_raw:              return inline_unsafe_access( is_native_ptr, !is_store, T_CHAR,    !is_volatile);
  case vmIntrinsics::_getInt_raw:               return inline_unsafe_access( is_native_ptr, !is_store, T_INT,     !is_volatile);
  case vmIntrinsics::_getLong_raw:              return inline_unsafe_access( is_native_ptr, !is_store, T_LONG,    !is_volatile);
  case vmIntrinsics::_getFloat_raw:             return inline_unsafe_access( is_native_ptr, !is_store, T_FLOAT,   !is_volatile);
  case vmIntrinsics::_getDouble_raw:            return inline_unsafe_access( is_native_ptr, !is_store, T_DOUBLE,  !is_volatile);
  case vmIntrinsics::_getAddress_raw:           return inline_unsafe_access( is_native_ptr, !is_store, T_ADDRESS, !is_volatile);

  case vmIntrinsics::_putByte_raw:              return inline_unsafe_access( is_native_ptr,  is_store, T_BYTE,    !is_volatile);
  case vmIntrinsics::_putShort_raw:             return inline_unsafe_access( is_native_ptr,  is_store, T_SHORT,   !is_volatile);
  case vmIntrinsics::_putChar_raw:              return inline_unsafe_access( is_native_ptr,  is_store, T_CHAR,    !is_volatile);
  case vmIntrinsics::_putInt_raw:               return inline_unsafe_access( is_native_ptr,  is_store, T_INT,     !is_volatile);
  case vmIntrinsics::_putLong_raw:              return inline_unsafe_access( is_native_ptr,  is_store, T_LONG,    !is_volatile);
  case vmIntrinsics::_putFloat_raw:             return inline_unsafe_access( is_native_ptr,  is_store, T_FLOAT,   !is_volatile);
  case vmIntrinsics::_putDouble_raw:            return inline_unsafe_access( is_native_ptr,  is_store, T_DOUBLE,  !is_volatile);
  case vmIntrinsics::_putAddress_raw:           return inline_unsafe_access( is_native_ptr,  is_store, T_ADDRESS, !is_volatile);

  case vmIntrinsics::_getObjectVolatile:        return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT,   is_volatile);
  case vmIntrinsics::_getBooleanVolatile:       return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN,  is_volatile);
  case vmIntrinsics::_getByteVolatile:          return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE,     is_volatile);
  case vmIntrinsics::_getShortVolatile:         return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT,    is_volatile);
  case vmIntrinsics::_getCharVolatile:          return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR,     is_volatile);
  case vmIntrinsics::_getIntVolatile:           return inline_unsafe_access(!is_native_ptr, !is_store, T_INT,      is_volatile);
  case vmIntrinsics::_getLongVolatile:          return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG,     is_volatile);
  case vmIntrinsics::_getFloatVolatile:         return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT,    is_volatile);
  case vmIntrinsics::_getDoubleVolatile:        return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE,   is_volatile);

  case vmIntrinsics::_putObjectVolatile:        return inline_unsafe_access(!is_native_ptr,  is_store, T_OBJECT,   is_volatile);
  case vmIntrinsics::_putBooleanVolatile:       return inline_unsafe_access(!is_native_ptr,  is_store, T_BOOLEAN,  is_volatile);
  case vmIntrinsics::_putByteVolatile:          return inline_unsafe_access(!is_native_ptr,  is_store, T_BYTE,     is_volatile);
  case vmIntrinsics::_putShortVolatile:         return inline_unsafe_access(!is_native_ptr,  is_store, T_SHORT,    is_volatile);
  case vmIntrinsics::_putCharVolatile:          return inline_unsafe_access(!is_native_ptr,  is_store, T_CHAR,     is_volatile);
  case vmIntrinsics::_putIntVolatile:           return inline_unsafe_access(!is_native_ptr,  is_store, T_INT,      is_volatile);
  case vmIntrinsics::_putLongVolatile:          return inline_unsafe_access(!is_native_ptr,  is_store, T_LONG,     is_volatile);
  case vmIntrinsics::_putFloatVolatile:         return inline_unsafe_access(!is_native_ptr,  is_store, T_FLOAT,    is_volatile);
  case vmIntrinsics::_putDoubleVolatile:        return inline_unsafe_access(!is_native_ptr,  is_store, T_DOUBLE,   is_volatile);

  case vmIntrinsics::_getShortUnaligned:        return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT,   !is_volatile);
  case vmIntrinsics::_getCharUnaligned:         return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR,    !is_volatile);
  case vmIntrinsics::_getIntUnaligned:          return inline_unsafe_access(!is_native_ptr, !is_store, T_INT,     !is_volatile);
  case vmIntrinsics::_getLongUnaligned:         return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG,    !is_volatile);

  case vmIntrinsics::_putShortUnaligned:        return inline_unsafe_access(!is_native_ptr,  is_store, T_SHORT,   !is_volatile);
  case vmIntrinsics::_putCharUnaligned:         return inline_unsafe_access(!is_native_ptr,  is_store, T_CHAR,    !is_volatile);
  case vmIntrinsics::_putIntUnaligned:          return inline_unsafe_access(!is_native_ptr,  is_store, T_INT,     !is_volatile);
  case vmIntrinsics::_putLongUnaligned:         return inline_unsafe_access(!is_native_ptr,  is_store, T_LONG,    !is_volatile);

  case vmIntrinsics::_compareAndSwapObject:     return inline_unsafe_load_store(T_OBJECT, LS_cmpxchg);
  case vmIntrinsics::_compareAndSwapInt:        return inline_unsafe_load_store(T_INT,    LS_cmpxchg);
  case vmIntrinsics::_compareAndSwapLong:       return inline_unsafe_load_store(T_LONG,   LS_cmpxchg);

  case vmIntrinsics::_putOrderedObject:         return inline_unsafe_ordered_store(T_OBJECT);
  case vmIntrinsics::_putOrderedInt:            return inline_unsafe_ordered_store(T_INT);
  case vmIntrinsics::_putOrderedLong:           return inline_unsafe_ordered_store(T_LONG);

  case vmIntrinsics::_getAndAddInt:             return inline_unsafe_load_store(T_INT,    LS_xadd);
  case vmIntrinsics::_getAndAddLong:            return inline_unsafe_load_store(T_LONG,   LS_xadd);
  case vmIntrinsics::_getAndSetInt:             return inline_unsafe_load_store(T_INT,    LS_xchg);
  case vmIntrinsics::_getAndSetLong:            return inline_unsafe_load_store(T_LONG,   LS_xchg);
  case vmIntrinsics::_getAndSetObject:          return inline_unsafe_load_store(T_OBJECT, LS_xchg);

  case vmIntrinsics::_loadFence:
  case vmIntrinsics::_storeFence:
  case vmIntrinsics::_fullFence:                return inline_unsafe_fence(intrinsic_id());

  case vmIntrinsics::_currentThread:            return inline_native_currentThread();
  case vmIntrinsics::_isInterrupted:            return inline_native_isInterrupted();

#ifdef TRACE_HAVE_INTRINSICS
  case vmIntrinsics::_classID:                  return inline_native_classID();
  case vmIntrinsics::_threadID:                 return inline_native_threadID();
  case vmIntrinsics::_counterTime:              return inline_native_time_funcs(CAST_FROM_FN_PTR(address, TRACE_TIME_METHOD), "counterTime");
#endif
  case vmIntrinsics::_currentTimeMillis:        return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeMillis), "currentTimeMillis");
  case vmIntrinsics::_nanoTime:                 return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeNanos), "nanoTime");
  case vmIntrinsics::_allocateInstance:         return inline_unsafe_allocate();
  case vmIntrinsics::_copyMemory:               return inline_unsafe_copyMemory();
  case vmIntrinsics::_newArray:                 return inline_native_newArray();
  case vmIntrinsics::_getLength:                return inline_native_getLength();
  case vmIntrinsics::_copyOf:                   return inline_array_copyOf(false);
  case vmIntrinsics::_copyOfRange:              return inline_array_copyOf(true);
  case vmIntrinsics::_equalsB:                  return inline_array_equals(StrIntrinsicNode::LL);
  case vmIntrinsics::_equalsC:                  return inline_array_equals(StrIntrinsicNode::UU);
  case vmIntrinsics::_clone:                    return inline_native_clone(intrinsic()->is_virtual());

  case vmIntrinsics::_isAssignableFrom:         return inline_native_subtype_check();

  case vmIntrinsics::_isInstance:
  case vmIntrinsics::_getModifiers:
  case vmIntrinsics::_isInterface:
  case vmIntrinsics::_isArray:
  case vmIntrinsics::_isPrimitive:
  case vmIntrinsics::_getSuperclass:
  case vmIntrinsics::_getClassAccessFlags:      return inline_native_Class_query(intrinsic_id());

  case vmIntrinsics::_floatToRawIntBits:
  case vmIntrinsics::_floatToIntBits:
  case vmIntrinsics::_intBitsToFloat:
  case vmIntrinsics::_doubleToRawLongBits:
  case vmIntrinsics::_doubleToLongBits:
  case vmIntrinsics::_longBitsToDouble:         return inline_fp_conversions(intrinsic_id());

  case vmIntrinsics::_numberOfLeadingZeros_i:
  case vmIntrinsics::_numberOfLeadingZeros_l:
  case vmIntrinsics::_numberOfTrailingZeros_i:
  case vmIntrinsics::_numberOfTrailingZeros_l:
  case vmIntrinsics::_bitCount_i:
  case vmIntrinsics::_bitCount_l:
  case vmIntrinsics::_reverseBytes_i:
  case vmIntrinsics::_reverseBytes_l:
  case vmIntrinsics::_reverseBytes_s:
  case vmIntrinsics::_reverseBytes_c:           return inline_number_methods(intrinsic_id());

  case vmIntrinsics::_getCallerClass:           return inline_native_Reflection_getCallerClass();

  case vmIntrinsics::_Reference_get:            return inline_reference_get();

  case vmIntrinsics::_Class_cast:               return inline_Class_cast();

  case vmIntrinsics::_aescrypt_encryptBlock:
  case vmIntrinsics::_aescrypt_decryptBlock:    return inline_aescrypt_Block(intrinsic_id());

  case vmIntrinsics::_cipherBlockChaining_encryptAESCrypt:
  case vmIntrinsics::_cipherBlockChaining_decryptAESCrypt:
    return inline_cipherBlockChaining_AESCrypt(intrinsic_id());

  case vmIntrinsics::_sha_implCompress:
  case vmIntrinsics::_sha2_implCompress:
  case vmIntrinsics::_sha5_implCompress:
    return inline_sha_implCompress(intrinsic_id());

  case vmIntrinsics::_digestBase_implCompressMB:
    return inline_digestBase_implCompressMB(predicate);

  case vmIntrinsics::_multiplyToLen:
    return inline_multiplyToLen();

  case vmIntrinsics::_squareToLen:
    return inline_squareToLen();

  case vmIntrinsics::_mulAdd:
    return inline_mulAdd();

  case vmIntrinsics::_montgomeryMultiply:
    return inline_montgomeryMultiply();
  case vmIntrinsics::_montgomerySquare:
    return inline_montgomerySquare();

  case vmIntrinsics::_ghash_processBlocks:
    return inline_ghash_processBlocks();

  case vmIntrinsics::_encodeISOArray:
  case vmIntrinsics::_encodeByteISOArray:
    return inline_encodeISOArray();

  case vmIntrinsics::_updateCRC32:
    return inline_updateCRC32();
  case vmIntrinsics::_updateBytesCRC32:
    return inline_updateBytesCRC32();
  case vmIntrinsics::_updateByteBufferCRC32:
    return inline_updateByteBufferCRC32();

  case vmIntrinsics::_updateBytesCRC32C:
    return inline_updateBytesCRC32C();
  case vmIntrinsics::_updateDirectByteBufferCRC32C:
    return inline_updateDirectByteBufferCRC32C();

  case vmIntrinsics::_updateBytesAdler32:
    return inline_updateBytesAdler32();
  case vmIntrinsics::_updateByteBufferAdler32:
    return inline_updateByteBufferAdler32();

  case vmIntrinsics::_profileBoolean:
    return inline_profileBoolean();
  case vmIntrinsics::_isCompileConstant:
    return inline_isCompileConstant();

  case vmIntrinsics::_hasNegatives:
    return inline_hasNegatives();

  default:
    // If you get here, it may be that someone has added a new intrinsic
    // to the list in vmSymbols.hpp without implementing it here.
#ifndef PRODUCT
    if ((PrintMiscellaneous && (Verbose || WizardMode)) || PrintOpto) {
      tty->print_cr("*** Warning: Unimplemented intrinsic %s(%d)",
                    vmIntrinsics::name_at(intrinsic_id()), intrinsic_id());
    }
#endif
    return false;
  }
}

Node* LibraryCallKit::try_to_predicate(int predicate) {
  if (!jvms()->has_method()) {
    // Root JVMState has a null method.
    assert(map()->memory()->Opcode() == Op_Parm, "");
    // Insert the memory aliasing node
    set_all_memory(reset_memory());
  }
  assert(merged_memory(), "");

  switch (intrinsic_id()) {
  case vmIntrinsics::_cipherBlockChaining_encryptAESCrypt:
    return inline_cipherBlockChaining_AESCrypt_predicate(false);
  case vmIntrinsics::_cipherBlockChaining_decryptAESCrypt:
    return inline_cipherBlockChaining_AESCrypt_predicate(true);
  case vmIntrinsics::_digestBase_implCompressMB:
    return inline_digestBase_implCompressMB_predicate(predicate);

  default:
    // If you get here, it may be that someone has added a new intrinsic
    // to the list in vmSymbols.hpp without implementing it here.
#ifndef PRODUCT
    if ((PrintMiscellaneous && (Verbose || WizardMode)) || PrintOpto) {
      tty->print_cr("*** Warning: Unimplemented predicate for intrinsic %s(%d)",
                    vmIntrinsics::name_at(intrinsic_id()), intrinsic_id());
    }
#endif
    Node* slow_ctl = control();
    set_control(top()); // No fast-path intrinsic
    return slow_ctl;
  }
}

//------------------------------set_result-------------------------------
// Helper function for finishing intrinsics.
void LibraryCallKit::set_result(RegionNode* region, PhiNode* value) {
  record_for_igvn(region);
  set_control(_gvn.transform(region));
  set_result( _gvn.transform(value));
  assert(value->type()->basic_type() == result()->bottom_type()->basic_type(), "sanity");
}

//------------------------------generate_guard---------------------------
// Helper function for generating guarded fast-slow graph structures.
// The given 'test', if true, guards a slow path.  If the test fails
// then a fast path can be taken.  (We generally hope it fails.)
// In all cases, GraphKit::control() is updated to the fast path.
// The returned value represents the control for the slow path.
// The return value is never 'top'; it is either a valid control
// or NULL if it is obvious that the slow path can never be taken.
// Also, if region and the slow control are not NULL, the slow edge
// is appended to the region.
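//
// Illustrative shape of the emitted graph:
//
//            control()
//               |
//       If(test, true_prob)
//        /               \
//    IfTrue               IfFalse
//    (slow path;          (fast path; becomes
//     returned, and        the new control())
//     appended to
//     'region' if given)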
Node* LibraryCallKit::generate_guard(Node* test, RegionNode* region, float true_prob) {
  if (stopped()) {
    // Already short circuited.
    return NULL;
  }

  // Build an if node and its projections.
  // If test is true we take the slow path, which we assume is uncommon.
  if (_gvn.type(test) == TypeInt::ZERO) {
    // The slow branch is never taken.  No need to build this guard.
    return NULL;
  }

  IfNode* iff = create_and_map_if(control(), test, true_prob, COUNT_UNKNOWN);

  Node* if_slow = _gvn.transform(new IfTrueNode(iff));
  if (if_slow == top()) {
    // The slow branch is never taken.  No need to build this guard.
    return NULL;
  }

  if (region != NULL)
    region->add_req(if_slow);

  Node* if_fast = _gvn.transform(new IfFalseNode(iff));
  set_control(if_fast);

  return if_slow;
}

inline Node* LibraryCallKit::generate_slow_guard(Node* test, RegionNode* region) {
  return generate_guard(test, region, PROB_UNLIKELY_MAG(3));
}
inline Node* LibraryCallKit::generate_fair_guard(Node* test, RegionNode* region) {
  return generate_guard(test, region, PROB_FAIR);
}

inline Node* LibraryCallKit::generate_negative_guard(Node* index, RegionNode* region,
                                                     Node* *pos_index) {
  if (stopped())
    return NULL;                // already stopped
  if (_gvn.type(index)->higher_equal(TypeInt::POS)) // [0,maxint]
    return NULL;                // index is already adequately typed
  Node* cmp_lt = _gvn.transform(new CmpINode(index, intcon(0)));
  Node* bol_lt = _gvn.transform(new BoolNode(cmp_lt, BoolTest::lt));
  Node* is_neg = generate_guard(bol_lt, region, PROB_MIN);
  if (is_neg != NULL && pos_index != NULL) {
    // Emulate effect of Parse::adjust_map_after_if.
    Node* ccast = new CastIINode(index, TypeInt::POS);
    ccast->set_req(0, control());
    (*pos_index) = _gvn.transform(ccast);
  }
  return is_neg;
}
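
// The CastII emitted above pins the sharpened [0,maxint] type of 'index' on
// the fast path, much as Parse::adjust_map_after_if would record it after an
// explicit 'if (index < 0)' test in bytecode.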

// Make sure that 'offset + subseq_length' is a valid limit index, in
// [0..array_length].  There are two equivalent plans for checking this:
//   A. (offset + copyLength)  unsigned<=  arrayLength
//   B. offset  <=  (arrayLength - copyLength)
// We require that all of the values above, except for the sum and
// difference, are already known to be non-negative.
// Plan A is robust in the face of overflow, if offset and copyLength
// are both hugely positive.
//
// Plan B is less direct and intuitive, but it does not overflow at
// all, since the difference of two non-negatives is always
// representable.  Whenever Java methods must perform the equivalent
// check they generally use Plan B instead of Plan A.
// For the moment we use Plan A.
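//
// Worked example of Plan A's overflow robustness: with offset = 0x7fffffff
// and copyLength = 2, the 32-bit sum wraps to 0x80000001.  Interpreted as
// unsigned, that value is larger than any valid arrayLength, so the
// unsigned compare still classifies the request as out of bounds.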
inline Node* LibraryCallKit::generate_limit_guard(Node* offset,
                                                  Node* subseq_length,
                                                  Node* array_length,
                                                  RegionNode* region) {
  if (stopped())
    return NULL;                // already stopped
  bool zero_offset = _gvn.type(offset) == TypeInt::ZERO;
  if (zero_offset && subseq_length->eqv_uncast(array_length))
    return NULL;                // common case of whole-array copy
  Node* last = subseq_length;
  if (!zero_offset)             // last += offset
    last = _gvn.transform(new AddINode(last, offset));
  Node* cmp_lt = _gvn.transform(new CmpUNode(array_length, last));
  Node* bol_lt = _gvn.transform(new BoolNode(cmp_lt, BoolTest::lt));
  Node* is_over = generate_guard(bol_lt, region, PROB_MIN);
  return is_over;
}


//--------------------------generate_current_thread--------------------
Node* LibraryCallKit::generate_current_thread(Node* &tls_output) {
  ciKlass*    thread_klass = env()->Thread_klass();
  const Type* thread_type  = TypeOopPtr::make_from_klass(thread_klass)->cast_to_ptr_type(TypePtr::NotNull);
  Node* thread = _gvn.transform(new ThreadLocalNode());
  Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::threadObj_offset()));
  Node* threadObj = make_load(NULL, p, thread_type, T_OBJECT, MemNode::unordered);
  tls_output = thread;
  return threadObj;
}


//------------------------------make_string_method_node------------------------
// Helper method for String intrinsic functions. This version is called with
// str1 and str2 pointing to byte[] nodes containing Latin1 or UTF16 encoded
// characters (depending on the argument encoding 'ae'). cnt1 and cnt2 point
// to Int nodes containing the lengths of str1 and str2.
Node* LibraryCallKit::make_string_method_node(int opcode, Node* str1_start, Node* cnt1, Node* str2_start, Node* cnt2, StrIntrinsicNode::ArgEnc ae) {
  Node* result = NULL;
  switch (opcode) {
  case Op_StrIndexOf:
    result = new StrIndexOfNode(control(), memory(TypeAryPtr::BYTES),
                                str1_start, cnt1, str2_start, cnt2, ae);
    break;
  case Op_StrComp:
    result = new StrCompNode(control(), memory(TypeAryPtr::BYTES),
                             str1_start, cnt1, str2_start, cnt2, ae);
    break;
  case Op_StrEquals:
    result = new StrEqualsNode(control(), memory(TypeAryPtr::BYTES),
                               str1_start, str2_start, cnt1, ae);
    break;
  default:
    ShouldNotReachHere();
    return NULL;
  }

  // All these intrinsics have checks.
  C->set_has_split_ifs(true); // Has chance for split-if optimization

  return _gvn.transform(result);
}

//------------------------------inline_string_compareTo------------------------
bool LibraryCallKit::inline_string_compareTo(StrIntrinsicNode::ArgEnc ae) {
  Node* arg1 = argument(0);
  Node* arg2 = argument(1);

  // Get start addr and length of first argument
  Node* arg1_start  = array_element_address(arg1, intcon(0), T_BYTE);
  Node* arg1_cnt    = load_array_length(arg1);

  // Get start addr and length of second argument
  Node* arg2_start  = array_element_address(arg2, intcon(0), T_BYTE);
  Node* arg2_cnt    = load_array_length(arg2);

  Node* result = make_string_method_node(Op_StrComp, arg1_start, arg1_cnt, arg2_start, arg2_cnt, ae);
  set_result(result);
  return true;
}

//------------------------------inline_string_equals------------------------
bool LibraryCallKit::inline_string_equals(StrIntrinsicNode::ArgEnc ae) {
  Node* arg1 = argument(0);
  Node* arg2 = argument(1);

  // paths (plus control) merge
  RegionNode* region = new RegionNode(3);
  Node* phi = new PhiNode(region, TypeInt::BOOL);
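
  // Region/Phi slot convention used below: slot 1 is the fall-through path
  // (lengths equal, StrEquals decides) and slot 2 is the length-mismatch
  // path, which contributes the constant 'false' (intcon(0)) to the phi.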

  if (!stopped()) {
    // Get start addr and length of first argument
    Node* arg1_start  = array_element_address(arg1, intcon(0), T_BYTE);
    Node* arg1_cnt    = load_array_length(arg1);

    // Get start addr and length of second argument
    Node* arg2_start  = array_element_address(arg2, intcon(0), T_BYTE);
    Node* arg2_cnt    = load_array_length(arg2);

    // Check for arg1_cnt != arg2_cnt
    Node* cmp = _gvn.transform(new CmpINode(arg1_cnt, arg2_cnt));
    Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
    Node* if_ne = generate_slow_guard(bol, NULL);
    if (if_ne != NULL) {
      phi->init_req(2, intcon(0));
      region->init_req(2, if_ne);
    }

    // The count == 0 check is done by the assembler code for StrEquals.

    if (!stopped()) {
      Node* equals = make_string_method_node(Op_StrEquals, arg1_start, arg1_cnt, arg2_start, arg2_cnt, ae);
      phi->init_req(1, equals);
      region->init_req(1, control());
    }
  }

  // post merge
  set_control(_gvn.transform(region));
  record_for_igvn(region);

  set_result(_gvn.transform(phi));
  return true;
}

//------------------------------inline_array_equals----------------------------
bool LibraryCallKit::inline_array_equals(StrIntrinsicNode::ArgEnc ae) {
  assert(ae == StrIntrinsicNode::UU || ae == StrIntrinsicNode::LL, "unsupported array types");
  Node* arg1 = argument(0);
  Node* arg2 = argument(1);

  const TypeAryPtr* mtype = (ae == StrIntrinsicNode::UU) ? TypeAryPtr::CHARS : TypeAryPtr::BYTES;
  set_result(_gvn.transform(new AryEqNode(control(), memory(mtype), arg1, arg2, ae)));
  return true;
}

//------------------------------inline_hasNegatives------------------------------
bool LibraryCallKit::inline_hasNegatives() {
  if (too_many_traps(Deoptimization::Reason_intrinsic))  return false;

  assert(callee()->signature()->size() == 3, "hasNegatives has 3 parameters");
  // no receiver since it is a static method
  Node* ba         = argument(0);
  Node* offset     = argument(1);
  Node* len        = argument(2);

  RegionNode* bailout = new RegionNode(1);
  record_for_igvn(bailout);
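
  // 'bailout' starts with only its self-edge (req() == 1); each guard that
  // actually materializes appends its slow-path control via add_req(), so
  // 'bailout->req() > 1' below means at least one check can fail at runtime.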
1027 
1028   // offset must not be negative.
1029   generate_negative_guard(offset, bailout);
1030 
1031   // offset + length must not exceed length of ba.
1032   generate_limit_guard(offset, len, load_array_length(ba), bailout);
1033 
1034   if (bailout->req() > 1) {
1035     PreserveJVMState pjvms(this);
1036     set_control(_gvn.transform(bailout));
1037     uncommon_trap(Deoptimization::Reason_intrinsic,
1038                   Deoptimization::Action_maybe_recompile);
1039   }
1040   if (!stopped()) {
1041     Node* ba_start = array_element_address(ba, offset, T_BYTE);
1042     Node* result = new HasNegativesNode(control(), memory(TypeAryPtr::BYTES), ba_start, len);
1043     set_result(_gvn.transform(result));
1044   }
1045   return true;
1046 }
1047 
1048 //------------------------------inline_string_indexOf------------------------
1049 bool LibraryCallKit::inline_string_indexOf(StrIntrinsicNode::ArgEnc ae) {
1050   if (!Matcher::has_match_rule(Op_StrIndexOf) || !UseSSE42Intrinsics) {
1051     return false;
1052   }
1053   Node* src = argument(0);
1054   Node* tgt = argument(1);
1055 
1056   // Make the merge point
1057   RegionNode* result_rgn = new RegionNode(4);
1058   Node*       result_phi = new PhiNode(result_rgn, TypeInt::INT);
1059 
1060   // Get start addr and length of source string
1061   Node* src_start = array_element_address(src, intcon(0), T_BYTE);
1062   Node* src_count = load_array_length(src);
1063 
1064   // Get start addr and length of substring
1065   Node* tgt_start = array_element_address(tgt, intcon(0), T_BYTE);
1066   Node* tgt_count = load_array_length(tgt);
1067 
1068   if (ae == StrIntrinsicNode::UU || ae == StrIntrinsicNode::UL) {
1069     // Divide src size by 2 if String is UTF16 encoded
1070     src_count = _gvn.transform(new RShiftINode(src_count, intcon(1)));
1071   }
1072   if (ae == StrIntrinsicNode::UU) {
1073     // Divide substring size by 2 if String is UTF16 encoded
1074     tgt_count = _gvn.transform(new RShiftINode(tgt_count, intcon(1)));
1075   }
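  // Illustrative example: a 3-char UTF-16 string is backed by a 6-byte array,
  // so load_array_length() above returns 6 and the shift yields the character
  // count 3; Latin-1 (LL) counts are already in elements and are not adjusted.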
1076 
1077   // Check for substr count > string count
1078   Node* cmp = _gvn.transform(new CmpINode(tgt_count, src_count));
1079   Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::gt));
1080   Node* if_gt = generate_slow_guard(bol, NULL);
1081   if (if_gt != NULL) {
1082     result_phi->init_req(2, intcon(-1));
1083     result_rgn->init_req(2, if_gt);
1084   }
1085 
1086   if (!stopped()) {
1087     // Check for substr count == 0
1088     cmp = _gvn.transform(new CmpINode(tgt_count, intcon(0)));
1089     bol = _gvn.transform(new BoolNode(cmp, BoolTest::eq));
1090     Node* if_zero = generate_slow_guard(bol, NULL);
1091     if (if_zero != NULL) {
1092       result_phi->init_req(3, intcon(0));
1093       result_rgn->init_req(3, if_zero);
1094     }
1095   }
1096 
1097   if (!stopped()) {
1098     Node* result = make_string_method_node(Op_StrIndexOf, src_start, src_count, tgt_start, tgt_count, ae);
1099     result_phi->init_req(1, result);
1100     result_rgn->init_req(1, control());
1101   }
1102   set_control(_gvn.transform(result_rgn));
1103   record_for_igvn(result_rgn);
1104   set_result(_gvn.transform(result_phi));
1105 
1106   return true;
1107 }
1108 
1109 //-----------------------------inline_string_indexOfI----------------------
1110 bool LibraryCallKit::inline_string_indexOfI(StrIntrinsicNode::ArgEnc ae) {
1111   if (!Matcher::has_match_rule(Op_StrIndexOf) || !UseSSE42Intrinsics) {
1112     return false;
1113   }
1114   assert(callee()->signature()->size() == 5, "String.indexOf() has 5 arguments");
1115   Node* src         = argument(0); // byte[]
1116   Node* src_count   = argument(1);
1117   Node* tgt         = argument(2); // byte[]
1118   Node* tgt_count   = argument(3);
1119   Node* from_index  = argument(4);
1120 
1121   // Java code which calls this method has range checks for the from_index value.
1122   src_count = _gvn.transform(new SubINode(src_count, from_index));
1123 
1124   // Multiply byte array index by 2 if String is UTF16 encoded
1125   Node* src_offset = (ae == StrIntrinsicNode::LL) ? from_index : _gvn.transform(new LShiftINode(from_index, intcon(1)));
1126   Node* src_start = array_element_address(src, src_offset, T_BYTE);
1127   Node* tgt_start = array_element_address(tgt, intcon(0), T_BYTE);
1128 
1129   Node* result = make_string_method_node(Op_StrIndexOf, src_start, src_count, tgt_start, tgt_count, ae);
1130 
1131   // The result is an index relative to from_index if the substring was found, -1 otherwise.
1132   // Generate code which will fold into cmove.
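  // Illustrative example: "abcabc".indexOf("bc", 1) hands the intrinsic the
  // suffix starting at char index 1, gets back the relative index 0, and the
  // AddI below re-bases that to the expected absolute answer 1.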
1133   RegionNode* region = new RegionNode(3);
1134   Node* phi = new PhiNode(region, TypeInt::INT);
1135 
1136   Node* cmp = _gvn.transform(new CmpINode(result, intcon(0)));
1137   Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::lt));
1138 
1139   Node* if_lt = generate_slow_guard(bol, NULL);
1140   if (if_lt != NULL) {
1141     // result == -1
1142     phi->init_req(2, result);
1143     region->init_req(2, if_lt);
1144   }
1145   if (!stopped()) {
1146     result = _gvn.transform(new AddINode(result, from_index));
1147     phi->init_req(1, result);
1148     region->init_req(1, control());
1149   }
1150 
1151   set_control(_gvn.transform(region));
1152   record_for_igvn(region);
1153   set_result(_gvn.transform(phi));
1154 
1155   return true;
1156 }
1157 
1158 //-----------------------------inline_string_indexOfChar-----------------------
1159 bool LibraryCallKit::inline_string_indexOfChar() {
1160   if (!Matcher::has_match_rule(Op_StrIndexOfChar) || !(UseSSE > 4)) {
1161     return false;
1162   }
1163   assert(callee()->signature()->size() == 4, "String.indexOfChar() has 4 arguments");
1164   Node* src         = argument(0); // byte[]
1165   Node* tgt         = argument(1); // tgt is int ch
1166   Node* from_index  = argument(2);
1167   Node* max         = argument(3);
1168 
1169   Node* src_offset = _gvn.transform(new LShiftINode(from_index, intcon(1)));
1170   Node* src_start = array_element_address(src, src_offset, T_BYTE);
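  // Illustrative: from_index is a char index into the UTF-16 byte array, so it
  // is doubled above; e.g. from_index == 3 addresses the byte at offset 6.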
1171 
1172   Node* src_count = _gvn.transform(new SubINode(max, from_index));
1173 
1174   RegionNode* region = new RegionNode(3);
1175   Node* phi = new PhiNode(region, TypeInt::INT);
1176 
1177   Node* result = new StrIndexOfCharNode(control(), memory(TypeAryPtr::BYTES), src_start, src_count, tgt, StrIntrinsicNode::none);
1178   C->set_has_split_ifs(true); // Has chance for split-if optimization
1179   _gvn.transform(result);
1180 
1181   Node* cmp = _gvn.transform(new CmpINode(result, intcon(0)));
1182   Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::lt));
1183 
1184   Node* if_lt = generate_slow_guard(bol, NULL);
1185   if (if_lt != NULL) {
1186     // result == -1
1187     phi->init_req(2, result);
1188     region->init_req(2, if_lt);
1189   }
1190   if (!stopped()) {
1191     result = _gvn.transform(new AddINode(result, from_index));
1192     phi->init_req(1, result);
1193     region->init_req(1, control());
1194   }
1195   set_control(_gvn.transform(region));
1196   record_for_igvn(region);
1197   set_result(_gvn.transform(phi));
1198 
1199   return true;
1200 }
1201 //---------------------------inline_string_copy---------------------
1202 // compressIt == true --> generate a compressed copy operation (compress char[]/byte[] to byte[])
1203 //   int StringUTF16.compress(char[] src, int srcOff, byte[] dst, int dstOff, int len)
1204 //   int StringUTF16.compress(byte[] src, int srcOff, byte[] dst, int dstOff, int len)
1205 // compressIt == false --> generate an inflated copy operation (inflate byte[] to char[]/byte[])
1206 //   void StringLatin1.inflate(byte[] src, int srcOff, char[] dst, int dstOff, int len)
1207 //   void StringLatin1.inflate(byte[] src, int srcOff, byte[] dst, int dstOff, int len)
1208 bool LibraryCallKit::inline_string_copy(bool compress) {
1209   int nargs = 5;  // 2 oops, 3 ints
1210   assert(callee()->signature()->size() == nargs, "string copy has 5 arguments");
1211 
1212   Node* src         = argument(0);
1213   Node* src_offset  = argument(1);
1214   Node* dst         = argument(2);
1215   Node* dst_offset  = argument(3);
1216   Node* length      = argument(4);
1217 
1218   // Check for allocation before we add nodes that would confuse
1219   // tightly_coupled_allocation()
1220   AllocateArrayNode* alloc = tightly_coupled_allocation(dst, NULL);
1221 
1222   // Figure out the size and type of the elements we will be copying.
1223   const Type* src_type = src->Value(&_gvn);
1224   const Type* dst_type = dst->Value(&_gvn);
1225   BasicType src_elem = src_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
1226   BasicType dst_elem = dst_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
1227   assert((compress && dst_elem == T_BYTE && (src_elem == T_BYTE || src_elem == T_CHAR)) ||
1228          (!compress && src_elem == T_BYTE && (dst_elem == T_BYTE || dst_elem == T_CHAR)),
1229          "Unsupported array types for inline_string_copy");
1230 
1231   // Convert char[] offsets to byte[] offsets
1232   if (compress && src_elem == T_BYTE) {
1233     src_offset = _gvn.transform(new LShiftINode(src_offset, intcon(1)));
1234   } else if (!compress && dst_elem == T_BYTE) {
1235     dst_offset = _gvn.transform(new LShiftINode(dst_offset, intcon(1)));
1236   }
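  // Illustrative example: compressing from a UTF-16 byte[] with srcOff == 3
  // chars scales the offset to 6 bytes above, while the Latin-1 byte[] side
  // is already byte-indexed and needs no scaling.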
1237 
1238   Node* src_start = array_element_address(src, src_offset, src_elem);
1239   Node* dst_start = array_element_address(dst, dst_offset, dst_elem);
1240   // 'src_start' points to src array + scaled offset
1241   // 'dst_start' points to dst array + scaled offset
1242   Node* count = NULL;
1243   if (compress) {
1244     count = compress_string(src_start, dst_start, length);
1245   } else {
1246     inflate_string(src_start, dst_start, length);
1247   }
1248 
1249   if (alloc != NULL) {
1250     if (alloc->maybe_set_complete(&_gvn)) {
1251       // "You break it, you buy it."
1252       InitializeNode* init = alloc->initialization();
1253       assert(init->is_complete(), "we just did this");
1254       init->set_complete_with_arraycopy();
1255       assert(dst->is_CheckCastPP(), "sanity");
1256       assert(dst->in(0)->in(0) == init, "dest pinned");
1257     }
1258     // Do not let stores that initialize this object be reordered with
1259     // a subsequent store that would make this object accessible by
1260     // other threads.
1261     // Record what AllocateNode this StoreStore protects so that
1262     // escape analysis can go from the MemBarStoreStoreNode to the
1263     // AllocateNode and eliminate the MemBarStoreStoreNode if possible
1264     // based on the escape status of the AllocateNode.
1265     insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out(AllocateNode::RawAddress));
1266   }
1267   if (compress) {
1268     set_result(_gvn.transform(count));
1269   }
1270   return true;
1271 }
1272 
1273 #ifdef _LP64
1274 #define XTOP ,top() /*additional argument*/
1275 #else  //_LP64
1276 #define XTOP        /*no additional argument*/
1277 #endif //_LP64
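
// A long occupies two JVM stack slots, so on 64-bit platforms the widened
// value passed to a runtime call needs a second, dummy half. The XTOP macro
// above supplies top() for that slot, e.g. in ConvI2X(length) XTOP below;
// on 32-bit platforms it expands to nothing. (Illustrative summary.)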
1278 
1279 //------------------------inline_string_toBytesU--------------------------
1280 // public static byte[] StringUTF16.toBytes(char[] value, int off, int len)
1281 bool LibraryCallKit::inline_string_toBytesU() {
1282   // Get the arguments.
1283   Node* value     = argument(0);
1284   Node* offset    = argument(1);
1285   Node* length    = argument(2);
1286 
1287   Node* newcopy = NULL;
1288 
1289   // Set the original stack and the reexecute bit for the interpreter to reexecute
1290   // the bytecode that invokes StringUTF16.toBytes() if deoptimization happens.
1291   { PreserveReexecuteState preexecs(this);
1292     jvms()->set_should_reexecute(true);
1293 
1294     // Check if a null path was taken unconditionally.
1295     value = null_check(value);
1296 
1297     RegionNode* bailout = new RegionNode(1);
1298     record_for_igvn(bailout);
1299 
1300     // Make sure that resulting byte[] length does not overflow Integer.MAX_VALUE
1301     generate_negative_guard(length, bailout);
1302     generate_limit_guard(length, intcon(0), intcon(max_jint/2), bailout);
1303 
1304     if (bailout->req() > 1) {
1305       PreserveJVMState pjvms(this);
1306       set_control(_gvn.transform(bailout));
1307       uncommon_trap(Deoptimization::Reason_intrinsic,
1308                     Deoptimization::Action_maybe_recompile);
1309     }
1310     if (stopped()) return true;
1311 
1312     // Range checks are done by caller.
1313 
1314     Node* size = _gvn.transform(new LShiftINode(length, intcon(1)));
1315     Node* klass_node = makecon(TypeKlassPtr::make(ciTypeArrayKlass::make(T_BYTE)));
1316     newcopy = new_array(klass_node, size, 0);  // no arguments to push
1317     AllocateArrayNode* alloc = tightly_coupled_allocation(newcopy, NULL);
1318 
1319     // Calculate starting addresses.
1320     Node* src_start = array_element_address(value, offset, T_CHAR);
1321     Node* dst_start = basic_plus_adr(newcopy, arrayOopDesc::base_offset_in_bytes(T_BYTE));
1322 
1323     // Check if src array address is aligned to HeapWordSize (dst is always aligned)
1324     const TypeInt* toffset = gvn().type(offset)->is_int();
1325     bool aligned = toffset->is_con() && ((toffset->get_con() * type2aelembytes(T_CHAR)) % HeapWordSize == 0);
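    // Illustrative example: a constant offset of 4 chars gives 4 * 2 == 8
    // bytes, which is HeapWordSize-aligned on 64-bit, so an aligned (faster)
    // copy stub can be selected; non-constant offsets are treated as unaligned.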
1326 
1327     // Figure out which arraycopy runtime method to call (disjoint, uninitialized).
1328     const char* copyfunc_name = "arraycopy";
1329     address     copyfunc_addr = StubRoutines::select_arraycopy_function(T_CHAR, aligned, true, copyfunc_name, true);
1330     Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
1331                       OptoRuntime::fast_arraycopy_Type(),
1332                       copyfunc_addr, copyfunc_name, TypeRawPtr::BOTTOM,
1333                       src_start, dst_start, ConvI2X(length) XTOP);
1334     // Do not let reads from the cloned object float above the arraycopy.
1335     if (alloc != NULL) {
1336       if (alloc->maybe_set_complete(&_gvn)) {
1337         // "You break it, you buy it."
1338         InitializeNode* init = alloc->initialization();
1339         assert(init->is_complete(), "we just did this");
1340         init->set_complete_with_arraycopy();
1341         assert(newcopy->is_CheckCastPP(), "sanity");
1342         assert(newcopy->in(0)->in(0) == init, "dest pinned");
1343       }
1344       // Do not let stores that initialize this object be reordered with
1345       // a subsequent store that would make this object accessible by
1346       // other threads.
1347       // Record what AllocateNode this StoreStore protects so that
1348       // escape analysis can go from the MemBarStoreStoreNode to the
1349       // AllocateNode and eliminate the MemBarStoreStoreNode if possible
1350       // based on the escape status of the AllocateNode.
1351       insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out(AllocateNode::RawAddress));
1352     } else {
1353       insert_mem_bar(Op_MemBarCPUOrder);
1354     }
1355   } // original reexecute is set back here
1356 
1357   C->set_has_split_ifs(true); // Has chance for split-if optimization
1358   if (!stopped()) {
1359     set_result(newcopy);
1360   }
1361   return true;
1362 }
1363 
1364 //------------------------inline_string_getCharsU--------------------------
1365 // public void StringUTF16.getChars(byte[] value, int srcBegin, int srcEnd, char dst[], int dstBegin)
1366 bool LibraryCallKit::inline_string_getCharsU() {
1367   if (too_many_traps(Deoptimization::Reason_intrinsic))  return false;
1368 
1369   // Get the arguments.
1370   Node* value     = argument(0);
1371   Node* src_begin = argument(1);
1372   Node* src_end   = argument(2); // exclusive offset (i < src_end)
1373   Node* dst       = argument(3);
1374   Node* dst_begin = argument(4);
1375 
1376   // Check for allocation before we add nodes that would confuse
1377   // tightly_coupled_allocation()
1378   AllocateArrayNode* alloc = tightly_coupled_allocation(dst, NULL);
1379 
1380   // Check if a null path was taken unconditionally.
1381   value = null_check(value);
1382   dst = null_check(dst);
1383   if (stopped()) {
1384     return true;
1385   }
1386 
1387   // Range checks are done by caller.
1388 
1389   // Get length and convert char[] offset to byte[] offset
1390   Node* length = _gvn.transform(new SubINode(src_end, src_begin));
1391   src_begin = _gvn.transform(new LShiftINode(src_begin, intcon(1)));
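  // Illustrative example: getChars(value, 2, 5, dst, 0) copies 5 - 2 == 3
  // chars, and src_begin == 2 chars was just scaled to byte offset 4.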
1392 
1393   if (!stopped()) {
1394     // Calculate starting addresses.
1395     Node* src_start = array_element_address(value, src_begin, T_BYTE);
1396     Node* dst_start = array_element_address(dst, dst_begin, T_CHAR);
1397 
1398     // Check if array addresses are aligned to HeapWordSize
1399     const TypeInt* tsrc = gvn().type(src_begin)->is_int();
1400     const TypeInt* tdst = gvn().type(dst_begin)->is_int();
1401     bool aligned = tsrc->is_con() && ((tsrc->get_con() * type2aelembytes(T_BYTE)) % HeapWordSize == 0) &&
1402                    tdst->is_con() && ((tdst->get_con() * type2aelembytes(T_CHAR)) % HeapWordSize == 0);
1403 
1404     // Figure out which arraycopy runtime method to call (disjoint, uninitialized).
1405     const char* copyfunc_name = "arraycopy";
1406     address     copyfunc_addr = StubRoutines::select_arraycopy_function(T_CHAR, aligned, true, copyfunc_name, true);
1407     Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
1408                       OptoRuntime::fast_arraycopy_Type(),
1409                       copyfunc_addr, copyfunc_name, TypeRawPtr::BOTTOM,
1410                       src_start, dst_start, ConvI2X(length) XTOP);
1411     // Do not let reads from the cloned object float above the arraycopy.
1412     if (alloc != NULL) {
1413       if (alloc->maybe_set_complete(&_gvn)) {
1414         // "You break it, you buy it."
1415         InitializeNode* init = alloc->initialization();
1416         assert(init->is_complete(), "we just did this");
1417         init->set_complete_with_arraycopy();
1418         assert(dst->is_CheckCastPP(), "sanity");
1419         assert(dst->in(0)->in(0) == init, "dest pinned");
1420       }
1421       // Do not let stores that initialize this object be reordered with
1422       // a subsequent store that would make this object accessible by
1423       // other threads.
1424       // Record what AllocateNode this StoreStore protects so that
1425       // escape analysis can go from the MemBarStoreStoreNode to the
1426       // AllocateNode and eliminate the MemBarStoreStoreNode if possible
1427       // based on the escape status of the AllocateNode.
1428       insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out(AllocateNode::RawAddress));
1429     } else {
1430       insert_mem_bar(Op_MemBarCPUOrder);
1431     }
1432   }
1433 
1434   C->set_has_split_ifs(true); // Has chance for split-if optimization
1435   return true;
1436 }
1437 
1438 //----------------------inline_string_char_access----------------------------
1439 // Store/Load char to/from byte[] array.
1440 // static void StringUTF16.putChar(byte[] val, int index, int c)
1441 // static char StringUTF16.getChar(byte[] val, int index)
1442 bool LibraryCallKit::inline_string_char_access(bool is_store) {
1443   Node* value  = argument(0);
1444   Node* index  = argument(1);
1445   Node* ch = is_store ? argument(2) : NULL;
1446 
1447   // This intrinsic accesses a byte[] array as a char[] array. Computing the offsets
1448   // correctly requires matched array shapes.
1449   assert (arrayOopDesc::base_offset_in_bytes(T_CHAR) == arrayOopDesc::base_offset_in_bytes(T_BYTE),
1450           "sanity: byte[] and char[] bases agree");
1451   assert (type2aelembytes(T_CHAR) == type2aelembytes(T_BYTE)*2,
1452           "sanity: byte[] and char[] scales agree");
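  // Given matching shapes, the address computed below is effectively
  // value + array_base + 2 * index, i.e. the same bytes a real char[] of the
  // same logical length would use for element 'index'. (Illustrative note.)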
1453 
1454   Node* adr = array_element_address(value, index, T_CHAR);
1455   if (is_store) {
1456     (void) store_to_memory(control(), adr, ch, T_CHAR, TypeAryPtr::BYTES, MemNode::unordered);
1457   } else {
1458     ch = make_load(control(), adr, TypeInt::CHAR, T_CHAR, MemNode::unordered);
1459     set_result(ch);
1460   }
1461   return true;
1462 }
1463 
1464 //--------------------------round_double_node--------------------------------
1465 // Round a double node if necessary.
1466 Node* LibraryCallKit::round_double_node(Node* n) {
1467   if (Matcher::strict_fp_requires_explicit_rounding && UseSSE <= 1)
1468     n = _gvn.transform(new RoundDoubleNode(0, n));
1469   return n;
1470 }
1471 
1472 //------------------------------inline_math-----------------------------------
1473 // public static double Math.abs(double)
1474 // public static double Math.sqrt(double)
1475 // public static double Math.log(double)
1476 // public static double Math.log10(double)
1477 bool LibraryCallKit::inline_math(vmIntrinsics::ID id) {
1478   Node* arg = round_double_node(argument(0));
1479   Node* n = NULL;
1480   switch (id) {
1481   case vmIntrinsics::_dabs:   n = new AbsDNode(                arg);  break;
1482   case vmIntrinsics::_dsqrt:  n = new SqrtDNode(C, control(),  arg);  break;
1483   case vmIntrinsics::_dlog10: n = new Log10DNode(C, control(), arg);  break;
1484   default:  fatal_unexpected_iid(id);  break;
1485   }
1486   set_result(_gvn.transform(n));
1487   return true;
1488 }
1489 
1490 //------------------------------inline_trig----------------------------------
1491 // Inline sin/cos/tan instructions, if possible.  If rounding is required, do
1492 // argument reduction which will turn into a fast/slow diamond.
1493 bool LibraryCallKit::inline_trig(vmIntrinsics::ID id) {
1494   Node* arg = round_double_node(argument(0));
1495   Node* n = NULL;
1496 
1497   switch (id) {
1498   case vmIntrinsics::_dsin:  n = new SinDNode(C, control(), arg);  break;
1499   case vmIntrinsics::_dcos:  n = new CosDNode(C, control(), arg);  break;
1500   case vmIntrinsics::_dtan:  n = new TanDNode(C, control(), arg);  break;
1501   default:  fatal_unexpected_iid(id);  break;
1502   }
1503   n = _gvn.transform(n);
1504 
1505   // Rounding required?  Check for argument reduction!
1506   if (Matcher::strict_fp_requires_explicit_rounding) {
1507     static const double     pi_4 =  0.7853981633974483;
1508     static const double neg_pi_4 = -0.7853981633974483;
1509     // pi/2 in 80-bit extended precision
1510     // static const unsigned char pi_2_bits_x[] = {0x35,0xc2,0x68,0x21,0xa2,0xda,0x0f,0xc9,0xff,0x3f,0x00,0x00,0x00,0x00,0x00,0x00};
1511     // -pi/2 in 80-bit extended precision
1512     // static const unsigned char neg_pi_2_bits_x[] = {0x35,0xc2,0x68,0x21,0xa2,0xda,0x0f,0xc9,0xff,0xbf,0x00,0x00,0x00,0x00,0x00,0x00};
1513     // Cutoff value for using this argument reduction technique
1514     //static const double    pi_2_minus_epsilon =  1.564660403643354;
1515     //static const double neg_pi_2_plus_epsilon = -1.564660403643354;
1516 
1517     // Pseudocode for sin:
1518     // if (x <= Math.PI / 4.0) {
1519     //   if (x >= -Math.PI / 4.0) return  fsin(x);
1520     //   if (x >= -Math.PI / 2.0) return -fcos(x + Math.PI / 2.0);
1521     // } else {
1522     //   if (x <=  Math.PI / 2.0) return  fcos(x - Math.PI / 2.0);
1523     // }
1524     // return StrictMath.sin(x);
1525 
1526     // Pseudocode for cos:
1527     // if (x <= Math.PI / 4.0) {
1528     //   if (x >= -Math.PI / 4.0) return  fcos(x);
1529     //   if (x >= -Math.PI / 2.0) return  fsin(x + Math.PI / 2.0);
1530     // } else {
1531     //   if (x <=  Math.PI / 2.0) return -fsin(x - Math.PI / 2.0);
1532     // }
1533     // return StrictMath.cos(x);
1534 
1535     // Actually, sticking an 80-bit Intel value into C2 will be tough; it
1536     // requires a special machine instruction to load it.  Instead we'll try
1537     // the 'easy' case.  If we really need the extra range +/- PI/2 we'll
1538     // probably do the math inside the SIN encoding.
1539 
1540     // Make the merge point
1541     RegionNode* r = new RegionNode(3);
1542     Node* phi = new PhiNode(r, Type::DOUBLE);
1543 
1544     // Flatten arg so we need only 1 test
1545     Node *abs = _gvn.transform(new AbsDNode(arg));
1546     // Node for PI/4 constant
1547     Node *pi4 = makecon(TypeD::make(pi_4));
1548     // Check PI/4 : abs(arg)
1549     Node *cmp = _gvn.transform(new CmpDNode(pi4,abs));
1550     // Check: If PI/4 < abs(arg) then go slow
1551     Node *bol = _gvn.transform(new BoolNode( cmp, BoolTest::lt ));
1552     // Branch either way
1553     IfNode *iff = create_and_xform_if(control(),bol, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
1554     set_control(opt_iff(r,iff));
1555 
1556     // Set fast path result
1557     phi->init_req(2, n);
1558 
1559     // Slow path - non-blocking leaf call
1560     Node* call = NULL;
1561     switch (id) {
1562     case vmIntrinsics::_dsin:
1563       call = make_runtime_call(RC_LEAF, OptoRuntime::Math_D_D_Type(),
1564                                CAST_FROM_FN_PTR(address, SharedRuntime::dsin),
1565                                "Sin", NULL, arg, top());
1566       break;
1567     case vmIntrinsics::_dcos:
1568       call = make_runtime_call(RC_LEAF, OptoRuntime::Math_D_D_Type(),
1569                                CAST_FROM_FN_PTR(address, SharedRuntime::dcos),
1570                                "Cos", NULL, arg, top());
1571       break;
1572     case vmIntrinsics::_dtan:
1573       call = make_runtime_call(RC_LEAF, OptoRuntime::Math_D_D_Type(),
1574                                CAST_FROM_FN_PTR(address, SharedRuntime::dtan),
1575                                "Tan", NULL, arg, top());
1576       break;
1577     }
1578     assert(control()->in(0) == call, "");
1579     Node* slow_result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
1580     r->init_req(1, control());
1581     phi->init_req(1, slow_result);
1582 
1583     // Post-merge
1584     set_control(_gvn.transform(r));
1585     record_for_igvn(r);
1586     n = _gvn.transform(phi);
1587 
1588     C->set_has_split_ifs(true); // Has chance for split-if optimization
1589   }
1590   set_result(n);
1591   return true;
1592 }
1593 
1594 Node* LibraryCallKit::finish_pow_exp(Node* result, Node* x, Node* y, const TypeFunc* call_type, address funcAddr, const char* funcName) {
1595   //-------------------
1596   // result = result.isNaN() ? funcAddr() : result;
1597   // Check for NaN by testing result != result; if the result is NaN,
1598   // either deoptimize or fall back to the runtime call.
1599   Node* cmpisnan = _gvn.transform(new CmpDNode(result, result));
1600   // Build the boolean node
1601   Node* bolisnum = _gvn.transform(new BoolNode(cmpisnan, BoolTest::eq));
1602 
1603   if (!too_many_traps(Deoptimization::Reason_intrinsic)) {
1604     { BuildCutout unless(this, bolisnum, PROB_STATIC_FREQUENT);
1605       // The pow or exp intrinsic returned a NaN, which requires a call
1606       // to the runtime.  Recompile with the runtime call.
1607       uncommon_trap(Deoptimization::Reason_intrinsic,
1608                     Deoptimization::Action_make_not_entrant);
1609     }
1610     return result;
1611   } else {
1612     // If this inlining ever returned NaN in the past, we compile a call
1613     // to the runtime to properly handle the corner cases.
1614 
1615     IfNode* iff = create_and_xform_if(control(), bolisnum, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
1616     Node* if_slow = _gvn.transform(new IfFalseNode(iff));
1617     Node* if_fast = _gvn.transform(new IfTrueNode(iff));
1618 
1619     if (!if_slow->is_top()) {
1620       RegionNode* result_region = new RegionNode(3);
1621       PhiNode*    result_val = new PhiNode(result_region, Type::DOUBLE);
1622 
1623       result_region->init_req(1, if_fast);
1624       result_val->init_req(1, result);
1625 
1626       set_control(if_slow);
1627 
1628       const TypePtr* no_memory_effects = NULL;
1629       Node* rt = make_runtime_call(RC_LEAF, call_type, funcAddr, funcName,
1630                                    no_memory_effects,
1631                                    x, top(), y, y ? top() : NULL);
1632       Node* value = _gvn.transform(new ProjNode(rt, TypeFunc::Parms+0));
1633 #ifdef ASSERT
1634       Node* value_top = _gvn.transform(new ProjNode(rt, TypeFunc::Parms+1));
1635       assert(value_top == top(), "second value must be top");
1636 #endif
1637 
1638       result_region->init_req(2, control());
1639       result_val->init_req(2, value);
1640       set_control(_gvn.transform(result_region));
1641       return _gvn.transform(result_val);
1642     } else {
1643       return result;
1644     }
1645   }
1646 }
1647 
1648 //------------------------------inline_pow-------------------------------------
1649 // Inline power instructions, if possible.
1650 bool LibraryCallKit::inline_pow() {
1651   // Pseudocode for pow
1652   // if (y == 2) {
1653   //   return x * x;
1654   // } else {
1655   //   if (x <= 0.0) {
1656   //     long longy = (long)y;
1657   //     if ((double)longy == y) { // if y is long
1658   //       if (y + 1 == y) longy = 0; // huge number: even
1659   //       result = ((1&longy) == 0)?-DPow(abs(x), y):DPow(abs(x), y);
1660   //     } else {
1661   //       result = NaN;
1662   //     }
1663   //   } else {
1664   //     result = DPow(x,y);
1665   //   }
1666   //   if (result != result) {
1667   //     result = uncommon_trap() or runtime_call();
1668   //   }
1669   //   return result;
1670   // }
1671 
1672   Node* x = round_double_node(argument(0));
1673   Node* y = round_double_node(argument(2));
1674 
1675   Node* result = NULL;
1676 
1677   Node*   const_two_node = makecon(TypeD::make(2.0));
1678   Node*   cmp_node       = _gvn.transform(new CmpDNode(y, const_two_node));
1679   Node*   bool_node      = _gvn.transform(new BoolNode(cmp_node, BoolTest::eq));
1680   IfNode* if_node        = create_and_xform_if(control(), bool_node, PROB_STATIC_INFREQUENT, COUNT_UNKNOWN);
1681   Node*   if_true        = _gvn.transform(new IfTrueNode(if_node));
1682   Node*   if_false       = _gvn.transform(new IfFalseNode(if_node));
1683 
1684   RegionNode* region_node = new RegionNode(3);
1685   region_node->init_req(1, if_true);
1686 
1687   Node* phi_node = new PhiNode(region_node, Type::DOUBLE);
1688   // special case for x^y where y == 2, we can convert it to x * x
1689   phi_node->init_req(1, _gvn.transform(new MulDNode(x, x)));
1690 
1691   // set control to if_false since we will now process the false branch
1692   set_control(if_false);
1693 
1694   if (!too_many_traps(Deoptimization::Reason_intrinsic)) {
1695     // Short form: skip the fancy tests and just check for NaN result.
1696     result = _gvn.transform(new PowDNode(C, control(), x, y));
1697   } else {
1698     // If this inlining ever returned NaN in the past, include all
1699     // checks + call to the runtime.
1700 
1701     // Set the merge point for If node with condition of (x <= 0.0)
1702     // There are four possible paths to region node and phi node
1703     RegionNode *r = new RegionNode(4);
1704     Node *phi = new PhiNode(r, Type::DOUBLE);
1705 
1706     // Build the first if node: if (x <= 0.0)
1707     // Node for 0 constant
1708     Node *zeronode = makecon(TypeD::ZERO);
1709     // Check x:0
1710     Node *cmp = _gvn.transform(new CmpDNode(x, zeronode));
1711     // Check: If (x<=0) then go complex path
1712     Node *bol1 = _gvn.transform(new BoolNode( cmp, BoolTest::le ));
1713     // Branch either way
1714     IfNode *if1 = create_and_xform_if(control(),bol1, PROB_STATIC_INFREQUENT, COUNT_UNKNOWN);
1715     // Fast path taken; set region slot 3
1716     Node *fast_taken = _gvn.transform(new IfFalseNode(if1));
1717     r->init_req(3,fast_taken); // Capture fast-control
1718 
1719     // Fast path not-taken, i.e. slow path
1720     Node *complex_path = _gvn.transform(new IfTrueNode(if1));
1721 
1722     // Set fast path result
1723     Node *fast_result = _gvn.transform(new PowDNode(C, control(), x, y));
1724     phi->init_req(3, fast_result);
1725 
1726     // Complex path
1727     // Build the second if node (if y is long)
1728     // Node for (long)y
1729     Node *longy = _gvn.transform(new ConvD2LNode(y));
1730     // Node for (double)((long) y)
1731     Node *doublelongy= _gvn.transform(new ConvL2DNode(longy));
1732     // Check (double)((long) y) : y
1733     Node *cmplongy= _gvn.transform(new CmpDNode(doublelongy, y));
1734     // If y is not a long value (i.e. it has a fractional part), take the slow path.
1735 
1736     Node *bol2 = _gvn.transform(new BoolNode( cmplongy, BoolTest::ne ));
1737     // Branch either way
1738     IfNode *if2 = create_and_xform_if(complex_path,bol2, PROB_STATIC_INFREQUENT, COUNT_UNKNOWN);
1739     Node* ylong_path = _gvn.transform(new IfFalseNode(if2));
1740 
1741     Node *slow_path = _gvn.transform(new IfTrueNode(if2));
1742 
1743     // Calculate DPow(abs(x), y)*(1 & (long)y)
1744     // Node for constant 1
1745     Node *conone = longcon(1);
1746     // 1& (long)y
1747     Node *signnode= _gvn.transform(new AndLNode(conone, longy));
1748 
1749     // A huge number is always even. Detect a huge number by checking
1750     // if y + 1 == y and set integer to be tested for parity to 0.
1751     // Required for corner case:
1752     // (long)9.223372036854776E18 = max_jlong
1753     // (double)(long)9.223372036854776E18 = 9.223372036854776E18
1754     // max_jlong is odd but 9.223372036854776E18 is even
1755     Node* yplus1 = _gvn.transform(new AddDNode(y, makecon(TypeD::make(1))));
1756     Node *cmpyplus1= _gvn.transform(new CmpDNode(yplus1, y));
1757     Node *bolyplus1 = _gvn.transform(new BoolNode( cmpyplus1, BoolTest::eq ));
1758     Node* correctedsign = NULL;
1759     if (ConditionalMoveLimit != 0) {
1760       correctedsign = _gvn.transform(CMoveNode::make(NULL, bolyplus1, signnode, longcon(0), TypeLong::LONG));
1761     } else {
1762       IfNode *ifyplus1 = create_and_xform_if(ylong_path,bolyplus1, PROB_FAIR, COUNT_UNKNOWN);
1763       RegionNode *r = new RegionNode(3);
1764       Node *phi = new PhiNode(r, TypeLong::LONG);
1765       r->init_req(1, _gvn.transform(new IfFalseNode(ifyplus1)));
1766       r->init_req(2, _gvn.transform(new IfTrueNode(ifyplus1)));
1767       phi->init_req(1, signnode);
1768       phi->init_req(2, longcon(0));
1769       correctedsign = _gvn.transform(phi);
1770       ylong_path = _gvn.transform(r);
1771       record_for_igvn(r);
1772     }
1773 
1774     // zero node
1775     Node *conzero = longcon(0);
1776     // Check (1&(long)y)==0?
1777     Node *cmpeq1 = _gvn.transform(new CmpLNode(correctedsign, conzero));
1778     // Check if (1&(long)y)!=0?, if so the result is negative
1779     Node *bol3 = _gvn.transform(new BoolNode( cmpeq1, BoolTest::ne ));
1780     // abs(x)
1781     Node *absx=_gvn.transform(new AbsDNode(x));
1782     // abs(x)^y
1783     Node *absxpowy = _gvn.transform(new PowDNode(C, control(), absx, y));
1784     // -abs(x)^y
1785     Node *negabsxpowy = _gvn.transform(new NegDNode (absxpowy));
1786     // (1&(long)y)==1?-DPow(abs(x), y):DPow(abs(x), y)
1787     Node *signresult = NULL;
1788     if (ConditionalMoveLimit != 0) {
1789       signresult = _gvn.transform(CMoveNode::make(NULL, bol3, absxpowy, negabsxpowy, Type::DOUBLE));
1790     } else {
1791       IfNode *ifyeven = create_and_xform_if(ylong_path,bol3, PROB_FAIR, COUNT_UNKNOWN);
1792       RegionNode *r = new RegionNode(3);
1793       Node *phi = new PhiNode(r, Type::DOUBLE);
1794       r->init_req(1, _gvn.transform(new IfFalseNode(ifyeven)));
1795       r->init_req(2, _gvn.transform(new IfTrueNode(ifyeven)));
1796       phi->init_req(1, absxpowy);
1797       phi->init_req(2, negabsxpowy);
1798       signresult = _gvn.transform(phi);
1799       ylong_path = _gvn.transform(r);
1800       record_for_igvn(r);
1801     }
1802     // Set complex path fast result
1803     r->init_req(2, ylong_path);
1804     phi->init_req(2, signresult);
1805 
1806     static const jlong nan_bits = CONST64(0x7ff8000000000000);
1807     Node *slow_result = makecon(TypeD::make(*(double*)&nan_bits)); // return NaN
1808     r->init_req(1,slow_path);
1809     phi->init_req(1,slow_result);
1810 
1811     // Post merge
1812     set_control(_gvn.transform(r));
1813     record_for_igvn(r);
1814     result = _gvn.transform(phi);
1815   }
1816 
1817   result = finish_pow_exp(result, x, y, OptoRuntime::Math_DD_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dpow), "POW");
1818 
1819   // control from finish_pow_exp is now input to the region node
1820   region_node->set_req(2, control());
1821   // the result from finish_pow_exp is now input to the phi node
1822   phi_node->init_req(2, result);
1823   set_control(_gvn.transform(region_node));
1824   record_for_igvn(region_node);
1825   set_result(_gvn.transform(phi_node));
1826 
1827   C->set_has_split_ifs(true); // Has chance for split-if optimization
1828   return true;
1829 }
1830 
1831 //------------------------------runtime_math-----------------------------
1832 bool LibraryCallKit::runtime_math(const TypeFunc* call_type, address funcAddr, const char* funcName) {
1833   assert(call_type == OptoRuntime::Math_DD_D_Type() || call_type == OptoRuntime::Math_D_D_Type(),
1834          "must be (DD)D or (D)D type");
1835 
1836   // Inputs
1837   Node* a = round_double_node(argument(0));
1838   Node* b = (call_type == OptoRuntime::Math_DD_D_Type()) ? round_double_node(argument(2)) : NULL;
1839 
1840   const TypePtr* no_memory_effects = NULL;
1841   Node* trig = make_runtime_call(RC_LEAF, call_type, funcAddr, funcName,
1842                                  no_memory_effects,
1843                                  a, top(), b, b ? top() : NULL);
1844   Node* value = _gvn.transform(new ProjNode(trig, TypeFunc::Parms+0));
1845 #ifdef ASSERT
1846   Node* value_top = _gvn.transform(new ProjNode(trig, TypeFunc::Parms+1));
1847   assert(value_top == top(), "second value must be top");
1848 #endif
1849 
1850   set_result(value);
1851   return true;
1852 }
1853 
1854 //------------------------------inline_math_native-----------------------------
1855 bool LibraryCallKit::inline_math_native(vmIntrinsics::ID id) {
1856 #define FN_PTR(f) CAST_FROM_FN_PTR(address, f)
1857   switch (id) {
1858     // These intrinsics are not properly supported on all hardware
1859   case vmIntrinsics::_dcos:   return Matcher::has_match_rule(Op_CosD)   ? inline_trig(id) :
1860     runtime_math(OptoRuntime::Math_D_D_Type(), FN_PTR(SharedRuntime::dcos),   "COS");
1861   case vmIntrinsics::_dsin:   return Matcher::has_match_rule(Op_SinD)   ? inline_trig(id) :
1862     runtime_math(OptoRuntime::Math_D_D_Type(), FN_PTR(SharedRuntime::dsin),   "SIN");
1863   case vmIntrinsics::_dtan:   return Matcher::has_match_rule(Op_TanD)   ? inline_trig(id) :
1864     runtime_math(OptoRuntime::Math_D_D_Type(), FN_PTR(SharedRuntime::dtan),   "TAN");
1865 
1866   case vmIntrinsics::_dlog:
1867     return StubRoutines::dlog() != NULL ?
1868     runtime_math(OptoRuntime::Math_D_D_Type(), StubRoutines::dlog(), "dlog") :
1869     runtime_math(OptoRuntime::Math_D_D_Type(), FN_PTR(SharedRuntime::dlog),   "LOG");
1870   case vmIntrinsics::_dlog10: return Matcher::has_match_rule(Op_Log10D) ? inline_math(id) :
1871     runtime_math(OptoRuntime::Math_D_D_Type(), FN_PTR(SharedRuntime::dlog10), "LOG10");
1872 
1873     // These intrinsics are supported on all hardware
1874   case vmIntrinsics::_dsqrt:  return Matcher::match_rule_supported(Op_SqrtD) ? inline_math(id) : false;
1875   case vmIntrinsics::_dabs:   return Matcher::has_match_rule(Op_AbsD)   ? inline_math(id) : false;
1876 
1877   case vmIntrinsics::_dexp:
1878     return StubRoutines::dexp() != NULL ?
1879       runtime_math(OptoRuntime::Math_D_D_Type(), StubRoutines::dexp(),  "dexp") :
1880       runtime_math(OptoRuntime::Math_D_D_Type(), FN_PTR(SharedRuntime::dexp),  "EXP");
1881   case vmIntrinsics::_dpow:   return Matcher::has_match_rule(Op_PowD)   ? inline_pow()    :
1882     runtime_math(OptoRuntime::Math_DD_D_Type(), FN_PTR(SharedRuntime::dpow),  "POW");
1883 #undef FN_PTR
1884 
1885    // These intrinsics are not yet correctly implemented
1886   case vmIntrinsics::_datan2:
1887     return false;
1888 
1889   default:
1890     fatal_unexpected_iid(id);
1891     return false;
1892   }
1893 }
1894 
1895 static bool is_simple_name(Node* n) {
1896   return (n->req() == 1         // constant
1897           || (n->is_Type() && n->as_Type()->type()->singleton())
1898           || n->is_Proj()       // parameter or return value
1899           || n->is_Phi()        // local of some sort
1900           );
1901 }
1902 
1903 //----------------------------inline_notify------------------------------------
1904 bool LibraryCallKit::inline_notify(vmIntrinsics::ID id) {
1905   const TypeFunc* ftype = OptoRuntime::monitor_notify_Type();
1906   address func;
1907   if (id == vmIntrinsics::_notify) {
1908     func = OptoRuntime::monitor_notify_Java();
1909   } else {
1910     func = OptoRuntime::monitor_notifyAll_Java();
1911   }
1912   Node* call = make_runtime_call(RC_NO_LEAF, ftype, func, NULL, TypeRawPtr::BOTTOM, argument(0));
1913   make_slow_call_ex(call, env()->Throwable_klass(), false);
1914   return true;
1915 }
1916 
1917 
1918 //----------------------------inline_min_max-----------------------------------
1919 bool LibraryCallKit::inline_min_max(vmIntrinsics::ID id) {
1920   set_result(generate_min_max(id, argument(0), argument(1)));
1921   return true;
1922 }
1923 
1924 void LibraryCallKit::inline_math_mathExact(Node* math, Node *test) {
1925   Node* bol = _gvn.transform( new BoolNode(test, BoolTest::overflow) );
1926   IfNode* check = create_and_map_if(control(), bol, PROB_UNLIKELY_MAG(3), COUNT_UNKNOWN);
1927   Node* fast_path = _gvn.transform( new IfFalseNode(check));
1928   Node* slow_path = _gvn.transform( new IfTrueNode(check) );
1929 
1930   {
1931     PreserveJVMState pjvms(this);
1932     PreserveReexecuteState preexecs(this);
1933     jvms()->set_should_reexecute(true);
1934 
1935     set_control(slow_path);
1936     set_i_o(i_o());
1937 
1938     uncommon_trap(Deoptimization::Reason_intrinsic,
1939                   Deoptimization::Action_none);
1940   }
1941 
1942   set_control(fast_path);
1943   set_result(math);
1944 }
1945 
1946 template <typename OverflowOp>
1947 bool LibraryCallKit::inline_math_overflow(Node* arg1, Node* arg2) {
1948   typedef typename OverflowOp::MathOp MathOp;
1949 
1950   MathOp* mathOp = new MathOp(arg1, arg2);
1951   Node* operation = _gvn.transform( mathOp );
1952   Node* ofcheck = _gvn.transform( new OverflowOp(arg1, arg2) );
1953   inline_math_mathExact(operation, ofcheck);
1954   return true;
1955 }
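
// Illustrative use: for Math.addExact(Integer.MAX_VALUE, 1) the OverflowAddINode
// check above reports overflow, the slow path traps with reexecution set, and
// the interpreter then re-runs addExact and throws ArithmeticException.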
1956 
1957 bool LibraryCallKit::inline_math_addExactI(bool is_increment) {
1958   return inline_math_overflow<OverflowAddINode>(argument(0), is_increment ? intcon(1) : argument(1));
1959 }
1960 
1961 bool LibraryCallKit::inline_math_addExactL(bool is_increment) {
1962   return inline_math_overflow<OverflowAddLNode>(argument(0), is_increment ? longcon(1) : argument(2));
1963 }
1964 
1965 bool LibraryCallKit::inline_math_subtractExactI(bool is_decrement) {
1966   return inline_math_overflow<OverflowSubINode>(argument(0), is_decrement ? intcon(1) : argument(1));
1967 }
1968 
1969 bool LibraryCallKit::inline_math_subtractExactL(bool is_decrement) {
1970   return inline_math_overflow<OverflowSubLNode>(argument(0), is_decrement ? longcon(1) : argument(2));
1971 }
1972 
1973 bool LibraryCallKit::inline_math_negateExactI() {
1974   return inline_math_overflow<OverflowSubINode>(intcon(0), argument(0));
1975 }
1976 
1977 bool LibraryCallKit::inline_math_negateExactL() {
1978   return inline_math_overflow<OverflowSubLNode>(longcon(0), argument(0));
1979 }
1980 
1981 bool LibraryCallKit::inline_math_multiplyExactI() {
1982   return inline_math_overflow<OverflowMulINode>(argument(0), argument(1));
1983 }
1984 
1985 bool LibraryCallKit::inline_math_multiplyExactL() {
1986   return inline_math_overflow<OverflowMulLNode>(argument(0), argument(2));
1987 }
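
// Note on argument indexing (illustrative): arguments are addressed by JVM
// stack slot and a long occupies two slots, so the second long operand of
// e.g. multiplyExactL above is argument(2), not argument(1).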
1988 
1989 Node*
1990 LibraryCallKit::generate_min_max(vmIntrinsics::ID id, Node* x0, Node* y0) {
1991   // These are the candidate return values:
1992   Node* xvalue = x0;
1993   Node* yvalue = y0;
1994 
1995   if (xvalue == yvalue) {
1996     return xvalue;
1997   }
1998 
1999   bool want_max = (id == vmIntrinsics::_max);
2000 
2001   const TypeInt* txvalue = _gvn.type(xvalue)->isa_int();
2002   const TypeInt* tyvalue = _gvn.type(yvalue)->isa_int();
2003   if (txvalue == NULL || tyvalue == NULL)  return top();
2004   // This is not really necessary, but it is consistent with a
2005   // hypothetical MaxINode::Value method:
2006   int widen = MAX2(txvalue->_widen, tyvalue->_widen);
2007 
2008   // %%% This folding logic should (ideally) be in a different place.
2009   // Some of it should be inside IfNode, and there should be a more reliable
2010   // transformation of ?: style patterns into cmoves.  We also want
2011   // more powerful optimizations around cmove and min/max.
2012 
2013   // Try to find a dominating comparison of these guys.
2014   // It can simplify the index computation for Arrays.copyOf
2015   // and similar uses of System.arraycopy.
2016   // First, compute the normalized version of CmpI(x, y).
2017   int   cmp_op = Op_CmpI;
2018   Node* xkey = xvalue;
2019   Node* ykey = yvalue;
2020   Node* ideal_cmpxy = _gvn.transform(new CmpINode(xkey, ykey));
2021   if (ideal_cmpxy->is_Cmp()) {
2022     // E.g., if we have CmpI(length - offset, count),
2023     // it might idealize to CmpI(length, count + offset)
2024     cmp_op = ideal_cmpxy->Opcode();
2025     xkey = ideal_cmpxy->in(1);
2026     ykey = ideal_cmpxy->in(2);
2027   }
2028 
2029   // Start by locating any relevant comparisons.
2030   Node* start_from = (xkey->outcnt() < ykey->outcnt()) ? xkey : ykey;
2031   Node* cmpxy = NULL;
2032   Node* cmpyx = NULL;
2033   for (DUIterator_Fast kmax, k = start_from->fast_outs(kmax); k < kmax; k++) {
2034     Node* cmp = start_from->fast_out(k);
2035     if (cmp->outcnt() > 0 &&            // must have prior uses
2036         cmp->in(0) == NULL &&           // must be context-independent
2037         cmp->Opcode() == cmp_op) {      // right kind of compare
2038       if (cmp->in(1) == xkey && cmp->in(2) == ykey)  cmpxy = cmp;
2039       if (cmp->in(1) == ykey && cmp->in(2) == xkey)  cmpyx = cmp;
2040     }
2041   }
2042 
2043   const int NCMPS = 2;
2044   Node* cmps[NCMPS] = { cmpxy, cmpyx };
2045   int cmpn;
2046   for (cmpn = 0; cmpn < NCMPS; cmpn++) {
2047     if (cmps[cmpn] != NULL)  break;     // find a result
2048   }
2049   if (cmpn < NCMPS) {
2050     // Look for a dominating test that tells us the min and max.
2051     int depth = 0;                // Limit search depth for speed
2052     Node* dom = control();
2053     for (; dom != NULL; dom = IfNode::up_one_dom(dom, true)) {
2054       if (++depth >= 100)  break;
2055       Node* ifproj = dom;
2056       if (!ifproj->is_Proj())  continue;
2057       Node* iff = ifproj->in(0);
2058       if (!iff->is_If())  continue;
2059       Node* bol = iff->in(1);
2060       if (!bol->is_Bool())  continue;
2061       Node* cmp = bol->in(1);
2062       if (cmp == NULL)  continue;
2063       for (cmpn = 0; cmpn < NCMPS; cmpn++)
2064         if (cmps[cmpn] == cmp)  break;
2065       if (cmpn == NCMPS)  continue;
2066       BoolTest::mask btest = bol->as_Bool()->_test._test;
2067       if (ifproj->is_IfFalse())  btest = BoolTest(btest).negate();
2068       if (cmp->in(1) == ykey)    btest = BoolTest(btest).commute();
2069       // At this point, we know that 'x btest y' is true.
2070       switch (btest) {
2071       case BoolTest::eq:
2072         // They are proven equal, so we can collapse the min/max.
2073         // Either value is the answer.  Choose the simpler.
2074         if (is_simple_name(yvalue) && !is_simple_name(xvalue))
2075           return yvalue;
2076         return xvalue;
2077       case BoolTest::lt:          // x < y
2078       case BoolTest::le:          // x <= y
2079         return (want_max ? yvalue : xvalue);
2080       case BoolTest::gt:          // x > y
2081       case BoolTest::ge:          // x >= y
2082         return (want_max ? xvalue : yvalue);
2083       }
2084     }
2085   }
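  // Illustrative example: in code shaped like
  //   if (x < y) { ... Math.max(x, y) ... }
  // the search above finds the dominating 'x < y' test and folds the max
  // straight to y, with no new control flow.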
2086 
2087   // We failed to find a dominating test.
2088   // Let's pick a test that might GVN with prior tests.
2089   Node*          best_bol   = NULL;
2090   BoolTest::mask best_btest = BoolTest::illegal;
2091   for (cmpn = 0; cmpn < NCMPS; cmpn++) {
2092     Node* cmp = cmps[cmpn];
2093     if (cmp == NULL)  continue;
2094     for (DUIterator_Fast jmax, j = cmp->fast_outs(jmax); j < jmax; j++) {
2095       Node* bol = cmp->fast_out(j);
2096       if (!bol->is_Bool())  continue;
2097       BoolTest::mask btest = bol->as_Bool()->_test._test;
2098       if (btest == BoolTest::eq || btest == BoolTest::ne)  continue;
2099       if (cmp->in(1) == ykey)   btest = BoolTest(btest).commute();
2100       if (bol->outcnt() > (best_bol == NULL ? 0 : best_bol->outcnt())) {
2101         best_bol   = bol->as_Bool();
2102         best_btest = btest;
2103       }
2104     }
2105   }
2106 
2107   Node* answer_if_true  = NULL;
2108   Node* answer_if_false = NULL;
2109   switch (best_btest) {
2110   default:
2111     if (cmpxy == NULL)
2112       cmpxy = ideal_cmpxy;
2113     best_bol = _gvn.transform(new BoolNode(cmpxy, BoolTest::lt));
2114     // and fall through:
2115   case BoolTest::lt:          // x < y
2116   case BoolTest::le:          // x <= y
2117     answer_if_true  = (want_max ? yvalue : xvalue);
2118     answer_if_false = (want_max ? xvalue : yvalue);
2119     break;
2120   case BoolTest::gt:          // x > y
2121   case BoolTest::ge:          // x >= y
2122     answer_if_true  = (want_max ? xvalue : yvalue);
2123     answer_if_false = (want_max ? yvalue : xvalue);
2124     break;
2125   }
2126 
2127   jint hi, lo;
2128   if (want_max) {
2129     // We can sharpen the minimum.
2130     hi = MAX2(txvalue->_hi, tyvalue->_hi);
2131     lo = MAX2(txvalue->_lo, tyvalue->_lo);
2132   } else {
2133     // We can sharpen the maximum.
2134     hi = MIN2(txvalue->_hi, tyvalue->_hi);
2135     lo = MIN2(txvalue->_lo, tyvalue->_lo);
2136   }
2137 
2138   // Use a flow-free graph structure, to avoid creating excess control edges
2139   // which could hinder other optimizations.
2140   // Since Math.min/max is often used with arraycopy, we want
2141   // tightly_coupled_allocation to be able to see beyond min/max expressions.
2142   Node* cmov = CMoveNode::make(NULL, best_bol,
2143                                answer_if_false, answer_if_true,
2144                                TypeInt::make(lo, hi, widen));
2145 
2146   return _gvn.transform(cmov);
2147 
2148   /*
2149   // This is not as desirable as it may seem, since Min and Max
2150   // nodes do not have a full set of optimizations.
2151   // And they would interfere, anyway, with 'if' optimizations
2152   // and with CMoveI canonical forms.
2153   switch (id) {
2154   case vmIntrinsics::_min:
2155     result_val = _gvn.transform(new (C, 3) MinINode(x,y)); break;
2156   case vmIntrinsics::_max:
2157     result_val = _gvn.transform(new (C, 3) MaxINode(x,y)); break;
2158   default:
2159     ShouldNotReachHere();
2160   }
2161   */
2162 }
2163 
2164 inline int
2165 LibraryCallKit::classify_unsafe_addr(Node* &base, Node* &offset) {
2166   const TypePtr* base_type = TypePtr::NULL_PTR;
2167   if (base != NULL)  base_type = _gvn.type(base)->isa_ptr();
2168   if (base_type == NULL) {
2169     // Unknown type.
2170     return Type::AnyPtr;
2171   } else if (base_type == TypePtr::NULL_PTR) {
2172     // Since this is a NULL+long form, we have to switch to a rawptr.
2173     base   = _gvn.transform(new CastX2PNode(offset));
2174     offset = MakeConX(0);
2175     return Type::RawPtr;
2176   } else if (base_type->base() == Type::RawPtr) {
2177     return Type::RawPtr;
2178   } else if (base_type->isa_oopptr()) {
2179     // Base is never null => always a heap address.
2180     if (base_type->ptr() == TypePtr::NotNull) {
2181       return Type::OopPtr;
2182     }
2183     // Offset is small => always a heap address.
2184     const TypeX* offset_type = _gvn.type(offset)->isa_intptr_t();
2185     if (offset_type != NULL &&
2186         base_type->offset() == 0 &&     // (should always be?)
2187         offset_type->_lo >= 0 &&
2188         !MacroAssembler::needs_explicit_null_check(offset_type->_hi)) {
2189       return Type::OopPtr;
2190     }
2191     // Otherwise, it might either be oop+off or NULL+addr.
2192     return Type::AnyPtr;
2193   } else {
2194     // No information:
2195     return Type::AnyPtr;
2196   }
2197 }
2198 
2199 inline Node* LibraryCallKit::make_unsafe_address(Node* base, Node* offset) {
2200   int kind = classify_unsafe_addr(base, offset);
2201   if (kind == Type::RawPtr) {
2202     return basic_plus_adr(top(), base, offset);
2203   } else {
2204     return basic_plus_adr(base, offset);
2205   }
2206 }
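
// Illustrative examples of the classification above:
//   Unsafe.getLong(null, rawAddress) -> NULL+long form, rebuilt as a RawPtr
//   Unsafe.getLong(obj, smallOffset) -> provably a heap address (OopPtr)
//   anything else                    -> AnyPtr; both interpretations remain open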
2207 
2208 //--------------------------inline_number_methods-----------------------------
2209 // inline int     Integer.numberOfLeadingZeros(int)
2210 // inline int        Long.numberOfLeadingZeros(long)
2211 //
2212 // inline int     Integer.numberOfTrailingZeros(int)
2213 // inline int        Long.numberOfTrailingZeros(long)
2214 //
2215 // inline int     Integer.bitCount(int)
2216 // inline int        Long.bitCount(long)
2217 //
2218 // inline char  Character.reverseBytes(char)
2219 // inline short     Short.reverseBytes(short)
2220 // inline int     Integer.reverseBytes(int)
2221 // inline long       Long.reverseBytes(long)
2222 bool LibraryCallKit::inline_number_methods(vmIntrinsics::ID id) {
2223   Node* arg = argument(0);
2224   Node* n = NULL;
2225   switch (id) {
2226   case vmIntrinsics::_numberOfLeadingZeros_i:   n = new CountLeadingZerosINode( arg);  break;
2227   case vmIntrinsics::_numberOfLeadingZeros_l:   n = new CountLeadingZerosLNode( arg);  break;
2228   case vmIntrinsics::_numberOfTrailingZeros_i:  n = new CountTrailingZerosINode(arg);  break;
2229   case vmIntrinsics::_numberOfTrailingZeros_l:  n = new CountTrailingZerosLNode(arg);  break;
2230   case vmIntrinsics::_bitCount_i:               n = new PopCountINode(          arg);  break;
2231   case vmIntrinsics::_bitCount_l:               n = new PopCountLNode(          arg);  break;
2232   case vmIntrinsics::_reverseBytes_c:           n = new ReverseBytesUSNode(0,   arg);  break;
2233   case vmIntrinsics::_reverseBytes_s:           n = new ReverseBytesSNode( 0,   arg);  break;
2234   case vmIntrinsics::_reverseBytes_i:           n = new ReverseBytesINode( 0,   arg);  break;
2235   case vmIntrinsics::_reverseBytes_l:           n = new ReverseBytesLNode( 0,   arg);  break;
2236   default:  fatal_unexpected_iid(id);  break;
2237   }
2238   set_result(_gvn.transform(n));
2239   return true;
2240 }
2241 
2242 //----------------------------inline_unsafe_access----------------------------
2243 
2244 const static BasicType T_ADDRESS_HOLDER = T_LONG;
2245 
2246 // Helper that guards and inserts a pre-barrier.
2247 void LibraryCallKit::insert_pre_barrier(Node* base_oop, Node* offset,
2248                                         Node* pre_val, bool need_mem_bar) {
2249   // We could be accessing the referent field of a reference object. If so, when G1
2250   // is enabled, we need to log the value in the referent field in an SATB buffer.
2251   // This routine applies some compile-time filters and generates suitable
2252   // runtime filters that guard the pre-barrier code.
2253   // Also add a memory barrier for a non-volatile load from the referent field
2254   // to prevent commoning of loads across a safepoint.
2255   if (!UseG1GC && !need_mem_bar)
2256     return;
2257 
2258   // Some compile time checks.
2259 
2260   // If offset is a constant, is it java_lang_ref_Reference::referent_offset?
2261   const TypeX* otype = offset->find_intptr_t_type();
2262   if (otype != NULL && otype->is_con() &&
2263       otype->get_con() != java_lang_ref_Reference::referent_offset) {
2264     // Constant offset but not the referent_offset, so just return
2265     return;
2266   }
2267 
2268   // We only need to generate the runtime guards for instances.
2269   const TypeOopPtr* btype = base_oop->bottom_type()->isa_oopptr();
2270   if (btype != NULL) {
2271     if (btype->isa_aryptr()) {
2272       // Array type so nothing to do
2273       return;
2274     }
2275 
2276     const TypeInstPtr* itype = btype->isa_instptr();
2277     if (itype != NULL) {
2278       // Can the klass of base_oop be statically determined to be
2279       // _not_ a sub-class of Reference and _not_ Object?
2280       ciKlass* klass = itype->klass();
2281       if ( klass->is_loaded() &&
2282           !klass->is_subtype_of(env()->Reference_klass()) &&
2283           !env()->Object_klass()->is_subtype_of(klass)) {
2284         return;
2285       }
2286     }
2287   }
2288 
2289   // The compile time filters did not reject base_oop/offset so
2290   // we need to generate the following runtime filters
2291   //
2292   //   if (offset == java_lang_ref_Reference::referent_offset) {
2293   //   if (instance_of(base, java.lang.ref.Reference)) {
2294   //     pre_barrier(_, pre_val, ...);
2295   //   }
2296   // }
2297 
2298   float likely   = PROB_LIKELY(  0.999);
2299   float unlikely = PROB_UNLIKELY(0.999);
2300 
2301   IdealKit ideal(this);
2302 #define __ ideal.
2303 
2304   Node* referent_off = __ ConX(java_lang_ref_Reference::referent_offset);
2305 
2306   __ if_then(offset, BoolTest::eq, referent_off, unlikely); {
2307       // Update graphKit memory and control from IdealKit.
2308       sync_kit(ideal);
2309 
2310       Node* ref_klass_con = makecon(TypeKlassPtr::make(env()->Reference_klass()));
2311       Node* is_instof = gen_instanceof(base_oop, ref_klass_con);
2312 
2313       // Update IdealKit memory and control from graphKit.
2314       __ sync_kit(this);
2315 
2316       Node* one = __ ConI(1);
2317       // is_instof == 0 if base_oop == NULL
2318       __ if_then(is_instof, BoolTest::eq, one, unlikely); {
2319 
2320         // Update graphKit from IdealKit.
2321         sync_kit(ideal);
2322 
2323         // Use the pre-barrier to record the value in the referent field
2324         pre_barrier(false /* do_load */,
2325                     __ ctrl(),
2326                     NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
2327                     pre_val /* pre_val */,
2328                     T_OBJECT);
2329         if (need_mem_bar) {
2330           // Add memory barrier to prevent commoning reads from this field
2331           // across safepoint since GC can change its value.
2332           insert_mem_bar(Op_MemBarCPUOrder);
2333         }
2334         // Update IdealKit from graphKit.
2335         __ sync_kit(this);
2336 
2337       } __ end_if(); // _ref_type != ref_none
2338   } __ end_if(); // offset == referent_offset
2339 
2340   // Final sync IdealKit and GraphKit.
2341   final_sync(ideal);
2342 #undef __
2343 }
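
// Illustrative sketch (assumes a sun.misc.Unsafe instance 'unsafe'; not part
// of this file): the filters above guard Java-level reads of
// Reference.referent performed through Unsafe, e.g.
//
//   long off = unsafe.objectFieldOffset(
//       java.lang.ref.Reference.class.getDeclaredField("referent"));
//   Object r = unsafe.getObject(ref, off);  // under G1 this read needs an SATB pre-barrier
//
// Reads at any other constant offset, or from a klass statically known not to
// be a Reference, are filtered out at compile time by the checks above.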
2344 
2345 
2346 // Interpret Unsafe.fieldOffset cookies correctly:
2347 extern jlong Unsafe_field_offset_to_byte_offset(jlong field_offset);
2348 
2349 const TypeOopPtr* LibraryCallKit::sharpen_unsafe_type(Compile::AliasType* alias_type, const TypePtr *adr_type, bool is_native_ptr) {
2350   // Attempt to infer a sharper value type from the offset and base type.
2351   ciKlass* sharpened_klass = NULL;
2352 
2353   // See if it is an instance field, with an object type.
2354   if (alias_type->field() != NULL) {
2355     assert(!is_native_ptr, "native pointer op cannot use a java address");
2356     if (alias_type->field()->type()->is_klass()) {
2357       sharpened_klass = alias_type->field()->type()->as_klass();
2358     }
2359   }
2360 
2361   // See if it is a narrow oop array.
2362   if (adr_type->isa_aryptr()) {
2363     if (adr_type->offset() >= objArrayOopDesc::base_offset_in_bytes()) {
2364       const TypeOopPtr *elem_type = adr_type->is_aryptr()->elem()->isa_oopptr();
2365       if (elem_type != NULL) {
2366         sharpened_klass = elem_type->klass();
2367       }
2368     }
2369   }
2370 
2371   // The sharpened class might be unloaded if there is no class loader
2372   // constraint in place.
2373   if (sharpened_klass != NULL && sharpened_klass->is_loaded()) {
2374     const TypeOopPtr* tjp = TypeOopPtr::make_from_klass(sharpened_klass);
2375 
2376 #ifndef PRODUCT
2377     if (C->print_intrinsics() || C->print_inlining()) {
2378       tty->print("  from base type: ");  adr_type->dump();
2379       tty->print("  sharpened value: ");  tjp->dump();
2380     }
2381 #endif
2382     // Sharpen the value type.
2383     return tjp;
2384   }
2385   return NULL;
2386 }
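
// Illustrative example (hypothetical names): for an element read such as
//   String s = (String) unsafe.getObject(stringArray, base + i * scale);
// where stringArray is statically a String[], adr_type is an array pointer
// whose element type is String, so the value type returned here sharpens
// from Object to String.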
2387 
2388 bool LibraryCallKit::inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, bool is_volatile) {
2389   if (callee()->is_static())  return false;  // caller must have the capability!
2390 
2391 #ifndef PRODUCT
2392   {
2393     ResourceMark rm;
2394     // Check the signatures.
2395     ciSignature* sig = callee()->signature();
2396 #ifdef ASSERT
2397     if (!is_store) {
2398       // Object getObject(Object base, int/long offset), etc.
2399       BasicType rtype = sig->return_type()->basic_type();
2400       if (rtype == T_ADDRESS_HOLDER && callee()->name() == ciSymbol::getAddress_name())
2401           rtype = T_ADDRESS;  // it is really a C void*
2402       assert(rtype == type, "getter must return the expected value");
2403       if (!is_native_ptr) {
2404         assert(sig->count() == 2, "oop getter has 2 arguments");
2405         assert(sig->type_at(0)->basic_type() == T_OBJECT, "getter base is object");
2406         assert(sig->type_at(1)->basic_type() == T_LONG, "getter offset is correct");
2407       } else {
2408         assert(sig->count() == 1, "native getter has 1 argument");
2409         assert(sig->type_at(0)->basic_type() == T_LONG, "getter base is long");
2410       }
2411     } else {
2412       // void putObject(Object base, int/long offset, Object x), etc.
2413       assert(sig->return_type()->basic_type() == T_VOID, "putter must not return a value");
2414       if (!is_native_ptr) {
2415         assert(sig->count() == 3, "oop putter has 3 arguments");
2416         assert(sig->type_at(0)->basic_type() == T_OBJECT, "putter base is object");
2417         assert(sig->type_at(1)->basic_type() == T_LONG, "putter offset is correct");
2418       } else {
2419         assert(sig->count() == 2, "native putter has 2 arguments");
2420         assert(sig->type_at(0)->basic_type() == T_LONG, "putter base is long");
2421       }
2422       BasicType vtype = sig->type_at(sig->count()-1)->basic_type();
2423       if (vtype == T_ADDRESS_HOLDER && callee()->name() == ciSymbol::putAddress_name())
2424         vtype = T_ADDRESS;  // it is really a C void*
2425       assert(vtype == type, "putter must accept the expected value");
2426     }
2427 #endif // ASSERT
2428  }
2429 #endif //PRODUCT
2430 
2431   C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
2432 
2433   Node* receiver = argument(0);  // type: oop
2434 
2435   // Build address expression.
2436   Node* adr;
2437   Node* heap_base_oop = top();
2438   Node* offset = top();
2439   Node* val;
2440 
2441   if (!is_native_ptr) {
2442     // The base is either a Java object or a value produced by Unsafe.staticFieldBase
2443     Node* base = argument(1);  // type: oop
2444     // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
2445     offset = argument(2);  // type: long
2446     // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2447     // to be plain byte offsets, the same as those accepted
2448     // by oopDesc::field_base.
2449     assert(Unsafe_field_offset_to_byte_offset(11) == 11,
2450            "fieldOffset must be byte-scaled");
2451     // 32-bit machines ignore the high half!
2452     offset = ConvL2X(offset);
2453     adr = make_unsafe_address(base, offset);
2454     heap_base_oop = base;
2455     val = is_store ? argument(4) : NULL;
2456   } else {
2457     Node* ptr = argument(1);  // type: long
2458     ptr = ConvL2X(ptr);  // adjust Java long to machine word
2459     adr = make_unsafe_address(NULL, ptr);
2460     val = is_store ? argument(3) : NULL;
2461   }
2462 
2463   const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
2464 
2465   // First guess at the value type.
2466   const Type *value_type = Type::get_const_basic_type(type);
2467 
2468   // Try to categorize the address.  If it comes up as TypeOopPtr::BOTTOM,
2469   // there was not enough information to nail it down.
2470   Compile::AliasType* alias_type = C->alias_type(adr_type);
2471   assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2472 
2473   // We will need memory barriers unless we can determine a unique
2474   // alias category for this reference.  (Note:  If for some reason
2475   // the barriers get omitted and the unsafe reference begins to "pollute"
2476   // the alias analysis of the rest of the graph, either Compile::can_alias
2477   // or Compile::must_alias will throw a diagnostic assert.)
2478   bool need_mem_bar = (alias_type->adr_type() == TypeOopPtr::BOTTOM);
2479 
2480   // If we are reading the value of the referent field of a Reference
2481   // object (either by using Unsafe directly or through reflection)
2482   // then, if G1 is enabled, we need to record the referent in an
2483   // SATB log buffer using the pre-barrier mechanism.
2484   // Also we need to add memory barrier to prevent commoning reads
2485   // from this field across safepoint since GC can change its value.
2486   bool need_read_barrier = !is_native_ptr && !is_store &&
2487                            offset != top() && heap_base_oop != top();
2488 
2489   if (!is_store && type == T_OBJECT) {
2490     const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type, is_native_ptr);
2491     if (tjp != NULL) {
2492       value_type = tjp;
2493     }
2494   }
2495 
2496   receiver = null_check(receiver);
2497   if (stopped()) {
2498     return true;
2499   }
2500   // Heap pointers get a null-check from the interpreter,
2501   // as a courtesy.  However, this is not guaranteed by Unsafe,
2502   // and it is not possible to fully distinguish unintended nulls
2503   // from intended ones in this API.
2504 
2505   if (is_volatile) {
2506     // We need to emit leading and trailing CPU membars (see below) in
2507     // addition to memory membars when is_volatile. This is a little
2508     // too strong, but avoids the need to insert per-alias-type
2509     // volatile membars (for stores; compare Parse::do_put_xxx), which
2510     // we cannot do effectively here because we probably only have a
2511     // rough approximation of type.
2512     need_mem_bar = true;
2513     // For Stores, place a memory ordering barrier now.
2514     if (is_store) {
2515       insert_mem_bar(Op_MemBarRelease);
2516     } else {
2517       if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
2518         insert_mem_bar(Op_MemBarVolatile);
2519       }
2520     }
2521   }
2522 
2523   // Memory barrier to prevent normal and 'unsafe' accesses from
2524   // bypassing each other.  Happens after null checks, so the
2525   // exception paths do not take memory state from the memory barrier,
2526   // so there is no problem making a strong assert about mixing users
2527   // of safe & unsafe memory.
2528   if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder);
2529 
2530   if (!is_store) {
2531     Node* p = NULL;
2532     // Try to constant fold a load from a constant field
2533     ciField* field = alias_type->field();
2534     if (heap_base_oop != top() &&
2535         field != NULL && field->is_constant() && field->layout_type() == type) {
2536       // final or stable field
2537       const Type* con_type = Type::make_constant(alias_type->field(), heap_base_oop);
2538       if (con_type != NULL) {
2539         p = makecon(con_type);
2540       }
2541     }
2542     if (p == NULL) {
2543       MemNode::MemOrd mo = is_volatile ? MemNode::acquire : MemNode::unordered;
2544       // To be valid, unsafe loads may depend on other conditions than
2545       // the one that guards them: pin the Load node
2546       p = make_load(control(), adr, value_type, type, adr_type, mo, LoadNode::Pinned, is_volatile);
2547       // load value
2548       switch (type) {
2549       case T_BOOLEAN:
2550       case T_CHAR:
2551       case T_BYTE:
2552       case T_SHORT:
2553       case T_INT:
2554       case T_LONG:
2555       case T_FLOAT:
2556       case T_DOUBLE:
2557         break;
2558       case T_OBJECT:
2559         if (need_read_barrier) {
2560           insert_pre_barrier(heap_base_oop, offset, p, !(is_volatile || need_mem_bar));
2561         }
2562         break;
2563       case T_ADDRESS:
2564         // Cast to an int type.
2565         p = _gvn.transform(new CastP2XNode(NULL, p));
2566         p = ConvX2UL(p);
2567         break;
2568       default:
2569         fatal("unexpected type %d: %s", type, type2name(type));
2570         break;
2571       }
2572     }
2573     // The load node has the control of the preceding MemBarCPUOrder.  All
2574     // following nodes will have the control of the MemBarCPUOrder inserted at
2575     // the end of this method.  So, pushing the load onto the stack at a later
2576     // point is fine.
2577     set_result(p);
2578   } else {
2579     // place effect of store into memory
2580     switch (type) {
2581     case T_DOUBLE:
2582       val = dstore_rounding(val);
2583       break;
2584     case T_ADDRESS:
2585       // Repackage the long as a pointer.
2586       val = ConvL2X(val);
2587       val = _gvn.transform(new CastX2PNode(val));
2588       break;
2589     }
2590 
2591     MemNode::MemOrd mo = is_volatile ? MemNode::release : MemNode::unordered;
2592     if (type != T_OBJECT) {
2593       (void) store_to_memory(control(), adr, val, type, adr_type, mo, is_volatile);
2594     } else {
2595       // Possibly an oop being stored to Java heap or native memory
2596       if (!TypePtr::NULL_PTR->higher_equal(_gvn.type(heap_base_oop))) {
2597         // oop to Java heap.
2598         (void) store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type, mo);
2599       } else {
2600         // We can't tell at compile time if we are storing in the Java heap or outside
2601         // of it. So we need to emit code to conditionally do the proper type of
2602         // store.
2603 
2604         IdealKit ideal(this);
2605 #define __ ideal.
2606         // The actual branch probability here is unknown; PROB_UNLIKELY(0.999) is a placeholder guess.
2607         __ if_then(heap_base_oop, BoolTest::ne, null(), PROB_UNLIKELY(0.999)); {
2608           // Sync IdealKit and graphKit.
2609           sync_kit(ideal);
2610           Node* st = store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type, mo);
2611           // Update IdealKit memory.
2612           __ sync_kit(this);
2613         } __ else_(); {
2614           __ store(__ ctrl(), adr, val, type, alias_type->index(), mo, is_volatile);
2615         } __ end_if();
2616         // Final sync IdealKit and GraphKit.
2617         final_sync(ideal);
2618 #undef __
2619       }
2620     }
2621   }
2622 
2623   if (is_volatile) {
2624     if (!is_store) {
2625       insert_mem_bar(Op_MemBarAcquire);
2626     } else {
2627       if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2628         insert_mem_bar(Op_MemBarVolatile);
2629       }
2630     }
2631   }
2632 
2633   if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder);
2634 
2635   return true;
2636 }
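
// Sketch of the barrier shape the code above emits for a volatile Unsafe
// read (the leading MemBarVolatile appears only when
// support_IRIW_for_not_multiple_copy_atomic_cpu is set):
//
//   [MemBarVolatile]                        // IRIW fence, if required
//   MemBarCPUOrder
//   LoadNode (pinned, MemNode::acquire)
//   MemBarAcquire
//   MemBarCPUOrder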
2637 
2638 //----------------------------inline_unsafe_load_store----------------------------
2639 // This method serves a couple of different customers (depending on LoadStoreKind):
2640 //
2641 // LS_cmpxchg:
2642 //   public final native boolean compareAndSwapObject(Object o, long offset, Object expected, Object x);
2643 //   public final native boolean compareAndSwapInt(   Object o, long offset, int    expected, int    x);
2644 //   public final native boolean compareAndSwapLong(  Object o, long offset, long   expected, long   x);
2645 //
2646 // LS_xadd:
2647 //   public int  getAndAddInt( Object o, long offset, int  delta)
2648 //   public long getAndAddLong(Object o, long offset, long delta)
2649 //
2650 // LS_xchg:
2651 //   int    getAndSet(Object o, long offset, int    newValue)
2652 //   long   getAndSet(Object o, long offset, long   newValue)
2653 //   Object getAndSet(Object o, long offset, Object newValue)
2654 //
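// As a rough illustration (JDK library code, not part of this file), the
// java.util.concurrent.atomic classes bottom out in these entry points:
//   AtomicInteger.incrementAndGet() -> unsafe.getAndAddInt(this, valueOffset, 1) + 1       (LS_xadd)
//   AtomicReference.getAndSet(v)    -> unsafe.getAndSetObject(this, valueOffset, v)        (LS_xchg)
//   AtomicLong.compareAndSet(e, x)  -> unsafe.compareAndSwapLong(this, valueOffset, e, x)  (LS_cmpxchg)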
2655 bool LibraryCallKit::inline_unsafe_load_store(BasicType type, LoadStoreKind kind) {
2656   // This basic scheme here is the same as inline_unsafe_access, but
2657   // differs in enough details that combining them would make the code
2658   // overly confusing.  (This is a true fact! I originally combined
2659   // them, but even I was confused by it!) As much code/comments as
2660   // possible are retained from inline_unsafe_access though to make
2661   // the correspondences clearer. - dl
2662 
2663   if (callee()->is_static())  return false;  // caller must have the capability!
2664 
2665 #ifndef PRODUCT
2666   BasicType rtype;
2667   {
2668     ResourceMark rm;
2669     // Check the signatures.
2670     ciSignature* sig = callee()->signature();
2671     rtype = sig->return_type()->basic_type();
2672     if (kind == LS_xadd || kind == LS_xchg) {
2673       // Check the signatures.
2674 #ifdef ASSERT
2675       assert(rtype == type, "get and set must return the expected type");
2676       assert(sig->count() == 3, "get and set has 3 arguments");
2677       assert(sig->type_at(0)->basic_type() == T_OBJECT, "get and set base is object");
2678       assert(sig->type_at(1)->basic_type() == T_LONG, "get and set offset is long");
2679       assert(sig->type_at(2)->basic_type() == type, "get and set must take expected type as new value/delta");
2680 #endif // ASSERT
2681     } else if (kind == LS_cmpxchg) {
2682       // Check the signatures.
2683 #ifdef ASSERT
2684       assert(rtype == T_BOOLEAN, "CAS must return boolean");
2685       assert(sig->count() == 4, "CAS has 4 arguments");
2686       assert(sig->type_at(0)->basic_type() == T_OBJECT, "CAS base is object");
2687       assert(sig->type_at(1)->basic_type() == T_LONG, "CAS offset is long");
2688 #endif // ASSERT
2689     } else {
2690       ShouldNotReachHere();
2691     }
2692   }
2693 #endif //PRODUCT
2694 
2695   C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
2696 
2697   // Get arguments:
2698   Node* receiver = NULL;
2699   Node* base     = NULL;
2700   Node* offset   = NULL;
2701   Node* oldval   = NULL;
2702   Node* newval   = NULL;
2703   if (kind == LS_cmpxchg) {
2704     const bool two_slot_type = type2size[type] == 2;
2705     receiver = argument(0);  // type: oop
2706     base     = argument(1);  // type: oop
2707     offset   = argument(2);  // type: long
2708     oldval   = argument(4);  // type: oop, int, or long
2709     newval   = argument(two_slot_type ? 6 : 5);  // type: oop, int, or long
2710   } else if (kind == LS_xadd || kind == LS_xchg){
2711     receiver = argument(0);  // type: oop
2712     base     = argument(1);  // type: oop
2713     offset   = argument(2);  // type: long
2714     oldval   = NULL;
2715     newval   = argument(4);  // type: oop, int, or long
2716   }
2717 
2718   // Null check receiver.
2719   receiver = null_check(receiver);
2720   if (stopped()) {
2721     return true;
2722   }
2723 
2724   // Build field offset expression.
2725   // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2726   // to be plain byte offsets, the same as those accepted
2727   // by oopDesc::field_base.
2728   assert(Unsafe_field_offset_to_byte_offset(11) == 11, "fieldOffset must be byte-scaled");
2729   // 32-bit machines ignore the high half of long offsets
2730   offset = ConvL2X(offset);
2731   Node* adr = make_unsafe_address(base, offset);
2732   const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
2733 
2734   // For CAS, unlike inline_unsafe_access, there seems to be no point in
2735   // trying to refine types. Just use the coarse types here.
2736   const Type *value_type = Type::get_const_basic_type(type);
2737   Compile::AliasType* alias_type = C->alias_type(adr_type);
2738   assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2739 
2740   if (kind == LS_xchg && type == T_OBJECT) {
2741     const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
2742     if (tjp != NULL) {
2743       value_type = tjp;
2744     }
2745   }
2746 
2747   int alias_idx = C->get_alias_index(adr_type);
2748 
2749   // Memory-model-wise, a LoadStore acts like a little synchronized
2750   // block, so it needs barriers on each side.  These don't translate
2751   // into actual barriers on most machines, but we still need the rest
2752   // of the compiler to respect ordering.
2753 
2754   insert_mem_bar(Op_MemBarRelease);
2755   insert_mem_bar(Op_MemBarCPUOrder);
2756 
2757   // 4984716: MemBars must be inserted before this
2758   //          memory node in order to avoid a false
2759   //          dependency which will confuse the scheduler.
2760   Node *mem = memory(alias_idx);
2761 
2762   // For now, we handle only those cases that actually exist: ints,
2763   // longs, and Object. Adding others should be straightforward.
2764   Node* load_store = NULL;
2765   switch(type) {
2766   case T_INT:
2767     if (kind == LS_xadd) {
2768       load_store = _gvn.transform(new GetAndAddINode(control(), mem, adr, newval, adr_type));
2769     } else if (kind == LS_xchg) {
2770       load_store = _gvn.transform(new GetAndSetINode(control(), mem, adr, newval, adr_type));
2771     } else if (kind == LS_cmpxchg) {
2772       load_store = _gvn.transform(new CompareAndSwapINode(control(), mem, adr, newval, oldval));
2773     } else {
2774       ShouldNotReachHere();
2775     }
2776     break;
2777   case T_LONG:
2778     if (kind == LS_xadd) {
2779       load_store = _gvn.transform(new GetAndAddLNode(control(), mem, adr, newval, adr_type));
2780     } else if (kind == LS_xchg) {
2781       load_store = _gvn.transform(new GetAndSetLNode(control(), mem, adr, newval, adr_type));
2782     } else if (kind == LS_cmpxchg) {
2783       load_store = _gvn.transform(new CompareAndSwapLNode(control(), mem, adr, newval, oldval));
2784     } else {
2785       ShouldNotReachHere();
2786     }
2787     break;
2788   case T_OBJECT:
2789     // Transformation of a value which could be a NULL pointer (CastPP #NULL)
2790     // could be delayed during Parse (for example, in adjust_map_after_if()).
2791     // Execute the transformation here to avoid barrier generation in such a case.
2792     if (_gvn.type(newval) == TypePtr::NULL_PTR)
2793       newval = _gvn.makecon(TypePtr::NULL_PTR);
2794 
2795     // Reference stores need a store barrier.
2796     if (kind == LS_xchg) {
2797       // If the pre-barrier must execute before the oop store, the old value requires do_load here.
2798       if (!can_move_pre_barrier()) {
2799         pre_barrier(true /* do_load*/,
2800                     control(), base, adr, alias_idx, newval, value_type->make_oopptr(),
2801                     NULL /* pre_val*/,
2802                     T_OBJECT);
2803       } // Else move pre_barrier to use load_store value, see below.
2804     } else if (kind == LS_cmpxchg) {
2805       // Same as for newval above:
2806       if (_gvn.type(oldval) == TypePtr::NULL_PTR) {
2807         oldval = _gvn.makecon(TypePtr::NULL_PTR);
2808       }
2809       // The only known value which might get overwritten is oldval.
2810       pre_barrier(false /* do_load */,
2811                   control(), NULL, NULL, max_juint, NULL, NULL,
2812                   oldval /* pre_val */,
2813                   T_OBJECT);
2814     } else {
2815       ShouldNotReachHere();
2816     }
2817 
2818 #ifdef _LP64
2819     if (adr->bottom_type()->is_ptr_to_narrowoop()) {
2820       Node *newval_enc = _gvn.transform(new EncodePNode(newval, newval->bottom_type()->make_narrowoop()));
2821       if (kind == LS_xchg) {
2822         load_store = _gvn.transform(new GetAndSetNNode(control(), mem, adr,
2823                                                        newval_enc, adr_type, value_type->make_narrowoop()));
2824       } else {
2825         assert(kind == LS_cmpxchg, "wrong LoadStore operation");
2826         Node *oldval_enc = _gvn.transform(new EncodePNode(oldval, oldval->bottom_type()->make_narrowoop()));
2827         load_store = _gvn.transform(new CompareAndSwapNNode(control(), mem, adr,
2828                                                                 newval_enc, oldval_enc));
2829       }
2830     } else
2831 #endif
2832     {
2833       if (kind == LS_xchg) {
2834         load_store = _gvn.transform(new GetAndSetPNode(control(), mem, adr, newval, adr_type, value_type->is_oopptr()));
2835       } else {
2836         assert(kind == LS_cmpxchg, "wrong LoadStore operation");
2837         load_store = _gvn.transform(new CompareAndSwapPNode(control(), mem, adr, newval, oldval));
2838       }
2839     }
2840     if (kind == LS_cmpxchg) {
2841       // Emit the post barrier only when the actual store happened.
2842       // It makes sense to check this only for compareAndSet, which can fail to set the value.
2843       // The CAS success path is marked more likely since we anticipate it is a performance-
2844       // critical path, while the CAS failure path can absorb the penalty of going through
2845       // the unlikely path as backoff, which is still better than emitting a store barrier there.
2846       IdealKit ideal(this);
2847       ideal.if_then(load_store, BoolTest::ne, ideal.ConI(0), PROB_STATIC_FREQUENT); {
2848         sync_kit(ideal);
2849         post_barrier(ideal.ctrl(), load_store, base, adr, alias_idx, newval, T_OBJECT, true);
2850         ideal.sync_kit(this);
2851       } ideal.end_if();
2852       final_sync(ideal);
2853     } else {
2854       post_barrier(control(), load_store, base, adr, alias_idx, newval, T_OBJECT, true);
2855     }
2856     break;
2857   default:
2858     fatal("unexpected type %d: %s", type, type2name(type));
2859     break;
2860   }
2861 
2862   // SCMemProjNodes represent the memory state of a LoadStore. Their
2863   // main role is to prevent LoadStore nodes from being optimized away
2864   // when their results aren't used.
2865   Node* proj = _gvn.transform(new SCMemProjNode(load_store));
2866   set_memory(proj, alias_idx);
2867 
2868   if (type == T_OBJECT && kind == LS_xchg) {
2869 #ifdef _LP64
2870     if (adr->bottom_type()->is_ptr_to_narrowoop()) {
2871       load_store = _gvn.transform(new DecodeNNode(load_store, load_store->get_ptr_type()));
2872     }
2873 #endif
2874     if (can_move_pre_barrier()) {
2875       // Don't need to load pre_val. The old value is returned by load_store.
2876       // The pre_barrier can execute after the xchg as long as no safepoint
2877       // gets inserted between them.
2878       pre_barrier(false /* do_load */,
2879                   control(), NULL, NULL, max_juint, NULL, NULL,
2880                   load_store /* pre_val */,
2881                   T_OBJECT);
2882     }
2883   }
2884 
2885   // Add the trailing membar surrounding the access
2886   insert_mem_bar(Op_MemBarCPUOrder);
2887   insert_mem_bar(Op_MemBarAcquire);
2888 
2889   assert(type2size[load_store->bottom_type()->basic_type()] == type2size[rtype], "result type should match");
2890   set_result(load_store);
2891   return true;
2892 }
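
// Shape sketch for an object CAS (LS_cmpxchg, T_OBJECT, no compressed oops),
// following the barrier placement above:
//   MemBarRelease -> MemBarCPUOrder -> pre_barrier(oldval)
//     -> CompareAndSwapP -> SCMemProj -> post_barrier (on the success path only)
//     -> MemBarCPUOrder -> MemBarAcquire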
2893 
2894 //----------------------------inline_unsafe_ordered_store----------------------
2895 // public native void sun.misc.Unsafe.putOrderedObject(Object o, long offset, Object x);
2896 // public native void sun.misc.Unsafe.putOrderedInt(Object o, long offset, int x);
2897 // public native void sun.misc.Unsafe.putOrderedLong(Object o, long offset, long x);
2898 bool LibraryCallKit::inline_unsafe_ordered_store(BasicType type) {
2899   // This is another variant of inline_unsafe_access, differing in
2900   // that it always issues store-store ("release") barrier and ensures
2901   // store-atomicity (which only matters for "long").
2902 
2903   if (callee()->is_static())  return false;  // caller must have the capability!
2904 
2905 #ifndef PRODUCT
2906   {
2907     ResourceMark rm;
2908     // Check the signatures.
2909     ciSignature* sig = callee()->signature();
2910 #ifdef ASSERT
2911     BasicType rtype = sig->return_type()->basic_type();
2912     assert(rtype == T_VOID, "must return void");
2913     assert(sig->count() == 3, "has 3 arguments");
2914     assert(sig->type_at(0)->basic_type() == T_OBJECT, "base is object");
2915     assert(sig->type_at(1)->basic_type() == T_LONG, "offset is long");
2916 #endif // ASSERT
2917   }
2918 #endif //PRODUCT
2919 
2920   C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
2921 
2922   // Get arguments:
2923   Node* receiver = argument(0);  // type: oop
2924   Node* base     = argument(1);  // type: oop
2925   Node* offset   = argument(2);  // type: long
2926   Node* val      = argument(4);  // type: oop, int, or long
2927 
2928   // Null check receiver.
2929   receiver = null_check(receiver);
2930   if (stopped()) {
2931     return true;
2932   }
2933 
2934   // Build field offset expression.
2935   assert(Unsafe_field_offset_to_byte_offset(11) == 11, "fieldOffset must be byte-scaled");
2936   // 32-bit machines ignore the high half of long offsets
2937   offset = ConvL2X(offset);
2938   Node* adr = make_unsafe_address(base, offset);
2939   const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
2940   const Type *value_type = Type::get_const_basic_type(type);
2941   Compile::AliasType* alias_type = C->alias_type(adr_type);
2942 
2943   insert_mem_bar(Op_MemBarRelease);
2944   insert_mem_bar(Op_MemBarCPUOrder);
2945   // Ensure that the store is atomic for longs:
2946   const bool require_atomic_access = true;
2947   Node* store;
2948   if (type == T_OBJECT) { // reference stores need a store barrier.
2949     store = store_oop_to_unknown(control(), base, adr, adr_type, val, type, MemNode::release);
2950   } else {
2951     store = store_to_memory(control(), adr, val, type, adr_type, MemNode::release, require_atomic_access);
2952   }
2953   insert_mem_bar(Op_MemBarCPUOrder);
2954   return true;
2955 }
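
// Typical caller (illustrative): AtomicInteger.lazySet(int) is implemented in
// the JDK as
//   unsafe.putOrderedInt(this, valueOffset, newValue);
// i.e. a release store without the trailing volatile fence of a volatile write.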
2956 
2957 bool LibraryCallKit::inline_unsafe_fence(vmIntrinsics::ID id) {
2958   // Regardless of form, don't allow previous ld/st to move down,
2959   // then issue acquire, release, or volatile mem_bar.
2960   insert_mem_bar(Op_MemBarCPUOrder);
2961   switch(id) {
2962     case vmIntrinsics::_loadFence:
2963       insert_mem_bar(Op_LoadFence);
2964       return true;
2965     case vmIntrinsics::_storeFence:
2966       insert_mem_bar(Op_StoreFence);
2967       return true;
2968     case vmIntrinsics::_fullFence:
2969       insert_mem_bar(Op_MemBarVolatile);
2970       return true;
2971     default:
2972       fatal_unexpected_iid(id);
2973       return false;
2974   }
2975 }
2976 
2977 bool LibraryCallKit::klass_needs_init_guard(Node* kls) {
2978   if (!kls->is_Con()) {
2979     return true;
2980   }
2981   const TypeKlassPtr* klsptr = kls->bottom_type()->isa_klassptr();
2982   if (klsptr == NULL) {
2983     return true;
2984   }
2985   ciInstanceKlass* ik = klsptr->klass()->as_instance_klass();
2986   // don't need a guard for a klass that is already initialized
2987   return !ik->is_initialized();
2988 }
2989 
2990 //----------------------------inline_unsafe_allocate---------------------------
2991 // public native Object sun.misc.Unsafe.allocateInstance(Class<?> cls);
2992 bool LibraryCallKit::inline_unsafe_allocate() {
2993   if (callee()->is_static())  return false;  // caller must have the capability!
2994 
2995   null_check_receiver();  // null-check, then ignore
2996   Node* cls = null_check(argument(1));
2997   if (stopped())  return true;
2998 
2999   Node* kls = load_klass_from_mirror(cls, false, NULL, 0);
3000   kls = null_check(kls);
3001   if (stopped())  return true;  // argument was like int.class
3002 
3003   Node* test = NULL;
3004   if (LibraryCallKit::klass_needs_init_guard(kls)) {
3005     // Note:  The argument might still be an illegal value like
3006     // Serializable.class or Object[].class.   The runtime will handle it.
3007     // But we must make an explicit check for initialization.
3008     Node* insp = basic_plus_adr(kls, in_bytes(InstanceKlass::init_state_offset()));
3009     // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler
3010     // can generate code to load it as unsigned byte.
3011     Node* inst = make_load(NULL, insp, TypeInt::UBYTE, T_BOOLEAN, MemNode::unordered);
3012     Node* bits = intcon(InstanceKlass::fully_initialized);
3013     test = _gvn.transform(new SubINode(inst, bits));
3014     // The 'test' is non-zero if we need to take a slow path.
3015   }
3016 
3017   Node* obj = new_instance(kls, test);
3018   set_result(obj);
3019   return true;
3020 }
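
// Example (illustrative): unsafe.allocateInstance(Foo.class) produces a Foo
// without running any constructor; the init-state guard above still forces
// the slow path when Foo has not yet been initialized.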
3021 
3022 #ifdef TRACE_HAVE_INTRINSICS
3023 /*
3024  * oop -> myklass
3025  * myklass->trace_id |= USED
3026  * return myklass->trace_id & ~0x3
3027  */
3028 bool LibraryCallKit::inline_native_classID() {
3029   null_check_receiver();  // null-check, then ignore
3030   Node* cls = null_check(argument(1), T_OBJECT);
3031   Node* kls = load_klass_from_mirror(cls, false, NULL, 0);
3032   kls = null_check(kls, T_OBJECT);
3033   ByteSize offset = TRACE_ID_OFFSET;
3034   Node* insp = basic_plus_adr(kls, in_bytes(offset));
3035   Node* tvalue = make_load(NULL, insp, TypeLong::LONG, T_LONG, MemNode::unordered);
3036   Node* bits = longcon(~0x03l); // ignore bits 0 & 1
3037   Node* andl = _gvn.transform(new AndLNode(tvalue, bits));
3038   Node* clsused = longcon(0x01l); // set the class bit
3039   Node* orl = _gvn.transform(new OrLNode(tvalue, clsused));
3040 
3041   const TypePtr *adr_type = _gvn.type(insp)->isa_ptr();
3042   store_to_memory(control(), insp, orl, T_LONG, adr_type, MemNode::unordered);
3043   set_result(andl);
3044   return true;
3045 }
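
// Worked example of the bit twiddling above: for trace_id == 0x48 the store
// writes 0x48 | 0x01 == 0x49 (class marked as used), and the intrinsic
// returns 0x48 & ~0x03 == 0x48.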
3046 
3047 bool LibraryCallKit::inline_native_threadID() {
3048   Node* tls_ptr = NULL;
3049   Node* cur_thr = generate_current_thread(tls_ptr);
3050   Node* p = basic_plus_adr(top()/*!oop*/, tls_ptr, in_bytes(JavaThread::osthread_offset()));
3051   Node* osthread = make_load(NULL, p, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered);
3052   p = basic_plus_adr(top()/*!oop*/, osthread, in_bytes(OSThread::thread_id_offset()));
3053 
3054   Node* threadid = NULL;
3055   size_t thread_id_size = OSThread::thread_id_size();
3056   if (thread_id_size == (size_t) BytesPerLong) {
3057     threadid = ConvL2I(make_load(control(), p, TypeLong::LONG, T_LONG, MemNode::unordered));
3058   } else if (thread_id_size == (size_t) BytesPerInt) {
3059     threadid = make_load(control(), p, TypeInt::INT, T_INT, MemNode::unordered);
3060   } else {
3061     ShouldNotReachHere();
3062   }
3063   set_result(threadid);
3064   return true;
3065 }
3066 #endif
3067 
3068 //------------------------inline_native_time_funcs--------------
3069 // inline code for System.currentTimeMillis() and System.nanoTime()
3070 // these have the same type and signature
3071 bool LibraryCallKit::inline_native_time_funcs(address funcAddr, const char* funcName) {
3072   const TypeFunc* tf = OptoRuntime::void_long_Type();
3073   const TypePtr* no_memory_effects = NULL;
3074   Node* time = make_runtime_call(RC_LEAF, tf, funcAddr, funcName, no_memory_effects);
3075   Node* value = _gvn.transform(new ProjNode(time, TypeFunc::Parms+0));
3076 #ifdef ASSERT
3077   Node* value_top = _gvn.transform(new ProjNode(time, TypeFunc::Parms+1));
3078   assert(value_top == top(), "second value must be top");
3079 #endif
3080   set_result(value);
3081   return true;
3082 }
3083 
3084 //------------------------inline_native_currentThread------------------
3085 bool LibraryCallKit::inline_native_currentThread() {
3086   Node* junk = NULL;
3087   set_result(generate_current_thread(junk));
3088   return true;
3089 }
3090 
3091 //------------------------inline_native_isInterrupted------------------
3092 // private native boolean java.lang.Thread.isInterrupted(boolean ClearInterrupted);
3093 bool LibraryCallKit::inline_native_isInterrupted() {
3094   // Add a fast path to t.isInterrupted(clear_int):
3095   //   (t == Thread.current() &&
3096   //    (!TLS._osthread._interrupted || WINDOWS_ONLY(false) NOT_WINDOWS(!clear_int)))
3097   //   ? TLS._osthread._interrupted : /*slow path:*/ t.isInterrupted(clear_int)
3098   // So, in the common case that the interrupt bit is false,
3099   // we avoid making a call into the VM.  Even if the interrupt bit
3100   // is true, if the clear_int argument is false, we avoid the VM call.
3101   // However, if the receiver is not currentThread, we must call the VM,
3102   // because there must be some locking done around the operation.
3103 
3104   // We only go to the fast case code if we pass two guards.
3105   // Paths which do not pass are accumulated in the slow_region.
3106 
3107   enum {
3108     no_int_result_path   = 1, // t == Thread.current() && !TLS._osthread._interrupted
3109     no_clear_result_path = 2, // t == Thread.current() &&  TLS._osthread._interrupted && !clear_int
3110     slow_result_path     = 3, // slow path: t.isInterrupted(clear_int)
3111     PATH_LIMIT
3112   };
3113 
3114   // Ensure that it's not possible to move the load of TLS._osthread._interrupted flag
3115   // out of the function.
3116   insert_mem_bar(Op_MemBarCPUOrder);
3117 
3118   RegionNode* result_rgn = new RegionNode(PATH_LIMIT);
3119   PhiNode*    result_val = new PhiNode(result_rgn, TypeInt::BOOL);
3120 
3121   RegionNode* slow_region = new RegionNode(1);
3122   record_for_igvn(slow_region);
3123 
3124   // (a) Receiving thread must be the current thread.
3125   Node* rec_thr = argument(0);
3126   Node* tls_ptr = NULL;
3127   Node* cur_thr = generate_current_thread(tls_ptr);
3128   Node* cmp_thr = _gvn.transform(new CmpPNode(cur_thr, rec_thr));
3129   Node* bol_thr = _gvn.transform(new BoolNode(cmp_thr, BoolTest::ne));
3130 
3131   generate_slow_guard(bol_thr, slow_region);
3132 
3133   // (b) Interrupt bit on TLS must be false.
3134   Node* p = basic_plus_adr(top()/*!oop*/, tls_ptr, in_bytes(JavaThread::osthread_offset()));
3135   Node* osthread = make_load(NULL, p, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered);
3136   p = basic_plus_adr(top()/*!oop*/, osthread, in_bytes(OSThread::interrupted_offset()));
3137 
3138   // Set the control input on the field _interrupted read to prevent it from floating up.
3139   Node* int_bit = make_load(control(), p, TypeInt::BOOL, T_INT, MemNode::unordered);
3140   Node* cmp_bit = _gvn.transform(new CmpINode(int_bit, intcon(0)));
3141   Node* bol_bit = _gvn.transform(new BoolNode(cmp_bit, BoolTest::ne));
3142 
3143   IfNode* iff_bit = create_and_map_if(control(), bol_bit, PROB_UNLIKELY_MAG(3), COUNT_UNKNOWN);
3144 
3145   // First fast path:  if (!TLS._interrupted) return false;
3146   Node* false_bit = _gvn.transform(new IfFalseNode(iff_bit));
3147   result_rgn->init_req(no_int_result_path, false_bit);
3148   result_val->init_req(no_int_result_path, intcon(0));
3149 
3150   // drop through to next case
3151   set_control( _gvn.transform(new IfTrueNode(iff_bit)));
3152 
3153 #ifndef TARGET_OS_FAMILY_windows
3154   // (c) Or, if interrupt bit is set and clear_int is false, use 2nd fast path.
3155   Node* clr_arg = argument(1);
3156   Node* cmp_arg = _gvn.transform(new CmpINode(clr_arg, intcon(0)));
3157   Node* bol_arg = _gvn.transform(new BoolNode(cmp_arg, BoolTest::ne));
3158   IfNode* iff_arg = create_and_map_if(control(), bol_arg, PROB_FAIR, COUNT_UNKNOWN);
3159 
3160   // Second fast path:  ... else if (!clear_int) return true;
3161   Node* false_arg = _gvn.transform(new IfFalseNode(iff_arg));
3162   result_rgn->init_req(no_clear_result_path, false_arg);
3163   result_val->init_req(no_clear_result_path, intcon(1));
3164 
3165   // drop through to next case
3166   set_control( _gvn.transform(new IfTrueNode(iff_arg)));
3167 #else
3168   // To return true on Windows you must read the _interrupted field
3169   // and check the event state, i.e., take the slow path.
3170 #endif // TARGET_OS_FAMILY_windows
3171 
3172   // (d) Otherwise, go to the slow path.
3173   slow_region->add_req(control());
3174   set_control( _gvn.transform(slow_region));
3175 
3176   if (stopped()) {
3177     // There is no slow path.
3178     result_rgn->init_req(slow_result_path, top());
3179     result_val->init_req(slow_result_path, top());
3180   } else {
3181     // non-virtual because it is a private non-static
3182     CallJavaNode* slow_call = generate_method_call(vmIntrinsics::_isInterrupted);
3183 
3184     Node* slow_val = set_results_for_java_call(slow_call);
3185     // this->control() comes from set_results_for_java_call
3186 
3187     Node* fast_io  = slow_call->in(TypeFunc::I_O);
3188     Node* fast_mem = slow_call->in(TypeFunc::Memory);
3189 
3190     // These two phis are pre-filled with copies of the fast IO and Memory
3191     PhiNode* result_mem  = PhiNode::make(result_rgn, fast_mem, Type::MEMORY, TypePtr::BOTTOM);
3192     PhiNode* result_io   = PhiNode::make(result_rgn, fast_io,  Type::ABIO);
3193 
3194     result_rgn->init_req(slow_result_path, control());
3195     result_io ->init_req(slow_result_path, i_o());
3196     result_mem->init_req(slow_result_path, reset_memory());
3197     result_val->init_req(slow_result_path, slow_val);
3198 
3199     set_all_memory(_gvn.transform(result_mem));
3200     set_i_o(       _gvn.transform(result_io));
3201   }
3202 
3203   C->set_has_split_ifs(true); // Has chance for split-if optimization
3204   set_result(result_rgn, result_val);
3205   return true;
3206 }
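
// Example (non-Windows, illustrative): Thread.currentThread().isInterrupted()
// calls isInterrupted(false); with the interrupt bit clear it returns false
// via no_int_result_path, and with the bit set it returns true via
// no_clear_result_path; no VM call is made in either case.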
3207 
3208 //---------------------------load_mirror_from_klass----------------------------
3209 // Given a klass oop, load its java mirror (a java.lang.Class oop).
3210 Node* LibraryCallKit::load_mirror_from_klass(Node* klass) {
3211   Node* p = basic_plus_adr(klass, in_bytes(Klass::java_mirror_offset()));
3212   return make_load(NULL, p, TypeInstPtr::MIRROR, T_OBJECT, MemNode::unordered);
3213 }
3214 
3215 //-----------------------load_klass_from_mirror_common-------------------------
3216 // Given a java mirror (a java.lang.Class oop), load its corresponding klass oop.
3217 // Test the klass oop for null (signifying a primitive Class like Integer.TYPE),
3218 // and branch to the given path on the region.
3219 // If never_see_null, take an uncommon trap on null, so we can optimistically
3220 // compile for the non-null case.
3221 // If the region is NULL, force never_see_null = true.
3222 Node* LibraryCallKit::load_klass_from_mirror_common(Node* mirror,
3223                                                     bool never_see_null,
3224                                                     RegionNode* region,
3225                                                     int null_path,
3226                                                     int offset) {
3227   if (region == NULL)  never_see_null = true;
3228   Node* p = basic_plus_adr(mirror, offset);
3229   const TypeKlassPtr*  kls_type = TypeKlassPtr::OBJECT_OR_NULL;
3230   Node* kls = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeRawPtr::BOTTOM, kls_type));
3231   Node* null_ctl = top();
3232   kls = null_check_oop(kls, &null_ctl, never_see_null);
3233   if (region != NULL) {
3234     // Set region->in(null_path) if the mirror is a primitive (e.g., int.class).
3235     region->init_req(null_path, null_ctl);
3236   } else {
3237     assert(null_ctl == top(), "no loose ends");
3238   }
3239   return kls;
3240 }
3241 
3242 //--------------------(inline_native_Class_query helpers)---------------------
3243 // Use this for JVM_ACC_INTERFACE, JVM_ACC_IS_CLONEABLE, JVM_ACC_HAS_FINALIZER.
3244 // Fall through if (mods & mask) == bits, take the guard otherwise.
3245 Node* LibraryCallKit::generate_access_flags_guard(Node* kls, int modifier_mask, int modifier_bits, RegionNode* region) {
3246   // Branch around if the given klass has the given modifier bit set.
3247   // Like generate_guard, adds a new path onto the region.
3248   Node* modp = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
3249   Node* mods = make_load(NULL, modp, TypeInt::INT, T_INT, MemNode::unordered);
3250   Node* mask = intcon(modifier_mask);
3251   Node* bits = intcon(modifier_bits);
3252   Node* mbit = _gvn.transform(new AndINode(mods, mask));
3253   Node* cmp  = _gvn.transform(new CmpINode(mbit, bits));
3254   Node* bol  = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
3255   return generate_fair_guard(bol, region);
3256 }
3257 Node* LibraryCallKit::generate_interface_guard(Node* kls, RegionNode* region) {
3258   return generate_access_flags_guard(kls, JVM_ACC_INTERFACE, 0, region);
3259 }
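
// Worked example: generate_interface_guard passes modifier_mask ==
// JVM_ACC_INTERFACE and modifier_bits == 0, so the guard branch is taken
// exactly when the klass's JVM_ACC_INTERFACE flag is set, and control falls
// through for ordinary classes.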
3260 
3261 //-------------------------inline_native_Class_query-------------------
3262 bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) {
3263   const Type* return_type = TypeInt::BOOL;
3264   Node* prim_return_value = top();  // what happens if it's a primitive class?
3265   bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
3266   bool expect_prim = false;     // most of these guys expect to work on refs
3267 
3268   enum { _normal_path = 1, _prim_path = 2, PATH_LIMIT };
3269 
3270   Node* mirror = argument(0);
3271   Node* obj    = top();
3272 
3273   switch (id) {
3274   case vmIntrinsics::_isInstance:
3275     // nothing is an instance of a primitive type
3276     prim_return_value = intcon(0);
3277     obj = argument(1);
3278     break;
3279   case vmIntrinsics::_getModifiers:
3280     prim_return_value = intcon(JVM_ACC_ABSTRACT | JVM_ACC_FINAL | JVM_ACC_PUBLIC);
3281     assert(is_power_of_2((int)JVM_ACC_WRITTEN_FLAGS+1), "change next line");
3282     return_type = TypeInt::make(0, JVM_ACC_WRITTEN_FLAGS, Type::WidenMin);
3283     break;
3284   case vmIntrinsics::_isInterface:
3285     prim_return_value = intcon(0);
3286     break;
3287   case vmIntrinsics::_isArray:
3288     prim_return_value = intcon(0);
3289     expect_prim = true;  // cf. ObjectStreamClass.getClassSignature
3290     break;
3291   case vmIntrinsics::_isPrimitive:
3292     prim_return_value = intcon(1);
3293     expect_prim = true;  // obviously
3294     break;
3295   case vmIntrinsics::_getSuperclass:
3296     prim_return_value = null();
3297     return_type = TypeInstPtr::MIRROR->cast_to_ptr_type(TypePtr::BotPTR);
3298     break;
3299   case vmIntrinsics::_getClassAccessFlags:
3300     prim_return_value = intcon(JVM_ACC_ABSTRACT | JVM_ACC_FINAL | JVM_ACC_PUBLIC);
3301     return_type = TypeInt::INT;  // not bool!  6297094
3302     break;
3303   default:
3304     fatal_unexpected_iid(id);
3305     break;
3306   }
3307 
3308   const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr();
3309   if (mirror_con == NULL)  return false;  // cannot happen?
3310 
3311 #ifndef PRODUCT
3312   if (C->print_intrinsics() || C->print_inlining()) {
3313     ciType* k = mirror_con->java_mirror_type();
3314     if (k) {
3315       tty->print("Inlining %s on constant Class ", vmIntrinsics::name_at(intrinsic_id()));
3316       k->print_name();
3317       tty->cr();
3318     }
3319   }
3320 #endif
3321 
3322   // Null-check the mirror, and the mirror's klass ptr (in case it is a primitive).
3323   RegionNode* region = new RegionNode(PATH_LIMIT);
3324   record_for_igvn(region);
3325   PhiNode* phi = new PhiNode(region, return_type);
3326 
3327   // The mirror will never be null for Reflection.getClassAccessFlags; however,
3328   // it may be null for Class.isInstance or Class.getModifiers. Throw an NPE
3329   // if it is. See bug 4774291.
3330 
3331   // For Reflection.getClassAccessFlags(), the null check occurs in
3332   // the wrong place; see inline_unsafe_access(), above, for a similar
3333   // situation.
3334   mirror = null_check(mirror);
3335   // If mirror or obj is dead, only null-path is taken.
3336   if (stopped())  return true;
3337 
3338   if (expect_prim)  never_see_null = false;  // expect nulls (meaning prims)
3339 
3340   // Now load the mirror's klass metaobject, and null-check it.
3341   // Side-effects region with the control path if the klass is null.
3342   Node* kls = load_klass_from_mirror(mirror, never_see_null, region, _prim_path);
3343   // If kls is null, we have a primitive mirror.
3344   phi->init_req(_prim_path, prim_return_value);
3345   if (stopped()) { set_result(region, phi); return true; }
3346   bool safe_for_replace = (region->in(_prim_path) == top());
3347 
3348   Node* p;  // handy temp
3349   Node* null_ctl;
3350 
3351   // Now that we have the non-null klass, we can perform the real query.
3352   // For constant classes, the query will constant-fold in LoadNode::Value.
3353   Node* query_value = top();
3354   switch (id) {
3355   case vmIntrinsics::_isInstance:
3356     // nothing is an instance of a primitive type
3357     query_value = gen_instanceof(obj, kls, safe_for_replace);
3358     break;
3359 
3360   case vmIntrinsics::_getModifiers:
3361     p = basic_plus_adr(kls, in_bytes(Klass::modifier_flags_offset()));
3362     query_value = make_load(NULL, p, TypeInt::INT, T_INT, MemNode::unordered);
3363     break;
3364 
3365   case vmIntrinsics::_isInterface:
3366     // (To verify this code sequence, check the asserts in JVM_IsInterface.)
3367     if (generate_interface_guard(kls, region) != NULL)
3368       // A guard was added.  If the guard is taken, it was an interface.
3369       phi->add_req(intcon(1));
3370     // If we fall through, it's a plain class.
3371     query_value = intcon(0);
3372     break;
3373 
3374   case vmIntrinsics::_isArray:
3375     // (To verify this code sequence, check the asserts in JVM_IsArrayClass.)
3376     if (generate_array_guard(kls, region) != NULL)
3377       // A guard was added.  If the guard is taken, it was an array.
3378       phi->add_req(intcon(1));
3379     // If we fall through, it's a plain class.
3380     query_value = intcon(0);
3381     break;
3382 
3383   case vmIntrinsics::_isPrimitive:
3384     query_value = intcon(0); // "normal" path produces false
3385     break;
3386 
3387   case vmIntrinsics::_getSuperclass:
3388     // The rules here are somewhat unfortunate, but we can still do better
3389     // with random logic than with a JNI call.
3390     // Interfaces store null or Object as _super, but must report null.
3391     // Arrays store an intermediate super as _super, but must report Object.
3392     // Other types can report the actual _super.
3393     // (To verify this code sequence, check the asserts in JVM_IsInterface.)
3394     if (generate_interface_guard(kls, region) != NULL)
3395       // A guard was added.  If the guard is taken, it was an interface.
3396       phi->add_req(null());
3397     if (generate_array_guard(kls, region) != NULL)
3398       // A guard was added.  If the guard is taken, it was an array.
3399       phi->add_req(makecon(TypeInstPtr::make(env()->Object_klass()->java_mirror())));
3400     // If we fall through, it's a plain class.  Get its _super.
3401     p = basic_plus_adr(kls, in_bytes(Klass::super_offset()));
3402     kls = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeRawPtr::BOTTOM, TypeKlassPtr::OBJECT_OR_NULL));
3403     null_ctl = top();
3404     kls = null_check_oop(kls, &null_ctl);
3405     if (null_ctl != top()) {
3406       // If the guard is taken, Object's superclass is null (both klass and mirror).
3407       region->add_req(null_ctl);
3408       phi   ->add_req(null());
3409     }
3410     if (!stopped()) {
3411       query_value = load_mirror_from_klass(kls);
3412     }
3413     break;
3414 
3415   case vmIntrinsics::_getClassAccessFlags:
3416     p = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
3417     query_value = make_load(NULL, p, TypeInt::INT, T_INT, MemNode::unordered);
3418     break;
3419 
3420   default:
3421     fatal_unexpected_iid(id);
3422     break;
3423   }
3424 
3425   // Fall-through is the normal case of a query to a real class.
3426   phi->init_req(1, query_value);
3427   region->init_req(1, control());
3428 
3429   C->set_has_split_ifs(true); // Has chance for split-if optimization
3430   set_result(region, phi);
3431   return true;
3432 }
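
// Examples for the _getSuperclass rules above (plain Java semantics):
//   Runnable.class.getSuperclass() -> null          (interface guard taken)
//   int[].class.getSuperclass()    -> Object.class  (array guard taken)
//   String.class.getSuperclass()   -> Object.class  (fall-through, loads _super)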
3433 
3434 //-------------------------inline_Class_cast-------------------
3435 bool LibraryCallKit::inline_Class_cast() {
3436   Node* mirror = argument(0); // Class
3437   Node* obj    = argument(1);
3438   const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr();
3439   if (mirror_con == NULL) {
3440     return false;  // dead path (mirror->is_top()).
3441   }
3442   if (obj == NULL || obj->is_top()) {
3443     return false;  // dead path
3444   }
3445   const TypeOopPtr* tp = _gvn.type(obj)->isa_oopptr();
3446 
3447   // First, see if Class.cast() can be folded statically.
3448   // java_mirror_type() returns non-null for compile-time Class constants.
3449   ciType* tm = mirror_con->java_mirror_type();
3450   if (tm != NULL && tm->is_klass() &&
3451       tp != NULL && tp->klass() != NULL) {
3452     if (!tp->klass()->is_loaded()) {
3453       // Don't use intrinsic when class is not loaded.
3454       return false;
3455     } else {
3456       int static_res = C->static_subtype_check(tm->as_klass(), tp->klass());
3457       if (static_res == Compile::SSC_always_true) {
3458         // isInstance() is true - fold the code.
3459         set_result(obj);
3460         return true;
3461       } else if (static_res == Compile::SSC_always_false) {
3462         // Don't use intrinsic, have to throw ClassCastException.
3463         // If the reference is null, the non-intrinsic bytecode will
3464         // be optimized appropriately.
3465         return false;
3466       }
3467     }
3468   }
3469 
3470   // Bailout intrinsic and do normal inlining if exception path is frequent.
3471   if (too_many_traps(Deoptimization::Reason_intrinsic)) {
3472     return false;
3473   }
3474 
3475   // Generate dynamic checks.
3476   // Class.cast() is java implementation of _checkcast bytecode.
3477   // Do checkcast (Parse::do_checkcast()) optimizations here.
3478 
3479   mirror = null_check(mirror);
3480   // If mirror is dead, only null-path is taken.
3481   if (stopped()) {
3482     return true;
3483   }
3484 
3485   // Not-subtype or the mirror's klass ptr is NULL (in case it is a primitive).
3486   enum { _bad_type_path = 1, _prim_path = 2, PATH_LIMIT };
3487   RegionNode* region = new RegionNode(PATH_LIMIT);
3488   record_for_igvn(region);
3489 
3490   // Now load the mirror's klass metaobject, and null-check it.
3491   // If kls is null, we have a primitive mirror and
3492   // nothing is an instance of a primitive type.
3493   Node* kls = load_klass_from_mirror(mirror, false, region, _prim_path);
3494 
3495   Node* res = top();
3496   if (!stopped()) {
3497     Node* bad_type_ctrl = top();
3498     // Do checkcast optimizations.
3499     res = gen_checkcast(obj, kls, &bad_type_ctrl);
3500     region->init_req(_bad_type_path, bad_type_ctrl);
3501   }
3502   if (region->in(_prim_path) != top() ||
3503       region->in(_bad_type_path) != top()) {
3504     // Let Interpreter throw ClassCastException.
3505     PreserveJVMState pjvms(this);
3506     set_control(_gvn.transform(region));
3507     uncommon_trap(Deoptimization::Reason_intrinsic,
3508                   Deoptimization::Action_maybe_recompile);
3509   }
3510   if (!stopped()) {
3511     set_result(res);
3512   }
3513   return true;
3514 }
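
// Static-fold examples for the checks above, assuming the operand's type is
// known exactly to the compiler:
//   CharSequence.class.cast(aString) -> SSC_always_true,  folds to the identity
//   Integer.class.cast(aString)      -> SSC_always_false, the intrinsic bails out
//                                       and the bytecode throws ClassCastException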
3515 
3516 
3517 //--------------------------inline_native_subtype_check------------------------
3518 // This intrinsic takes the JNI calls out of the heart of
3519 // UnsafeFieldAccessorImpl.set, which improves Field.set, readObject, etc.
3520 bool LibraryCallKit::inline_native_subtype_check() {
3521   // Pull both arguments off the stack.
3522   Node* args[2];                // two java.lang.Class mirrors: superc, subc
3523   args[0] = argument(0);
3524   args[1] = argument(1);
3525   Node* klasses[2];             // corresponding Klasses: superk, subk
3526   klasses[0] = klasses[1] = top();
3527 
3528   enum {
3529     // A full decision tree on {superc is prim, subc is prim}:
3530     _prim_0_path = 1,           // {P,N} => false
3531                                 // {P,P} & superc!=subc => false
3532     _prim_same_path,            // {P,P} & superc==subc => true
3533     _prim_1_path,               // {N,P} => false
3534     _ref_subtype_path,          // {N,N} & subtype check wins => true
3535     _both_ref_path,             // {N,N} & subtype check loses => false
3536     PATH_LIMIT
3537   };
3538 
3539   RegionNode* region = new RegionNode(PATH_LIMIT);
3540   Node*       phi    = new PhiNode(region, TypeInt::BOOL);
3541   record_for_igvn(region);
3542 
3543   const TypePtr* adr_type = TypeRawPtr::BOTTOM;   // memory type of loads
3544   const TypeKlassPtr* kls_type = TypeKlassPtr::OBJECT_OR_NULL;
3545   int class_klass_offset = java_lang_Class::klass_offset_in_bytes();
3546 
3547   // First null-check both mirrors and load each mirror's klass metaobject.
3548   int which_arg;
3549   for (which_arg = 0; which_arg <= 1; which_arg++) {
3550     Node* arg = args[which_arg];
3551     arg = null_check(arg);
3552     if (stopped())  break;
3553     args[which_arg] = arg;
3554 
3555     Node* p = basic_plus_adr(arg, class_klass_offset);
3556     Node* kls = LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, adr_type, kls_type);
3557     klasses[which_arg] = _gvn.transform(kls);
3558   }
3559 
3560   // Having loaded both klasses, test each for null.
3561   bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
3562   for (which_arg = 0; which_arg <= 1; which_arg++) {
3563     Node* kls = klasses[which_arg];
3564     Node* null_ctl = top();
3565     kls = null_check_oop(kls, &null_ctl, never_see_null);
3566     int prim_path = (which_arg == 0 ? _prim_0_path : _prim_1_path);
3567     region->init_req(prim_path, null_ctl);
3568     if (stopped())  break;
3569     klasses[which_arg] = kls;
3570   }
3571 
3572   if (!stopped()) {
3573     // now we have two reference types, in klasses[0..1]
3574     Node* subk   = klasses[1];  // the argument to isAssignableFrom
3575     Node* superk = klasses[0];  // the receiver
3576     region->set_req(_both_ref_path, gen_subtype_check(subk, superk));
3577     // now we have a successful reference subtype check
3578     region->set_req(_ref_subtype_path, control());
3579   }
3580 
3581   // If both operands are primitive (both klasses null), then
3582   // we must return true when they are identical primitives.
3583   // It is convenient to test this after the first null klass check.
3584   set_control(region->in(_prim_0_path)); // go back to first null check
3585   if (!stopped()) {
3586     // Since superc is primitive, make a guard for the superc==subc case.
3587     Node* cmp_eq = _gvn.transform(new CmpPNode(args[0], args[1]));
3588     Node* bol_eq = _gvn.transform(new BoolNode(cmp_eq, BoolTest::eq));
3589     generate_guard(bol_eq, region, PROB_FAIR);
3590     if (region->req() == PATH_LIMIT+1) {
3591       // A guard was added.  If the added guard is taken, superc==subc.
3592       region->swap_edges(PATH_LIMIT, _prim_same_path);
3593       region->del_req(PATH_LIMIT);
3594     }
3595     region->set_req(_prim_0_path, control()); // Not equal after all.
3596   }
3597 
3598   // these are the only paths that produce 'true':
3599   phi->set_req(_prim_same_path,   intcon(1));
3600   phi->set_req(_ref_subtype_path, intcon(1));
3601 
3602   // pull together the cases:
3603   assert(region->req() == PATH_LIMIT, "sane region");
3604   for (uint i = 1; i < region->req(); i++) {
3605     Node* ctl = region->in(i);
3606     if (ctl == NULL || ctl == top()) {
3607       region->set_req(i, top());
3608       phi   ->set_req(i, top());
3609     } else if (phi->in(i) == NULL) {
3610       phi->set_req(i, intcon(0)); // all other paths produce 'false'
3611     }
3612   }
3613 
3614   set_control(_gvn.transform(region));
3615   set_result(_gvn.transform(phi));
3616   return true;
3617 }
3618 
3619 //---------------------generate_array_guard_common------------------------
3620 Node* LibraryCallKit::generate_array_guard_common(Node* kls, RegionNode* region,
3621                                                   bool obj_array, bool not_array) {
3622 
3623   if (stopped()) {
3624     return NULL;
3625   }
3626 
3627   // If obj_array/not_array==false/false:
3628   // Branch around if the given klass is in fact an array (either obj or prim).
3629   // If obj_array/not_array==false/true:
3630   // Branch around if the given klass is not an array klass of any kind.
3631   // If obj_array/not_array==true/true:
3632   // Branch around if the kls is not an oop array (kls is int[], String, etc.)
3633   // If obj_array/not_array==true/false:
3634   // Branch around if the kls is an oop array (Object[] or subtype)
3635   //
3636   // Like generate_guard, adds a new path onto the region.
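  // As a sketch, the four cases above in truth-table form ("branch" meaning
  // a new path is added onto the region):
  //   obj_array  not_array | branch taken when kls is...
  //   false      false     | any array klass (objArray or typeArray)
  //   true       false     | an oop array klass
  //   false      true      | not an array klass of any kind
  //   true       true      | anything other than an oop array klass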
3637   jint  layout_con = 0;
3638   Node* layout_val = get_layout_helper(kls, layout_con);
3639   if (layout_val == NULL) {
3640     bool query = (obj_array
3641                   ? Klass::layout_helper_is_objArray(layout_con)
3642                   : Klass::layout_helper_is_array(layout_con));
3643     if (query == not_array) {
3644       return NULL;                       // never a branch
3645     } else {                             // always a branch
3646       Node* always_branch = control();
3647       if (region != NULL)
3648         region->add_req(always_branch);
3649       set_control(top());
3650       return always_branch;
3651     }
3652   }
3653   // Now test the correct condition.
3654   jint  nval = (obj_array
3655                 ? ((jint)Klass::_lh_array_tag_type_value
3656                    <<    Klass::_lh_array_tag_shift)
3657                 : Klass::_lh_neutral_value);
3658   Node* cmp = _gvn.transform(new CmpINode(layout_val, intcon(nval)));
3659   BoolTest::mask btest = BoolTest::lt;  // correct for testing is_[obj]array
3660   // invert the test if we are looking for a non-array
3661   if (not_array)  btest = BoolTest(btest).negate();
3662   Node* bol = _gvn.transform(new BoolNode(cmp, btest));
3663   return generate_fair_guard(bol, region);
3664 }
3665 
3666 
3667 //-----------------------inline_native_newArray--------------------------
3668 // private static native Object java.lang.reflect.Array.newArray(Class<?> componentType, int length);
3669 bool LibraryCallKit::inline_native_newArray() {
3670   Node* mirror    = argument(0);
3671   Node* count_val = argument(1);
3672 
3673   mirror = null_check(mirror);
3674   // If mirror is dead, only null-path is taken.
3675   if (stopped())  return true;
3676 
3677   enum { _normal_path = 1, _slow_path = 2, PATH_LIMIT };
3678   RegionNode* result_reg = new RegionNode(PATH_LIMIT);
3679   PhiNode*    result_val = new PhiNode(result_reg, TypeInstPtr::NOTNULL);
3680   PhiNode*    result_io  = new PhiNode(result_reg, Type::ABIO);
3681   PhiNode*    result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
3682 
3683   bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
3684   Node* klass_node = load_array_klass_from_mirror(mirror, never_see_null,
3685                                                   result_reg, _slow_path);
3686   Node* normal_ctl   = control();
3687   Node* no_array_ctl = result_reg->in(_slow_path);
3688 
3689   // Generate code for the slow case.  We make a call to newArray().
3690   set_control(no_array_ctl);
3691   if (!stopped()) {
3692     // Either the input type is void.class, or else the
3693     // array klass has not yet been cached.  Either the
3694     // ensuing call will throw an exception, or else it
3695     // will cache the array klass for next time.
3696     PreserveJVMState pjvms(this);
3697     CallJavaNode* slow_call = generate_method_call_static(vmIntrinsics::_newArray);
3698     Node* slow_result = set_results_for_java_call(slow_call);
3699     // this->control() comes from set_results_for_java_call
3700     result_reg->set_req(_slow_path, control());
3701     result_val->set_req(_slow_path, slow_result);
3702     result_io ->set_req(_slow_path, i_o());
3703     result_mem->set_req(_slow_path, reset_memory());
3704   }
3705 
3706   set_control(normal_ctl);
3707   if (!stopped()) {
3708     // Normal case:  The array type has been cached in the java.lang.Class.
3709     // The following call works fine even if the array type is polymorphic.
3710     // It could be a dynamic mix of int[], boolean[], Object[], etc.
3711     Node* obj = new_array(klass_node, count_val, 0);  // no arguments to push
3712     result_reg->init_req(_normal_path, control());
3713     result_val->init_req(_normal_path, obj);
3714     result_io ->init_req(_normal_path, i_o());
3715     result_mem->init_req(_normal_path, reset_memory());
3716   }
3717 
3718   // Return the combined state.
3719   set_i_o(        _gvn.transform(result_io)  );
3720   set_all_memory( _gvn.transform(result_mem));
3721 
3722   C->set_has_split_ifs(true); // Has chance for split-if optimization
3723   set_result(result_reg, result_val);
3724   return true;
3725 }
3726 
3727 //----------------------inline_native_getLength--------------------------
3728 // public static native int java.lang.reflect.Array.getLength(Object array);
3729 bool LibraryCallKit::inline_native_getLength() {
3730   if (too_many_traps(Deoptimization::Reason_intrinsic))  return false;
3731 
3732   Node* array = null_check(argument(0));
3733   // If array is dead, only null-path is taken.
3734   if (stopped())  return true;
3735 
3736   // Deoptimize if it is a non-array.
3737   Node* non_array = generate_non_array_guard(load_object_klass(array), NULL);
3738 
3739   if (non_array != NULL) {
3740     PreserveJVMState pjvms(this);
3741     set_control(non_array);
3742     uncommon_trap(Deoptimization::Reason_intrinsic,
3743                   Deoptimization::Action_maybe_recompile);
3744   }
3745 
3746   // If control is dead, only non-array-path is taken.
3747   if (stopped())  return true;
3748 
3749   // This works fine even if the array type is polymorphic.
3750   // It could be a dynamic mix of int[], boolean[], Object[], etc.
3751   Node* result = load_array_length(array);
3752 
3753   C->set_has_split_ifs(true);  // Has chance for split-if optimization
3754   set_result(result);
3755   return true;
3756 }
3757 
3758 //------------------------inline_array_copyOf----------------------------
3759 // public static <T,U> T[] java.util.Arrays.copyOf(     U[] original, int newLength,         Class<? extends T[]> newType);
3760 // public static <T,U> T[] java.util.Arrays.copyOfRange(U[] original, int from,      int to, Class<? extends T[]> newType);
3761 bool LibraryCallKit::inline_array_copyOf(bool is_copyOfRange) {
3762   if (too_many_traps(Deoptimization::Reason_intrinsic))  return false;
3763 
3764   // Get the arguments.
3765   Node* original          = argument(0);
3766   Node* start             = is_copyOfRange? argument(1): intcon(0);
3767   Node* end               = is_copyOfRange? argument(2): argument(1);
3768   Node* array_type_mirror = is_copyOfRange? argument(3): argument(2);
3769 
3770   Node* newcopy = NULL;
3771 
3772   // Set the original stack and the reexecute bit for the interpreter to reexecute
3773   // the bytecode that invokes Arrays.copyOf if deoptimization happens.
3774   { PreserveReexecuteState preexecs(this);
3775     jvms()->set_should_reexecute(true);
3776 
3777     array_type_mirror = null_check(array_type_mirror);
3778     original          = null_check(original);
3779 
3780     // Check if a null path was taken unconditionally.
3781     if (stopped())  return true;
3782 
3783     Node* orig_length = load_array_length(original);
3784 
3785     Node* klass_node = load_klass_from_mirror(array_type_mirror, false, NULL, 0);
3786     klass_node = null_check(klass_node);
3787 
3788     RegionNode* bailout = new RegionNode(1);
3789     record_for_igvn(bailout);
3790 
3791     // Despite the generic type of Arrays.copyOf, the mirror might be int, int[], etc.
3792     // Bail out if that is so.
3793     Node* not_objArray = generate_non_objArray_guard(klass_node, bailout);
3794     if (not_objArray != NULL) {
3795       // Improve the klass node's type from the new optimistic assumption:
3796       ciKlass* ak = ciArrayKlass::make(env()->Object_klass());
3797       const Type* akls = TypeKlassPtr::make(TypePtr::NotNull, ak, 0/*offset*/);
3798       Node* cast = new CastPPNode(klass_node, akls);
3799       cast->init_req(0, control());
3800       klass_node = _gvn.transform(cast);
3801     }
3802 
3803     // Bail out if either start or end is negative.
3804     generate_negative_guard(start, bailout, &start);
3805     generate_negative_guard(end,   bailout, &end);
3806 
3807     Node* length = end;
3808     if (_gvn.type(start) != TypeInt::ZERO) {
3809       length = _gvn.transform(new SubINode(end, start));
3810     }
3811 
3812     // Bail out if length is negative.
3813     // Without this, new_array would throw a
3814     // NegativeArraySizeException, but an IllegalArgumentException
3815     // is what should be thrown.
3816     generate_negative_guard(length, bailout, &length);
3817 
3818     if (bailout->req() > 1) {
3819       PreserveJVMState pjvms(this);
3820       set_control(_gvn.transform(bailout));
3821       uncommon_trap(Deoptimization::Reason_intrinsic,
3822                     Deoptimization::Action_maybe_recompile);
3823     }
3824 
3825     if (!stopped()) {
3826       // How many elements will we copy from the original?
3827       // The answer is MinI(orig_length - start, length).
3828       Node* orig_tail = _gvn.transform(new SubINode(orig_length, start));
3829       Node* moved = generate_min_max(vmIntrinsics::_min, orig_tail, length);
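      // Worked example (illustrative): Arrays.copyOf(new Integer[4], 6,
      // Number[].class) reaches here with orig_length == 4, start == 0 and
      // length == 6, so orig_tail == 4 and moved == min(4, 6) == 4; the
      // last two slots of the new array remain null.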
3830 
3831       // Generate a direct call to the right arraycopy function(s).
3832       // We know the copy is disjoint but we might not know if the
3833       // oop stores need checking.
3834       // Extreme case:  Arrays.copyOf((Integer[])x, 10, String[].class).
3835       // This will fail a store-check if x contains any non-nulls.
3836 
3837       // ArrayCopyNode::Ideal may transform the ArrayCopyNode to
3838       // loads/stores but it is legal only if we're sure the
3839       // Arrays.copyOf would succeed. So we need all input arguments
3840       // to the copyOf to be validated, including that the copy to the
3841       // new array won't trigger an ArrayStoreException. That subtype
3842       // check can be optimized if we know something about the type of
3843       // the input array from type speculation.
3844       if (_gvn.type(klass_node)->singleton()) {
3845         ciKlass* subk   = _gvn.type(load_object_klass(original))->is_klassptr()->klass();
3846         ciKlass* superk = _gvn.type(klass_node)->is_klassptr()->klass();
3847 
3848         int test = C->static_subtype_check(superk, subk);
3849         if (test != Compile::SSC_always_true && test != Compile::SSC_always_false) {
3850           const TypeOopPtr* t_original = _gvn.type(original)->is_oopptr();
3851           if (t_original->speculative_type() != NULL) {
3852             original = maybe_cast_profiled_obj(original, t_original->speculative_type(), true);
3853           }
3854         }
3855       }
3856 
3857       bool validated = false;
3858       // Reason_class_check rather than Reason_intrinsic because we
3859       // want to intrinsify even if this traps.
3860       if (!too_many_traps(Deoptimization::Reason_class_check)) {
3861         Node* not_subtype_ctrl = gen_subtype_check(load_object_klass(original),
3862                                                    klass_node);
3863 
3864         if (not_subtype_ctrl != top()) {
3865           PreserveJVMState pjvms(this);
3866           set_control(not_subtype_ctrl);
3867           uncommon_trap(Deoptimization::Reason_class_check,
3868                         Deoptimization::Action_make_not_entrant);
3869           assert(stopped(), "Should be stopped");
3870         }
3871         validated = true;
3872       }
3873 
3874       if (!stopped()) {
3875         newcopy = new_array(klass_node, length, 0);  // no arguments to push
3876 
3877         ArrayCopyNode* ac = ArrayCopyNode::make(this, true, original, start, newcopy, intcon(0), moved, true,
3878                                                 load_object_klass(original), klass_node);
3879         if (!is_copyOfRange) {
3880           ac->set_copyof(validated);
3881         } else {
3882           ac->set_copyofrange(validated);
3883         }
3884         Node* n = _gvn.transform(ac);
3885         if (n == ac) {
3886           ac->connect_outputs(this);
3887         } else {
3888           assert(validated, "shouldn't transform unless all arguments are validated");
3889           set_all_memory(n);
3890         }
3891       }
3892     }
3893   } // original reexecute is set back here
3894 
3895   C->set_has_split_ifs(true); // Has chance for split-if optimization
3896   if (!stopped()) {
3897     set_result(newcopy);
3898   }
3899   return true;
3900 }
3901 
3902 
3903 //----------------------generate_virtual_guard---------------------------
3904 // Helper for hashCode and clone.  Peeks inside the vtable to avoid a call.
3905 Node* LibraryCallKit::generate_virtual_guard(Node* obj_klass,
3906                                              RegionNode* slow_region) {
3907   ciMethod* method = callee();
3908   int vtable_index = method->vtable_index();
3909   assert(vtable_index >= 0 || vtable_index == Method::nonvirtual_vtable_index,
3910          "bad index %d", vtable_index);
3911   // Get the Method* out of the appropriate vtable entry.
3912   int entry_offset  = (InstanceKlass::vtable_start_offset() +
3913                      vtable_index*vtableEntry::size()) * wordSize +
3914                      vtableEntry::method_offset_in_bytes();
3915   Node* entry_addr  = basic_plus_adr(obj_klass, entry_offset);
3916   Node* target_call = make_load(NULL, entry_addr, TypePtr::NOTNULL, T_ADDRESS, MemNode::unordered);
3917 
3918   // Compare the target method with the expected method (e.g., Object.hashCode).
3919   const TypePtr* native_call_addr = TypeMetadataPtr::make(method);
3920 
3921   Node* native_call = makecon(native_call_addr);
3922   Node* chk_native  = _gvn.transform(new CmpPNode(target_call, native_call));
3923   Node* test_native = _gvn.transform(new BoolNode(chk_native, BoolTest::ne));
3924 
3925   return generate_slow_guard(test_native, slow_region);
3926 }
3927 
3928 //-----------------------generate_method_call----------------------------
3929 // Use generate_method_call to make a slow-call to the real
3930 // method if the fast path fails.  An alternative would be to
3931 // use a stub like OptoRuntime::slow_arraycopy_Java.
3932 // This only works for expanding the current library call,
3933 // not another intrinsic.  (E.g., don't use this for making an
3934 // arraycopy call inside of the copyOf intrinsic.)
3935 CallJavaNode*
3936 LibraryCallKit::generate_method_call(vmIntrinsics::ID method_id, bool is_virtual, bool is_static) {
3937   // When compiling the intrinsic method itself, do not use this technique.
3938   guarantee(callee() != C->method(), "cannot make slow-call to self");
3939 
3940   ciMethod* method = callee();
3941   // ensure the JVMS we have will be correct for this call
3942   guarantee(method_id == method->intrinsic_id(), "must match");
3943 
3944   const TypeFunc* tf = TypeFunc::make(method);
3945   CallJavaNode* slow_call;
3946   if (is_static) {
3947     assert(!is_virtual, "");
3948     slow_call = new CallStaticJavaNode(C, tf,
3949                            SharedRuntime::get_resolve_static_call_stub(),
3950                            method, bci());
3951   } else if (is_virtual) {
3952     null_check_receiver();
3953     int vtable_index = Method::invalid_vtable_index;
3954     if (UseInlineCaches) {
3955       // Suppress the vtable call
3956     } else {
3957       // hashCode and clone are not miranda methods,
3958       // so the vtable index is fixed.
3959       // No need to use the linkResolver to get it.
3960        vtable_index = method->vtable_index();
3961        assert(vtable_index >= 0 || vtable_index == Method::nonvirtual_vtable_index,
3962               "bad index %d", vtable_index);
3963     }
3964     slow_call = new CallDynamicJavaNode(tf,
3965                           SharedRuntime::get_resolve_virtual_call_stub(),
3966                           method, vtable_index, bci());
3967   } else {  // neither virtual nor static:  opt_virtual
3968     null_check_receiver();
3969     slow_call = new CallStaticJavaNode(C, tf,
3970                                 SharedRuntime::get_resolve_opt_virtual_call_stub(),
3971                                 method, bci());
3972     slow_call->set_optimized_virtual(true);
3973   }
3974   set_arguments_for_java_call(slow_call);
3975   set_edges_for_java_call(slow_call);
3976   return slow_call;
3977 }
3978 
3979 
3980 /**
3981  * Build special case code for calls to hashCode on an object. This call may
3982  * be virtual (invokevirtual) or bound (invokespecial). For each case we generate
3983  * slightly different code.
3984  */
3985 bool LibraryCallKit::inline_native_hashcode(bool is_virtual, bool is_static) {
3986   assert(is_static == callee()->is_static(), "correct intrinsic selection");
3987   assert(!(is_virtual && is_static), "either virtual, special, or static");
3988 
3989   enum { _slow_path = 1, _fast_path, _null_path, PATH_LIMIT };
3990 
3991   RegionNode* result_reg = new RegionNode(PATH_LIMIT);
3992   PhiNode*    result_val = new PhiNode(result_reg, TypeInt::INT);
3993   PhiNode*    result_io  = new PhiNode(result_reg, Type::ABIO);
3994   PhiNode*    result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
3995   Node* obj = NULL;
3996   if (!is_static) {
3997     // Check for hashing null object
3998     obj = null_check_receiver();
3999     if (stopped())  return true;        // unconditionally null
4000     result_reg->init_req(_null_path, top());
4001     result_val->init_req(_null_path, top());
4002   } else {
4003     // Do a null check, and return zero if null.
4004     // System.identityHashCode(null) == 0
4005     obj = argument(0);
4006     Node* null_ctl = top();
4007     obj = null_check_oop(obj, &null_ctl);
4008     result_reg->init_req(_null_path, null_ctl);
4009     result_val->init_req(_null_path, _gvn.intcon(0));
4010   }
4011 
4012   // Unconditionally null?  Then return right away.
4013   if (stopped()) {
4014     set_control( result_reg->in(_null_path));
4015     if (!stopped())
4016       set_result(result_val->in(_null_path));
4017     return true;
4018   }
4019 
4020   // We only go to the fast case code if we pass a number of guards.  The
4021   // paths which do not pass are accumulated in the slow_region.
4022   RegionNode* slow_region = new RegionNode(1);
4023   record_for_igvn(slow_region);
4024 
4025   // If this is a virtual call, we generate a funny guard.  We pull out
4026   // the vtable entry corresponding to hashCode() from the target object.
4027   // If the target method which we are calling happens to be the native
4028   // Object hashCode() method, we pass the guard.  We do not need this
4029   // guard for non-virtual calls -- the caller is known to be the native
4030   // Object hashCode().
4031   if (is_virtual) {
4032     // After null check, get the object's klass.
4033     Node* obj_klass = load_object_klass(obj);
4034     generate_virtual_guard(obj_klass, slow_region);
4035   }
4036 
4037   // Get the header out of the object, use LoadMarkNode when available
4038   Node* header_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
4039   // The control of the load must be NULL. Otherwise, the load can move before
4040   // the null check after castPP removal.
4041   Node* no_ctrl = NULL;
4042   Node* header = make_load(no_ctrl, header_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
4043 
4044   // Test the header to see if it is unlocked.
4045   Node *lock_mask      = _gvn.MakeConX(markOopDesc::biased_lock_mask_in_place);
4046   Node *lmasked_header = _gvn.transform(new AndXNode(header, lock_mask));
4047   Node *unlocked_val   = _gvn.MakeConX(markOopDesc::unlocked_value);
4048   Node *chk_unlocked   = _gvn.transform(new CmpXNode( lmasked_header, unlocked_val));
4049   Node *test_unlocked  = _gvn.transform(new BoolNode( chk_unlocked, BoolTest::ne));
4050 
4051   generate_slow_guard(test_unlocked, slow_region);
4052 
4053   // Get the hash value and check to see that it has been properly assigned.
4054   // We depend on hash_mask being at most 32 bits and avoid the use of
4055   // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
4056   // vm: see markOop.hpp.
4057   Node *hash_mask      = _gvn.intcon(markOopDesc::hash_mask);
4058   Node *hash_shift     = _gvn.intcon(markOopDesc::hash_shift);
4059   Node *hshifted_header= _gvn.transform(new URShiftXNode(header, hash_shift));
4060   // This hack lets the hash bits live anywhere in the mark object now, as long
4061   // as the shift drops the relevant bits into the low 32 bits.  Note that
4062   // Java spec says that HashCode is an int so there's no point in capturing
4063   // an 'X'-sized hashcode (32 in 32-bit build or 64 in 64-bit build).
4064   hshifted_header      = ConvX2I(hshifted_header);
4065   Node *hash_val       = _gvn.transform(new AndINode(hshifted_header, hash_mask));
4066 
4067   Node *no_hash_val    = _gvn.intcon(markOopDesc::no_hash);
4068   Node *chk_assigned   = _gvn.transform(new CmpINode( hash_val, no_hash_val));
4069   Node *test_assigned  = _gvn.transform(new BoolNode( chk_assigned, BoolTest::eq));
4070 
4071   generate_slow_guard(test_assigned, slow_region);
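  // The fast path built so far, as pseudocode (mark word layout as described
  // in markOop.hpp; this is a sketch, not the emitted IR):
  //   if ((header & biased_lock_mask) != unlocked_value)  goto slow_path;
  //   hash = (int)(header >>> hash_shift) & hash_mask;
  //   if (hash == no_hash)                                 goto slow_path;
  //   // otherwise fall through with hash as the result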
4072 
4073   Node* init_mem = reset_memory();
4074   // fill in the rest of the null path:
4075   result_io ->init_req(_null_path, i_o());
4076   result_mem->init_req(_null_path, init_mem);
4077 
4078   result_val->init_req(_fast_path, hash_val);
4079   result_reg->init_req(_fast_path, control());
4080   result_io ->init_req(_fast_path, i_o());
4081   result_mem->init_req(_fast_path, init_mem);
4082 
4083   // Generate code for the slow case.  We make a call to hashCode().
4084   set_control(_gvn.transform(slow_region));
4085   if (!stopped()) {
4086     // No need for PreserveJVMState, because we're using up the present state.
4087     set_all_memory(init_mem);
4088     vmIntrinsics::ID hashCode_id = is_static ? vmIntrinsics::_identityHashCode : vmIntrinsics::_hashCode;
4089     CallJavaNode* slow_call = generate_method_call(hashCode_id, is_virtual, is_static);
4090     Node* slow_result = set_results_for_java_call(slow_call);
4091     // this->control() comes from set_results_for_java_call
4092     result_reg->init_req(_slow_path, control());
4093     result_val->init_req(_slow_path, slow_result);
4094     result_io  ->set_req(_slow_path, i_o());
4095     result_mem ->set_req(_slow_path, reset_memory());
4096   }
4097 
4098   // Return the combined state.
4099   set_i_o(        _gvn.transform(result_io)  );
4100   set_all_memory( _gvn.transform(result_mem));
4101 
4102   set_result(result_reg, result_val);
4103   return true;
4104 }
4105 
4106 //---------------------------inline_native_getClass----------------------------
4107 // public final native Class<?> java.lang.Object.getClass();
4108 //
4109 // Build special case code for calls to getClass on an object.
4110 bool LibraryCallKit::inline_native_getClass() {
4111   Node* obj = null_check_receiver();
4112   if (stopped())  return true;
4113   set_result(load_mirror_from_klass(load_object_klass(obj)));
4114   return true;
4115 }
4116 
4117 //-----------------inline_native_Reflection_getCallerClass---------------------
4118 // public static native Class<?> sun.reflect.Reflection.getCallerClass();
4119 //
4120 // In the presence of deep enough inlining, getCallerClass() becomes a no-op.
4121 //
4122 // NOTE: This code must perform the same logic as JVM_GetCallerClass
4123 // in that it must skip particular security frames and checks for
4124 // caller sensitive methods.
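// Example (illustrative): if A.f() calls B.g(), and the @CallerSensitive
// method B.g() calls Reflection.getCallerClass(), the expected answer is
// A.class: frame 0 is getCallerClass itself, frame 1 is B.g, and frame 2
// is the first frame not ignored by the security stack walk.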
4125 bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
4126 #ifndef PRODUCT
4127   if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
4128     tty->print_cr("Attempting to inline sun.reflect.Reflection.getCallerClass");
4129   }
4130 #endif
4131 
4132   if (!jvms()->has_method()) {
4133 #ifndef PRODUCT
4134     if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
4135       tty->print_cr("  Bailing out because intrinsic was inlined at top level");
4136     }
4137 #endif
4138     return false;
4139   }
4140 
4141   // Walk back up the JVM state to find the caller at the required
4142   // depth.
4143   JVMState* caller_jvms = jvms();
4144 
4145   // Cf. JVM_GetCallerClass
4146   // NOTE: Start the loop at depth 1 because the current JVM state does
4147   // not include the Reflection.getCallerClass() frame.
4148   for (int n = 1; caller_jvms != NULL; caller_jvms = caller_jvms->caller(), n++) {
4149     ciMethod* m = caller_jvms->method();
4150     switch (n) {
4151     case 0:
4152       fatal("current JVM state does not include the Reflection.getCallerClass frame");
4153       break;
4154     case 1:
4155       // Frame 0 and 1 must be caller sensitive (see JVM_GetCallerClass).
4156       if (!m->caller_sensitive()) {
4157 #ifndef PRODUCT
4158         if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
4159           tty->print_cr("  Bailing out: CallerSensitive annotation expected at frame %d", n);
4160         }
4161 #endif
4162         return false;  // bail-out; let JVM_GetCallerClass do the work
4163       }
4164       break;
4165     default:
4166       if (!m->is_ignored_by_security_stack_walk()) {
4167         // We have reached the desired frame; return the holder class.
4168         // Acquire method holder as java.lang.Class and push as constant.
4169         ciInstanceKlass* caller_klass = caller_jvms->method()->holder();
4170         ciInstance* caller_mirror = caller_klass->java_mirror();
4171         set_result(makecon(TypeInstPtr::make(caller_mirror)));
4172 
4173 #ifndef PRODUCT
4174         if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
4175           tty->print_cr("  Succeeded: caller = %d) %s.%s, JVMS depth = %d", n, caller_klass->name()->as_utf8(), caller_jvms->method()->name()->as_utf8(), jvms()->depth());
4176           tty->print_cr("  JVM state at this point:");
4177           for (int i = jvms()->depth(), n = 1; i >= 1; i--, n++) {
4178             ciMethod* m = jvms()->of_depth(i)->method();
4179             tty->print_cr("   %d) %s.%s", n, m->holder()->name()->as_utf8(), m->name()->as_utf8());
4180           }
4181         }
4182 #endif
4183         return true;
4184       }
4185       break;
4186     }
4187   }
4188 
4189 #ifndef PRODUCT
4190   if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
4191     tty->print_cr("  Bailing out because caller depth exceeded inlining depth = %d", jvms()->depth());
4192     tty->print_cr("  JVM state at this point:");
4193     for (int i = jvms()->depth(), n = 1; i >= 1; i--, n++) {
4194       ciMethod* m = jvms()->of_depth(i)->method();
4195       tty->print_cr("   %d) %s.%s", n, m->holder()->name()->as_utf8(), m->name()->as_utf8());
4196     }
4197   }
4198 #endif
4199 
4200   return false;  // bail-out; let JVM_GetCallerClass do the work
4201 }
4202 
4203 bool LibraryCallKit::inline_fp_conversions(vmIntrinsics::ID id) {
4204   Node* arg = argument(0);
4205   Node* result = NULL;
4206 
4207   switch (id) {
4208   case vmIntrinsics::_floatToRawIntBits:    result = new MoveF2INode(arg);  break;
4209   case vmIntrinsics::_intBitsToFloat:       result = new MoveI2FNode(arg);  break;
4210   case vmIntrinsics::_doubleToRawLongBits:  result = new MoveD2LNode(arg);  break;
4211   case vmIntrinsics::_longBitsToDouble:     result = new MoveL2DNode(arg);  break;
4212 
4213   case vmIntrinsics::_doubleToLongBits: {
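    // Java-level semantics being compiled, as a sketch: collapse every NaN
    // bit pattern to the canonical quiet NaN, otherwise forward the raw bits:
    //   long doubleToLongBits(double v) {
    //     return (v != v) ? 0x7ff8000000000000L
    //                     : Double.doubleToRawLongBits(v);
    //   }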
4214     // two paths (plus control) merge in a wood
4215     RegionNode *r = new RegionNode(3);
4216     Node *phi = new PhiNode(r, TypeLong::LONG);
4217 
4218     Node *cmpisnan = _gvn.transform(new CmpDNode(arg, arg));
4219     // Build the boolean node
4220     Node *bolisnan = _gvn.transform(new BoolNode(cmpisnan, BoolTest::ne));
4221 
4222     // Branch either way.
4223     // NaN case is less traveled, which makes all the difference.
4224     IfNode *ifisnan = create_and_xform_if(control(), bolisnan, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
4225     Node *opt_isnan = _gvn.transform(ifisnan);
4226     assert( opt_isnan->is_If(), "Expect an IfNode");
4227     IfNode *opt_ifisnan = (IfNode*)opt_isnan;
4228     Node *iftrue = _gvn.transform(new IfTrueNode(opt_ifisnan));
4229 
4230     set_control(iftrue);
4231 
4232     static const jlong nan_bits = CONST64(0x7ff8000000000000);
4233     Node *slow_result = longcon(nan_bits); // return NaN
4234     phi->init_req(1, _gvn.transform( slow_result ));
4235     r->init_req(1, iftrue);
4236 
4237     // Else fall through
4238     Node *iffalse = _gvn.transform(new IfFalseNode(opt_ifisnan));
4239     set_control(iffalse);
4240 
4241     phi->init_req(2, _gvn.transform(new MoveD2LNode(arg)));
4242     r->init_req(2, iffalse);
4243 
4244     // Post merge
4245     set_control(_gvn.transform(r));
4246     record_for_igvn(r);
4247 
4248     C->set_has_split_ifs(true); // Has chance for split-if optimization
4249     result = phi;
4250     assert(result->bottom_type()->isa_long(), "must be");
4251     break;
4252   }
4253 
4254   case vmIntrinsics::_floatToIntBits: {
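    // The float analogue, as a sketch: every NaN collapses to 0x7fc00000:
    //   int floatToIntBits(float v) {
    //     return (v != v) ? 0x7fc00000 : Float.floatToRawIntBits(v);
    //   }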
4255     // two paths (plus control) merge in a wood
4256     RegionNode *r = new RegionNode(3);
4257     Node *phi = new PhiNode(r, TypeInt::INT);
4258 
4259     Node *cmpisnan = _gvn.transform(new CmpFNode(arg, arg));
4260     // Build the boolean node
4261     Node *bolisnan = _gvn.transform(new BoolNode(cmpisnan, BoolTest::ne));
4262 
4263     // Branch either way.
4264     // NaN case is less traveled, which makes all the difference.
4265     IfNode *ifisnan = create_and_xform_if(control(), bolisnan, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
4266     Node *opt_isnan = _gvn.transform(ifisnan);
4267     assert( opt_isnan->is_If(), "Expect an IfNode");
4268     IfNode *opt_ifisnan = (IfNode*)opt_isnan;
4269     Node *iftrue = _gvn.transform(new IfTrueNode(opt_ifisnan));
4270 
4271     set_control(iftrue);
4272 
4273     static const jint nan_bits = 0x7fc00000;
4274     Node *slow_result = makecon(TypeInt::make(nan_bits)); // return NaN
4275     phi->init_req(1, _gvn.transform( slow_result ));
4276     r->init_req(1, iftrue);
4277 
4278     // Else fall through
4279     Node *iffalse = _gvn.transform(new IfFalseNode(opt_ifisnan));
4280     set_control(iffalse);
4281 
4282     phi->init_req(2, _gvn.transform(new MoveF2INode(arg)));
4283     r->init_req(2, iffalse);
4284 
4285     // Post merge
4286     set_control(_gvn.transform(r));
4287     record_for_igvn(r);
4288 
4289     C->set_has_split_ifs(true); // Has chance for split-if optimization
4290     result = phi;
4291     assert(result->bottom_type()->isa_int(), "must be");
4292     break;
4293   }
4294 
4295   default:
4296     fatal_unexpected_iid(id);
4297     break;
4298   }
4299   set_result(_gvn.transform(result));
4300   return true;
4301 }
4302 
4303 //----------------------inline_unsafe_copyMemory-------------------------
4304 // public native void sun.misc.Unsafe.copyMemory(Object srcBase, long srcOffset, Object destBase, long destOffset, long bytes);
4305 bool LibraryCallKit::inline_unsafe_copyMemory() {
4306   if (callee()->is_static())  return false;  // caller must have the capability!
4307   null_check_receiver();  // null-check receiver
4308   if (stopped())  return true;
4309 
4310   C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
4311 
4312   Node* src_ptr =         argument(1);   // type: oop
4313   Node* src_off = ConvL2X(argument(2));  // type: long
4314   Node* dst_ptr =         argument(4);   // type: oop
4315   Node* dst_off = ConvL2X(argument(5));  // type: long
4316   Node* size    = ConvL2X(argument(7));  // type: long
4317 
4318   assert(Unsafe_field_offset_to_byte_offset(11) == 11,
4319          "fieldOffset must be byte-scaled");
4320 
4321   Node* src = make_unsafe_address(src_ptr, src_off);
4322   Node* dst = make_unsafe_address(dst_ptr, dst_off);
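  // e.g. (illustrative): unsafe.copyMemory(null, srcAddr, null, dstAddr, 16)
  // arrives here with NULL base oops, so src and dst are raw off-heap
  // addresses and 16 bytes are copied between them.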
4323 
4324   // Conservatively insert a memory barrier on all memory slices.
4325   // Do not let writes of the copy source or destination float below the copy.
4326   insert_mem_bar(Op_MemBarCPUOrder);
4327 
4328   // Call it.  Note that the length argument is not scaled.
4329   make_runtime_call(RC_LEAF|RC_NO_FP,
4330                     OptoRuntime::fast_arraycopy_Type(),
4331                     StubRoutines::unsafe_arraycopy(),
4332                     "unsafe_arraycopy",
4333                     TypeRawPtr::BOTTOM,
4334                     src, dst, size XTOP);
4335 
4336   // Do not let reads of the copy destination float above the copy.
4337   insert_mem_bar(Op_MemBarCPUOrder);
4338 
4339   return true;
4340 }
4341 
4342 //------------------------copy_to_clone----------------------------------
4343 // Helper function for inline_native_clone.
4344 void LibraryCallKit::copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array, bool card_mark) {
4345   assert(obj_size != NULL, "");
4346   Node* raw_obj = alloc_obj->in(1);
4347   assert(alloc_obj->is_CheckCastPP() && raw_obj->is_Proj() && raw_obj->in(0)->is_Allocate(), "");
4348 
4349   AllocateNode* alloc = NULL;
4350   if (ReduceBulkZeroing) {
4351     // We will be completely responsible for initializing this object -
4352     // mark Initialize node as complete.
4353     alloc = AllocateNode::Ideal_allocation(alloc_obj, &_gvn);
4354     // The object was just allocated - there should not be any stores!
4355     guarantee(alloc != NULL && alloc->maybe_set_complete(&_gvn), "");
4356     // Mark as complete_with_arraycopy so that on AllocateNode
4357     // expansion, we know this AllocateNode is initialized by an array
4358     // copy and a StoreStore barrier exists after the array copy.
4359     alloc->initialization()->set_complete_with_arraycopy();
4360   }
4361 
4362   // Copy the fastest available way.
4363   // TODO: generate field copies for small objects instead.
4364   Node* src  = obj;
4365   Node* dest = alloc_obj;
4366   Node* size = _gvn.transform(obj_size);
4367 
4368   // Exclude the header but include the array length, to copy in 8-byte words.
4369   // Can't use base_offset_in_bytes(bt) since basic type is unknown.
4370   int base_off = is_array ? arrayOopDesc::length_offset_in_bytes() :
4371                             instanceOopDesc::base_offset_in_bytes();
4372   // base_off:
4373   // 8  - 32-bit VM
4374   // 12 - 64-bit VM, compressed klass
4375   // 16 - 64-bit VM, normal klass
4376   if (base_off % BytesPerLong != 0) {
4377     assert(UseCompressedClassPointers, "");
4378     if (is_array) {
4379       // Exclude the length, to copy in 8-byte words.
4380       base_off += sizeof(int);
4381     } else {
4382       // Include the klass, to copy in 8-byte words.
4383       base_off = instanceOopDesc::klass_offset_in_bytes();
4384     }
4385     assert(base_off % BytesPerLong == 0, "expect 8 bytes alignment");
4386   }
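  // Worked example (sketch): on a 64-bit VM with compressed class pointers,
  // an instance clone starts with base_off == 12, which is not 8-byte
  // aligned, so base_off is reset to klass_offset_in_bytes() and the narrow
  // klass word is simply copied along with the instance fields.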
4387   src  = basic_plus_adr(src,  base_off);
4388   dest = basic_plus_adr(dest, base_off);
4389 
4390   // Compute the length also, if needed:
4391   Node* countx = size;
4392   countx = _gvn.transform(new SubXNode(countx, MakeConX(base_off)));
4393   countx = _gvn.transform(new URShiftXNode(countx, intcon(LogBytesPerLong) ));
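  // i.e., countx == (obj_size - base_off) >> LogBytesPerLong: the number of
  // 8-byte words to copy (sizes here are always 8-byte aligned).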
4394 
4395   const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
4396 
4397   ArrayCopyNode* ac = ArrayCopyNode::make(this, false, src, NULL, dest, NULL, countx, false);
4398   ac->set_clonebasic();
4399   Node* n = _gvn.transform(ac);
4400   if (n == ac) {
4401     set_predefined_output_for_runtime_call(ac, ac->in(TypeFunc::Memory), raw_adr_type);
4402   } else {
4403     set_all_memory(n);
4404   }
4405 
4406   // If necessary, emit some card marks afterwards.  (Non-arrays only.)
4407   if (card_mark) {
4408     assert(!is_array, "");
4409     // Put in store barrier for any and all oops we are sticking
4410     // into this object.  (We could avoid this if we could prove
4411     // that the object type contains no oop fields at all.)
4412     Node* no_particular_value = NULL;
4413     Node* no_particular_field = NULL;
4414     int raw_adr_idx = Compile::AliasIdxRaw;
4415     post_barrier(control(),
4416                  memory(raw_adr_type),
4417                  alloc_obj,
4418                  no_particular_field,
4419                  raw_adr_idx,
4420                  no_particular_value,
4421                  T_OBJECT,
4422                  false);
4423   }
4424 
4425   // Do not let reads from the cloned object float above the arraycopy.
4426   if (alloc != NULL) {
4427     // Do not let stores that initialize this object be reordered with
4428     // a subsequent store that would make this object accessible by
4429     // other threads.
4430     // Record what AllocateNode this StoreStore protects so that
4431     // escape analysis can go from the MemBarStoreStoreNode to the
4432     // AllocateNode and eliminate the MemBarStoreStoreNode if possible
4433     // based on the escape status of the AllocateNode.
4434     insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out(AllocateNode::RawAddress));
4435   } else {
4436     insert_mem_bar(Op_MemBarCPUOrder);
4437   }
4438 }
4439 
4440 //------------------------inline_native_clone----------------------------
4441 // protected native Object java.lang.Object.clone();
4442 //
4443 // Here are the simple edge cases:
4444 //  null receiver => normal trap
4445 //  virtual and clone was overridden => slow path to out-of-line clone
4446 //  not cloneable or finalizer => slow path to out-of-line Object.clone
4447 //
4448 // The general case has two steps, allocation and copying.
4449 // Allocation has two cases, and uses GraphKit::new_instance or new_array.
4450 //
4451 // Copying also has two cases, oop arrays and everything else.
4452 // Oop arrays use arrayof_oop_arraycopy (same as System.arraycopy).
4453 // Everything else uses the tight inline loop supplied by CopyArrayNode.
4454 //
4455 // These steps fold up nicely if and when the cloned object's klass
4456 // can be sharply typed as an object array, a type array, or an instance.
4457 //
4458 bool LibraryCallKit::inline_native_clone(bool is_virtual) {
4459   PhiNode* result_val;
4460 
4461   // Set the reexecute bit for the interpreter to reexecute
4462   // the bytecode that invokes Object.clone if deoptimization happens.
4463   { PreserveReexecuteState preexecs(this);
4464     jvms()->set_should_reexecute(true);
4465 
4466     Node* obj = null_check_receiver();
4467     if (stopped())  return true;
4468 
4469     const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
4470 
4471     // If we are going to clone an instance, we need its exact type to
4472     // know the number and types of fields to convert the clone to
4473     // loads/stores. Maybe a speculative type can help us.
4474     if (!obj_type->klass_is_exact() &&
4475         obj_type->speculative_type() != NULL &&
4476         obj_type->speculative_type()->is_instance_klass()) {
4477       ciInstanceKlass* spec_ik = obj_type->speculative_type()->as_instance_klass();
4478       if (spec_ik->nof_nonstatic_fields() <= ArrayCopyLoadStoreMaxElem &&
4479           !spec_ik->has_injected_fields()) {
4480         ciKlass* k = obj_type->klass();
4481         if (!k->is_instance_klass() ||
4482             k->as_instance_klass()->is_interface() ||
4483             k->as_instance_klass()->has_subklass()) {
4484           obj = maybe_cast_profiled_obj(obj, obj_type->speculative_type(), false);
4485         }
4486       }
4487     }
4488 
4489     Node* obj_klass = load_object_klass(obj);
4490     const TypeKlassPtr* tklass = _gvn.type(obj_klass)->isa_klassptr();
4491     const TypeOopPtr*   toop   = ((tklass != NULL)
4492                                 ? tklass->as_instance_type()
4493                                 : TypeInstPtr::NOTNULL);
4494 
4495     // Conservatively insert a memory barrier on all memory slices.
4496     // Do not let writes into the original float below the clone.
4497     insert_mem_bar(Op_MemBarCPUOrder);
4498 
4499     // paths into result_reg:
4500     enum {
4501       _slow_path = 1,     // out-of-line call to clone method (virtual or not)
4502       _objArray_path,     // plain array allocation, plus arrayof_oop_arraycopy
4503       _array_path,        // plain array allocation, plus arrayof_long_arraycopy
4504       _instance_path,     // plain instance allocation, plus arrayof_long_arraycopy
4505       PATH_LIMIT
4506     };
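    // e.g., obj.clone() on an int[] takes _array_path; on an Object[]
    // without ReduceInitialCardMarks, _objArray_path; on a plain instance,
    // _instance_path; anything failing the guards below takes _slow_path.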
4507     RegionNode* result_reg = new RegionNode(PATH_LIMIT);
4508     result_val             = new PhiNode(result_reg, TypeInstPtr::NOTNULL);
4509     PhiNode*    result_i_o = new PhiNode(result_reg, Type::ABIO);
4510     PhiNode*    result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
4511     record_for_igvn(result_reg);
4512 
4513     const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
4514     int raw_adr_idx = Compile::AliasIdxRaw;
4515 
4516     Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)NULL);
4517     if (array_ctl != NULL) {
4518       // It's an array.
4519       PreserveJVMState pjvms(this);
4520       set_control(array_ctl);
4521       Node* obj_length = load_array_length(obj);
4522       Node* obj_size  = NULL;
4523       Node* alloc_obj = new_array(obj_klass, obj_length, 0, &obj_size);  // no arguments to push
4524 
4525       if (!use_ReduceInitialCardMarks()) {
4526         // If it is an oop array, it requires very special treatment,
4527         // because card marking is required on each card of the array.
4528         Node* is_obja = generate_objArray_guard(obj_klass, (RegionNode*)NULL);
4529         if (is_obja != NULL) {
4530           PreserveJVMState pjvms2(this);
4531           set_control(is_obja);
4532           // Generate a direct call to the right arraycopy function(s).
4533           Node* alloc = tightly_coupled_allocation(alloc_obj, NULL);
4534           ArrayCopyNode* ac = ArrayCopyNode::make(this, true, obj, intcon(0), alloc_obj, intcon(0), obj_length, alloc != NULL);
4535           ac->set_cloneoop();
4536           Node* n = _gvn.transform(ac);
4537           assert(n == ac, "cannot disappear");
4538           ac->connect_outputs(this);
4539 
4540           result_reg->init_req(_objArray_path, control());
4541           result_val->init_req(_objArray_path, alloc_obj);
4542           result_i_o ->set_req(_objArray_path, i_o());
4543           result_mem ->set_req(_objArray_path, reset_memory());
4544         }
4545       }
4546       // Otherwise, there are no card marks to worry about.
4547       // (We can dispense with card marks if we know the allocation
4548       //  comes out of eden (TLAB)...  In fact, ReduceInitialCardMarks
4549       //  causes the non-eden paths to take compensating steps to
4550       //  simulate a fresh allocation, so that no further
4551       //  card marks are required in compiled code to initialize
4552       //  the object.)
4553 
4554       if (!stopped()) {
4555         copy_to_clone(obj, alloc_obj, obj_size, true, false);
4556 
4557         // Present the results of the copy.
4558         result_reg->init_req(_array_path, control());
4559         result_val->init_req(_array_path, alloc_obj);
4560         result_i_o ->set_req(_array_path, i_o());
4561         result_mem ->set_req(_array_path, reset_memory());
4562       }
4563     }
4564 
4565     // We only go to the instance fast case code if we pass a number of guards.
4566     // The paths which do not pass are accumulated in the slow_region.
4567     RegionNode* slow_region = new RegionNode(1);
4568     record_for_igvn(slow_region);
4569     if (!stopped()) {
4570       // It's an instance (we did array above).  Make the slow-path tests.
4571       // If this is a virtual call, we generate a funny guard.  We grab
4572       // the vtable entry corresponding to clone() from the target object.
4573       // If the target method which we are calling happens to be the
4574       // Object clone() method, we pass the guard.  We do not need this
4575       // guard for non-virtual calls; the caller is known to be the native
4576       // Object clone().
4577       if (is_virtual) {
4578         generate_virtual_guard(obj_klass, slow_region);
4579       }
4580 
4581       // The object must be cloneable and must not have a finalizer.
4582       // Both of these conditions may be checked in a single test.
4583       // We could optimize the cloneable test further, but we don't care.
4584       generate_access_flags_guard(obj_klass,
4585                                   // Test both conditions:
4586                                   JVM_ACC_IS_CLONEABLE | JVM_ACC_HAS_FINALIZER,
4587                                   // Must be cloneable but not finalizer:
4588                                   JVM_ACC_IS_CLONEABLE,
4589                                   slow_region);
4590     }
4591 
4592     if (!stopped()) {
4593       // It's an instance, and it passed the slow-path tests.
4594       PreserveJVMState pjvms(this);
4595       Node* obj_size  = NULL;
4596       // Need to deoptimize on exception from allocation since Object.clone intrinsic
4597       // is reexecuted if deoptimization occurs and there could be problems when merging
4598       // exception state between multiple Object.clone versions (reexecute=true vs reexecute=false).
4599       Node* alloc_obj = new_instance(obj_klass, NULL, &obj_size, /*deoptimize_on_exception=*/true);
4600 
4601       copy_to_clone(obj, alloc_obj, obj_size, false, !use_ReduceInitialCardMarks());
4602 
4603       // Present the results of the slow call.
4604       result_reg->init_req(_instance_path, control());
4605       result_val->init_req(_instance_path, alloc_obj);
4606       result_i_o ->set_req(_instance_path, i_o());
4607       result_mem ->set_req(_instance_path, reset_memory());
4608     }
4609 
4610     // Generate code for the slow case.  We make a call to clone().
4611     set_control(_gvn.transform(slow_region));
4612     if (!stopped()) {
4613       PreserveJVMState pjvms(this);
4614       CallJavaNode* slow_call = generate_method_call(vmIntrinsics::_clone, is_virtual);
4615       Node* slow_result = set_results_for_java_call(slow_call);
4616       // this->control() comes from set_results_for_java_call
4617       result_reg->init_req(_slow_path, control());
4618       result_val->init_req(_slow_path, slow_result);
4619       result_i_o ->set_req(_slow_path, i_o());
4620       result_mem ->set_req(_slow_path, reset_memory());
4621     }
4622 
4623     // Return the combined state.
4624     set_control(    _gvn.transform(result_reg));
4625     set_i_o(        _gvn.transform(result_i_o));
4626     set_all_memory( _gvn.transform(result_mem));
4627   } // original reexecute is set back here
4628 
4629   set_result(_gvn.transform(result_val));
4630   return true;
4631 }
4632 
4633 // If we have a tightly coupled allocation, the arraycopy may take care
4634 // of the array initialization. If one of the guards we insert between
4635 // the allocation and the arraycopy causes a deoptimization, an
4636 // uninitialized array will escape the compiled method. To prevent that
4637 // we set the JVM state for uncommon traps between the allocation and
4638 // the arraycopy to the state before the allocation so, in case of
4639 // deoptimization, we'll reexecute the allocation and the
4640 // initialization.
4641 JVMState* LibraryCallKit::arraycopy_restore_alloc_state(AllocateArrayNode* alloc, int& saved_reexecute_sp) {
4642   if (alloc != NULL) {
4643     ciMethod* trap_method = alloc->jvms()->method();
4644     int trap_bci = alloc->jvms()->bci();
4645 
4646     if (!C->too_many_traps(trap_method, trap_bci, Deoptimization::Reason_intrinsic) &&
4647         !C->too_many_traps(trap_method, trap_bci, Deoptimization::Reason_null_check)) {
4648       // Make sure there's no store between the allocation and the
4649       // arraycopy, otherwise visible side effects could be reexecuted
4650       // in case of deoptimization and cause incorrect execution.
4651       bool no_interfering_store = true;
4652       Node* mem = alloc->in(TypeFunc::Memory);
4653       if (mem->is_MergeMem()) {
4654         for (MergeMemStream mms(merged_memory(), mem->as_MergeMem()); mms.next_non_empty2(); ) {
4655           Node* n = mms.memory();
4656           if (n != mms.memory2() && !(n->is_Proj() && n->in(0) == alloc->initialization())) {
4657             assert(n->is_Store(), "what else?");
4658             no_interfering_store = false;
4659             break;
4660           }
4661         }
4662       } else {
4663         for (MergeMemStream mms(merged_memory()); mms.next_non_empty(); ) {
4664           Node* n = mms.memory();
4665           if (n != mem && !(n->is_Proj() && n->in(0) == alloc->initialization())) {
4666             assert(n->is_Store(), "what else?");
4667             no_interfering_store = false;
4668             break;
4669           }
4670         }
4671       }
4672 
4673       if (no_interfering_store) {
4674         JVMState* old_jvms = alloc->jvms()->clone_shallow(C);
4675         uint size = alloc->req();
4676         SafePointNode* sfpt = new SafePointNode(size, old_jvms);
4677         old_jvms->set_map(sfpt);
4678         for (uint i = 0; i < size; i++) {
4679           sfpt->init_req(i, alloc->in(i));
4680         }
4681         // re-push array length for deoptimization
4682         sfpt->ins_req(old_jvms->stkoff() + old_jvms->sp(), alloc->in(AllocateNode::ALength));
4683         old_jvms->set_sp(old_jvms->sp()+1);
4684         old_jvms->set_monoff(old_jvms->monoff()+1);
4685         old_jvms->set_scloff(old_jvms->scloff()+1);
4686         old_jvms->set_endoff(old_jvms->endoff()+1);
4687         old_jvms->set_should_reexecute(true);
4688 
4689         sfpt->set_i_o(map()->i_o());
4690         sfpt->set_memory(map()->memory());
4691         sfpt->set_control(map()->control());
4692 
4693         JVMState* saved_jvms = jvms();
4694         saved_reexecute_sp = _reexecute_sp;
4695 
4696         set_jvms(sfpt->jvms());
4697         _reexecute_sp = jvms()->sp();
4698 
4699         return saved_jvms;
4700       }
4701     }
4702   }
4703   return NULL;
4704 }
4705 
4706 // In case of a deoptimization, we restart execution at the
4707 // allocation, allocating a new array. We would leave an uninitialized
4708 // array in the heap that GCs wouldn't expect. Move the allocation
4709 // after the traps so we don't allocate the array if we
4710 // deoptimize. This is possible because tightly_coupled_allocation()
4711 // guarantees there's no observer of the allocated array at this point
4712 // and the control flow is simple enough.
4713 void LibraryCallKit::arraycopy_move_allocation_here(AllocateArrayNode* alloc, Node* dest, JVMState* saved_jvms, int saved_reexecute_sp) {
4714   if (saved_jvms != NULL && !stopped()) {
4715     assert(alloc != NULL, "only with a tightly coupled allocation");
4716     // restore JVM state to the state at the arraycopy
4717     saved_jvms->map()->set_control(map()->control());
4718     assert(saved_jvms->map()->memory() == map()->memory(), "memory state changed?");
4719     assert(saved_jvms->map()->i_o() == map()->i_o(), "IO state changed?");
4720     // If we've improved the types of some nodes (null check) while
4721     // emitting the guards, propagate them to the current state
4722     map()->replaced_nodes().apply(saved_jvms->map());
4723     set_jvms(saved_jvms);
4724     _reexecute_sp = saved_reexecute_sp;
4725 
4726     // Remove the allocation from above the guards
4727     CallProjections callprojs;
4728     alloc->extract_projections(&callprojs, true);
4729     InitializeNode* init = alloc->initialization();
4730     Node* alloc_mem = alloc->in(TypeFunc::Memory);
4731     C->gvn_replace_by(callprojs.fallthrough_ioproj, alloc->in(TypeFunc::I_O));
4732     C->gvn_replace_by(init->proj_out(TypeFunc::Memory), alloc_mem);
4733     C->gvn_replace_by(init->proj_out(TypeFunc::Control), alloc->in(0));
4734 
4735     // move the allocation here (after the guards)
4736     _gvn.hash_delete(alloc);
4737     alloc->set_req(TypeFunc::Control, control());
4738     alloc->set_req(TypeFunc::I_O, i_o());
4739     Node *mem = reset_memory();
4740     set_all_memory(mem);
4741     alloc->set_req(TypeFunc::Memory, mem);
4742     set_control(init->proj_out(TypeFunc::Control));
4743     set_i_o(callprojs.fallthrough_ioproj);
4744 
4745     // Update memory as done in GraphKit::set_output_for_allocation()
4746     const TypeInt* length_type = _gvn.find_int_type(alloc->in(AllocateNode::ALength));
4747     const TypeOopPtr* ary_type = _gvn.type(alloc->in(AllocateNode::KlassNode))->is_klassptr()->as_instance_type();
4748     if (ary_type->isa_aryptr() && length_type != NULL) {
4749       ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
4750     }
4751     const TypePtr* telemref = ary_type->add_offset(Type::OffsetBot);
4752     int            elemidx  = C->get_alias_index(telemref);
4753     set_memory(init->proj_out(TypeFunc::Memory), Compile::AliasIdxRaw);
4754     set_memory(init->proj_out(TypeFunc::Memory), elemidx);
4755 
4756     Node* allocx = _gvn.transform(alloc);
4757     assert(allocx == alloc, "where has the allocation gone?");
4758     assert(dest->is_CheckCastPP(), "not an allocation result?");
4759 
4760     _gvn.hash_delete(dest);
4761     dest->set_req(0, control());
4762     Node* destx = _gvn.transform(dest);
4763     assert(destx == dest, "where has the allocation result gone?");
4764   }
4765 }
4766 
4767 
4768 //------------------------------inline_arraycopy-----------------------
4769 // public static native void java.lang.System.arraycopy(Object src,  int  srcPos,
4770 //                                                      Object dest, int destPos,
4771 //                                                      int length);
4772 bool LibraryCallKit::inline_arraycopy() {
4773   // Get the arguments.
4774   Node* src         = argument(0);  // type: oop
4775   Node* src_offset  = argument(1);  // type: int
4776   Node* dest        = argument(2);  // type: oop
4777   Node* dest_offset = argument(3);  // type: int
4778   Node* length      = argument(4);  // type: int
4779 
4780 
4781   // Check for allocation before we add nodes that would confuse
4782   // tightly_coupled_allocation()
4783   AllocateArrayNode* alloc = tightly_coupled_allocation(dest, NULL);
4784 
4785   int saved_reexecute_sp = -1;
4786   JVMState* saved_jvms = arraycopy_restore_alloc_state(alloc, saved_reexecute_sp);
4787   // See arraycopy_restore_alloc_state() comment
4788   // if alloc == NULL we don't have to worry about a tightly coupled allocation so we can emit all needed guards
4789   // if saved_jvms != NULL (then alloc != NULL) then we can handle guards and a tightly coupled allocation
4790 // if saved_jvms == NULL and alloc != NULL, we can't emit any guards
4791   bool can_emit_guards = (alloc == NULL || saved_jvms != NULL);
4792 
4793   // The following tests must be performed
4794   // (1) src and dest are arrays.
4795   // (2) src and dest arrays must have elements of the same BasicType
4796   // (3) src and dest must not be null.
4797   // (4) src_offset must not be negative.
4798   // (5) dest_offset must not be negative.
4799   // (6) length must not be negative.
4800   // (7) src_offset + length must not exceed length of src.
4801   // (8) dest_offset + length must not exceed length of dest.
4802   // (9) each element of an oop array must be assignable
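  // e.g. (illustrative): System.arraycopy(src, 5, dest, 0, 6) with a
  // 10-element src violates (7), since 5 + 6 > src.length, and must throw
  // ArrayIndexOutOfBoundsException instead of copying.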
4803 
4804   // (3) src and dest must not be null.
4805   // always do this here because we need the JVM state for uncommon traps
4806   Node* null_ctl = top();
4807   src  = saved_jvms != NULL ? null_check_oop(src, &null_ctl, true, true) : null_check(src,  T_ARRAY);
4808   assert(null_ctl->is_top(), "no null control here");
4809   dest = null_check(dest, T_ARRAY);
4810 
4811   if (!can_emit_guards) {
4812     // if saved_jvms == NULL and alloc != NULL, we don't emit any
4813     // guards, but the arraycopy node could still take advantage of a
4814     // tightly coupled allocation. tightly_coupled_allocation() is
4815     // called again to make sure it takes the null check above into
4816     // account: the null check is mandatory, and if it caused an
4817     // uncommon trap to be emitted then the allocation can't be
4818     // considered tightly coupled in this context.
4819     alloc = tightly_coupled_allocation(dest, NULL);
4820   }
4821 
4822   bool validated = false;
4823 
4824   const Type* src_type  = _gvn.type(src);
4825   const Type* dest_type = _gvn.type(dest);
4826   const TypeAryPtr* top_src  = src_type->isa_aryptr();
4827   const TypeAryPtr* top_dest = dest_type->isa_aryptr();
4828 
4829   // Do we have the type of src?
4830   bool has_src = (top_src != NULL && top_src->klass() != NULL);
4831   // Do we have the type of dest?
4832   bool has_dest = (top_dest != NULL && top_dest->klass() != NULL);
4833   // Is the type for src from speculation?
4834   bool src_spec = false;
4835   // Is the type for dest from speculation?
4836   bool dest_spec = false;
4837 
4838   if ((!has_src || !has_dest) && can_emit_guards) {
4839     // We don't have sufficient type information, let's see if
4840     // speculative types can help. We need to have types for both src
4841     // and dest so that it pays off.
4842 
4843     // Do we already have or could we have type information for src
4844     bool could_have_src = has_src;
4845     // Do we already have or could we have type information for dest
4846     bool could_have_dest = has_dest;
4847 
4848     ciKlass* src_k = NULL;
4849     if (!has_src) {
4850       src_k = src_type->speculative_type_not_null();
4851       if (src_k != NULL && src_k->is_array_klass()) {
4852         could_have_src = true;
4853       }
4854     }
4855 
4856     ciKlass* dest_k = NULL;
4857     if (!has_dest) {
4858       dest_k = dest_type->speculative_type_not_null();
4859       if (dest_k != NULL && dest_k->is_array_klass()) {
4860         could_have_dest = true;
4861       }
4862     }
4863 
4864     if (could_have_src && could_have_dest) {
4865       // This is going to pay off so emit the required guards
4866       if (!has_src) {
4867         src = maybe_cast_profiled_obj(src, src_k, true);
4868         src_type  = _gvn.type(src);
4869         top_src  = src_type->isa_aryptr();
4870         has_src = (top_src != NULL && top_src->klass() != NULL);
4871         src_spec = true;
4872       }
4873       if (!has_dest) {
4874         dest = maybe_cast_profiled_obj(dest, dest_k, true);
4875         dest_type  = _gvn.type(dest);
4876         top_dest  = dest_type->isa_aryptr();
4877         has_dest = (top_dest != NULL && top_dest->klass() != NULL);
4878         dest_spec = true;
4879       }
4880     }
4881   }
4882 
4883   if (has_src && has_dest && can_emit_guards) {
4884     BasicType src_elem  = top_src->klass()->as_array_klass()->element_type()->basic_type();
4885     BasicType dest_elem = top_dest->klass()->as_array_klass()->element_type()->basic_type();
4886     if (src_elem  == T_ARRAY)  src_elem  = T_OBJECT;
4887     if (dest_elem == T_ARRAY)  dest_elem = T_OBJECT;
4888 
4889     if (src_elem == dest_elem && src_elem == T_OBJECT) {
4890       // If both arrays are object arrays then having the exact types
4891       // for both will remove the need for a subtype check at runtime
4892       // before the call and may make it possible to pick a faster copy
4893       // routine (without a subtype check on every element)
4894       // Do we have the exact type of src?
4895       bool could_have_src = src_spec;
4896       // Do we have the exact type of dest?
4897       bool could_have_dest = dest_spec;
4898       ciKlass* src_k = top_src->klass();
4899       ciKlass* dest_k = top_dest->klass();
4900       if (!src_spec) {
4901         src_k = src_type->speculative_type_not_null();
4902         if (src_k != NULL && src_k->is_array_klass()) {
4903           could_have_src = true;
4904         }
4905       }
4906       if (!dest_spec) {
4907         dest_k = dest_type->speculative_type_not_null();
4908         if (dest_k != NULL && dest_k->is_array_klass()) {
4909           could_have_dest = true;
4910         }
4911       }
4912       if (could_have_src && could_have_dest) {
4913         // If we can have both exact types, emit the missing guards
4914         if (could_have_src && !src_spec) {
4915           src = maybe_cast_profiled_obj(src, src_k, true);
4916         }
4917         if (could_have_dest && !dest_spec) {
4918           dest = maybe_cast_profiled_obj(dest, dest_k, true);
4919         }
4920       }
4921     }
4922   }
4923 
4924   ciMethod* trap_method = method();
4925   int trap_bci = bci();
4926   if (saved_jvms != NULL) {
4927     trap_method = alloc->jvms()->method();
4928     trap_bci = alloc->jvms()->bci();
4929   }
4930 
4931   if (!C->too_many_traps(trap_method, trap_bci, Deoptimization::Reason_intrinsic) &&
4932       can_emit_guards &&
4933       !src->is_top() && !dest->is_top()) {
4934     // validate arguments: enables transformation of the ArrayCopyNode
4935     validated = true;
4936 
4937     RegionNode* slow_region = new RegionNode(1);
4938     record_for_igvn(slow_region);
4939 
4940     // (1) src and dest are arrays.
4941     generate_non_array_guard(load_object_klass(src), slow_region);
4942     generate_non_array_guard(load_object_klass(dest), slow_region);
4943 
4944     // (2) src and dest arrays must have elements of the same BasicType
4945     // done at macro expansion or at Ideal transformation time
4946 
4947     // (4) src_offset must not be negative.
4948     generate_negative_guard(src_offset, slow_region);
4949 
4950     // (5) dest_offset must not be negative.
4951     generate_negative_guard(dest_offset, slow_region);
4952 
4953     // (7) src_offset + length must not exceed length of src.
4954     generate_limit_guard(src_offset, length,
4955                          load_array_length(src),
4956                          slow_region);
4957 
4958     // (8) dest_offset + length must not exceed length of dest.
4959     generate_limit_guard(dest_offset, length,
4960                          load_array_length(dest),
4961                          slow_region);
4962 
4963     // (9) each element of an oop array must be assignable
4964     Node* src_klass  = load_object_klass(src);
4965     Node* dest_klass = load_object_klass(dest);
4966     Node* not_subtype_ctrl = gen_subtype_check(src_klass, dest_klass);
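         // not_subtype_ctrl is the control path on which the subtype check
         // fails (top() if the check is statically known to succeed).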
4967 
4968     if (not_subtype_ctrl != top()) {
4969       PreserveJVMState pjvms(this);
4970       set_control(not_subtype_ctrl);
4971       uncommon_trap(Deoptimization::Reason_intrinsic,
4972                     Deoptimization::Action_make_not_entrant);
4973       assert(stopped(), "Should be stopped");
4974     }
4975     {
4976       PreserveJVMState pjvms(this);
4977       set_control(_gvn.transform(slow_region));
4978       uncommon_trap(Deoptimization::Reason_intrinsic,
4979                     Deoptimization::Action_make_not_entrant);
4980       assert(stopped(), "Should be stopped");
4981     }
4982   }
4983 
4984   arraycopy_move_allocation_here(alloc, dest, saved_jvms, saved_reexecute_sp);
4985 
4986   if (stopped()) {
4987     return true;
4988   }
4989 
4990   ArrayCopyNode* ac = ArrayCopyNode::make(this, true, src, src_offset, dest, dest_offset, length, alloc != NULL,
4991                                           // Create LoadRange and LoadKlass nodes for use during macro expansion here
4992                                           // so the compiler has a chance to eliminate them: during macro expansion,
4993                                           // we have to set their control (CastPP nodes are eliminated).
4994                                           load_object_klass(src), load_object_klass(dest),
4995                                           load_array_length(src), load_array_length(dest));
4996 
4997   ac->set_arraycopy(validated);
4998 
4999   Node* n = _gvn.transform(ac);
5000   if (n == ac) {
5001     ac->connect_outputs(this);
5002   } else {
5003     assert(validated, "shouldn't transform if all arguments not validated");
5004     set_all_memory(n);
5005   }
5006 
5007   return true;
5008 }
5009 
5010 
5011 // Helper function which determines if an arraycopy immediately follows
5012 // an allocation, with no intervening tests or other escapes for the object.
5013 AllocateArrayNode*
5014 LibraryCallKit::tightly_coupled_allocation(Node* ptr,
5015                                            RegionNode* slow_region) {
5016   if (stopped())             return NULL;  // no fast path
5017   if (C->AliasLevel() == 0)  return NULL;  // no MergeMems around
5018 
5019   AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(ptr, &_gvn);
5020   if (alloc == NULL)  return NULL;
5021 
5022   Node* rawmem = memory(Compile::AliasIdxRaw);
5023   // Is the allocation's memory state untouched?
5024   if (!(rawmem->is_Proj() && rawmem->in(0)->is_Initialize())) {
5025     // Bail out if there have been raw-memory effects since the allocation.
5026     // (Example:  There might have been a call or safepoint.)
5027     return NULL;
5028   }
5029   rawmem = rawmem->in(0)->as_Initialize()->memory(Compile::AliasIdxRaw);
5030   if (!(rawmem->is_Proj() && rawmem->in(0) == alloc)) {
5031     return NULL;
5032   }
5033 
5034   // There must be no unexpected observers of this allocation.
5035   for (DUIterator_Fast imax, i = ptr->fast_outs(imax); i < imax; i++) {
5036     Node* obs = ptr->fast_out(i);
5037     if (obs != this->map()) {
5038       return NULL;
5039     }
5040   }
5041 
5042   // This arraycopy must unconditionally follow the allocation of the ptr.
5043   Node* alloc_ctl = ptr->in(0);
5044   assert(just_allocated_object(alloc_ctl) == ptr, "most recent allocation");
5045 
5046   Node* ctl = control();
5047   while (ctl != alloc_ctl) {
5048     // There may be guards which feed into the slow_region.
5049     // Any other control flow means that we might not get a chance
5050     // to finish initializing the allocated object.
5051     if ((ctl->is_IfFalse() || ctl->is_IfTrue()) && ctl->in(0)->is_If()) {
5052       IfNode* iff = ctl->in(0)->as_If();
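           // 1 - _con flips the projection constant, selecting the opposite
           // arm of the If.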
5053       Node* not_ctl = iff->proj_out(1 - ctl->as_Proj()->_con);
5054       assert(not_ctl != NULL && not_ctl != ctl, "found alternate");
5055       if (slow_region != NULL && slow_region->find_edge(not_ctl) >= 1) {
5056         ctl = iff->in(0);       // This test feeds the known slow_region.
5057         continue;
5058       }
5059       // One more try:  Various low-level checks bottom out in
5060       // uncommon traps.  If the debug-info of the trap omits
5061       // any reference to the allocation, as we've already
5062       // observed, then there can be no objection to the trap.
5063       bool found_trap = false;
5064       for (DUIterator_Fast jmax, j = not_ctl->fast_outs(jmax); j < jmax; j++) {
5065         Node* obs = not_ctl->fast_out(j);
5066         if (obs->in(0) == not_ctl && obs->is_Call() &&
5067             (obs->as_Call()->entry_point() == SharedRuntime::uncommon_trap_blob()->entry_point())) {
5068           found_trap = true; break;
5069         }
5070       }
5071       if (found_trap) {
5072         ctl = iff->in(0);       // This test feeds a harmless uncommon trap.
5073         continue;
5074       }
5075     }
5076     return NULL;
5077   }
5078 
5079   // If we get this far, we have an allocation which immediately
5080   // precedes the arraycopy, and we can take over zeroing the new object.
5081   // The arraycopy will finish the initialization, and provide
5082   // a new control state to which we will anchor the destination pointer.
5083 
5084   return alloc;
5085 }
5086 
5087 //-------------inline_encodeISOArray-----------------------------------
5088 // encode char[] to byte[] in ISO_8859_1
5089 bool LibraryCallKit::inline_encodeISOArray() {
5090   assert(callee()->signature()->size() == 5, "encodeISOArray has 5 parameters");
5091   // no receiver since it is a static method
5092   Node *src         = argument(0);
5093   Node *src_offset  = argument(1);
5094   Node *dst         = argument(2);
5095   Node *dst_offset  = argument(3);
5096   Node *length      = argument(4);
5097 
5098   const Type* src_type = src->Value(&_gvn);
5099   const Type* dst_type = dst->Value(&_gvn);
5100   const TypeAryPtr* top_src = src_type->isa_aryptr();
5101   const TypeAryPtr* top_dest = dst_type->isa_aryptr();
5102   if (top_src  == NULL || top_src->klass()  == NULL ||
5103       top_dest == NULL || top_dest->klass() == NULL) {
5104     // failed array check
5105     return false;
5106   }
5107 
5108   // Figure out the size and type of the elements we will be copying.
5109   BasicType src_elem = src_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5110   BasicType dst_elem = dst_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5111   if (!((src_elem == T_CHAR) || (src_elem == T_BYTE)) || dst_elem != T_BYTE) {
5112     return false;
5113   }
5114 
5115   Node* src_start = array_element_address(src, src_offset, T_CHAR);
5116   Node* dst_start = array_element_address(dst, dst_offset, dst_elem);
5117   // 'src_start' points to src array + scaled offset
5118   // 'dst_start' points to dst array + scaled offset
5119 
5120   const TypeAryPtr* mtype = TypeAryPtr::BYTES;
5121   Node* enc = new EncodeISOArrayNode(control(), memory(mtype), src_start, dst_start, length);
5122   enc = _gvn.transform(enc);
5123   Node* res_mem = _gvn.transform(new SCMemProjNode(enc));
5124   set_memory(res_mem, mtype);
5125   set_result(enc);
5126   return true;
5127 }
5128 
5129 //-------------inline_multiplyToLen-----------------------------------
5130 bool LibraryCallKit::inline_multiplyToLen() {
5131   assert(UseMultiplyToLenIntrinsic, "not implemented on this platform");
5132 
5133   address stubAddr = StubRoutines::multiplyToLen();
5134   if (stubAddr == NULL) {
5135     return false; // Intrinsic's stub is not implemented on this platform
5136   }
5137   const char* stubName = "multiplyToLen";
5138 
5139   assert(callee()->signature()->size() == 5, "multiplyToLen has 5 parameters");
5140 
5141   // no receiver because it is a static method
5142   Node* x    = argument(0);
5143   Node* xlen = argument(1);
5144   Node* y    = argument(2);
5145   Node* ylen = argument(3);
5146   Node* z    = argument(4);
5147 
5148   const Type* x_type = x->Value(&_gvn);
5149   const Type* y_type = y->Value(&_gvn);
5150   const TypeAryPtr* top_x = x_type->isa_aryptr();
5151   const TypeAryPtr* top_y = y_type->isa_aryptr();
5152   if (top_x  == NULL || top_x->klass()  == NULL ||
5153       top_y == NULL || top_y->klass() == NULL) {
5154     // failed array check
5155     return false;
5156   }
5157 
5158   BasicType x_elem = x_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5159   BasicType y_elem = y_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5160   if (x_elem != T_INT || y_elem != T_INT) {
5161     return false;
5162   }
5163 
5164   // Set the original stack and the reexecute bit for the interpreter to reexecute
5165   // the bytecode that invokes BigInteger.multiplyToLen() if deoptimization happens
5166   // on the return from z array allocation in runtime.
5167   { PreserveReexecuteState preexecs(this);
5168     jvms()->set_should_reexecute(true);
5169 
5170     Node* x_start = array_element_address(x, intcon(0), x_elem);
5171     Node* y_start = array_element_address(y, intcon(0), y_elem);
5172     // 'x_start' points to the first element of the x array
5173     // 'y_start' points to the first element of the y array
5174 
5175     // Allocate the result array
5176     Node* zlen = _gvn.transform(new AddINode(xlen, ylen));
5177     ciKlass* klass = ciTypeArrayKlass::make(T_INT);
5178     Node* klass_node = makecon(TypeKlassPtr::make(klass));
5179 
5180     IdealKit ideal(this);
5181 
5182 #define __ ideal.
5183      Node* one = __ ConI(1);
5184      Node* zero = __ ConI(0);
5185      IdealVariable need_alloc(ideal), z_alloc(ideal);  __ declarations_done();
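          // need_alloc becomes non-zero when the incoming z is null or too
          // short to hold xlen + ylen ints; z_alloc holds the array actually
          // used for the result.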
5186      __ set(need_alloc, zero);
5187      __ set(z_alloc, z);
5188      __ if_then(z, BoolTest::eq, null()); {
5189        __ increment(need_alloc, one);
5190      } __ else_(); {
5191        // Update graphKit memory and control from IdealKit.
5192        sync_kit(ideal);
5193        Node* zlen_arg = load_array_length(z);
5194        // Update IdealKit memory and control from graphKit.
5195        __ sync_kit(this);
5196        __ if_then(zlen_arg, BoolTest::lt, zlen); {
5197          __ increment(need_alloc, one);
5198        } __ end_if();
5199      } __ end_if();
5200 
5201      __ if_then(__ value(need_alloc), BoolTest::ne, zero); {
5202        // Update graphKit memory and control from IdealKit.
5203        sync_kit(ideal);
5204        Node* narr = new_array(klass_node, zlen, 1);
5205        // Update IdealKit memory and control from graphKit.
5206        __ sync_kit(this);
5207        __ set(z_alloc, narr);
5208      } __ end_if();
5209 
5210      sync_kit(ideal);
5211      z = __ value(z_alloc);
5212      // Can't use TypeAryPtr::INTS which uses Bottom offset.
5213      _gvn.set_type(z, TypeOopPtr::make_from_klass(klass));
5214      // Final sync IdealKit and GraphKit.
5215      final_sync(ideal);
5216 #undef __
5217 
5218     Node* z_start = array_element_address(z, intcon(0), T_INT);
5219 
5220     Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
5221                                    OptoRuntime::multiplyToLen_Type(),
5222                                    stubAddr, stubName, TypePtr::BOTTOM,
5223                                    x_start, xlen, y_start, ylen, z_start, zlen);
5224   } // original reexecute is set back here
5225 
5226   C->set_has_split_ifs(true); // Has chance for split-if optimization
5227   set_result(z);
5228   return true;
5229 }
5230 
5231 //-------------inline_squareToLen------------------------------------
5232 bool LibraryCallKit::inline_squareToLen() {
5233   assert(UseSquareToLenIntrinsic, "not implemented on this platform");
5234 
5235   address stubAddr = StubRoutines::squareToLen();
5236   if (stubAddr == NULL) {
5237     return false; // Intrinsic's stub is not implemented on this platform
5238   }
5239   const char* stubName = "squareToLen";
5240 
5241   assert(callee()->signature()->size() == 4, "implSquareToLen has 4 parameters");
5242 
5243   Node* x    = argument(0);
5244   Node* len  = argument(1);
5245   Node* z    = argument(2);
5246   Node* zlen = argument(3);
5247 
5248   const Type* x_type = x->Value(&_gvn);
5249   const Type* z_type = z->Value(&_gvn);
5250   const TypeAryPtr* top_x = x_type->isa_aryptr();
5251   const TypeAryPtr* top_z = z_type->isa_aryptr();
5252   if (top_x  == NULL || top_x->klass()  == NULL ||
5253       top_z  == NULL || top_z->klass()  == NULL) {
5254     // failed array check
5255     return false;
5256   }
5257 
5258   BasicType x_elem = x_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5259   BasicType z_elem = z_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5260   if (x_elem != T_INT || z_elem != T_INT) {
5261     return false;
5262   }
5263 
5264 
5265   Node* x_start = array_element_address(x, intcon(0), x_elem);
5266   Node* z_start = array_element_address(z, intcon(0), z_elem);
5267 
5268   Node*  call = make_runtime_call(RC_LEAF|RC_NO_FP,
5269                                   OptoRuntime::squareToLen_Type(),
5270                                   stubAddr, stubName, TypePtr::BOTTOM,
5271                                   x_start, len, z_start, zlen);
5272 
5273   set_result(z);
5274   return true;
5275 }
5276 
5277 //-------------inline_mulAdd------------------------------------------
5278 bool LibraryCallKit::inline_mulAdd() {
5279   assert(UseMulAddIntrinsic, "not implemented on this platform");
5280 
5281   address stubAddr = StubRoutines::mulAdd();
5282   if (stubAddr == NULL) {
5283     return false; // Intrinsic's stub is not implemented on this platform
5284   }
5285   const char* stubName = "mulAdd";
5286 
5287   assert(callee()->signature()->size() == 5, "mulAdd has 5 parameters");
5288 
5289   Node* out      = argument(0);
5290   Node* in       = argument(1);
5291   Node* offset   = argument(2);
5292   Node* len      = argument(3);
5293   Node* k        = argument(4);
5294 
5295   const Type* out_type = out->Value(&_gvn);
5296   const Type* in_type = in->Value(&_gvn);
5297   const TypeAryPtr* top_out = out_type->isa_aryptr();
5298   const TypeAryPtr* top_in = in_type->isa_aryptr();
5299   if (top_out  == NULL || top_out->klass()  == NULL ||
5300       top_in == NULL || top_in->klass() == NULL) {
5301     // failed array check
5302     return false;
5303   }
5304 
5305   BasicType out_elem = out_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5306   BasicType in_elem = in_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5307   if (out_elem != T_INT || in_elem != T_INT) {
5308     return false;
5309   }
5310 
5311   Node* outlen = load_array_length(out);
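       // Rebase the offset from the end of 'out': the Java fallback
       // (BigInteger.implMulAdd) indexes the array from its end, so the stub
       // expects outlen - offset rather than the raw Java offset.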
5312   Node* new_offset = _gvn.transform(new SubINode(outlen, offset));
5313   Node* out_start = array_element_address(out, intcon(0), out_elem);
5314   Node* in_start = array_element_address(in, intcon(0), in_elem);
5315 
5316   Node*  call = make_runtime_call(RC_LEAF|RC_NO_FP,
5317                                   OptoRuntime::mulAdd_Type(),
5318                                   stubAddr, stubName, TypePtr::BOTTOM,
5319                                   out_start, in_start, new_offset, len, k);
5320   Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
5321   set_result(result);
5322   return true;
5323 }
5324 
5325 //-------------inline_montgomeryMultiply-----------------------------------
5326 bool LibraryCallKit::inline_montgomeryMultiply() {
5327   address stubAddr = StubRoutines::montgomeryMultiply();
5328   if (stubAddr == NULL) {
5329     return false; // Intrinsic's stub is not implemented on this platform
5330   }
5331 
5332   assert(UseMontgomeryMultiplyIntrinsic, "not implemented on this platform");
5333   const char* stubName = "montgomery_multiply";
5334 
5335   assert(callee()->signature()->size() == 7, "montgomeryMultiply has 7 parameters");
5336 
5337   Node* a    = argument(0);
5338   Node* b    = argument(1);
5339   Node* n    = argument(2);
5340   Node* len  = argument(3);
5341   Node* inv  = argument(4);
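       // argument(5) is the unused high half of the two-slot long 'inv',
       // which is why 'm' is argument(6); the stub call below likewise
       // passes top() for that slot.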
5342   Node* m    = argument(6);
5343 
5344   const Type* a_type = a->Value(&_gvn);
5345   const TypeAryPtr* top_a = a_type->isa_aryptr();
5346   const Type* b_type = b->Value(&_gvn);
5347   const TypeAryPtr* top_b = b_type->isa_aryptr();
5348   const Type* n_type = n->Value(&_gvn);
5349   const TypeAryPtr* top_n = n_type->isa_aryptr();
5350   const Type* m_type = m->Value(&_gvn);
5351   const TypeAryPtr* top_m = m_type->isa_aryptr();
5352   if (top_a  == NULL || top_a->klass()  == NULL ||
5353       top_b == NULL || top_b->klass()  == NULL ||
5354       top_n == NULL || top_n->klass()  == NULL ||
5355       top_m == NULL || top_m->klass()  == NULL) {
5356     // failed array check
5357     return false;
5358   }
5359 
5360   BasicType a_elem = a_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5361   BasicType b_elem = b_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5362   BasicType n_elem = n_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5363   BasicType m_elem = m_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5364   if (a_elem != T_INT || b_elem != T_INT || n_elem != T_INT || m_elem != T_INT) {
5365     return false;
5366   }
5367 
5368   // Make the call
5369   {
5370     Node* a_start = array_element_address(a, intcon(0), a_elem);
5371     Node* b_start = array_element_address(b, intcon(0), b_elem);
5372     Node* n_start = array_element_address(n, intcon(0), n_elem);
5373     Node* m_start = array_element_address(m, intcon(0), m_elem);
5374 
5375     Node* call = make_runtime_call(RC_LEAF,
5376                                    OptoRuntime::montgomeryMultiply_Type(),
5377                                    stubAddr, stubName, TypePtr::BOTTOM,
5378                                    a_start, b_start, n_start, len, inv, top(),
5379                                    m_start);
5380     set_result(m);
5381   }
5382 
5383   return true;
5384 }
5385 
5386 bool LibraryCallKit::inline_montgomerySquare() {
5387   address stubAddr = StubRoutines::montgomerySquare();
5388   if (stubAddr == NULL) {
5389     return false; // Intrinsic's stub is not implemented on this platform
5390   }
5391 
5392   assert(UseMontgomerySquareIntrinsic, "not implemented on this platform");
5393   const char* stubName = "montgomery_square";
5394 
5395   assert(callee()->signature()->size() == 6, "montgomerySquare has 6 parameters");
5396 
5397   Node* a    = argument(0);
5398   Node* n    = argument(1);
5399   Node* len  = argument(2);
5400   Node* inv  = argument(3);
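       // argument(4) is the unused high half of the long 'inv'; hence 'm'
       // is argument(5).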
5401   Node* m    = argument(5);
5402 
5403   const Type* a_type = a->Value(&_gvn);
5404   const TypeAryPtr* top_a = a_type->isa_aryptr();
5405   const Type* n_type = n->Value(&_gvn);
5406   const TypeAryPtr* top_n = n_type->isa_aryptr();
5407   const Type* m_type = m->Value(&_gvn);
5408   const TypeAryPtr* top_m = m_type->isa_aryptr();
5409   if (top_a  == NULL || top_a->klass()  == NULL ||
5410       top_n == NULL || top_n->klass()  == NULL ||
5411       top_m == NULL || top_m->klass()  == NULL) {
5412     // failed array check
5413     return false;
5414   }
5415 
5416   BasicType a_elem = a_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5417   BasicType n_elem = n_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5418   BasicType m_elem = m_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5419   if (a_elem != T_INT || n_elem != T_INT || m_elem != T_INT) {
5420     return false;
5421   }
5422 
5423   // Make the call
5424   {
5425     Node* a_start = array_element_address(a, intcon(0), a_elem);
5426     Node* n_start = array_element_address(n, intcon(0), n_elem);
5427     Node* m_start = array_element_address(m, intcon(0), m_elem);
5428 
5429     Node* call = make_runtime_call(RC_LEAF,
5430                                    OptoRuntime::montgomerySquare_Type(),
5431                                    stubAddr, stubName, TypePtr::BOTTOM,
5432                                    a_start, n_start, len, inv, top(),
5433                                    m_start);
5434     set_result(m);
5435   }
5436 
5437   return true;
5438 }
5439 
5440 
5441 /**
5442  * Calculate CRC32 for byte.
5443  * int java.util.zip.CRC32.update(int crc, int b)
5444  */
5445 bool LibraryCallKit::inline_updateCRC32() {
5446   assert(UseCRC32Intrinsics, "need AVX and CLMUL instruction support");
5447   assert(callee()->signature()->size() == 2, "update has 2 parameters");
5448   // no receiver since it is a static method
5449   Node* crc  = argument(0); // type: int
5450   Node* b    = argument(1); // type: int
5451 
5452   /*
5453    *    int c = ~ crc;
5454    *    b = timesXtoThe32[(b ^ c) & 0xFF];
5455    *    b = b ^ (c >>> 8);
5456    *    crc = ~b;
5457    */
5458 
5459   Node* M1 = intcon(-1);
5460   crc = _gvn.transform(new XorINode(crc, M1));
5461   Node* result = _gvn.transform(new XorINode(crc, b));
5462   result = _gvn.transform(new AndINode(result, intcon(0xFF)));
5463 
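       // Index the 256-entry CRC table: entries are 4-byte ints, so the
       // index is scaled by 4 (<< 2) to form a byte offset from the base.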
5464   Node* base = makecon(TypeRawPtr::make(StubRoutines::crc_table_addr()));
5465   Node* offset = _gvn.transform(new LShiftINode(result, intcon(0x2)));
5466   Node* adr = basic_plus_adr(top(), base, ConvI2X(offset));
5467   result = make_load(control(), adr, TypeInt::INT, T_INT, MemNode::unordered);
5468 
5469   crc = _gvn.transform(new URShiftINode(crc, intcon(8)));
5470   result = _gvn.transform(new XorINode(crc, result));
5471   result = _gvn.transform(new XorINode(result, M1));
5472   set_result(result);
5473   return true;
5474 }
5475 
5476 /**
5477  * Calculate CRC32 for byte[] array.
5478  * int java.util.zip.CRC32.updateBytes(int crc, byte[] buf, int off, int len)
5479  */
5480 bool LibraryCallKit::inline_updateBytesCRC32() {
5481   assert(UseCRC32Intrinsics, "need AVX and CLMUL instruction support");
5482   assert(callee()->signature()->size() == 4, "updateBytes has 4 parameters");
5483   // no receiver since it is a static method
5484   Node* crc     = argument(0); // type: int
5485   Node* src     = argument(1); // type: oop
5486   Node* offset  = argument(2); // type: int
5487   Node* length  = argument(3); // type: int
5488 
5489   const Type* src_type = src->Value(&_gvn);
5490   const TypeAryPtr* top_src = src_type->isa_aryptr();
5491   if (top_src  == NULL || top_src->klass()  == NULL) {
5492     // failed array check
5493     return false;
5494   }
5495 
5496   // Figure out the size and type of the elements we will be copying.
5497   BasicType src_elem = src_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5498   if (src_elem != T_BYTE) {
5499     return false;
5500   }
5501 
5502   // 'src_start' points to src array + scaled offset
5503   Node* src_start = array_element_address(src, offset, src_elem);
5504 
5505   // We assume that range check is done by caller.
5506   // TODO: generate range check (offset+length < src.length) in debug VM.
5507 
5508   // Call the stub.
5509   address stubAddr = StubRoutines::updateBytesCRC32();
5510   const char *stubName = "updateBytesCRC32";
5511 
5512   Node* call = make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::updateBytesCRC32_Type(),
5513                                  stubAddr, stubName, TypePtr::BOTTOM,
5514                                  crc, src_start, length);
5515   Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
5516   set_result(result);
5517   return true;
5518 }
5519 
5520 /**
5521  * Calculate CRC32 for ByteBuffer.
5522  * int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
5523  */
5524 bool LibraryCallKit::inline_updateByteBufferCRC32() {
5525   assert(UseCRC32Intrinsics, "need AVX and CLMUL instruction support");
5526   assert(callee()->signature()->size() == 5, "updateByteBuffer has 4 parameters and one is long");
5527   // no receiver since it is a static method
5528   Node* crc     = argument(0); // type: int
5529   Node* src     = argument(1); // type: long
5530   Node* offset  = argument(3); // type: int
5531   Node* length  = argument(4); // type: int
5532 
5533   src = ConvL2X(src);  // adjust Java long to machine word
5534   Node* base = _gvn.transform(new CastX2PNode(src));
5535   offset = ConvI2X(offset);
5536 
5537   // 'src_start' points to src array + scaled offset
5538   Node* src_start = basic_plus_adr(top(), base, offset);
5539 
5540   // Call the stub.
5541   address stubAddr = StubRoutines::updateBytesCRC32();
5542   const char *stubName = "updateBytesCRC32";
5543 
5544   Node* call = make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::updateBytesCRC32_Type(),
5545                                  stubAddr, stubName, TypePtr::BOTTOM,
5546                                  crc, src_start, length);
5547   Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
5548   set_result(result);
5549   return true;
5550 }
5551 
5552 //------------------------------get_table_from_crc32c_class-----------------------
5553 Node * LibraryCallKit::get_table_from_crc32c_class(ciInstanceKlass *crc32c_class) {
5554   Node* table = load_field_from_object(NULL, "byteTable", "[I", /*is_exact*/ false, /*is_static*/ true, crc32c_class);
5555   assert (table != NULL, "wrong version of java.util.zip.CRC32C");
5556 
5557   return table;
5558 }
5559 
5560 //------------------------------inline_updateBytesCRC32C-----------------------
5561 //
5562 // Calculate CRC32C for byte[] array.
5563 // int java.util.zip.CRC32C.updateBytes(int crc, byte[] buf, int off, int end)
5564 //
5565 bool LibraryCallKit::inline_updateBytesCRC32C() {
5566   assert(UseCRC32CIntrinsics, "need CRC32C instruction support");
5567   assert(callee()->signature()->size() == 4, "updateBytes has 4 parameters");
5568   assert(callee()->holder()->is_loaded(), "CRC32C class must be loaded");
5569   // no receiver since it is a static method
5570   Node* crc     = argument(0); // type: int
5571   Node* src     = argument(1); // type: oop
5572   Node* offset  = argument(2); // type: int
5573   Node* end     = argument(3); // type: int
5574 
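       // Unlike CRC32.updateBytes, this method receives an exclusive 'end'
       // index rather than a length, so compute length = end - offset.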
5575   Node* length = _gvn.transform(new SubINode(end, offset));
5576 
5577   const Type* src_type = src->Value(&_gvn);
5578   const TypeAryPtr* top_src = src_type->isa_aryptr();
5579   if (top_src  == NULL || top_src->klass()  == NULL) {
5580     // failed array check
5581     return false;
5582   }
5583 
5584   // Figure out the size and type of the elements we will be copying.
5585   BasicType src_elem = src_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5586   if (src_elem != T_BYTE) {
5587     return false;
5588   }
5589 
5590   // 'src_start' points to src array + scaled offset
5591   Node* src_start = array_element_address(src, offset, src_elem);
5592 
5593   // static final int[] byteTable in class CRC32C
5594   Node* table = get_table_from_crc32c_class(callee()->holder());
5595   Node* table_start = array_element_address(table, intcon(0), T_INT);
5596 
5597   // We assume that range check is done by caller.
5598   // TODO: generate range check (offset+length < src.length) in debug VM.
5599 
5600   // Call the stub.
5601   address stubAddr = StubRoutines::updateBytesCRC32C();
5602   const char *stubName = "updateBytesCRC32C";
5603 
5604   Node* call = make_runtime_call(RC_LEAF, OptoRuntime::updateBytesCRC32C_Type(),
5605                                  stubAddr, stubName, TypePtr::BOTTOM,
5606                                  crc, src_start, length, table_start);
5607   Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
5608   set_result(result);
5609   return true;
5610 }
5611 
5612 //------------------------------inline_updateDirectByteBufferCRC32C-----------------------
5613 //
5614 // Calculate CRC32C for DirectByteBuffer.
5615 // int java.util.zip.CRC32C.updateDirectByteBuffer(int crc, long buf, int off, int end)
5616 //
5617 bool LibraryCallKit::inline_updateDirectByteBufferCRC32C() {
5618   assert(UseCRC32CIntrinsics, "need CRC32C instruction support");
5619   assert(callee()->signature()->size() == 5, "updateDirectByteBuffer has 4 parameters and one is long");
5620   assert(callee()->holder()->is_loaded(), "CRC32C class must be loaded");
5621   // no receiver since it is a static method
5622   Node* crc     = argument(0); // type: int
5623   Node* src     = argument(1); // type: long
5624   Node* offset  = argument(3); // type: int
5625   Node* end     = argument(4); // type: int
5626 
5627   Node* length = _gvn.transform(new SubINode(end, offset));
5628 
5629   src = ConvL2X(src);  // adjust Java long to machine word
5630   Node* base = _gvn.transform(new CastX2PNode(src));
5631   offset = ConvI2X(offset);
5632 
5633   // 'src_start' points to src array + scaled offset
5634   Node* src_start = basic_plus_adr(top(), base, offset);
5635 
5636   // static final int[] byteTable in class CRC32C
5637   Node* table = get_table_from_crc32c_class(callee()->holder());
5638   Node* table_start = array_element_address(table, intcon(0), T_INT);
5639 
5640   // Call the stub.
5641   address stubAddr = StubRoutines::updateBytesCRC32C();
5642   const char *stubName = "updateBytesCRC32C";
5643 
5644   Node* call = make_runtime_call(RC_LEAF, OptoRuntime::updateBytesCRC32C_Type(),
5645                                  stubAddr, stubName, TypePtr::BOTTOM,
5646                                  crc, src_start, length, table_start);
5647   Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
5648   set_result(result);
5649   return true;
5650 }
5651 
5652 //------------------------------inline_updateBytesAdler32----------------------
5653 //
5654 // Calculate Adler32 checksum for byte[] array.
5655 // int java.util.zip.Adler32.updateBytes(int crc, byte[] buf, int off, int len)
5656 //
5657 bool LibraryCallKit::inline_updateBytesAdler32() {
5658   assert(UseAdler32Intrinsics, "Adler32 intrinsic support needed"); // check if we actually need to check this flag or check a different one
5659   assert(callee()->signature()->size() == 4, "updateBytes has 4 parameters");
5660   assert(callee()->holder()->is_loaded(), "Adler32 class must be loaded");
5661   // no receiver since it is a static method
5662   Node* crc     = argument(0); // type: int
5663   Node* src     = argument(1); // type: oop
5664   Node* offset  = argument(2); // type: int
5665   Node* length  = argument(3); // type: int
5666 
5667   const Type* src_type = src->Value(&_gvn);
5668   const TypeAryPtr* top_src = src_type->isa_aryptr();
5669   if (top_src  == NULL || top_src->klass()  == NULL) {
5670     // failed array check
5671     return false;
5672   }
5673 
5674   // Figure out the size and type of the elements we will be copying.
5675   BasicType src_elem = src_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5676   if (src_elem != T_BYTE) {
5677     return false;
5678   }
5679 
5680   // 'src_start' points to src array + scaled offset
5681   Node* src_start = array_element_address(src, offset, src_elem);
5682 
5683   // We assume that range check is done by caller.
5684   // TODO: generate range check (offset+length < src.length) in debug VM.
5685 
5686   // Call the stub.
5687   address stubAddr = StubRoutines::updateBytesAdler32();
5688   const char *stubName = "updateBytesAdler32";
5689 
5690   Node* call = make_runtime_call(RC_LEAF, OptoRuntime::updateBytesAdler32_Type(),
5691                                  stubAddr, stubName, TypePtr::BOTTOM,
5692                                  crc, src_start, length);
5693   Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
5694   set_result(result);
5695   return true;
5696 }
5697 
5698 //------------------------------inline_updateByteBufferAdler32---------------
5699 //
5700 // Calculate Adler32 checksum for DirectByteBuffer.
5701 // int java.util.zip.Adler32.updateByteBuffer(int crc, long buf, int off, int len)
5702 //
5703 bool LibraryCallKit::inline_updateByteBufferAdler32() {
5704   assert(UseAdler32Intrinsics, "Adler32 intrinsic support needed"); // check if we actually need to check this flag or check a different one
5705   assert(callee()->signature()->size() == 5, "updateByteBuffer has 4 parameters and one is long");
5706   assert(callee()->holder()->is_loaded(), "Adler32 class must be loaded");
5707   // no receiver since it is a static method
5708   Node* crc     = argument(0); // type: int
5709   Node* src     = argument(1); // type: long
5710   Node* offset  = argument(3); // type: int
5711   Node* length  = argument(4); // type: int
5712 
5713   src = ConvL2X(src);  // adjust Java long to machine word
5714   Node* base = _gvn.transform(new CastX2PNode(src));
5715   offset = ConvI2X(offset);
5716 
5717   // 'src_start' points to src array + scaled offset
5718   Node* src_start = basic_plus_adr(top(), base, offset);
5719 
5720   // Call the stub.
5721   address stubAddr = StubRoutines::updateBytesAdler32();
5722   const char *stubName = "updateBytesAdler32";
5723 
5724   Node* call = make_runtime_call(RC_LEAF, OptoRuntime::updateBytesAdler32_Type(),
5725                                  stubAddr, stubName, TypePtr::BOTTOM,
5726                                  crc, src_start, length);
5727 
5728   Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
5729   set_result(result);
5730   return true;
5731 }
5732 
5733 //----------------------------inline_reference_get----------------------------
5734 // public T java.lang.ref.Reference.get();
5735 bool LibraryCallKit::inline_reference_get() {
5736   const int referent_offset = java_lang_ref_Reference::referent_offset;
5737   guarantee(referent_offset > 0, "should have already been set");
5738 
5739   // Get the argument:
5740   Node* reference_obj = null_check_receiver();
5741   if (stopped()) return true;
5742 
5743   Node* adr = basic_plus_adr(reference_obj, reference_obj, referent_offset);
5744 
5745   ciInstanceKlass* klass = env()->Object_klass();
5746   const TypeOopPtr* object_type = TypeOopPtr::make_from_klass(klass);
5747 
5748   Node* no_ctrl = NULL;
5749   Node* result = make_load(no_ctrl, adr, object_type, T_OBJECT, MemNode::unordered);
5750 
5751   // Use the pre-barrier to record the value in the referent field
5752   pre_barrier(false /* do_load */,
5753               control(),
5754               NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
5755               result /* pre_val */,
5756               T_OBJECT);
5757 
5758   // Add memory barrier to prevent commoning reads from this field
5759   // across safepoint since GC can change its value.
5760   insert_mem_bar(Op_MemBarCPUOrder);
5761 
5762   set_result(result);
5763   return true;
5764 }
5765 
5766 
5767 Node * LibraryCallKit::load_field_from_object(Node * fromObj, const char * fieldName, const char * fieldTypeString,
5768                                               bool is_exact=true, bool is_static=false,
5769                                               ciInstanceKlass * fromKls=NULL) {
5770   if (fromKls == NULL) {
5771     const TypeInstPtr* tinst = _gvn.type(fromObj)->isa_instptr();
5772     assert(tinst != NULL, "obj is null");
5773     assert(tinst->klass()->is_loaded(), "obj is not loaded");
5774     assert(!is_exact || tinst->klass_is_exact(), "klass not exact");
5775     fromKls = tinst->klass()->as_instance_klass();
5776   } else {
5777     assert(is_static, "only for static field access");
5778   }
5779   ciField* field = fromKls->get_field_by_name(ciSymbol::make(fieldName),
5780                                               ciSymbol::make(fieldTypeString),
5781                                               is_static);
5782 
5783   assert (field != NULL, "undefined field");
5784   if (field == NULL) return (Node *) NULL;
5785 
5786   if (is_static) {
5787     const TypeInstPtr* tip = TypeInstPtr::make(fromKls->java_mirror());
5788     fromObj = makecon(tip);
5789   }
5790 
5791   // Next code copied from Parse::do_get_xxx():
5792 
5793   // Compute address and memory type.
5794   int offset  = field->offset_in_bytes();
5795   bool is_vol = field->is_volatile();
5796   ciType* field_klass = field->type();
5797   assert(field_klass->is_loaded(), "should be loaded");
5798   const TypePtr* adr_type = C->alias_type(field)->adr_type();
5799   Node *adr = basic_plus_adr(fromObj, fromObj, offset);
5800   BasicType bt = field->layout_type();
5801 
5802   // Build the resultant type of the load
5803   const Type *type;
5804   if (bt == T_OBJECT) {
5805     type = TypeOopPtr::make_from_klass(field_klass->as_klass());
5806   } else {
5807     type = Type::get_const_basic_type(bt);
5808   }
5809 
5810   if (support_IRIW_for_not_multiple_copy_atomic_cpu && is_vol) {
5811     insert_mem_bar(Op_MemBarVolatile);   // StoreLoad barrier
5812   }
5813   // Build the load.
5814   MemNode::MemOrd mo = is_vol ? MemNode::acquire : MemNode::unordered;
5815   Node* loadedField = make_load(NULL, adr, type, bt, adr_type, mo, LoadNode::DependsOnlyOnTest, is_vol);
5816   // If reference is volatile, prevent following memory ops from
5817   // floating up past the volatile read.  Also prevents commoning
5818   // another volatile read.
5819   if (is_vol) {
5820     // Memory barrier includes bogus read of value to force load BEFORE membar
5821     insert_mem_bar(Op_MemBarAcquire, loadedField);
5822   }
5823   return loadedField;
5824 }
5825 
5826 
5827 //------------------------------inline_aescrypt_Block-----------------------
5828 bool LibraryCallKit::inline_aescrypt_Block(vmIntrinsics::ID id) {
5829   address stubAddr = NULL;
5830   const char *stubName;
5831   assert(UseAES, "need AES instruction support");
5832 
5833   switch(id) {
5834   case vmIntrinsics::_aescrypt_encryptBlock:
5835     stubAddr = StubRoutines::aescrypt_encryptBlock();
5836     stubName = "aescrypt_encryptBlock";
5837     break;
5838   case vmIntrinsics::_aescrypt_decryptBlock:
5839     stubAddr = StubRoutines::aescrypt_decryptBlock();
5840     stubName = "aescrypt_decryptBlock";
5841     break;
5842   }
5843   if (stubAddr == NULL) return false;
5844 
5845   Node* aescrypt_object = argument(0);
5846   Node* src             = argument(1);
5847   Node* src_offset      = argument(2);
5848   Node* dest            = argument(3);
5849   Node* dest_offset     = argument(4);
5850 
5851   // (1) src and dest are arrays.
5852   const Type* src_type = src->Value(&_gvn);
5853   const Type* dest_type = dest->Value(&_gvn);
5854   const TypeAryPtr* top_src = src_type->isa_aryptr();
5855   const TypeAryPtr* top_dest = dest_type->isa_aryptr();
5856   assert (top_src  != NULL && top_src->klass()  != NULL &&  top_dest != NULL && top_dest->klass() != NULL, "args are strange");
5857 
5858   // for the quick and dirty code we will skip all the checks.
5859   // we are just trying to get the call to be generated.
5860   Node* src_start  = src;
5861   Node* dest_start = dest;
5862   if (src_offset != NULL || dest_offset != NULL) {
5863     assert(src_offset != NULL && dest_offset != NULL, "");
5864     src_start  = array_element_address(src,  src_offset,  T_BYTE);
5865     dest_start = array_element_address(dest, dest_offset, T_BYTE);
5866   }
5867 
5868   // now need to get the start of its expanded key array
5869   // this requires a newer class file that has this array as littleEndian ints, otherwise we revert to java
5870   Node* k_start = get_key_start_from_aescrypt_object(aescrypt_object);
5871   if (k_start == NULL) return false;
5872 
5873   if (Matcher::pass_original_key_for_aes()) {
5874     // on SPARC we need to pass the original key since key expansion needs to happen in intrinsics due to
5875     // compatibility issues between Java key expansion and SPARC crypto instructions
5876     Node* original_k_start = get_original_key_start_from_aescrypt_object(aescrypt_object);
5877     if (original_k_start == NULL) return false;
5878 
5879     // Call the stub.
5880     make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::aescrypt_block_Type(),
5881                       stubAddr, stubName, TypePtr::BOTTOM,
5882                       src_start, dest_start, k_start, original_k_start);
5883   } else {
5884     // Call the stub.
5885     make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::aescrypt_block_Type(),
5886                       stubAddr, stubName, TypePtr::BOTTOM,
5887                       src_start, dest_start, k_start);
5888   }
5889 
5890   return true;
5891 }
5892 
5893 //------------------------------inline_cipherBlockChaining_AESCrypt-----------------------
5894 bool LibraryCallKit::inline_cipherBlockChaining_AESCrypt(vmIntrinsics::ID id) {
5895   address stubAddr = NULL;
5896   const char *stubName = NULL;
5897 
5898   assert(UseAES, "need AES instruction support");
5899 
5900   switch(id) {
5901   case vmIntrinsics::_cipherBlockChaining_encryptAESCrypt:
5902     stubAddr = StubRoutines::cipherBlockChaining_encryptAESCrypt();
5903     stubName = "cipherBlockChaining_encryptAESCrypt";
5904     break;
5905   case vmIntrinsics::_cipherBlockChaining_decryptAESCrypt:
5906     stubAddr = StubRoutines::cipherBlockChaining_decryptAESCrypt();
5907     stubName = "cipherBlockChaining_decryptAESCrypt";
5908     break;
5909   }
5910   if (stubAddr == NULL) return false;
5911 
5912   Node* cipherBlockChaining_object = argument(0);
5913   Node* src                        = argument(1);
5914   Node* src_offset                 = argument(2);
5915   Node* len                        = argument(3);
5916   Node* dest                       = argument(4);
5917   Node* dest_offset                = argument(5);
5918 
5919   // (1) src and dest are arrays.
5920   const Type* src_type = src->Value(&_gvn);
5921   const Type* dest_type = dest->Value(&_gvn);
5922   const TypeAryPtr* top_src = src_type->isa_aryptr();
5923   const TypeAryPtr* top_dest = dest_type->isa_aryptr();
5924   assert (top_src  != NULL && top_src->klass()  != NULL
5925           &&  top_dest != NULL && top_dest->klass() != NULL, "args are strange");
5926 
5927   // checks are the responsibility of the caller
5928   Node* src_start  = src;
5929   Node* dest_start = dest;
5930   if (src_offset != NULL || dest_offset != NULL) {
5931     assert(src_offset != NULL && dest_offset != NULL, "");
5932     src_start  = array_element_address(src,  src_offset,  T_BYTE);
5933     dest_start = array_element_address(dest, dest_offset, T_BYTE);
5934   }
5935 
5936   // if we are in this set of code, we "know" the embeddedCipher is an AESCrypt object
5937   // (because of the predicated logic executed earlier).
5938   // so we cast it here safely.
5939   // this requires a newer class file that has this array as littleEndian ints, otherwise we revert to java
5940 
5941   Node* embeddedCipherObj = load_field_from_object(cipherBlockChaining_object, "embeddedCipher", "Lcom/sun/crypto/provider/SymmetricCipher;", /*is_exact*/ false);
5942   if (embeddedCipherObj == NULL) return false;
5943 
5944   // cast it to what we know it will be at runtime
5945   const TypeInstPtr* tinst = _gvn.type(cipherBlockChaining_object)->isa_instptr();
5946   assert(tinst != NULL, "CBC obj is null");
5947   assert(tinst->klass()->is_loaded(), "CBC obj is not loaded");
5948   ciKlass* klass_AESCrypt = tinst->klass()->as_instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AESCrypt"));
5949   assert(klass_AESCrypt->is_loaded(), "predicate checks that this class is loaded");
5950 
5951   ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass();
5952   const TypeKlassPtr* aklass = TypeKlassPtr::make(instklass_AESCrypt);
5953   const TypeOopPtr* xtype = aklass->as_instance_type();
5954   Node* aescrypt_object = new CheckCastPPNode(control(), embeddedCipherObj, xtype);
5955   aescrypt_object = _gvn.transform(aescrypt_object);
5956 
5957   // we need to get the start of the aescrypt_object's expanded key array
5958   Node* k_start = get_key_start_from_aescrypt_object(aescrypt_object);
5959   if (k_start == NULL) return false;
5960 
5961   // similarly, get the start address of the r vector
5962   Node* objRvec = load_field_from_object(cipherBlockChaining_object, "r", "[B", /*is_exact*/ false);
5963   if (objRvec == NULL) return false;
5964   Node* r_start = array_element_address(objRvec, intcon(0), T_BYTE);
5965 
5966   Node* cbcCrypt;
5967   if (Matcher::pass_original_key_for_aes()) {
5968     // on SPARC we need to pass the original key since key expansion needs to happen in intrinsics due to
5969     // compatibility issues between Java key expansion and SPARC crypto instructions
5970     Node* original_k_start = get_original_key_start_from_aescrypt_object(aescrypt_object);
5971     if (original_k_start == NULL) return false;
5972 
5973     // Call the stub, passing src_start, dest_start, k_start, r_start, src_len and original_k_start
5974     cbcCrypt = make_runtime_call(RC_LEAF|RC_NO_FP,
5975                                  OptoRuntime::cipherBlockChaining_aescrypt_Type(),
5976                                  stubAddr, stubName, TypePtr::BOTTOM,
5977                                  src_start, dest_start, k_start, r_start, len, original_k_start);
5978   } else {
5979     // Call the stub, passing src_start, dest_start, k_start, r_start and src_len
5980     cbcCrypt = make_runtime_call(RC_LEAF|RC_NO_FP,
5981                                  OptoRuntime::cipherBlockChaining_aescrypt_Type(),
5982                                  stubAddr, stubName, TypePtr::BOTTOM,
5983                                  src_start, dest_start, k_start, r_start, len);
5984   }
5985 
5986   // return cipher length (int)
5987   Node* retvalue = _gvn.transform(new ProjNode(cbcCrypt, TypeFunc::Parms));
5988   set_result(retvalue);
5989   return true;
5990 }
5991 
5992 //------------------------------get_key_start_from_aescrypt_object-----------------------
5993 Node * LibraryCallKit::get_key_start_from_aescrypt_object(Node *aescrypt_object) {
5994   Node* objAESCryptKey = load_field_from_object(aescrypt_object, "K", "[I", /*is_exact*/ false);
5995   assert (objAESCryptKey != NULL, "wrong version of com.sun.crypto.provider.AESCrypt");
5996   if (objAESCryptKey == NULL) return (Node *) NULL;
5997 
5998   // now have the array, need to get the start address of the K array
5999   Node* k_start = array_element_address(objAESCryptKey, intcon(0), T_INT);
6000   return k_start;
6001 }
6002 
6003 //------------------------------get_original_key_start_from_aescrypt_object-----------------------
6004 Node * LibraryCallKit::get_original_key_start_from_aescrypt_object(Node *aescrypt_object) {
6005   Node* objAESCryptKey = load_field_from_object(aescrypt_object, "lastKey", "[B", /*is_exact*/ false);
6006   assert (objAESCryptKey != NULL, "wrong version of com.sun.crypto.provider.AESCrypt");
6007   if (objAESCryptKey == NULL) return (Node *) NULL;
6008 
6009   // now have the array, need to get the start address of the lastKey array
6010   Node* original_k_start = array_element_address(objAESCryptKey, intcon(0), T_BYTE);
6011   return original_k_start;
6012 }
6013 
6014 //----------------------------inline_cipherBlockChaining_AESCrypt_predicate----------------------------
6015 // Return node representing slow path of predicate check.
6016 // the pseudo code we want to emulate with this predicate is:
6017 // for encryption:
6018 //    if (embeddedCipherObj instanceof AESCrypt) do_intrinsic, else do_javapath
6019 // for decryption:
6020 //    if ((embeddedCipherObj instanceof AESCrypt) && (cipher!=plain)) do_intrinsic, else do_javapath
6021 //    note cipher==plain is more conservative than the original java code but that's OK
6022 //
6023 Node* LibraryCallKit::inline_cipherBlockChaining_AESCrypt_predicate(bool decrypting) {
6024   // The receiver was checked for NULL already.
6025   Node* objCBC = argument(0);
6026 
6027   // Load embeddedCipher field of CipherBlockChaining object.
6028   Node* embeddedCipherObj = load_field_from_object(objCBC, "embeddedCipher", "Lcom/sun/crypto/provider/SymmetricCipher;", /*is_exact*/ false);
6029 
6030   // get AESCrypt klass for instanceOf check
6031   // AESCrypt might not be loaded yet if some other SymmetricCipher got us to this compile point
6032   // it will have the same class loader as the CipherBlockChaining object
6033   const TypeInstPtr* tinst = _gvn.type(objCBC)->isa_instptr();
6034   assert(tinst != NULL, "CBCobj is null");
6035   assert(tinst->klass()->is_loaded(), "CBCobj is not loaded");
6036 
6037   // we want to do an instanceof comparison against the AESCrypt class
6038   ciKlass* klass_AESCrypt = tinst->klass()->as_instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AESCrypt"));
6039   if (!klass_AESCrypt->is_loaded()) {
6040     // if AESCrypt is not even loaded, we never take the intrinsic fast path
6041     Node* ctrl = control();
6042     set_control(top()); // no regular fast path
6043     return ctrl;
6044   }
6045   ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass();
6046 
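       // gen_instanceof returns 1 when embeddedCipherObj is an AESCrypt;
       // the guard below takes the slow path when it is not.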
6047   Node* instof = gen_instanceof(embeddedCipherObj, makecon(TypeKlassPtr::make(instklass_AESCrypt)));
6048   Node* cmp_instof  = _gvn.transform(new CmpINode(instof, intcon(1)));
6049   Node* bool_instof  = _gvn.transform(new BoolNode(cmp_instof, BoolTest::ne));
6050 
6051   Node* instof_false = generate_guard(bool_instof, NULL, PROB_MIN);
6052 
6053   // for encryption, we are done
6054   if (!decrypting)
6055     return instof_false;  // even if it is NULL
6056 
6057   // for decryption, we need to add a further check to avoid
6058   // taking the intrinsic path when cipher and plain are the same
6059   // see the original java code for why.
6060   RegionNode* region = new RegionNode(3);
6061   region->init_req(1, instof_false);
6062   Node* src = argument(1);
6063   Node* dest = argument(4);
6064   Node* cmp_src_dest = _gvn.transform(new CmpPNode(src, dest));
6065   Node* bool_src_dest = _gvn.transform(new BoolNode(cmp_src_dest, BoolTest::eq));
6066   Node* src_dest_conjoint = generate_guard(bool_src_dest, NULL, PROB_MIN);
6067   region->init_req(2, src_dest_conjoint);
6068 
6069   record_for_igvn(region);
6070   return _gvn.transform(region);
6071 }
6072 
6073 //------------------------------inline_ghash_processBlocks
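//
// Hash a sequence of 16-byte blocks into the GHASH state (used by GCM).
// Assumed Java shape, mirroring the argument list below (not verified here):
// void com.sun.crypto.provider.GHASH.processBlocks(byte[] data, int offset, int len, long[] state, long[] subkeyH)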
6074 bool LibraryCallKit::inline_ghash_processBlocks() {
6075   address stubAddr;
6076   const char *stubName;
6077   assert(UseGHASHIntrinsics, "need GHASH intrinsics support");
6078 
6079   stubAddr = StubRoutines::ghash_processBlocks();
6080   stubName = "ghash_processBlocks";
6081 
6082   Node* data           = argument(0);
6083   Node* offset         = argument(1);
6084   Node* len            = argument(2);
6085   Node* state          = argument(3);
6086   Node* subkeyH        = argument(4);
6087 
6088   Node* state_start  = array_element_address(state, intcon(0), T_LONG);
6089   assert(state_start, "state is NULL");
6090   Node* subkeyH_start  = array_element_address(subkeyH, intcon(0), T_LONG);
6091   assert(subkeyH_start, "subkeyH is NULL");
6092   Node* data_start  = array_element_address(data, offset, T_BYTE);
6093   assert(data_start, "data is NULL");
6094 
6095   Node* ghash = make_runtime_call(RC_LEAF|RC_NO_FP,
6096                                   OptoRuntime::ghash_processBlocks_Type(),
6097                                   stubAddr, stubName, TypePtr::BOTTOM,
6098                                   state_start, subkeyH_start, data_start, len);
6099   return true;
6100 }
6101 
6102 //------------------------------inline_sha_implCompress-----------------------
6103 //
6104 // Calculate SHA (i.e., SHA-1) for single-block byte[] array.
6105 // void com.sun.security.provider.SHA.implCompress(byte[] buf, int ofs)
6106 //
// Calculate SHA2 (i.e., SHA-224 or SHA-256) for single-block byte[] array.
6108 // void com.sun.security.provider.SHA2.implCompress(byte[] buf, int ofs)
6109 //
6110 // Calculate SHA5 (i.e., SHA-384 or SHA-512) for single-block byte[] array.
6111 // void com.sun.security.provider.SHA5.implCompress(byte[] buf, int ofs)
6112 //
6113 bool LibraryCallKit::inline_sha_implCompress(vmIntrinsics::ID id) {
6114   assert(callee()->signature()->size() == 2, "sha_implCompress has 2 parameters");
6115 
6116   Node* sha_obj = argument(0);
6117   Node* src     = argument(1); // type oop
6118   Node* ofs     = argument(2); // type int
6119 
6120   const Type* src_type = src->Value(&_gvn);
6121   const TypeAryPtr* top_src = src_type->isa_aryptr();
6122   if (top_src  == NULL || top_src->klass()  == NULL) {
6123     // failed array check
6124     return false;
6125   }
6126   // Figure out the size and type of the elements we will be copying.
  BasicType src_elem = top_src->klass()->as_array_klass()->element_type()->basic_type();
6128   if (src_elem != T_BYTE) {
6129     return false;
6130   }
6131   // 'src_start' points to src array + offset
6132   Node* src_start = array_element_address(src, ofs, src_elem);
6133   Node* state = NULL;
6134   address stubAddr;
6135   const char *stubName;
6136 
6137   switch(id) {
6138   case vmIntrinsics::_sha_implCompress:
6139     assert(UseSHA1Intrinsics, "need SHA1 instruction support");
6140     state = get_state_from_sha_object(sha_obj);
6141     stubAddr = StubRoutines::sha1_implCompress();
6142     stubName = "sha1_implCompress";
6143     break;
6144   case vmIntrinsics::_sha2_implCompress:
6145     assert(UseSHA256Intrinsics, "need SHA256 instruction support");
6146     state = get_state_from_sha_object(sha_obj);
6147     stubAddr = StubRoutines::sha256_implCompress();
6148     stubName = "sha256_implCompress";
6149     break;
6150   case vmIntrinsics::_sha5_implCompress:
6151     assert(UseSHA512Intrinsics, "need SHA512 instruction support");
6152     state = get_state_from_sha5_object(sha_obj);
6153     stubAddr = StubRoutines::sha512_implCompress();
6154     stubName = "sha512_implCompress";
6155     break;
6156   default:
6157     fatal_unexpected_iid(id);
6158     return false;
6159   }
6160   if (state == NULL) return false;
6161 
6162   // Call the stub.
6163   Node* call = make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::sha_implCompress_Type(),
6164                                  stubAddr, stubName, TypePtr::BOTTOM,
6165                                  src_start, state);
6166 
6167   return true;
6168 }
6169 
6170 //------------------------------inline_digestBase_implCompressMB-----------------------
6171 //
6172 // Calculate SHA/SHA2/SHA5 for multi-block byte[] array.
6173 // int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit)
6174 //
6175 bool LibraryCallKit::inline_digestBase_implCompressMB(int predicate) {
6176   assert(UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics,
6177          "need SHA1/SHA256/SHA512 instruction support");
6178   assert((uint)predicate < 3, "sanity");
6179   assert(callee()->signature()->size() == 3, "digestBase_implCompressMB has 3 parameters");
6180 
6181   Node* digestBase_obj = argument(0); // The receiver was checked for NULL already.
6182   Node* src            = argument(1); // byte[] array
6183   Node* ofs            = argument(2); // type int
6184   Node* limit          = argument(3); // type int
6185 
6186   const Type* src_type = src->Value(&_gvn);
6187   const TypeAryPtr* top_src = src_type->isa_aryptr();
6188   if (top_src  == NULL || top_src->klass()  == NULL) {
6189     // failed array check
6190     return false;
6191   }
6192   // Figure out the size and type of the elements we will be copying.
  BasicType src_elem = top_src->klass()->as_array_klass()->element_type()->basic_type();
6194   if (src_elem != T_BYTE) {
6195     return false;
6196   }
6197   // 'src_start' points to src array + offset
6198   Node* src_start = array_element_address(src, ofs, src_elem);
6199 
6200   const char* klass_SHA_name = NULL;
6201   const char* stub_name = NULL;
6202   address     stub_addr = NULL;
6203   bool        long_state = false;
6204 
6205   switch (predicate) {
6206   case 0:
6207     if (UseSHA1Intrinsics) {
6208       klass_SHA_name = "sun/security/provider/SHA";
6209       stub_name = "sha1_implCompressMB";
6210       stub_addr = StubRoutines::sha1_implCompressMB();
6211     }
6212     break;
6213   case 1:
6214     if (UseSHA256Intrinsics) {
6215       klass_SHA_name = "sun/security/provider/SHA2";
6216       stub_name = "sha256_implCompressMB";
6217       stub_addr = StubRoutines::sha256_implCompressMB();
6218     }
6219     break;
6220   case 2:
6221     if (UseSHA512Intrinsics) {
6222       klass_SHA_name = "sun/security/provider/SHA5";
6223       stub_name = "sha512_implCompressMB";
6224       stub_addr = StubRoutines::sha512_implCompressMB();
6225       long_state = true;
6226     }
6227     break;
6228   default:
6229     fatal("unknown SHA intrinsic predicate: %d", predicate);
6230   }
6231   if (klass_SHA_name != NULL) {
    // Get the DigestBase klass so we can look up the SHA klass.
6233     const TypeInstPtr* tinst = _gvn.type(digestBase_obj)->isa_instptr();
    assert(tinst != NULL, "digestBase_obj is not an instance");
6235     assert(tinst->klass()->is_loaded(), "DigestBase is not loaded");
6236 
6237     ciKlass* klass_SHA = tinst->klass()->as_instance_klass()->find_klass(ciSymbol::make(klass_SHA_name));
6238     assert(klass_SHA->is_loaded(), "predicate checks that this class is loaded");
6239     ciInstanceKlass* instklass_SHA = klass_SHA->as_instance_klass();
6240     return inline_sha_implCompressMB(digestBase_obj, instklass_SHA, long_state, stub_addr, stub_name, src_start, ofs, limit);
6241   }
6242   return false;
6243 }

//------------------------------inline_sha_implCompressMB-----------------------
6245 bool LibraryCallKit::inline_sha_implCompressMB(Node* digestBase_obj, ciInstanceKlass* instklass_SHA,
6246                                                bool long_state, address stubAddr, const char *stubName,
6247                                                Node* src_start, Node* ofs, Node* limit) {
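  // The compile-time predicate has already proven that digestBase_obj is an
  // instance of instklass_SHA; cast to the sharper type so the state field
  // load below resolves against the correct klass.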
6248   const TypeKlassPtr* aklass = TypeKlassPtr::make(instklass_SHA);
6249   const TypeOopPtr* xtype = aklass->as_instance_type();
6250   Node* sha_obj = new CheckCastPPNode(control(), digestBase_obj, xtype);
6251   sha_obj = _gvn.transform(sha_obj);
6252 
6253   Node* state;
6254   if (long_state) {
6255     state = get_state_from_sha5_object(sha_obj);
6256   } else {
6257     state = get_state_from_sha_object(sha_obj);
6258   }
6259   if (state == NULL) return false;
6260 
6261   // Call the stub.
6262   Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
6263                                  OptoRuntime::digestBase_implCompressMB_Type(),
6264                                  stubAddr, stubName, TypePtr::BOTTOM,
6265                                  src_start, state, ofs, limit);
6266   // return ofs (int)
6267   Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
6268   set_result(result);
6269 
6270   return true;
6271 }
6272 
6273 //------------------------------get_state_from_sha_object-----------------------
6274 Node * LibraryCallKit::get_state_from_sha_object(Node *sha_object) {
6275   Node* sha_state = load_field_from_object(sha_object, "state", "[I", /*is_exact*/ false);
6276   assert (sha_state != NULL, "wrong version of sun.security.provider.SHA/SHA2");
6277   if (sha_state == NULL) return (Node *) NULL;
6278 
  // We now have the array; get the start address of the state array.
6280   Node* state = array_element_address(sha_state, intcon(0), T_INT);
6281   return state;
6282 }
6283 
6284 //------------------------------get_state_from_sha5_object-----------------------
6285 Node * LibraryCallKit::get_state_from_sha5_object(Node *sha_object) {
6286   Node* sha_state = load_field_from_object(sha_object, "state", "[J", /*is_exact*/ false);
6287   assert (sha_state != NULL, "wrong version of sun.security.provider.SHA5");
6288   if (sha_state == NULL) return (Node *) NULL;
6289 
  // We now have the array; get the start address of the state array.
6291   Node* state = array_element_address(sha_state, intcon(0), T_LONG);
6292   return state;
6293 }
6294 
6295 //----------------------------inline_digestBase_implCompressMB_predicate----------------------------
6296 // Return node representing slow path of predicate check.
// The pseudo code we want to emulate with this predicate is:
6298 //    if (digestBaseObj instanceof SHA/SHA2/SHA5) do_intrinsic, else do_javapath
6299 //
6300 Node* LibraryCallKit::inline_digestBase_implCompressMB_predicate(int predicate) {
6301   assert(UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics,
6302          "need SHA1/SHA256/SHA512 instruction support");
6303   assert((uint)predicate < 3, "sanity");
6304 
6305   // The receiver was checked for NULL already.
6306   Node* digestBaseObj = argument(0);
6307 
  // Get the DigestBase klass for the instanceof check.
6309   const TypeInstPtr* tinst = _gvn.type(digestBaseObj)->isa_instptr();
6310   assert(tinst != NULL, "digestBaseObj is null");
6311   assert(tinst->klass()->is_loaded(), "DigestBase is not loaded");
6312 
6313   const char* klass_SHA_name = NULL;
6314   switch (predicate) {
6315   case 0:
6316     if (UseSHA1Intrinsics) {
6317       // we want to do an instanceof comparison against the SHA class
6318       klass_SHA_name = "sun/security/provider/SHA";
6319     }
6320     break;
6321   case 1:
6322     if (UseSHA256Intrinsics) {
6323       // we want to do an instanceof comparison against the SHA2 class
6324       klass_SHA_name = "sun/security/provider/SHA2";
6325     }
6326     break;
6327   case 2:
6328     if (UseSHA512Intrinsics) {
6329       // we want to do an instanceof comparison against the SHA5 class
6330       klass_SHA_name = "sun/security/provider/SHA5";
6331     }
6332     break;
6333   default:
6334     fatal("unknown SHA intrinsic predicate: %d", predicate);
6335   }
6336 
6337   ciKlass* klass_SHA = NULL;
6338   if (klass_SHA_name != NULL) {
6339     klass_SHA = tinst->klass()->as_instance_klass()->find_klass(ciSymbol::make(klass_SHA_name));
6340   }
6341   if ((klass_SHA == NULL) || !klass_SHA->is_loaded()) {
    // If the SHA klass for this predicate is not loaded, we never take the
    // intrinsic fast path.
6343     Node* ctrl = control();
6344     set_control(top()); // no intrinsic path
6345     return ctrl;
6346   }
6347   ciInstanceKlass* instklass_SHA = klass_SHA->as_instance_klass();
6348 
6349   Node* instofSHA = gen_instanceof(digestBaseObj, makecon(TypeKlassPtr::make(instklass_SHA)));
6350   Node* cmp_instof = _gvn.transform(new CmpINode(instofSHA, intcon(1)));
6351   Node* bool_instof = _gvn.transform(new BoolNode(cmp_instof, BoolTest::ne));
6352   Node* instof_false = generate_guard(bool_instof, NULL, PROB_MIN);
6353 
6354   return instof_false;  // even if it is NULL
6355 }
6356 
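//------------------------------inline_profileBoolean-----------------------
//
// Inject the profile gathered by MethodHandleImpl.profileBoolean into the IR.
// Assumed Java shape, matching the two arguments used below:
// boolean java.lang.invoke.MethodHandleImpl.profileBoolean(boolean result, int[] counters)
//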
6357 bool LibraryCallKit::inline_profileBoolean() {
6358   Node* counts = argument(1);
6359   const TypeAryPtr* ary = NULL;
6360   ciArray* aobj = NULL;
6361   if (counts->is_Con()
6362       && (ary = counts->bottom_type()->isa_aryptr()) != NULL
6363       && (aobj = ary->const_oop()->as_array()) != NULL
6364       && (aobj->length() == 2)) {
6365     // Profile is int[2] where [0] and [1] correspond to false and true value occurrences respectively.
6366     jint false_cnt = aobj->element_value(0).as_int();
6367     jint  true_cnt = aobj->element_value(1).as_int();
6368 
6369     if (C->log() != NULL) {
6370       C->log()->elem("observe source='profileBoolean' false='%d' true='%d'",
6371                      false_cnt, true_cnt);
6372     }
6373 
6374     if (false_cnt + true_cnt == 0) {
6375       // According to profile, never executed.
6376       uncommon_trap_exact(Deoptimization::Reason_intrinsic,
6377                           Deoptimization::Action_reinterpret);
6378       return true;
6379     }
6380 
    // result is a boolean (0 or 1) and its profile (false_cnt & true_cnt)
    // records the number of occurrences of each value.
6383     Node* result = argument(0);
6384     if (false_cnt == 0 || true_cnt == 0) {
      // According to the profile, one value has never been seen.
6386       int expected_val = (false_cnt == 0) ? 1 : 0;
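      // For example, false_cnt == 0 means 'false' was never observed, so the
      // only expected result is 1 (true); the guard below traps on anything else.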
6387 
6388       Node* cmp  = _gvn.transform(new CmpINode(result, intcon(expected_val)));
6389       Node* test = _gvn.transform(new BoolNode(cmp, BoolTest::eq));
6390 
6391       IfNode* check = create_and_map_if(control(), test, PROB_ALWAYS, COUNT_UNKNOWN);
6392       Node* fast_path = _gvn.transform(new IfTrueNode(check));
6393       Node* slow_path = _gvn.transform(new IfFalseNode(check));
6394 
      { // Slow path: uncommon trap for the never-seen value, then reexecute
        // MethodHandleImpl::profileBoolean() to bump the count, so the JIT
        // knows the value has been seen at least once.
6398         PreserveJVMState pjvms(this);
6399         PreserveReexecuteState preexecs(this);
6400         jvms()->set_should_reexecute(true);
6401 
6402         set_control(slow_path);
6403         set_i_o(i_o());
6404 
6405         uncommon_trap_exact(Deoptimization::Reason_intrinsic,
6406                             Deoptimization::Action_reinterpret);
6407       }
      // The guard for the never-seen value lets us sharpen the result to a
      // constant, which in turn allows branches on the same value to be
      // eliminated later on.
6411       set_control(fast_path);
6412       result = intcon(expected_val);
6413     }
    // Stop profiling.
    // MethodHandleImpl::profileBoolean() has profiling logic in its bytecode.
    // By replacing the method body with profile data (represented as a
    // ProfileBooleanNode at the IR level) we effectively disable profiling,
    // which enables full-speed execution once optimized code is generated.
6419     Node* profile = _gvn.transform(new ProfileBooleanNode(result, false_cnt, true_cnt));
6420     C->record_for_igvn(profile);
6421     set_result(profile);
6422     return true;
6423   } else {
    // Continue profiling.
    // Profile data isn't available at the moment, so execute the method's
    // bytecode version. Usually, when GWT LambdaForms are profiled, a
    // stand-alone nmethod is being compiled and the counters aren't available
    // because the corresponding MethodHandle isn't a compile-time constant.
6429     return false;
6430   }
6431 }
6432 
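//------------------------------inline_isCompileConstant-----------------------
//
// Report whether the argument is a compile-time constant in this compilation.
// Assumed Java shape: boolean java.lang.invoke.MethodHandleImpl.isCompileConstant(Object obj)
//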
6433 bool LibraryCallKit::inline_isCompileConstant() {
6434   Node* n = argument(0);
6435   set_result(n->is_Con() ? intcon(1) : intcon(0));
6436   return true;
6437 }