1 /*
   2  * Copyright 1999-2009 Sun Microsystems, Inc.  All Rights Reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
  20  * CA 95054 USA or visit www.sun.com if you need additional information or
  21  * have any questions.
  22  *
  23  */
  24 
  25 #include "incls/_precompiled.incl"
  26 #include "incls/_library_call.cpp.incl"
  27 
  28 class LibraryIntrinsic : public InlineCallGenerator {
  29   // Extend the set of intrinsics known to the runtime:
  30  public:
  31  private:
  32   bool             _is_virtual;
  33   vmIntrinsics::ID _intrinsic_id;
  34 
  35  public:
  36   LibraryIntrinsic(ciMethod* m, bool is_virtual, vmIntrinsics::ID id)
  37     : InlineCallGenerator(m),
  38       _is_virtual(is_virtual),
  39       _intrinsic_id(id)
  40   {
  41   }
  42   virtual bool is_intrinsic() const { return true; }
  43   virtual bool is_virtual()   const { return _is_virtual; }
  44   virtual JVMState* generate(JVMState* jvms);
  45   vmIntrinsics::ID intrinsic_id() const { return _intrinsic_id; }
  46 };
  47 
  48 
  49 // Local helper class for LibraryIntrinsic:
  50 class LibraryCallKit : public GraphKit {
  51  private:
  52   LibraryIntrinsic* _intrinsic;   // the library intrinsic being called
  53 
  54  public:
  55   LibraryCallKit(JVMState* caller, LibraryIntrinsic* intrinsic)
  56     : GraphKit(caller),
  57       _intrinsic(intrinsic)
  58   {
  59   }
  60 
  61   ciMethod*         caller()    const    { return jvms()->method(); }
  62   int               bci()       const    { return jvms()->bci(); }
  63   LibraryIntrinsic* intrinsic() const    { return _intrinsic; }
  64   vmIntrinsics::ID  intrinsic_id() const { return _intrinsic->intrinsic_id(); }
  65   ciMethod*         callee()    const    { return _intrinsic->method(); }
  66   ciSignature*      signature() const    { return callee()->signature(); }
  67   int               arg_size()  const    { return callee()->arg_size(); }
  68 
  69   bool try_to_inline();
  70 
  71   // Helper functions to inline natives
  72   void push_result(RegionNode* region, PhiNode* value);
  73   Node* generate_guard(Node* test, RegionNode* region, float true_prob);
  74   Node* generate_slow_guard(Node* test, RegionNode* region);
  75   Node* generate_fair_guard(Node* test, RegionNode* region);
  76   Node* generate_negative_guard(Node* index, RegionNode* region,
  77                                 // resulting CastII of index:
  78                                 Node* *pos_index = NULL);
  79   Node* generate_nonpositive_guard(Node* index, bool never_negative,
  80                                    // resulting CastII of index:
  81                                    Node* *pos_index = NULL);
  82   Node* generate_limit_guard(Node* offset, Node* subseq_length,
  83                              Node* array_length,
  84                              RegionNode* region);
  85   Node* generate_current_thread(Node* &tls_output);
  86   address basictype2arraycopy(BasicType t, Node *src_offset, Node *dest_offset,
  87                               bool disjoint_bases, const char* &name);
  88   Node* load_mirror_from_klass(Node* klass);
  89   Node* load_klass_from_mirror_common(Node* mirror, bool never_see_null,
  90                                       int nargs,
  91                                       RegionNode* region, int null_path,
  92                                       int offset);
  93   Node* load_klass_from_mirror(Node* mirror, bool never_see_null, int nargs,
  94                                RegionNode* region, int null_path) {
  95     int offset = java_lang_Class::klass_offset_in_bytes();
  96     return load_klass_from_mirror_common(mirror, never_see_null, nargs,
  97                                          region, null_path,
  98                                          offset);
  99   }
 100   Node* load_array_klass_from_mirror(Node* mirror, bool never_see_null,
 101                                      int nargs,
 102                                      RegionNode* region, int null_path) {
 103     int offset = java_lang_Class::array_klass_offset_in_bytes();
 104     return load_klass_from_mirror_common(mirror, never_see_null, nargs,
 105                                          region, null_path,
 106                                          offset);
 107   }
 108   Node* generate_access_flags_guard(Node* kls,
 109                                     int modifier_mask, int modifier_bits,
 110                                     RegionNode* region);
 111   Node* generate_interface_guard(Node* kls, RegionNode* region);
 112   Node* generate_array_guard(Node* kls, RegionNode* region) {
 113     return generate_array_guard_common(kls, region, false, false);
 114   }
 115   Node* generate_non_array_guard(Node* kls, RegionNode* region) {
 116     return generate_array_guard_common(kls, region, false, true);
 117   }
 118   Node* generate_objArray_guard(Node* kls, RegionNode* region) {
 119     return generate_array_guard_common(kls, region, true, false);
 120   }
 121   Node* generate_non_objArray_guard(Node* kls, RegionNode* region) {
 122     return generate_array_guard_common(kls, region, true, true);
 123   }
 124   Node* generate_array_guard_common(Node* kls, RegionNode* region,
 125                                     bool obj_array, bool not_array);
 126   Node* generate_virtual_guard(Node* obj_klass, RegionNode* slow_region);
 127   CallJavaNode* generate_method_call(vmIntrinsics::ID method_id,
 128                                      bool is_virtual = false, bool is_static = false);
 129   CallJavaNode* generate_method_call_static(vmIntrinsics::ID method_id) {
 130     return generate_method_call(method_id, false, true);
 131   }
 132   CallJavaNode* generate_method_call_virtual(vmIntrinsics::ID method_id) {
 133     return generate_method_call(method_id, true, false);
 134   }
 135 
 136   Node* make_string_method_node(int opcode, Node* str1, Node* cnt1, Node* str2, Node* cnt2);
 137   bool inline_string_compareTo();
 138   bool inline_string_indexOf();
 139   Node* string_indexOf(Node* string_object, ciTypeArray* target_array, jint offset, jint cache_i, jint md2_i);
 140   bool inline_string_equals();
 141   Node* pop_math_arg();
 142   bool runtime_math(const TypeFunc* call_type, address funcAddr, const char* funcName);
 143   bool inline_math_native(vmIntrinsics::ID id);
 144   bool inline_trig(vmIntrinsics::ID id);
 145   bool inline_trans(vmIntrinsics::ID id);
 146   bool inline_abs(vmIntrinsics::ID id);
 147   bool inline_sqrt(vmIntrinsics::ID id);
 148   bool inline_pow(vmIntrinsics::ID id);
 149   bool inline_exp(vmIntrinsics::ID id);
 150   bool inline_min_max(vmIntrinsics::ID id);
 151   Node* generate_min_max(vmIntrinsics::ID id, Node* x, Node* y);
 152   // This returns Type::AnyPtr, RawPtr, or OopPtr.
 153   int classify_unsafe_addr(Node* &base, Node* &offset);
 154   Node* make_unsafe_address(Node* base, Node* offset);
 155   bool inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, bool is_volatile);
 156   bool inline_unsafe_prefetch(bool is_native_ptr, bool is_store, bool is_static);
 157   bool inline_unsafe_allocate();
 158   bool inline_unsafe_copyMemory();
 159   bool inline_native_currentThread();
 160   bool inline_native_time_funcs(bool isNano);
 161   bool inline_native_isInterrupted();
 162   bool inline_native_Class_query(vmIntrinsics::ID id);
 163   bool inline_native_subtype_check();
 164 
 165   bool inline_native_newArray();
 166   bool inline_native_getLength();
 167   bool inline_array_copyOf(bool is_copyOfRange);
 168   bool inline_array_equals();
 169   void copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array, bool card_mark);
 170   bool inline_native_clone(bool is_virtual);
 171   bool inline_native_Reflection_getCallerClass();
 172   bool inline_native_AtomicLong_get();
 173   bool inline_native_AtomicLong_attemptUpdate();
 174   bool is_method_invoke_or_aux_frame(JVMState* jvms);
 175   // Helper function for inlining native object hash method
 176   bool inline_native_hashcode(bool is_virtual, bool is_static);
 177   bool inline_native_getClass();
 178 
 179   // Helper functions for inlining arraycopy
 180   bool inline_arraycopy();
 181   void generate_arraycopy(const TypePtr* adr_type,
 182                           BasicType basic_elem_type,
 183                           Node* src,  Node* src_offset,
 184                           Node* dest, Node* dest_offset,
 185                           Node* copy_length,
 186                           bool disjoint_bases = false,
 187                           bool length_never_negative = false,
 188                           RegionNode* slow_region = NULL);
 189   AllocateArrayNode* tightly_coupled_allocation(Node* ptr,
 190                                                 RegionNode* slow_region);
 191   void generate_clear_array(const TypePtr* adr_type,
 192                             Node* dest,
 193                             BasicType basic_elem_type,
 194                             Node* slice_off,
 195                             Node* slice_len,
 196                             Node* slice_end);
 197   bool generate_block_arraycopy(const TypePtr* adr_type,
 198                                 BasicType basic_elem_type,
 199                                 AllocateNode* alloc,
 200                                 Node* src,  Node* src_offset,
 201                                 Node* dest, Node* dest_offset,
 202                                 Node* dest_size);
 203   void generate_slow_arraycopy(const TypePtr* adr_type,
 204                                Node* src,  Node* src_offset,
 205                                Node* dest, Node* dest_offset,
 206                                Node* copy_length);
 207   Node* generate_checkcast_arraycopy(const TypePtr* adr_type,
 208                                      Node* dest_elem_klass,
 209                                      Node* src,  Node* src_offset,
 210                                      Node* dest, Node* dest_offset,
 211                                      Node* copy_length);
 212   Node* generate_generic_arraycopy(const TypePtr* adr_type,
 213                                    Node* src,  Node* src_offset,
 214                                    Node* dest, Node* dest_offset,
 215                                    Node* copy_length);
 216   void generate_unchecked_arraycopy(const TypePtr* adr_type,
 217                                     BasicType basic_elem_type,
 218                                     bool disjoint_bases,
 219                                     Node* src,  Node* src_offset,
 220                                     Node* dest, Node* dest_offset,
 221                                     Node* copy_length);
 222   bool inline_unsafe_CAS(BasicType type);
 223   bool inline_unsafe_ordered_store(BasicType type);
 224   bool inline_fp_conversions(vmIntrinsics::ID id);
 225   bool inline_numberOfLeadingZeros(vmIntrinsics::ID id);
 226   bool inline_numberOfTrailingZeros(vmIntrinsics::ID id);
 227   bool inline_bitCount(vmIntrinsics::ID id);
 228   bool inline_reverseBytes(vmIntrinsics::ID id);
 229 };
 230 
 231 
 232 //---------------------------make_vm_intrinsic----------------------------
 233 CallGenerator* Compile::make_vm_intrinsic(ciMethod* m, bool is_virtual) {
 234   vmIntrinsics::ID id = m->intrinsic_id();
 235   assert(id != vmIntrinsics::_none, "must be a VM intrinsic");
 236 
 237   if (DisableIntrinsic[0] != '\0'
 238       && strstr(DisableIntrinsic, vmIntrinsics::name_at(id)) != NULL) {
 239     // disabled by a user request on the command line:
 240     // example: -XX:DisableIntrinsic=_hashCode,_getClass
 241     return NULL;
 242   }
 243 
 244   if (!m->is_loaded()) {
 245     // do not attempt to inline unloaded methods
 246     return NULL;
 247   }
 248 
 249   // Only a few intrinsics implement a virtual dispatch.
 250   // They are expensive calls which are also frequently overridden.
 251   if (is_virtual) {
 252     switch (id) {
 253     case vmIntrinsics::_hashCode:
 254     case vmIntrinsics::_clone:
 255       // OK, Object.hashCode and Object.clone intrinsics come in both flavors
 256       break;
 257     default:
 258       return NULL;
 259     }
 260   }
 261 
 262   // -XX:-InlineNatives disables nearly all intrinsics:
 263   if (!InlineNatives) {
 264     switch (id) {
 265     case vmIntrinsics::_indexOf:
 266     case vmIntrinsics::_compareTo:
 267     case vmIntrinsics::_equals:
 268     case vmIntrinsics::_equalsC:
 269       break;  // InlineNatives does not control String.compareTo
 270     default:
 271       return NULL;
 272     }
 273   }
 274 
 275   switch (id) {
 276   case vmIntrinsics::_compareTo:
 277     if (!SpecialStringCompareTo)  return NULL;
 278     break;
 279   case vmIntrinsics::_indexOf:
 280     if (!SpecialStringIndexOf)  return NULL;
 281     break;
 282   case vmIntrinsics::_equals:
 283     if (!SpecialStringEquals)  return NULL;
 284     break;
 285   case vmIntrinsics::_equalsC:
 286     if (!SpecialArraysEquals)  return NULL;
 287     break;
 288   case vmIntrinsics::_arraycopy:
 289     if (!InlineArrayCopy)  return NULL;
 290     break;
 291   case vmIntrinsics::_copyMemory:
 292     if (StubRoutines::unsafe_arraycopy() == NULL)  return NULL;
 293     if (!InlineArrayCopy)  return NULL;
 294     break;
 295   case vmIntrinsics::_hashCode:
 296     if (!InlineObjectHash)  return NULL;
 297     break;
 298   case vmIntrinsics::_clone:
 299   case vmIntrinsics::_copyOf:
 300   case vmIntrinsics::_copyOfRange:
 301     if (!InlineObjectCopy)  return NULL;
 302     // These also use the arraycopy intrinsic mechanism:
 303     if (!InlineArrayCopy)  return NULL;
 304     break;
 305   case vmIntrinsics::_checkIndex:
 306     // We do not intrinsify this.  The optimizer does fine with it.
 307     return NULL;
 308 
 309   case vmIntrinsics::_get_AtomicLong:
 310   case vmIntrinsics::_attemptUpdate:
 311     if (!InlineAtomicLong)  return NULL;
 312     break;
 313 
 314   case vmIntrinsics::_getCallerClass:
 315     if (!UseNewReflection)  return NULL;
 316     if (!InlineReflectionGetCallerClass)  return NULL;
 317     if (!JDK_Version::is_gte_jdk14x_version())  return NULL;
 318     break;
 319 
 320   case vmIntrinsics::_bitCount_i:
 321   case vmIntrinsics::_bitCount_l:
 322     if (!UsePopCountInstruction)  return NULL;
 323     break;
 324 
 325  default:
 326     assert(id <= vmIntrinsics::LAST_COMPILER_INLINE, "caller responsibility");
 327     assert(id != vmIntrinsics::_Object_init && id != vmIntrinsics::_invoke, "enum out of order?");
 328     break;
 329   }
 330 
 331   // -XX:-InlineClassNatives disables natives from the Class class.
 332   // The flag applies to all reflective calls, notably Array.newArray
 333   // (visible to Java programmers as Array.newInstance).
 334   if (m->holder()->name() == ciSymbol::java_lang_Class() ||
 335       m->holder()->name() == ciSymbol::java_lang_reflect_Array()) {
 336     if (!InlineClassNatives)  return NULL;
 337   }
 338 
 339   // -XX:-InlineThreadNatives disables natives from the Thread class.
 340   if (m->holder()->name() == ciSymbol::java_lang_Thread()) {
 341     if (!InlineThreadNatives)  return NULL;
 342   }
 343 
 344   // -XX:-InlineMathNatives disables natives from the Math,Float and Double classes.
 345   if (m->holder()->name() == ciSymbol::java_lang_Math() ||
 346       m->holder()->name() == ciSymbol::java_lang_Float() ||
 347       m->holder()->name() == ciSymbol::java_lang_Double()) {
 348     if (!InlineMathNatives)  return NULL;
 349   }
 350 
 351   // -XX:-InlineUnsafeOps disables natives from the Unsafe class.
 352   if (m->holder()->name() == ciSymbol::sun_misc_Unsafe()) {
 353     if (!InlineUnsafeOps)  return NULL;
 354   }
 355 
 356   return new LibraryIntrinsic(m, is_virtual, (vmIntrinsics::ID) id);
 357 }
 358 
 359 //----------------------register_library_intrinsics-----------------------
 360 // Initialize this file's data structures, for each Compile instance.
 361 void Compile::register_library_intrinsics() {
 362   // Nothing to do here.
 363 }
 364 
 365 JVMState* LibraryIntrinsic::generate(JVMState* jvms) {
 366   LibraryCallKit kit(jvms, this);
 367   Compile* C = kit.C;
 368   int nodes = C->unique();
 369 #ifndef PRODUCT
 370   if ((PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) && Verbose) {
 371     char buf[1000];
 372     const char* str = vmIntrinsics::short_name_as_C_string(intrinsic_id(), buf, sizeof(buf));
 373     tty->print_cr("Intrinsic %s", str);
 374   }
 375 #endif
 376   if (kit.try_to_inline()) {
 377     if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
 378       tty->print("Inlining intrinsic %s%s at bci:%d in",
 379                  vmIntrinsics::name_at(intrinsic_id()),
 380                  (is_virtual() ? " (virtual)" : ""), kit.bci());
 381       kit.caller()->print_short_name(tty);
 382       tty->print_cr(" (%d bytes)", kit.caller()->code_size());
 383     }
 384     C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked);
 385     if (C->log()) {
 386       C->log()->elem("intrinsic id='%s'%s nodes='%d'",
 387                      vmIntrinsics::name_at(intrinsic_id()),
 388                      (is_virtual() ? " virtual='1'" : ""),
 389                      C->unique() - nodes);
 390     }
 391     return kit.transfer_exceptions_into_jvms();
 392   }
 393 
 394   if (PrintIntrinsics) {
 395     tty->print("Did not inline intrinsic %s%s at bci:%d in",
 396                vmIntrinsics::name_at(intrinsic_id()),
 397                (is_virtual() ? " (virtual)" : ""), kit.bci());
 398     kit.caller()->print_short_name(tty);
 399     tty->print_cr(" (%d bytes)", kit.caller()->code_size());
 400   }
 401   C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_failed);
 402   return NULL;
 403 }
 404 
 405 bool LibraryCallKit::try_to_inline() {
 406   // Handle symbolic names for otherwise undistinguished boolean switches:
 407   const bool is_store       = true;
 408   const bool is_native_ptr  = true;
 409   const bool is_static      = true;
 410 
 411   switch (intrinsic_id()) {
 412   case vmIntrinsics::_hashCode:
 413     return inline_native_hashcode(intrinsic()->is_virtual(), !is_static);
 414   case vmIntrinsics::_identityHashCode:
 415     return inline_native_hashcode(/*!virtual*/ false, is_static);
 416   case vmIntrinsics::_getClass:
 417     return inline_native_getClass();
 418 
 419   case vmIntrinsics::_dsin:
 420   case vmIntrinsics::_dcos:
 421   case vmIntrinsics::_dtan:
 422   case vmIntrinsics::_dabs:
 423   case vmIntrinsics::_datan2:
 424   case vmIntrinsics::_dsqrt:
 425   case vmIntrinsics::_dexp:
 426   case vmIntrinsics::_dlog:
 427   case vmIntrinsics::_dlog10:
 428   case vmIntrinsics::_dpow:
 429     return inline_math_native(intrinsic_id());
 430 
 431   case vmIntrinsics::_min:
 432   case vmIntrinsics::_max:
 433     return inline_min_max(intrinsic_id());
 434 
 435   case vmIntrinsics::_arraycopy:
 436     return inline_arraycopy();
 437 
 438   case vmIntrinsics::_compareTo:
 439     return inline_string_compareTo();
 440   case vmIntrinsics::_indexOf:
 441     return inline_string_indexOf();
 442   case vmIntrinsics::_equals:
 443     return inline_string_equals();
 444 
 445   case vmIntrinsics::_getObject:
 446     return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT, false);
 447   case vmIntrinsics::_getBoolean:
 448     return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN, false);
 449   case vmIntrinsics::_getByte:
 450     return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE, false);
 451   case vmIntrinsics::_getShort:
 452     return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT, false);
 453   case vmIntrinsics::_getChar:
 454     return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR, false);
 455   case vmIntrinsics::_getInt:
 456     return inline_unsafe_access(!is_native_ptr, !is_store, T_INT, false);
 457   case vmIntrinsics::_getLong:
 458     return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG, false);
 459   case vmIntrinsics::_getFloat:
 460     return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT, false);
 461   case vmIntrinsics::_getDouble:
 462     return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE, false);
 463 
 464   case vmIntrinsics::_putObject:
 465     return inline_unsafe_access(!is_native_ptr, is_store, T_OBJECT, false);
 466   case vmIntrinsics::_putBoolean:
 467     return inline_unsafe_access(!is_native_ptr, is_store, T_BOOLEAN, false);
 468   case vmIntrinsics::_putByte:
 469     return inline_unsafe_access(!is_native_ptr, is_store, T_BYTE, false);
 470   case vmIntrinsics::_putShort:
 471     return inline_unsafe_access(!is_native_ptr, is_store, T_SHORT, false);
 472   case vmIntrinsics::_putChar:
 473     return inline_unsafe_access(!is_native_ptr, is_store, T_CHAR, false);
 474   case vmIntrinsics::_putInt:
 475     return inline_unsafe_access(!is_native_ptr, is_store, T_INT, false);
 476   case vmIntrinsics::_putLong:
 477     return inline_unsafe_access(!is_native_ptr, is_store, T_LONG, false);
 478   case vmIntrinsics::_putFloat:
 479     return inline_unsafe_access(!is_native_ptr, is_store, T_FLOAT, false);
 480   case vmIntrinsics::_putDouble:
 481     return inline_unsafe_access(!is_native_ptr, is_store, T_DOUBLE, false);
 482 
 483   case vmIntrinsics::_getByte_raw:
 484     return inline_unsafe_access(is_native_ptr, !is_store, T_BYTE, false);
 485   case vmIntrinsics::_getShort_raw:
 486     return inline_unsafe_access(is_native_ptr, !is_store, T_SHORT, false);
 487   case vmIntrinsics::_getChar_raw:
 488     return inline_unsafe_access(is_native_ptr, !is_store, T_CHAR, false);
 489   case vmIntrinsics::_getInt_raw:
 490     return inline_unsafe_access(is_native_ptr, !is_store, T_INT, false);
 491   case vmIntrinsics::_getLong_raw:
 492     return inline_unsafe_access(is_native_ptr, !is_store, T_LONG, false);
 493   case vmIntrinsics::_getFloat_raw:
 494     return inline_unsafe_access(is_native_ptr, !is_store, T_FLOAT, false);
 495   case vmIntrinsics::_getDouble_raw:
 496     return inline_unsafe_access(is_native_ptr, !is_store, T_DOUBLE, false);
 497   case vmIntrinsics::_getAddress_raw:
 498     return inline_unsafe_access(is_native_ptr, !is_store, T_ADDRESS, false);
 499 
 500   case vmIntrinsics::_putByte_raw:
 501     return inline_unsafe_access(is_native_ptr, is_store, T_BYTE, false);
 502   case vmIntrinsics::_putShort_raw:
 503     return inline_unsafe_access(is_native_ptr, is_store, T_SHORT, false);
 504   case vmIntrinsics::_putChar_raw:
 505     return inline_unsafe_access(is_native_ptr, is_store, T_CHAR, false);
 506   case vmIntrinsics::_putInt_raw:
 507     return inline_unsafe_access(is_native_ptr, is_store, T_INT, false);
 508   case vmIntrinsics::_putLong_raw:
 509     return inline_unsafe_access(is_native_ptr, is_store, T_LONG, false);
 510   case vmIntrinsics::_putFloat_raw:
 511     return inline_unsafe_access(is_native_ptr, is_store, T_FLOAT, false);
 512   case vmIntrinsics::_putDouble_raw:
 513     return inline_unsafe_access(is_native_ptr, is_store, T_DOUBLE, false);
 514   case vmIntrinsics::_putAddress_raw:
 515     return inline_unsafe_access(is_native_ptr, is_store, T_ADDRESS, false);
 516 
 517   case vmIntrinsics::_getObjectVolatile:
 518     return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT, true);
 519   case vmIntrinsics::_getBooleanVolatile:
 520     return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN, true);
 521   case vmIntrinsics::_getByteVolatile:
 522     return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE, true);
 523   case vmIntrinsics::_getShortVolatile:
 524     return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT, true);
 525   case vmIntrinsics::_getCharVolatile:
 526     return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR, true);
 527   case vmIntrinsics::_getIntVolatile:
 528     return inline_unsafe_access(!is_native_ptr, !is_store, T_INT, true);
 529   case vmIntrinsics::_getLongVolatile:
 530     return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG, true);
 531   case vmIntrinsics::_getFloatVolatile:
 532     return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT, true);
 533   case vmIntrinsics::_getDoubleVolatile:
 534     return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE, true);
 535 
 536   case vmIntrinsics::_putObjectVolatile:
 537     return inline_unsafe_access(!is_native_ptr, is_store, T_OBJECT, true);
 538   case vmIntrinsics::_putBooleanVolatile:
 539     return inline_unsafe_access(!is_native_ptr, is_store, T_BOOLEAN, true);
 540   case vmIntrinsics::_putByteVolatile:
 541     return inline_unsafe_access(!is_native_ptr, is_store, T_BYTE, true);
 542   case vmIntrinsics::_putShortVolatile:
 543     return inline_unsafe_access(!is_native_ptr, is_store, T_SHORT, true);
 544   case vmIntrinsics::_putCharVolatile:
 545     return inline_unsafe_access(!is_native_ptr, is_store, T_CHAR, true);
 546   case vmIntrinsics::_putIntVolatile:
 547     return inline_unsafe_access(!is_native_ptr, is_store, T_INT, true);
 548   case vmIntrinsics::_putLongVolatile:
 549     return inline_unsafe_access(!is_native_ptr, is_store, T_LONG, true);
 550   case vmIntrinsics::_putFloatVolatile:
 551     return inline_unsafe_access(!is_native_ptr, is_store, T_FLOAT, true);
 552   case vmIntrinsics::_putDoubleVolatile:
 553     return inline_unsafe_access(!is_native_ptr, is_store, T_DOUBLE, true);
 554 
 555   case vmIntrinsics::_prefetchRead:
 556     return inline_unsafe_prefetch(!is_native_ptr, !is_store, !is_static);
 557   case vmIntrinsics::_prefetchWrite:
 558     return inline_unsafe_prefetch(!is_native_ptr, is_store, !is_static);
 559   case vmIntrinsics::_prefetchReadStatic:
 560     return inline_unsafe_prefetch(!is_native_ptr, !is_store, is_static);
 561   case vmIntrinsics::_prefetchWriteStatic:
 562     return inline_unsafe_prefetch(!is_native_ptr, is_store, is_static);
 563 
 564   case vmIntrinsics::_compareAndSwapObject:
 565     return inline_unsafe_CAS(T_OBJECT);
 566   case vmIntrinsics::_compareAndSwapInt:
 567     return inline_unsafe_CAS(T_INT);
 568   case vmIntrinsics::_compareAndSwapLong:
 569     return inline_unsafe_CAS(T_LONG);
 570 
 571   case vmIntrinsics::_putOrderedObject:
 572     return inline_unsafe_ordered_store(T_OBJECT);
 573   case vmIntrinsics::_putOrderedInt:
 574     return inline_unsafe_ordered_store(T_INT);
 575   case vmIntrinsics::_putOrderedLong:
 576     return inline_unsafe_ordered_store(T_LONG);
 577 
 578   case vmIntrinsics::_currentThread:
 579     return inline_native_currentThread();
 580   case vmIntrinsics::_isInterrupted:
 581     return inline_native_isInterrupted();
 582 
 583   case vmIntrinsics::_currentTimeMillis:
 584     return inline_native_time_funcs(false);
 585   case vmIntrinsics::_nanoTime:
 586     return inline_native_time_funcs(true);
 587   case vmIntrinsics::_allocateInstance:
 588     return inline_unsafe_allocate();
 589   case vmIntrinsics::_copyMemory:
 590     return inline_unsafe_copyMemory();
 591   case vmIntrinsics::_newArray:
 592     return inline_native_newArray();
 593   case vmIntrinsics::_getLength:
 594     return inline_native_getLength();
 595   case vmIntrinsics::_copyOf:
 596     return inline_array_copyOf(false);
 597   case vmIntrinsics::_copyOfRange:
 598     return inline_array_copyOf(true);
 599   case vmIntrinsics::_equalsC:
 600     return inline_array_equals();
 601   case vmIntrinsics::_clone:
 602     return inline_native_clone(intrinsic()->is_virtual());
 603 
 604   case vmIntrinsics::_isAssignableFrom:
 605     return inline_native_subtype_check();
 606 
 607   case vmIntrinsics::_isInstance:
 608   case vmIntrinsics::_getModifiers:
 609   case vmIntrinsics::_isInterface:
 610   case vmIntrinsics::_isArray:
 611   case vmIntrinsics::_isPrimitive:
 612   case vmIntrinsics::_getSuperclass:
 613   case vmIntrinsics::_getComponentType:
 614   case vmIntrinsics::_getClassAccessFlags:
 615     return inline_native_Class_query(intrinsic_id());
 616 
 617   case vmIntrinsics::_floatToRawIntBits:
 618   case vmIntrinsics::_floatToIntBits:
 619   case vmIntrinsics::_intBitsToFloat:
 620   case vmIntrinsics::_doubleToRawLongBits:
 621   case vmIntrinsics::_doubleToLongBits:
 622   case vmIntrinsics::_longBitsToDouble:
 623     return inline_fp_conversions(intrinsic_id());
 624 
 625   case vmIntrinsics::_numberOfLeadingZeros_i:
 626   case vmIntrinsics::_numberOfLeadingZeros_l:
 627     return inline_numberOfLeadingZeros(intrinsic_id());
 628 
 629   case vmIntrinsics::_numberOfTrailingZeros_i:
 630   case vmIntrinsics::_numberOfTrailingZeros_l:
 631     return inline_numberOfTrailingZeros(intrinsic_id());
 632 
 633   case vmIntrinsics::_bitCount_i:
 634   case vmIntrinsics::_bitCount_l:
 635     return inline_bitCount(intrinsic_id());
 636 
 637   case vmIntrinsics::_reverseBytes_i:
 638   case vmIntrinsics::_reverseBytes_l:
 639     return inline_reverseBytes((vmIntrinsics::ID) intrinsic_id());
 640 
 641   case vmIntrinsics::_get_AtomicLong:
 642     return inline_native_AtomicLong_get();
 643   case vmIntrinsics::_attemptUpdate:
 644     return inline_native_AtomicLong_attemptUpdate();
 645 
 646   case vmIntrinsics::_getCallerClass:
 647     return inline_native_Reflection_getCallerClass();
 648 
 649   default:
 650     // If you get here, it may be that someone has added a new intrinsic
 651     // to the list in vmSymbols.hpp without implementing it here.
 652 #ifndef PRODUCT
 653     if ((PrintMiscellaneous && (Verbose || WizardMode)) || PrintOpto) {
 654       tty->print_cr("*** Warning: Unimplemented intrinsic %s(%d)",
 655                     vmIntrinsics::name_at(intrinsic_id()), intrinsic_id());
 656     }
 657 #endif
 658     return false;
 659   }
 660 }
 661 
 662 //------------------------------push_result------------------------------
 663 // Helper function for finishing intrinsics.
 664 void LibraryCallKit::push_result(RegionNode* region, PhiNode* value) {
 665   record_for_igvn(region);
 666   set_control(_gvn.transform(region));
 667   BasicType value_type = value->type()->basic_type();
 668   push_node(value_type, _gvn.transform(value));
 669 }
 670 
 671 //------------------------------generate_guard---------------------------
 672 // Helper function for generating guarded fast-slow graph structures.
 673 // The given 'test', if true, guards a slow path.  If the test fails
 674 // then a fast path can be taken.  (We generally hope it fails.)
 675 // In all cases, GraphKit::control() is updated to the fast path.
 676 // The returned value represents the control for the slow path.
 677 // The return value is never 'top'; it is either a valid control
 678 // or NULL if it is obvious that the slow path can never be taken.
 679 // Also, if region and the slow control are not NULL, the slow edge
 680 // is appended to the region.
 681 Node* LibraryCallKit::generate_guard(Node* test, RegionNode* region, float true_prob) {
 682   if (stopped()) {
 683     // Already short circuited.
 684     return NULL;
 685   }
 686 
 687   // Build an if node and its projections.
 688   // If test is true we take the slow path, which we assume is uncommon.
 689   if (_gvn.type(test) == TypeInt::ZERO) {
 690     // The slow branch is never taken.  No need to build this guard.
 691     return NULL;
 692   }
 693 
 694   IfNode* iff = create_and_map_if(control(), test, true_prob, COUNT_UNKNOWN);
 695 
 696   Node* if_slow = _gvn.transform( new (C, 1) IfTrueNode(iff) );
 697   if (if_slow == top()) {
 698     // The slow branch is never taken.  No need to build this guard.
 699     return NULL;
 700   }
 701 
 702   if (region != NULL)
 703     region->add_req(if_slow);
 704 
 705   Node* if_fast = _gvn.transform( new (C, 1) IfFalseNode(iff) );
 706   set_control(if_fast);
 707 
 708   return if_slow;
 709 }
 710 
 711 inline Node* LibraryCallKit::generate_slow_guard(Node* test, RegionNode* region) {
 712   return generate_guard(test, region, PROB_UNLIKELY_MAG(3));
 713 }
 714 inline Node* LibraryCallKit::generate_fair_guard(Node* test, RegionNode* region) {
 715   return generate_guard(test, region, PROB_FAIR);
 716 }
 717 
 718 inline Node* LibraryCallKit::generate_negative_guard(Node* index, RegionNode* region,
 719                                                      Node* *pos_index) {
 720   if (stopped())
 721     return NULL;                // already stopped
 722   if (_gvn.type(index)->higher_equal(TypeInt::POS)) // [0,maxint]
 723     return NULL;                // index is already adequately typed
 724   Node* cmp_lt = _gvn.transform( new (C, 3) CmpINode(index, intcon(0)) );
 725   Node* bol_lt = _gvn.transform( new (C, 2) BoolNode(cmp_lt, BoolTest::lt) );
 726   Node* is_neg = generate_guard(bol_lt, region, PROB_MIN);
 727   if (is_neg != NULL && pos_index != NULL) {
 728     // Emulate effect of Parse::adjust_map_after_if.
 729     Node* ccast = new (C, 2) CastIINode(index, TypeInt::POS);
 730     ccast->set_req(0, control());
 731     (*pos_index) = _gvn.transform(ccast);
 732   }
 733   return is_neg;
 734 }
 735 
 736 inline Node* LibraryCallKit::generate_nonpositive_guard(Node* index, bool never_negative,
 737                                                         Node* *pos_index) {
 738   if (stopped())
 739     return NULL;                // already stopped
 740   if (_gvn.type(index)->higher_equal(TypeInt::POS1)) // [1,maxint]
 741     return NULL;                // index is already adequately typed
 742   Node* cmp_le = _gvn.transform( new (C, 3) CmpINode(index, intcon(0)) );
 743   BoolTest::mask le_or_eq = (never_negative ? BoolTest::eq : BoolTest::le);
 744   Node* bol_le = _gvn.transform( new (C, 2) BoolNode(cmp_le, le_or_eq) );
 745   Node* is_notp = generate_guard(bol_le, NULL, PROB_MIN);
 746   if (is_notp != NULL && pos_index != NULL) {
 747     // Emulate effect of Parse::adjust_map_after_if.
 748     Node* ccast = new (C, 2) CastIINode(index, TypeInt::POS1);
 749     ccast->set_req(0, control());
 750     (*pos_index) = _gvn.transform(ccast);
 751   }
 752   return is_notp;
 753 }
 754 
 755 // Make sure that 'position' is a valid limit index, in [0..length].
 756 // There are two equivalent plans for checking this:
 757 //   A. (offset + copyLength)  unsigned<=  arrayLength
 758 //   B. offset  <=  (arrayLength - copyLength)
 759 // We require that all of the values above, except for the sum and
 760 // difference, are already known to be non-negative.
 761 // Plan A is robust in the face of overflow, if offset and copyLength
 762 // are both hugely positive.
 763 //
 764 // Plan B is less direct and intuitive, but it does not overflow at
 765 // all, since the difference of two non-negatives is always
 766 // representable.  Whenever Java methods must perform the equivalent
 767 // check they generally use Plan B instead of Plan A.
 768 // For the moment we use Plan A.
 769 inline Node* LibraryCallKit::generate_limit_guard(Node* offset,
 770                                                   Node* subseq_length,
 771                                                   Node* array_length,
 772                                                   RegionNode* region) {
 773   if (stopped())
 774     return NULL;                // already stopped
 775   bool zero_offset = _gvn.type(offset) == TypeInt::ZERO;
 776   if (zero_offset && _gvn.eqv_uncast(subseq_length, array_length))
 777     return NULL;                // common case of whole-array copy
 778   Node* last = subseq_length;
 779   if (!zero_offset)             // last += offset
 780     last = _gvn.transform( new (C, 3) AddINode(last, offset));
 781   Node* cmp_lt = _gvn.transform( new (C, 3) CmpUNode(array_length, last) );
 782   Node* bol_lt = _gvn.transform( new (C, 2) BoolNode(cmp_lt, BoolTest::lt) );
 783   Node* is_over = generate_guard(bol_lt, region, PROB_MIN);
 784   return is_over;
 785 }
 786 
 787 
 788 //--------------------------generate_current_thread--------------------
 789 Node* LibraryCallKit::generate_current_thread(Node* &tls_output) {
 790   ciKlass*    thread_klass = env()->Thread_klass();
 791   const Type* thread_type  = TypeOopPtr::make_from_klass(thread_klass)->cast_to_ptr_type(TypePtr::NotNull);
 792   Node* thread = _gvn.transform(new (C, 1) ThreadLocalNode());
 793   Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::threadObj_offset()));
 794   Node* threadObj = make_load(NULL, p, thread_type, T_OBJECT);
 795   tls_output = thread;
 796   return threadObj;
 797 }
 798 
 799 
 800 //------------------------------make_string_method_node------------------------
 801 // Helper method for String intrinsic finctions.
 802 Node* LibraryCallKit::make_string_method_node(int opcode, Node* str1, Node* cnt1, Node* str2, Node* cnt2) {
 803   const int value_offset  = java_lang_String::value_offset_in_bytes();
 804   const int count_offset  = java_lang_String::count_offset_in_bytes();
 805   const int offset_offset = java_lang_String::offset_offset_in_bytes();
 806 
 807   Node* no_ctrl = NULL;
 808 
 809   ciInstanceKlass* klass = env()->String_klass();
 810   const TypeInstPtr* string_type =
 811         TypeInstPtr::make(TypePtr::BotPTR, klass, false, NULL, 0);
 812 
 813   const TypeAryPtr* value_type = 
 814         TypeAryPtr::make(TypePtr::NotNull,
 815                          TypeAry::make(TypeInt::CHAR,TypeInt::POS),
 816                          ciTypeArrayKlass::make(T_CHAR), true, 0);
 817 
 818   // Get start addr of string and substring
 819   Node* str1_valuea  = basic_plus_adr(str1, str1, value_offset);
 820   Node* str1_value   = make_load(no_ctrl, str1_valuea, value_type, T_OBJECT, string_type->add_offset(value_offset));
 821   Node* str1_offseta = basic_plus_adr(str1, str1, offset_offset);
 822   Node* str1_offset  = make_load(no_ctrl, str1_offseta, TypeInt::INT, T_INT, string_type->add_offset(offset_offset));
 823   Node* str1_start   = array_element_address(str1_value, str1_offset, T_CHAR);
 824 
 825   // Pin loads from String::equals() argument since it could be NULL.
 826   Node* str2_ctrl = (opcode == Op_StrEquals) ? control() : no_ctrl;
 827   Node* str2_valuea  = basic_plus_adr(str2, str2, value_offset);
 828   Node* str2_value   = make_load(str2_ctrl, str2_valuea, value_type, T_OBJECT, string_type->add_offset(value_offset));
 829   Node* str2_offseta = basic_plus_adr(str2, str2, offset_offset);
 830   Node* str2_offset  = make_load(str2_ctrl, str2_offseta, TypeInt::INT, T_INT, string_type->add_offset(offset_offset));
 831   Node* str2_start   = array_element_address(str2_value, str2_offset, T_CHAR);
 832 
 833   Node* result = NULL;
 834   switch (opcode) {
 835   case Op_StrIndexOf:
 836     result = new (C, 6) StrIndexOfNode(control(), memory(TypeAryPtr::CHARS),
 837                                        str1_start, cnt1, str2_start, cnt2);
 838     break;
 839   case Op_StrComp:
 840     result = new (C, 6) StrCompNode(control(), memory(TypeAryPtr::CHARS),
 841                                     str1_start, cnt1, str2_start, cnt2);
 842     break;
 843   case Op_StrEquals:
 844     result = new (C, 5) StrEqualsNode(control(), memory(TypeAryPtr::CHARS),
 845                                       str1_start, str2_start, cnt1);
 846     break;
 847   default:
 848     ShouldNotReachHere();
 849     return NULL;
 850   }
 851 
 852   // All these intrinsics have checks.
 853   C->set_has_split_ifs(true); // Has chance for split-if optimization
 854 
 855   return _gvn.transform(result);
 856 }
 857 
 858 //------------------------------inline_string_compareTo------------------------
 859 bool LibraryCallKit::inline_string_compareTo() {
 860 
 861   if (!Matcher::has_match_rule(Op_StrComp)) return false;
 862 
 863   const int value_offset = java_lang_String::value_offset_in_bytes();
 864   const int count_offset = java_lang_String::count_offset_in_bytes();
 865   const int offset_offset = java_lang_String::offset_offset_in_bytes();
 866 
 867   _sp += 2;
 868   Node *argument = pop();  // pop non-receiver first:  it was pushed second
 869   Node *receiver = pop();
 870 
 871   // Null check on self without removing any arguments.  The argument
 872   // null check technically happens in the wrong place, which can lead to
 873   // invalid stack traces when string compare is inlined into a method
 874   // which handles NullPointerExceptions.
 875   _sp += 2;
 876   receiver = do_null_check(receiver, T_OBJECT);
 877   argument = do_null_check(argument, T_OBJECT);
 878   _sp -= 2;
 879   if (stopped()) {
 880     return true;
 881   }
 882 
 883   ciInstanceKlass* klass = env()->String_klass();
 884   const TypeInstPtr* string_type =
 885     TypeInstPtr::make(TypePtr::BotPTR, klass, false, NULL, 0);
 886   Node* no_ctrl = NULL;
 887 
 888   // Get counts for string and argument
 889   Node* receiver_cnta = basic_plus_adr(receiver, receiver, count_offset);
 890   Node* receiver_cnt  = make_load(no_ctrl, receiver_cnta, TypeInt::INT, T_INT, string_type->add_offset(count_offset));
 891 
 892   Node* argument_cnta = basic_plus_adr(argument, argument, count_offset);
 893   Node* argument_cnt  = make_load(no_ctrl, argument_cnta, TypeInt::INT, T_INT, string_type->add_offset(count_offset));
 894 
 895   Node* compare = make_string_method_node(Op_StrComp, receiver, receiver_cnt, argument, argument_cnt);
 896   push(compare);
 897   return true;
 898 }
 899 
 900 //------------------------------inline_string_equals------------------------
 901 bool LibraryCallKit::inline_string_equals() {
 902 
 903   if (!Matcher::has_match_rule(Op_StrEquals)) return false;
 904 
 905   const int value_offset = java_lang_String::value_offset_in_bytes();
 906   const int count_offset = java_lang_String::count_offset_in_bytes();
 907   const int offset_offset = java_lang_String::offset_offset_in_bytes();
 908 
 909   _sp += 2;
 910   Node* argument = pop();  // pop non-receiver first:  it was pushed second
 911   Node* receiver = pop();
 912 
 913   // Null check on self without removing any arguments.  The argument
 914   // null check technically happens in the wrong place, which can lead to
 915   // invalid stack traces when string compare is inlined into a method
 916   // which handles NullPointerExceptions.
 917   _sp += 2;
 918   receiver = do_null_check(receiver, T_OBJECT);
 919   //should not do null check for argument for String.equals(), because spec
 920   //allows to specify NULL as argument.
 921   _sp -= 2;
 922 
 923   if (stopped()) {
 924     return true;
 925   }
 926 
 927   // paths (plus control) merge
 928   RegionNode* region = new (C, 5) RegionNode(5);
 929   Node* phi = new (C, 5) PhiNode(region, TypeInt::BOOL);
 930 
 931   // does source == target string?
 932   Node* cmp = _gvn.transform(new (C, 3) CmpPNode(receiver, argument));
 933   Node* bol = _gvn.transform(new (C, 2) BoolNode(cmp, BoolTest::eq));
 934 
 935   Node* if_eq = generate_slow_guard(bol, NULL);
 936   if (if_eq != NULL) {
 937     // receiver == argument
 938     phi->init_req(2, intcon(1));
 939     region->init_req(2, if_eq);
 940   }
 941 
 942   // get String klass for instanceOf
 943   ciInstanceKlass* klass = env()->String_klass();
 944 
 945   if (!stopped()) {
 946     Node* inst = gen_instanceof(argument, makecon(TypeKlassPtr::make(klass)));
 947     Node* cmp  = _gvn.transform(new (C, 3) CmpINode(inst, intcon(1)));
 948     Node* bol  = _gvn.transform(new (C, 2) BoolNode(cmp, BoolTest::ne));
 949 
 950     Node* inst_false = generate_guard(bol, NULL, PROB_MIN);
 951     //instanceOf == true, fallthrough
 952 
 953     if (inst_false != NULL) {
 954       phi->init_req(3, intcon(0));
 955       region->init_req(3, inst_false);
 956     }
 957   }
 958 
 959   const TypeInstPtr* string_type =
 960     TypeInstPtr::make(TypePtr::BotPTR, klass, false, NULL, 0);
 961 
 962   Node* no_ctrl = NULL;
 963   Node* receiver_cnt;
 964   Node* argument_cnt;
 965 
 966   if (!stopped()) {
 967     // Get counts for string and argument
 968     Node* receiver_cnta = basic_plus_adr(receiver, receiver, count_offset);
 969     receiver_cnt  = make_load(no_ctrl, receiver_cnta, TypeInt::INT, T_INT, string_type->add_offset(count_offset));
 970 
 971     // Pin load from argument string since it could be NULL.
 972     Node* argument_cnta = basic_plus_adr(argument, argument, count_offset);
 973     argument_cnt  = make_load(control(), argument_cnta, TypeInt::INT, T_INT, string_type->add_offset(count_offset));
 974 
 975     // Check for receiver count != argument count
 976     Node* cmp = _gvn.transform( new(C, 3) CmpINode(receiver_cnt, argument_cnt) );
 977     Node* bol = _gvn.transform( new(C, 2) BoolNode(cmp, BoolTest::ne) );
 978     Node* if_ne = generate_slow_guard(bol, NULL);
 979     if (if_ne != NULL) {
 980       phi->init_req(4, intcon(0));
 981       region->init_req(4, if_ne);
 982     }
 983   }
 984 
 985   // Check for count == 0 is done by mach node StrEquals.
 986 
 987   if (!stopped()) {
 988     Node* equals = make_string_method_node(Op_StrEquals, receiver, receiver_cnt, argument, argument_cnt);
 989     phi->init_req(1, equals);
 990     region->init_req(1, control());
 991   }
 992 
 993   // post merge
 994   set_control(_gvn.transform(region));
 995   record_for_igvn(region);
 996 
 997   push(_gvn.transform(phi));
 998 
 999   return true;
1000 }
1001 
1002 //------------------------------inline_array_equals----------------------------
1003 bool LibraryCallKit::inline_array_equals() {
1004 
1005   if (!Matcher::has_match_rule(Op_AryEq)) return false;
1006 
1007   _sp += 2;
1008   Node *argument2 = pop();
1009   Node *argument1 = pop();
1010 
1011   Node* equals =
1012     _gvn.transform(new (C, 4) AryEqNode(control(), memory(TypeAryPtr::CHARS),
1013                                         argument1, argument2) );
1014   push(equals);
1015   return true;
1016 }
1017 
1018 // Java version of String.indexOf(constant string)
1019 // class StringDecl {
1020 //   StringDecl(char[] ca) {
1021 //     offset = 0;
1022 //     count = ca.length;
1023 //     value = ca;
1024 //   }
1025 //   int offset;
1026 //   int count;
1027 //   char[] value;
1028 // }
1029 //
1030 // static int string_indexOf_J(StringDecl string_object, char[] target_object,
1031 //                             int targetOffset, int cache_i, int md2) {
1032 //   int cache = cache_i;
1033 //   int sourceOffset = string_object.offset;
1034 //   int sourceCount = string_object.count;
1035 //   int targetCount = target_object.length;
1036 //
1037 //   int targetCountLess1 = targetCount - 1;
1038 //   int sourceEnd = sourceOffset + sourceCount - targetCountLess1;
1039 //
1040 //   char[] source = string_object.value;
1041 //   char[] target = target_object;
1042 //   int lastChar = target[targetCountLess1];
1043 //
1044 //  outer_loop:
1045 //   for (int i = sourceOffset; i < sourceEnd; ) {
1046 //     int src = source[i + targetCountLess1];
1047 //     if (src == lastChar) {
1048 //       // With random strings and a 4-character alphabet,
1049 //       // reverse matching at this point sets up 0.8% fewer
1050 //       // frames, but (paradoxically) makes 0.3% more probes.
1051 //       // Since those probes are nearer the lastChar probe,
1052 //       // there is may be a net D$ win with reverse matching.
1053 //       // But, reversing loop inhibits unroll of inner loop
1054 //       // for unknown reason.  So, does running outer loop from
1055 //       // (sourceOffset - targetCountLess1) to (sourceOffset + sourceCount)
1056 //       for (int j = 0; j < targetCountLess1; j++) {
1057 //         if (target[targetOffset + j] != source[i+j]) {
1058 //           if ((cache & (1 << source[i+j])) == 0) {
1059 //             if (md2 < j+1) {
1060 //               i += j+1;
1061 //               continue outer_loop;
1062 //             }
1063 //           }
1064 //           i += md2;
1065 //           continue outer_loop;
1066 //         }
1067 //       }
1068 //       return i - sourceOffset;
1069 //     }
1070 //     if ((cache & (1 << src)) == 0) {
1071 //       i += targetCountLess1;
1072 //     } // using "i += targetCount;" and an "else i++;" causes a jump to jump.
1073 //     i++;
1074 //   }
1075 //   return -1;
1076 // }
1077 
1078 //------------------------------string_indexOf------------------------
1079 Node* LibraryCallKit::string_indexOf(Node* string_object, ciTypeArray* target_array, jint targetOffset_i,
1080                                      jint cache_i, jint md2_i) {
1081 
1082   Node* no_ctrl  = NULL;
1083   float likely   = PROB_LIKELY(0.9);
1084   float unlikely = PROB_UNLIKELY(0.9);
1085 
1086   const int value_offset  = java_lang_String::value_offset_in_bytes();
1087   const int count_offset  = java_lang_String::count_offset_in_bytes();
1088   const int offset_offset = java_lang_String::offset_offset_in_bytes();
1089 
1090   ciInstanceKlass* klass = env()->String_klass();
1091   const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::BotPTR, klass, false, NULL, 0);
1092   const TypeAryPtr*  source_type = TypeAryPtr::make(TypePtr::NotNull, TypeAry::make(TypeInt::CHAR,TypeInt::POS), ciTypeArrayKlass::make(T_CHAR), true, 0);
1093 
1094   Node* sourceOffseta = basic_plus_adr(string_object, string_object, offset_offset);
1095   Node* sourceOffset  = make_load(no_ctrl, sourceOffseta, TypeInt::INT, T_INT, string_type->add_offset(offset_offset));
1096   Node* sourceCounta  = basic_plus_adr(string_object, string_object, count_offset);
1097   Node* sourceCount   = make_load(no_ctrl, sourceCounta, TypeInt::INT, T_INT, string_type->add_offset(count_offset));
1098   Node* sourcea       = basic_plus_adr(string_object, string_object, value_offset);
1099   Node* source        = make_load(no_ctrl, sourcea, source_type, T_OBJECT, string_type->add_offset(value_offset));
1100 
1101   Node* target = _gvn.transform( makecon(TypeOopPtr::make_from_constant(target_array)) );
1102   jint target_length = target_array->length();
1103   const TypeAry* target_array_type = TypeAry::make(TypeInt::CHAR, TypeInt::make(0, target_length, Type::WidenMin));
1104   const TypeAryPtr* target_type = TypeAryPtr::make(TypePtr::BotPTR, target_array_type, target_array->klass(), true, Type::OffsetBot);
1105 
1106   IdealKit kit(gvn(), control(), merged_memory(), false, true);
1107 #define __ kit.
1108   Node* zero             = __ ConI(0);
1109   Node* one              = __ ConI(1);
1110   Node* cache            = __ ConI(cache_i);
1111   Node* md2              = __ ConI(md2_i);
1112   Node* lastChar         = __ ConI(target_array->char_at(target_length - 1));
1113   Node* targetCount      = __ ConI(target_length);
1114   Node* targetCountLess1 = __ ConI(target_length - 1);
1115   Node* targetOffset     = __ ConI(targetOffset_i);
1116   Node* sourceEnd        = __ SubI(__ AddI(sourceOffset, sourceCount), targetCountLess1);
1117 
1118   IdealVariable rtn(kit), i(kit), j(kit); __ declarations_done();
1119   Node* outer_loop = __ make_label(2 /* goto */);
1120   Node* return_    = __ make_label(1);
1121 
1122   __ set(rtn,__ ConI(-1));
1123   __ loop(i, sourceOffset, BoolTest::lt, sourceEnd); {
1124        Node* i2  = __ AddI(__ value(i), targetCountLess1);
1125        // pin to prohibit loading of "next iteration" value which may SEGV (rare)
1126        Node* src = load_array_element(__ ctrl(), source, i2, TypeAryPtr::CHARS);
1127        __ if_then(src, BoolTest::eq, lastChar, unlikely); {
1128          __ loop(j, zero, BoolTest::lt, targetCountLess1); {
1129               Node* tpj = __ AddI(targetOffset, __ value(j));
1130               Node* targ = load_array_element(no_ctrl, target, tpj, target_type);
1131               Node* ipj  = __ AddI(__ value(i), __ value(j));
1132               Node* src2 = load_array_element(no_ctrl, source, ipj, TypeAryPtr::CHARS);
1133               __ if_then(targ, BoolTest::ne, src2); {
1134                 __ if_then(__ AndI(cache, __ LShiftI(one, src2)), BoolTest::eq, zero); {
1135                   __ if_then(md2, BoolTest::lt, __ AddI(__ value(j), one)); {
1136                     __ increment(i, __ AddI(__ value(j), one));
1137                     __ goto_(outer_loop);
1138                   } __ end_if(); __ dead(j);
1139                 }__ end_if(); __ dead(j);
1140                 __ increment(i, md2);
1141                 __ goto_(outer_loop);
1142               }__ end_if();
1143               __ increment(j, one);
1144          }__ end_loop(); __ dead(j);
1145          __ set(rtn, __ SubI(__ value(i), sourceOffset)); __ dead(i);
1146          __ goto_(return_);
1147        }__ end_if();
1148        __ if_then(__ AndI(cache, __ LShiftI(one, src)), BoolTest::eq, zero, likely); {
1149          __ increment(i, targetCountLess1);
1150        }__ end_if();
1151        __ increment(i, one);
1152        __ bind(outer_loop);
1153   }__ end_loop(); __ dead(i);
1154   __ bind(return_);
1155 
1156   // Final sync IdealKit and GraphKit.
1157   sync_kit(kit);
1158   Node* result = __ value(rtn);
1159 #undef __
1160   C->set_has_loops(true);
1161   return result;
1162 }
1163 
1164 //------------------------------inline_string_indexOf------------------------
1165 bool LibraryCallKit::inline_string_indexOf() {
1166 
1167   const int value_offset  = java_lang_String::value_offset_in_bytes();
1168   const int count_offset  = java_lang_String::count_offset_in_bytes();
1169   const int offset_offset = java_lang_String::offset_offset_in_bytes();
1170 
1171   _sp += 2;
1172   Node *argument = pop();  // pop non-receiver first:  it was pushed second
1173   Node *receiver = pop();
1174 
1175   Node* result;
1176   if (Matcher::has_match_rule(Op_StrIndexOf) &&
1177       UseSSE42Intrinsics) {
1178     // Generate SSE4.2 version of indexOf
1179     // We currently only have match rules that use SSE4.2
1180 
1181     // Null check on self without removing any arguments.  The argument
1182     // null check technically happens in the wrong place, which can lead to
1183     // invalid stack traces when string compare is inlined into a method
1184     // which handles NullPointerExceptions.
1185     _sp += 2;
1186     receiver = do_null_check(receiver, T_OBJECT);
1187     argument = do_null_check(argument, T_OBJECT);
1188     _sp -= 2;
1189 
1190     if (stopped()) {
1191       return true;
1192     }
1193 
1194     // Make the merge point
1195     RegionNode* result_rgn = new (C, 3) RegionNode(3);
1196     Node*       result_phi = new (C, 3) PhiNode(result_rgn, TypeInt::INT);
1197     Node* no_ctrl  = NULL;
1198 
1199     ciInstanceKlass* klass = env()->String_klass();
1200     const TypeInstPtr* string_type =
1201       TypeInstPtr::make(TypePtr::BotPTR, klass, false, NULL, 0);
1202 
1203     // Get counts for string and substr
1204     Node* source_cnta = basic_plus_adr(receiver, receiver, count_offset);
1205     Node* source_cnt  = make_load(no_ctrl, source_cnta, TypeInt::INT, T_INT, string_type->add_offset(count_offset));
1206 
1207     Node* substr_cnta = basic_plus_adr(argument, argument, count_offset);
1208     Node* substr_cnt  = make_load(no_ctrl, substr_cnta, TypeInt::INT, T_INT, string_type->add_offset(count_offset));
1209 
1210     // Check for substr count > string count
1211     Node* cmp = _gvn.transform( new(C, 3) CmpINode(substr_cnt, source_cnt) );
1212     Node* bol = _gvn.transform( new(C, 2) BoolNode(cmp, BoolTest::gt) );
1213     Node* if_gt = generate_slow_guard(bol, NULL);
1214     if (if_gt != NULL) {
1215       result_phi->init_req(2, intcon(-1));
1216       result_rgn->init_req(2, if_gt);
1217     }
1218 
1219     if (!stopped()) {
1220       result = make_string_method_node(Op_StrIndexOf, receiver, source_cnt, argument, substr_cnt);
1221       result_phi->init_req(1, result);
1222       result_rgn->init_req(1, control());
1223     }
1224     set_control(_gvn.transform(result_rgn));
1225     record_for_igvn(result_rgn);
1226     result = _gvn.transform(result_phi);
1227     
1228   } else { //Use LibraryCallKit::string_indexOf
1229     // don't intrinsify is argument isn't a constant string.
1230     if (!argument->is_Con()) {
1231      return false;
1232     }
1233     const TypeOopPtr* str_type = _gvn.type(argument)->isa_oopptr();
1234     if (str_type == NULL) {
1235       return false;
1236     }
1237     ciInstanceKlass* klass = env()->String_klass();
1238     ciObject* str_const = str_type->const_oop();
1239     if (str_const == NULL || str_const->klass() != klass) {
1240       return false;
1241     }
1242     ciInstance* str = str_const->as_instance();
1243     assert(str != NULL, "must be instance");
1244 
1245     ciObject* v = str->field_value_by_offset(value_offset).as_object();
1246     int       o = str->field_value_by_offset(offset_offset).as_int();
1247     int       c = str->field_value_by_offset(count_offset).as_int();
1248     ciTypeArray* pat = v->as_type_array(); // pattern (argument) character array
1249 
1250     // constant strings have no offset and count == length which
1251     // simplifies the resulting code somewhat so lets optimize for that.
1252     if (o != 0 || c != pat->length()) {
1253      return false;
1254     }
1255 
1256     // Null check on self without removing any arguments.  The argument
1257     // null check technically happens in the wrong place, which can lead to
1258     // invalid stack traces when string compare is inlined into a method
1259     // which handles NullPointerExceptions.
1260     _sp += 2;
1261     receiver = do_null_check(receiver, T_OBJECT);
1262     // No null check on the argument is needed since it's a constant String oop.
1263     _sp -= 2;
1264     if (stopped()) {
1265      return true;
1266     }
1267 
1268     // The null string as a pattern always returns 0 (match at beginning of string)
1269     if (c == 0) {
1270       push(intcon(0));
1271       return true;
1272     }
1273 
1274     // Generate default indexOf
1275     jchar lastChar = pat->char_at(o + (c - 1));
1276     int cache = 0;
1277     int i;
1278     for (i = 0; i < c - 1; i++) {
1279       assert(i < pat->length(), "out of range");
1280       cache |= (1 << (pat->char_at(o + i) & (sizeof(cache) * BitsPerByte - 1)));
1281     }
1282 
1283     int md2 = c;
1284     for (i = 0; i < c - 1; i++) {
1285       assert(i < pat->length(), "out of range");
1286       if (pat->char_at(o + i) == lastChar) {
1287         md2 = (c - 1) - i;
1288       }
1289     }
1290 
1291     result = string_indexOf(receiver, pat, o, cache, md2);
1292   }
1293 
1294   push(result);
1295   return true;
1296 }
1297 
1298 //--------------------------pop_math_arg--------------------------------
1299 // Pop a double argument to a math function from the stack
1300 // rounding it if necessary.
1301 Node * LibraryCallKit::pop_math_arg() {
1302   Node *arg = pop_pair();
1303   if( Matcher::strict_fp_requires_explicit_rounding && UseSSE<=1 )
1304     arg = _gvn.transform( new (C, 2) RoundDoubleNode(0, arg) );
1305   return arg;
1306 }
1307 
1308 //------------------------------inline_trig----------------------------------
1309 // Inline sin/cos/tan instructions, if possible.  If rounding is required, do
1310 // argument reduction which will turn into a fast/slow diamond.
1311 bool LibraryCallKit::inline_trig(vmIntrinsics::ID id) {
1312   _sp += arg_size();            // restore stack pointer
1313   Node* arg = pop_math_arg();
1314   Node* trig = NULL;
1315 
1316   switch (id) {
1317   case vmIntrinsics::_dsin:
1318     trig = _gvn.transform((Node*)new (C, 2) SinDNode(arg));
1319     break;
1320   case vmIntrinsics::_dcos:
1321     trig = _gvn.transform((Node*)new (C, 2) CosDNode(arg));
1322     break;
1323   case vmIntrinsics::_dtan:
1324     trig = _gvn.transform((Node*)new (C, 2) TanDNode(arg));
1325     break;
1326   default:
1327     assert(false, "bad intrinsic was passed in");
1328     return false;
1329   }
1330 
1331   // Rounding required?  Check for argument reduction!
1332   if( Matcher::strict_fp_requires_explicit_rounding ) {
1333 
1334     static const double     pi_4 =  0.7853981633974483;
1335     static const double neg_pi_4 = -0.7853981633974483;
1336     // pi/2 in 80-bit extended precision
1337     // static const unsigned char pi_2_bits_x[] = {0x35,0xc2,0x68,0x21,0xa2,0xda,0x0f,0xc9,0xff,0x3f,0x00,0x00,0x00,0x00,0x00,0x00};
1338     // -pi/2 in 80-bit extended precision
1339     // static const unsigned char neg_pi_2_bits_x[] = {0x35,0xc2,0x68,0x21,0xa2,0xda,0x0f,0xc9,0xff,0xbf,0x00,0x00,0x00,0x00,0x00,0x00};
1340     // Cutoff value for using this argument reduction technique
1341     //static const double    pi_2_minus_epsilon =  1.564660403643354;
1342     //static const double neg_pi_2_plus_epsilon = -1.564660403643354;
1343 
1344     // Pseudocode for sin:
1345     // if (x <= Math.PI / 4.0) {
1346     //   if (x >= -Math.PI / 4.0) return  fsin(x);
1347     //   if (x >= -Math.PI / 2.0) return -fcos(x + Math.PI / 2.0);
1348     // } else {
1349     //   if (x <=  Math.PI / 2.0) return  fcos(x - Math.PI / 2.0);
1350     // }
1351     // return StrictMath.sin(x);
1352 
1353     // Pseudocode for cos:
1354     // if (x <= Math.PI / 4.0) {
1355     //   if (x >= -Math.PI / 4.0) return  fcos(x);
1356     //   if (x >= -Math.PI / 2.0) return  fsin(x + Math.PI / 2.0);
1357     // } else {
1358     //   if (x <=  Math.PI / 2.0) return -fsin(x - Math.PI / 2.0);
1359     // }
1360     // return StrictMath.cos(x);
1361 
1362     // Actually, sticking in an 80-bit Intel value into C2 will be tough; it
1363     // requires a special machine instruction to load it.  Instead we'll try
1364     // the 'easy' case.  If we really need the extra range +/- PI/2 we'll
1365     // probably do the math inside the SIN encoding.
1366 
1367     // Make the merge point
1368     RegionNode *r = new (C, 3) RegionNode(3);
1369     Node *phi = new (C, 3) PhiNode(r,Type::DOUBLE);
1370 
1371     // Flatten arg so we need only 1 test
1372     Node *abs = _gvn.transform(new (C, 2) AbsDNode(arg));
1373     // Node for PI/4 constant
1374     Node *pi4 = makecon(TypeD::make(pi_4));
1375     // Check PI/4 : abs(arg)
1376     Node *cmp = _gvn.transform(new (C, 3) CmpDNode(pi4,abs));
1377     // Check: If PI/4 < abs(arg) then go slow
1378     Node *bol = _gvn.transform( new (C, 2) BoolNode( cmp, BoolTest::lt ) );
1379     // Branch either way
1380     IfNode *iff = create_and_xform_if(control(),bol, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
1381     set_control(opt_iff(r,iff));
1382 
1383     // Set fast path result
1384     phi->init_req(2,trig);
1385 
1386     // Slow path - non-blocking leaf call
1387     Node* call = NULL;
1388     switch (id) {
1389     case vmIntrinsics::_dsin:
1390       call = make_runtime_call(RC_LEAF, OptoRuntime::Math_D_D_Type(),
1391                                CAST_FROM_FN_PTR(address, SharedRuntime::dsin),
1392                                "Sin", NULL, arg, top());
1393       break;
1394     case vmIntrinsics::_dcos:
1395       call = make_runtime_call(RC_LEAF, OptoRuntime::Math_D_D_Type(),
1396                                CAST_FROM_FN_PTR(address, SharedRuntime::dcos),
1397                                "Cos", NULL, arg, top());
1398       break;
1399     case vmIntrinsics::_dtan:
1400       call = make_runtime_call(RC_LEAF, OptoRuntime::Math_D_D_Type(),
1401                                CAST_FROM_FN_PTR(address, SharedRuntime::dtan),
1402                                "Tan", NULL, arg, top());
1403       break;
1404     }
1405     assert(control()->in(0) == call, "");
1406     Node* slow_result = _gvn.transform(new (C, 1) ProjNode(call,TypeFunc::Parms));
1407     r->init_req(1,control());
1408     phi->init_req(1,slow_result);
1409 
1410     // Post-merge
1411     set_control(_gvn.transform(r));
1412     record_for_igvn(r);
1413     trig = _gvn.transform(phi);
1414 
1415     C->set_has_split_ifs(true); // Has chance for split-if optimization
1416   }
1417   // Push result back on JVM stack
1418   push_pair(trig);
1419   return true;
1420 }
1421 
1422 //------------------------------inline_sqrt-------------------------------------
1423 // Inline square root instruction, if possible.
1424 bool LibraryCallKit::inline_sqrt(vmIntrinsics::ID id) {
1425   assert(id == vmIntrinsics::_dsqrt, "Not square root");
1426   _sp += arg_size();        // restore stack pointer
1427   push_pair(_gvn.transform(new (C, 2) SqrtDNode(0, pop_math_arg())));
1428   return true;
1429 }
1430 
1431 //------------------------------inline_abs-------------------------------------
1432 // Inline absolute value instruction, if possible.
1433 bool LibraryCallKit::inline_abs(vmIntrinsics::ID id) {
1434   assert(id == vmIntrinsics::_dabs, "Not absolute value");
1435   _sp += arg_size();        // restore stack pointer
1436   push_pair(_gvn.transform(new (C, 2) AbsDNode(pop_math_arg())));
1437   return true;
1438 }
1439 
1440 //------------------------------inline_exp-------------------------------------
1441 // Inline exp instructions, if possible.  The Intel hardware only misses
1442 // really odd corner cases (+/- Infinity).  Just uncommon-trap them.
1443 bool LibraryCallKit::inline_exp(vmIntrinsics::ID id) {
1444   assert(id == vmIntrinsics::_dexp, "Not exp");
1445 
1446   // If this inlining ever returned NaN in the past, we do not intrinsify it
1447   // every again.  NaN results requires StrictMath.exp handling.
1448   if (too_many_traps(Deoptimization::Reason_intrinsic))  return false;
1449 
1450   // Do not intrinsify on older platforms which lack cmove.
1451   if (ConditionalMoveLimit == 0)  return false;
1452 
1453   _sp += arg_size();        // restore stack pointer
1454   Node *x = pop_math_arg();
1455   Node *result = _gvn.transform(new (C, 2) ExpDNode(0,x));
1456 
1457   //-------------------
1458   //result=(result.isNaN())? StrictMath::exp():result;
1459   // Check: If isNaN() by checking result!=result? then go to Strict Math
1460   Node* cmpisnan = _gvn.transform(new (C, 3) CmpDNode(result,result));
1461   // Build the boolean node
1462   Node* bolisnum = _gvn.transform( new (C, 2) BoolNode(cmpisnan, BoolTest::eq) );
1463 
1464   { BuildCutout unless(this, bolisnum, PROB_STATIC_FREQUENT);
1465     // End the current control-flow path
1466     push_pair(x);
1467     // Math.exp intrinsic returned a NaN, which requires StrictMath.exp
1468     // to handle.  Recompile without intrinsifying Math.exp
1469     uncommon_trap(Deoptimization::Reason_intrinsic,
1470                   Deoptimization::Action_make_not_entrant);
1471   }
1472 
1473   C->set_has_split_ifs(true); // Has chance for split-if optimization
1474 
1475   push_pair(result);
1476 
1477   return true;
1478 }
1479 
1480 //------------------------------inline_pow-------------------------------------
1481 // Inline power instructions, if possible.
1482 bool LibraryCallKit::inline_pow(vmIntrinsics::ID id) {
1483   assert(id == vmIntrinsics::_dpow, "Not pow");
1484 
1485   // If this inlining ever returned NaN in the past, we do not intrinsify it
1486   // every again.  NaN results requires StrictMath.pow handling.
1487   if (too_many_traps(Deoptimization::Reason_intrinsic))  return false;
1488 
1489   // Do not intrinsify on older platforms which lack cmove.
1490   if (ConditionalMoveLimit == 0)  return false;
1491 
1492   // Pseudocode for pow
1493   // if (x <= 0.0) {
1494   //   if ((double)((int)y)==y) { // if y is int
1495   //     result = ((1&(int)y)==0)?-DPow(abs(x), y):DPow(abs(x), y)
1496   //   } else {
1497   //     result = NaN;
1498   //   }
1499   // } else {
1500   //   result = DPow(x,y);
1501   // }
1502   // if (result != result)?  {
1503   //   uncommon_trap();
1504   // }
1505   // return result;
1506 
1507   _sp += arg_size();        // restore stack pointer
1508   Node* y = pop_math_arg();
1509   Node* x = pop_math_arg();
1510 
1511   Node *fast_result = _gvn.transform( new (C, 3) PowDNode(0, x, y) );
1512 
1513   // Short form: if not top-level (i.e., Math.pow but inlining Math.pow
1514   // inside of something) then skip the fancy tests and just check for
1515   // NaN result.
1516   Node *result = NULL;
1517   if( jvms()->depth() >= 1 ) {
1518     result = fast_result;
1519   } else {
1520 
1521     // Set the merge point for If node with condition of (x <= 0.0)
1522     // There are four possible paths to region node and phi node
1523     RegionNode *r = new (C, 4) RegionNode(4);
1524     Node *phi = new (C, 4) PhiNode(r, Type::DOUBLE);
1525 
1526     // Build the first if node: if (x <= 0.0)
1527     // Node for 0 constant
1528     Node *zeronode = makecon(TypeD::ZERO);
1529     // Check x:0
1530     Node *cmp = _gvn.transform(new (C, 3) CmpDNode(x, zeronode));
1531     // Check: If (x<=0) then go complex path
1532     Node *bol1 = _gvn.transform( new (C, 2) BoolNode( cmp, BoolTest::le ) );
1533     // Branch either way
1534     IfNode *if1 = create_and_xform_if(control(),bol1, PROB_STATIC_INFREQUENT, COUNT_UNKNOWN);
1535     Node *opt_test = _gvn.transform(if1);
1536     //assert( opt_test->is_If(), "Expect an IfNode");
1537     IfNode *opt_if1 = (IfNode*)opt_test;
1538     // Fast path taken; set region slot 3
1539     Node *fast_taken = _gvn.transform( new (C, 1) IfFalseNode(opt_if1) );
1540     r->init_req(3,fast_taken); // Capture fast-control
1541 
1542     // Fast path not-taken, i.e. slow path
1543     Node *complex_path = _gvn.transform( new (C, 1) IfTrueNode(opt_if1) );
1544 
1545     // Set fast path result
1546     Node *fast_result = _gvn.transform( new (C, 3) PowDNode(0, y, x) );
1547     phi->init_req(3, fast_result);
1548 
1549     // Complex path
1550     // Build the second if node (if y is int)
1551     // Node for (int)y
1552     Node *inty = _gvn.transform( new (C, 2) ConvD2INode(y));
1553     // Node for (double)((int) y)
1554     Node *doubleinty= _gvn.transform( new (C, 2) ConvI2DNode(inty));
1555     // Check (double)((int) y) : y
1556     Node *cmpinty= _gvn.transform(new (C, 3) CmpDNode(doubleinty, y));
1557     // Check if (y isn't int) then go to slow path
1558 
1559     Node *bol2 = _gvn.transform( new (C, 2) BoolNode( cmpinty, BoolTest::ne ) );
1560     // Branch either way
1561     IfNode *if2 = create_and_xform_if(complex_path,bol2, PROB_STATIC_INFREQUENT, COUNT_UNKNOWN);
1562     Node *slow_path = opt_iff(r,if2); // Set region path 2
1563 
1564     // Calculate DPow(abs(x), y)*(1 & (int)y)
1565     // Node for constant 1
1566     Node *conone = intcon(1);
1567     // 1& (int)y
1568     Node *signnode= _gvn.transform( new (C, 3) AndINode(conone, inty) );
1569     // zero node
1570     Node *conzero = intcon(0);
1571     // Check (1&(int)y)==0?
1572     Node *cmpeq1 = _gvn.transform(new (C, 3) CmpINode(signnode, conzero));
1573     // Check if (1&(int)y)!=0?, if so the result is negative
1574     Node *bol3 = _gvn.transform( new (C, 2) BoolNode( cmpeq1, BoolTest::ne ) );
1575     // abs(x)
1576     Node *absx=_gvn.transform( new (C, 2) AbsDNode(x));
1577     // abs(x)^y
1578     Node *absxpowy = _gvn.transform( new (C, 3) PowDNode(0, y, absx) );
1579     // -abs(x)^y
1580     Node *negabsxpowy = _gvn.transform(new (C, 2) NegDNode (absxpowy));
1581     // (1&(int)y)==1?-DPow(abs(x), y):DPow(abs(x), y)
1582     Node *signresult = _gvn.transform( CMoveNode::make(C, NULL, bol3, absxpowy, negabsxpowy, Type::DOUBLE));
1583     // Set complex path fast result
1584     phi->init_req(2, signresult);
1585 
1586     static const jlong nan_bits = CONST64(0x7ff8000000000000);
1587     Node *slow_result = makecon(TypeD::make(*(double*)&nan_bits)); // return NaN
1588     r->init_req(1,slow_path);
1589     phi->init_req(1,slow_result);
1590 
1591     // Post merge
1592     set_control(_gvn.transform(r));
1593     record_for_igvn(r);
1594     result=_gvn.transform(phi);
1595   }
1596 
1597   //-------------------
1598   //result=(result.isNaN())? uncommon_trap():result;
1599   // Check: If isNaN() by checking result!=result? then go to Strict Math
1600   Node* cmpisnan = _gvn.transform(new (C, 3) CmpDNode(result,result));
1601   // Build the boolean node
1602   Node* bolisnum = _gvn.transform( new (C, 2) BoolNode(cmpisnan, BoolTest::eq) );
1603 
1604   { BuildCutout unless(this, bolisnum, PROB_STATIC_FREQUENT);
1605     // End the current control-flow path
1606     push_pair(x);
1607     push_pair(y);
1608     // Math.pow intrinsic returned a NaN, which requires StrictMath.pow
1609     // to handle.  Recompile without intrinsifying Math.pow.
1610     uncommon_trap(Deoptimization::Reason_intrinsic,
1611                   Deoptimization::Action_make_not_entrant);
1612   }
1613 
1614   C->set_has_split_ifs(true); // Has chance for split-if optimization
1615 
1616   push_pair(result);
1617 
1618   return true;
1619 }
1620 
1621 //------------------------------inline_trans-------------------------------------
1622 // Inline transcendental instructions, if possible.  The Intel hardware gets
1623 // these right, no funny corner cases missed.
1624 bool LibraryCallKit::inline_trans(vmIntrinsics::ID id) {
1625   _sp += arg_size();        // restore stack pointer
1626   Node* arg = pop_math_arg();
1627   Node* trans = NULL;
1628 
1629   switch (id) {
1630   case vmIntrinsics::_dlog:
1631     trans = _gvn.transform((Node*)new (C, 2) LogDNode(arg));
1632     break;
1633   case vmIntrinsics::_dlog10:
1634     trans = _gvn.transform((Node*)new (C, 2) Log10DNode(arg));
1635     break;
1636   default:
1637     assert(false, "bad intrinsic was passed in");
1638     return false;
1639   }
1640 
1641   // Push result back on JVM stack
1642   push_pair(trans);
1643   return true;
1644 }
1645 
1646 //------------------------------runtime_math-----------------------------
1647 bool LibraryCallKit::runtime_math(const TypeFunc* call_type, address funcAddr, const char* funcName) {
1648   Node* a = NULL;
1649   Node* b = NULL;
1650 
1651   assert(call_type == OptoRuntime::Math_DD_D_Type() || call_type == OptoRuntime::Math_D_D_Type(),
1652          "must be (DD)D or (D)D type");
1653 
1654   // Inputs
1655   _sp += arg_size();        // restore stack pointer
1656   if (call_type == OptoRuntime::Math_DD_D_Type()) {
1657     b = pop_math_arg();
1658   }
1659   a = pop_math_arg();
1660 
1661   const TypePtr* no_memory_effects = NULL;
1662   Node* trig = make_runtime_call(RC_LEAF, call_type, funcAddr, funcName,
1663                                  no_memory_effects,
1664                                  a, top(), b, b ? top() : NULL);
1665   Node* value = _gvn.transform(new (C, 1) ProjNode(trig, TypeFunc::Parms+0));
1666 #ifdef ASSERT
1667   Node* value_top = _gvn.transform(new (C, 1) ProjNode(trig, TypeFunc::Parms+1));
1668   assert(value_top == top(), "second value must be top");
1669 #endif
1670 
1671   push_pair(value);
1672   return true;
1673 }
1674 
1675 //------------------------------inline_math_native-----------------------------
1676 bool LibraryCallKit::inline_math_native(vmIntrinsics::ID id) {
1677   switch (id) {
1678     // These intrinsics are not properly supported on all hardware
1679   case vmIntrinsics::_dcos: return Matcher::has_match_rule(Op_CosD) ? inline_trig(id) :
1680     runtime_math(OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dcos), "COS");
1681   case vmIntrinsics::_dsin: return Matcher::has_match_rule(Op_SinD) ? inline_trig(id) :
1682     runtime_math(OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dsin), "SIN");
1683   case vmIntrinsics::_dtan: return Matcher::has_match_rule(Op_TanD) ? inline_trig(id) :
1684     runtime_math(OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dtan), "TAN");
1685 
1686   case vmIntrinsics::_dlog:   return Matcher::has_match_rule(Op_LogD) ? inline_trans(id) :
1687     runtime_math(OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dlog), "LOG");
1688   case vmIntrinsics::_dlog10: return Matcher::has_match_rule(Op_Log10D) ? inline_trans(id) :
1689     runtime_math(OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dlog10), "LOG10");
1690 
1691     // These intrinsics are supported on all hardware
1692   case vmIntrinsics::_dsqrt: return Matcher::has_match_rule(Op_SqrtD) ? inline_sqrt(id) : false;
1693   case vmIntrinsics::_dabs:  return Matcher::has_match_rule(Op_AbsD)  ? inline_abs(id)  : false;
1694 
1695     // These intrinsics don't work on X86.  The ad implementation doesn't
1696     // handle NaN's properly.  Instead of returning infinity, the ad
1697     // implementation returns a NaN on overflow. See bug: 6304089
1698     // Once the ad implementations are fixed, change the code below
1699     // to match the intrinsics above
1700 
1701   case vmIntrinsics::_dexp:  return
1702     runtime_math(OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dexp), "EXP");
1703   case vmIntrinsics::_dpow:  return
1704     runtime_math(OptoRuntime::Math_DD_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dpow), "POW");
1705 
1706    // These intrinsics are not yet correctly implemented
1707   case vmIntrinsics::_datan2:
1708     return false;
1709 
1710   default:
1711     ShouldNotReachHere();
1712     return false;
1713   }
1714 }
1715 
1716 static bool is_simple_name(Node* n) {
1717   return (n->req() == 1         // constant
1718           || (n->is_Type() && n->as_Type()->type()->singleton())
1719           || n->is_Proj()       // parameter or return value
1720           || n->is_Phi()        // local of some sort
1721           );
1722 }
1723 
1724 //----------------------------inline_min_max-----------------------------------
1725 bool LibraryCallKit::inline_min_max(vmIntrinsics::ID id) {
1726   push(generate_min_max(id, argument(0), argument(1)));
1727 
1728   return true;
1729 }
1730 
1731 Node*
1732 LibraryCallKit::generate_min_max(vmIntrinsics::ID id, Node* x0, Node* y0) {
1733   // These are the candidate return value:
1734   Node* xvalue = x0;
1735   Node* yvalue = y0;
1736 
1737   if (xvalue == yvalue) {
1738     return xvalue;
1739   }
1740 
1741   bool want_max = (id == vmIntrinsics::_max);
1742 
1743   const TypeInt* txvalue = _gvn.type(xvalue)->isa_int();
1744   const TypeInt* tyvalue = _gvn.type(yvalue)->isa_int();
1745   if (txvalue == NULL || tyvalue == NULL)  return top();
1746   // This is not really necessary, but it is consistent with a
1747   // hypothetical MaxINode::Value method:
1748   int widen = MAX2(txvalue->_widen, tyvalue->_widen);
1749 
1750   // %%% This folding logic should (ideally) be in a different place.
1751   // Some should be inside IfNode, and there to be a more reliable
1752   // transformation of ?: style patterns into cmoves.  We also want
1753   // more powerful optimizations around cmove and min/max.
1754 
1755   // Try to find a dominating comparison of these guys.
1756   // It can simplify the index computation for Arrays.copyOf
1757   // and similar uses of System.arraycopy.
1758   // First, compute the normalized version of CmpI(x, y).
1759   int   cmp_op = Op_CmpI;
1760   Node* xkey = xvalue;
1761   Node* ykey = yvalue;
1762   Node* ideal_cmpxy = _gvn.transform( new(C, 3) CmpINode(xkey, ykey) );
1763   if (ideal_cmpxy->is_Cmp()) {
1764     // E.g., if we have CmpI(length - offset, count),
1765     // it might idealize to CmpI(length, count + offset)
1766     cmp_op = ideal_cmpxy->Opcode();
1767     xkey = ideal_cmpxy->in(1);
1768     ykey = ideal_cmpxy->in(2);
1769   }
1770 
1771   // Start by locating any relevant comparisons.
1772   Node* start_from = (xkey->outcnt() < ykey->outcnt()) ? xkey : ykey;
1773   Node* cmpxy = NULL;
1774   Node* cmpyx = NULL;
1775   for (DUIterator_Fast kmax, k = start_from->fast_outs(kmax); k < kmax; k++) {
1776     Node* cmp = start_from->fast_out(k);
1777     if (cmp->outcnt() > 0 &&            // must have prior uses
1778         cmp->in(0) == NULL &&           // must be context-independent
1779         cmp->Opcode() == cmp_op) {      // right kind of compare
1780       if (cmp->in(1) == xkey && cmp->in(2) == ykey)  cmpxy = cmp;
1781       if (cmp->in(1) == ykey && cmp->in(2) == xkey)  cmpyx = cmp;
1782     }
1783   }
1784 
1785   const int NCMPS = 2;
1786   Node* cmps[NCMPS] = { cmpxy, cmpyx };
1787   int cmpn;
1788   for (cmpn = 0; cmpn < NCMPS; cmpn++) {
1789     if (cmps[cmpn] != NULL)  break;     // find a result
1790   }
1791   if (cmpn < NCMPS) {
1792     // Look for a dominating test that tells us the min and max.
1793     int depth = 0;                // Limit search depth for speed
1794     Node* dom = control();
1795     for (; dom != NULL; dom = IfNode::up_one_dom(dom, true)) {
1796       if (++depth >= 100)  break;
1797       Node* ifproj = dom;
1798       if (!ifproj->is_Proj())  continue;
1799       Node* iff = ifproj->in(0);
1800       if (!iff->is_If())  continue;
1801       Node* bol = iff->in(1);
1802       if (!bol->is_Bool())  continue;
1803       Node* cmp = bol->in(1);
1804       if (cmp == NULL)  continue;
1805       for (cmpn = 0; cmpn < NCMPS; cmpn++)
1806         if (cmps[cmpn] == cmp)  break;
1807       if (cmpn == NCMPS)  continue;
1808       BoolTest::mask btest = bol->as_Bool()->_test._test;
1809       if (ifproj->is_IfFalse())  btest = BoolTest(btest).negate();
1810       if (cmp->in(1) == ykey)    btest = BoolTest(btest).commute();
1811       // At this point, we know that 'x btest y' is true.
1812       switch (btest) {
1813       case BoolTest::eq:
1814         // They are proven equal, so we can collapse the min/max.
1815         // Either value is the answer.  Choose the simpler.
1816         if (is_simple_name(yvalue) && !is_simple_name(xvalue))
1817           return yvalue;
1818         return xvalue;
1819       case BoolTest::lt:          // x < y
1820       case BoolTest::le:          // x <= y
1821         return (want_max ? yvalue : xvalue);
1822       case BoolTest::gt:          // x > y
1823       case BoolTest::ge:          // x >= y
1824         return (want_max ? xvalue : yvalue);
1825       }
1826     }
1827   }
1828 
1829   // We failed to find a dominating test.
1830   // Let's pick a test that might GVN with prior tests.
1831   Node*          best_bol   = NULL;
1832   BoolTest::mask best_btest = BoolTest::illegal;
1833   for (cmpn = 0; cmpn < NCMPS; cmpn++) {
1834     Node* cmp = cmps[cmpn];
1835     if (cmp == NULL)  continue;
1836     for (DUIterator_Fast jmax, j = cmp->fast_outs(jmax); j < jmax; j++) {
1837       Node* bol = cmp->fast_out(j);
1838       if (!bol->is_Bool())  continue;
1839       BoolTest::mask btest = bol->as_Bool()->_test._test;
1840       if (btest == BoolTest::eq || btest == BoolTest::ne)  continue;
1841       if (cmp->in(1) == ykey)   btest = BoolTest(btest).commute();
1842       if (bol->outcnt() > (best_bol == NULL ? 0 : best_bol->outcnt())) {
1843         best_bol   = bol->as_Bool();
1844         best_btest = btest;
1845       }
1846     }
1847   }
1848 
1849   Node* answer_if_true  = NULL;
1850   Node* answer_if_false = NULL;
1851   switch (best_btest) {
1852   default:
1853     if (cmpxy == NULL)
1854       cmpxy = ideal_cmpxy;
1855     best_bol = _gvn.transform( new(C, 2) BoolNode(cmpxy, BoolTest::lt) );
1856     // and fall through:
1857   case BoolTest::lt:          // x < y
1858   case BoolTest::le:          // x <= y
1859     answer_if_true  = (want_max ? yvalue : xvalue);
1860     answer_if_false = (want_max ? xvalue : yvalue);
1861     break;
1862   case BoolTest::gt:          // x > y
1863   case BoolTest::ge:          // x >= y
1864     answer_if_true  = (want_max ? xvalue : yvalue);
1865     answer_if_false = (want_max ? yvalue : xvalue);
1866     break;
1867   }
1868 
1869   jint hi, lo;
1870   if (want_max) {
1871     // We can sharpen the minimum.
1872     hi = MAX2(txvalue->_hi, tyvalue->_hi);
1873     lo = MAX2(txvalue->_lo, tyvalue->_lo);
1874   } else {
1875     // We can sharpen the maximum.
1876     hi = MIN2(txvalue->_hi, tyvalue->_hi);
1877     lo = MIN2(txvalue->_lo, tyvalue->_lo);
1878   }
1879 
1880   // Use a flow-free graph structure, to avoid creating excess control edges
1881   // which could hinder other optimizations.
1882   // Since Math.min/max is often used with arraycopy, we want
1883   // tightly_coupled_allocation to be able to see beyond min/max expressions.
1884   Node* cmov = CMoveNode::make(C, NULL, best_bol,
1885                                answer_if_false, answer_if_true,
1886                                TypeInt::make(lo, hi, widen));
1887 
1888   return _gvn.transform(cmov);
1889 
1890   /*
1891   // This is not as desirable as it may seem, since Min and Max
1892   // nodes do not have a full set of optimizations.
1893   // And they would interfere, anyway, with 'if' optimizations
1894   // and with CMoveI canonical forms.
1895   switch (id) {
1896   case vmIntrinsics::_min:
1897     result_val = _gvn.transform(new (C, 3) MinINode(x,y)); break;
1898   case vmIntrinsics::_max:
1899     result_val = _gvn.transform(new (C, 3) MaxINode(x,y)); break;
1900   default:
1901     ShouldNotReachHere();
1902   }
1903   */
1904 }
1905 
1906 inline int
1907 LibraryCallKit::classify_unsafe_addr(Node* &base, Node* &offset) {
1908   const TypePtr* base_type = TypePtr::NULL_PTR;
1909   if (base != NULL)  base_type = _gvn.type(base)->isa_ptr();
1910   if (base_type == NULL) {
1911     // Unknown type.
1912     return Type::AnyPtr;
1913   } else if (base_type == TypePtr::NULL_PTR) {
1914     // Since this is a NULL+long form, we have to switch to a rawptr.
1915     base   = _gvn.transform( new (C, 2) CastX2PNode(offset) );
1916     offset = MakeConX(0);
1917     return Type::RawPtr;
1918   } else if (base_type->base() == Type::RawPtr) {
1919     return Type::RawPtr;
1920   } else if (base_type->isa_oopptr()) {
1921     // Base is never null => always a heap address.
1922     if (base_type->ptr() == TypePtr::NotNull) {
1923       return Type::OopPtr;
1924     }
1925     // Offset is small => always a heap address.
1926     const TypeX* offset_type = _gvn.type(offset)->isa_intptr_t();
1927     if (offset_type != NULL &&
1928         base_type->offset() == 0 &&     // (should always be?)
1929         offset_type->_lo >= 0 &&
1930         !MacroAssembler::needs_explicit_null_check(offset_type->_hi)) {
1931       return Type::OopPtr;
1932     }
1933     // Otherwise, it might either be oop+off or NULL+addr.
1934     return Type::AnyPtr;
1935   } else {
1936     // No information:
1937     return Type::AnyPtr;
1938   }
1939 }
1940 
1941 inline Node* LibraryCallKit::make_unsafe_address(Node* base, Node* offset) {
1942   int kind = classify_unsafe_addr(base, offset);
1943   if (kind == Type::RawPtr) {
1944     return basic_plus_adr(top(), base, offset);
1945   } else {
1946     return basic_plus_adr(base, offset);
1947   }
1948 }
1949 
1950 //-------------------inline_numberOfLeadingZeros_int/long-----------------------
1951 // inline int Integer.numberOfLeadingZeros(int)
1952 // inline int Long.numberOfLeadingZeros(long)
1953 bool LibraryCallKit::inline_numberOfLeadingZeros(vmIntrinsics::ID id) {
1954   assert(id == vmIntrinsics::_numberOfLeadingZeros_i || id == vmIntrinsics::_numberOfLeadingZeros_l, "not numberOfLeadingZeros");
1955   if (id == vmIntrinsics::_numberOfLeadingZeros_i && !Matcher::match_rule_supported(Op_CountLeadingZerosI)) return false;
1956   if (id == vmIntrinsics::_numberOfLeadingZeros_l && !Matcher::match_rule_supported(Op_CountLeadingZerosL)) return false;
1957   _sp += arg_size();  // restore stack pointer
1958   switch (id) {
1959   case vmIntrinsics::_numberOfLeadingZeros_i:
1960     push(_gvn.transform(new (C, 2) CountLeadingZerosINode(pop())));
1961     break;
1962   case vmIntrinsics::_numberOfLeadingZeros_l:
1963     push(_gvn.transform(new (C, 2) CountLeadingZerosLNode(pop_pair())));
1964     break;
1965   default:
1966     ShouldNotReachHere();
1967   }
1968   return true;
1969 }
1970 
1971 //-------------------inline_numberOfTrailingZeros_int/long----------------------
1972 // inline int Integer.numberOfTrailingZeros(int)
1973 // inline int Long.numberOfTrailingZeros(long)
1974 bool LibraryCallKit::inline_numberOfTrailingZeros(vmIntrinsics::ID id) {
1975   assert(id == vmIntrinsics::_numberOfTrailingZeros_i || id == vmIntrinsics::_numberOfTrailingZeros_l, "not numberOfTrailingZeros");
1976   if (id == vmIntrinsics::_numberOfTrailingZeros_i && !Matcher::match_rule_supported(Op_CountTrailingZerosI)) return false;
1977   if (id == vmIntrinsics::_numberOfTrailingZeros_l && !Matcher::match_rule_supported(Op_CountTrailingZerosL)) return false;
1978   _sp += arg_size();  // restore stack pointer
1979   switch (id) {
1980   case vmIntrinsics::_numberOfTrailingZeros_i:
1981     push(_gvn.transform(new (C, 2) CountTrailingZerosINode(pop())));
1982     break;
1983   case vmIntrinsics::_numberOfTrailingZeros_l:
1984     push(_gvn.transform(new (C, 2) CountTrailingZerosLNode(pop_pair())));
1985     break;
1986   default:
1987     ShouldNotReachHere();
1988   }
1989   return true;
1990 }
1991 
1992 //----------------------------inline_bitCount_int/long-----------------------
1993 // inline int Integer.bitCount(int)
1994 // inline int Long.bitCount(long)
1995 bool LibraryCallKit::inline_bitCount(vmIntrinsics::ID id) {
1996   assert(id == vmIntrinsics::_bitCount_i || id == vmIntrinsics::_bitCount_l, "not bitCount");
1997   if (id == vmIntrinsics::_bitCount_i && !Matcher::has_match_rule(Op_PopCountI)) return false;
1998   if (id == vmIntrinsics::_bitCount_l && !Matcher::has_match_rule(Op_PopCountL)) return false;
1999   _sp += arg_size();  // restore stack pointer
2000   switch (id) {
2001   case vmIntrinsics::_bitCount_i:
2002     push(_gvn.transform(new (C, 2) PopCountINode(pop())));
2003     break;
2004   case vmIntrinsics::_bitCount_l:
2005     push(_gvn.transform(new (C, 2) PopCountLNode(pop_pair())));
2006     break;
2007   default:
2008     ShouldNotReachHere();
2009   }
2010   return true;
2011 }
2012 
2013 //----------------------------inline_reverseBytes_int/long-------------------
2014 // inline Integer.reverseBytes(int)
2015 // inline Long.reverseBytes(long)
2016 bool LibraryCallKit::inline_reverseBytes(vmIntrinsics::ID id) {
2017   assert(id == vmIntrinsics::_reverseBytes_i || id == vmIntrinsics::_reverseBytes_l, "not reverse Bytes");
2018   if (id == vmIntrinsics::_reverseBytes_i && !Matcher::has_match_rule(Op_ReverseBytesI)) return false;
2019   if (id == vmIntrinsics::_reverseBytes_l && !Matcher::has_match_rule(Op_ReverseBytesL)) return false;
2020   _sp += arg_size();        // restore stack pointer
2021   switch (id) {
2022   case vmIntrinsics::_reverseBytes_i:
2023     push(_gvn.transform(new (C, 2) ReverseBytesINode(0, pop())));
2024     break;
2025   case vmIntrinsics::_reverseBytes_l:
2026     push_pair(_gvn.transform(new (C, 2) ReverseBytesLNode(0, pop_pair())));
2027     break;
2028   default:
2029     ;
2030   }
2031   return true;
2032 }
2033 
2034 //----------------------------inline_unsafe_access----------------------------
2035 
2036 const static BasicType T_ADDRESS_HOLDER = T_LONG;
2037 
2038 // Interpret Unsafe.fieldOffset cookies correctly:
2039 extern jlong Unsafe_field_offset_to_byte_offset(jlong field_offset);
2040 
2041 bool LibraryCallKit::inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, bool is_volatile) {
2042   if (callee()->is_static())  return false;  // caller must have the capability!
2043 
2044 #ifndef PRODUCT
2045   {
2046     ResourceMark rm;
2047     // Check the signatures.
2048     ciSignature* sig = signature();
2049 #ifdef ASSERT
2050     if (!is_store) {
2051       // Object getObject(Object base, int/long offset), etc.
2052       BasicType rtype = sig->return_type()->basic_type();
2053       if (rtype == T_ADDRESS_HOLDER && callee()->name() == ciSymbol::getAddress_name())
2054           rtype = T_ADDRESS;  // it is really a C void*
2055       assert(rtype == type, "getter must return the expected value");
2056       if (!is_native_ptr) {
2057         assert(sig->count() == 2, "oop getter has 2 arguments");
2058         assert(sig->type_at(0)->basic_type() == T_OBJECT, "getter base is object");
2059         assert(sig->type_at(1)->basic_type() == T_LONG, "getter offset is correct");
2060       } else {
2061         assert(sig->count() == 1, "native getter has 1 argument");
2062         assert(sig->type_at(0)->basic_type() == T_LONG, "getter base is long");
2063       }
2064     } else {
2065       // void putObject(Object base, int/long offset, Object x), etc.
2066       assert(sig->return_type()->basic_type() == T_VOID, "putter must not return a value");
2067       if (!is_native_ptr) {
2068         assert(sig->count() == 3, "oop putter has 3 arguments");
2069         assert(sig->type_at(0)->basic_type() == T_OBJECT, "putter base is object");
2070         assert(sig->type_at(1)->basic_type() == T_LONG, "putter offset is correct");
2071       } else {
2072         assert(sig->count() == 2, "native putter has 2 arguments");
2073         assert(sig->type_at(0)->basic_type() == T_LONG, "putter base is long");
2074       }
2075       BasicType vtype = sig->type_at(sig->count()-1)->basic_type();
2076       if (vtype == T_ADDRESS_HOLDER && callee()->name() == ciSymbol::putAddress_name())
2077         vtype = T_ADDRESS;  // it is really a C void*
2078       assert(vtype == type, "putter must accept the expected value");
2079     }
2080 #endif // ASSERT
2081  }
2082 #endif //PRODUCT
2083 
2084   C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
2085 
2086   int type_words = type2size[ (type == T_ADDRESS) ? T_LONG : type ];
2087 
2088   // Argument words:  "this" plus (oop/offset) or (lo/hi) args plus maybe 1 or 2 value words
2089   int nargs = 1 + (is_native_ptr ? 2 : 3) + (is_store ? type_words : 0);
2090 
2091   debug_only(int saved_sp = _sp);
2092   _sp += nargs;
2093 
2094   Node* val;
2095   debug_only(val = (Node*)(uintptr_t)-1);
2096 
2097 
2098   if (is_store) {
2099     // Get the value being stored.  (Pop it first; it was pushed last.)
2100     switch (type) {
2101     case T_DOUBLE:
2102     case T_LONG:
2103     case T_ADDRESS:
2104       val = pop_pair();
2105       break;
2106     default:
2107       val = pop();
2108     }
2109   }
2110 
2111   // Build address expression.  See the code in inline_unsafe_prefetch.
2112   Node *adr;
2113   Node *heap_base_oop = top();
2114   if (!is_native_ptr) {
2115     // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
2116     Node* offset = pop_pair();
2117     // The base is either a Java object or a value produced by Unsafe.staticFieldBase
2118     Node* base   = pop();
2119     // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2120     // to be plain byte offsets, which are also the same as those accepted
2121     // by oopDesc::field_base.
2122     assert(Unsafe_field_offset_to_byte_offset(11) == 11,
2123            "fieldOffset must be byte-scaled");
2124     // 32-bit machines ignore the high half!
2125     offset = ConvL2X(offset);
2126     adr = make_unsafe_address(base, offset);
2127     heap_base_oop = base;
2128   } else {
2129     Node* ptr = pop_pair();
2130     // Adjust Java long to machine word:
2131     ptr = ConvL2X(ptr);
2132     adr = make_unsafe_address(NULL, ptr);
2133   }
2134 
2135   // Pop receiver last:  it was pushed first.
2136   Node *receiver = pop();
2137 
2138   assert(saved_sp == _sp, "must have correct argument count");
2139 
2140   const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
2141 
2142   // First guess at the value type.
2143   const Type *value_type = Type::get_const_basic_type(type);
2144 
2145   // Try to categorize the address.  If it comes up as TypeJavaPtr::BOTTOM,
2146   // there was not enough information to nail it down.
2147   Compile::AliasType* alias_type = C->alias_type(adr_type);
2148   assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2149 
2150   // We will need memory barriers unless we can determine a unique
2151   // alias category for this reference.  (Note:  If for some reason
2152   // the barriers get omitted and the unsafe reference begins to "pollute"
2153   // the alias analysis of the rest of the graph, either Compile::can_alias
2154   // or Compile::must_alias will throw a diagnostic assert.)
2155   bool need_mem_bar = (alias_type->adr_type() == TypeOopPtr::BOTTOM);
2156 
2157   if (!is_store && type == T_OBJECT) {
2158     // Attempt to infer a sharper value type from the offset and base type.
2159     ciKlass* sharpened_klass = NULL;
2160 
2161     // See if it is an instance field, with an object type.
2162     if (alias_type->field() != NULL) {
2163       assert(!is_native_ptr, "native pointer op cannot use a java address");
2164       if (alias_type->field()->type()->is_klass()) {
2165         sharpened_klass = alias_type->field()->type()->as_klass();
2166       }
2167     }
2168 
2169     // See if it is a narrow oop array.
2170     if (adr_type->isa_aryptr()) {
2171       if (adr_type->offset() >= objArrayOopDesc::base_offset_in_bytes()) {
2172         const TypeOopPtr *elem_type = adr_type->is_aryptr()->elem()->isa_oopptr();
2173         if (elem_type != NULL) {
2174           sharpened_klass = elem_type->klass();
2175         }
2176       }
2177     }
2178 
2179     if (sharpened_klass != NULL) {
2180       const TypeOopPtr* tjp = TypeOopPtr::make_from_klass(sharpened_klass);
2181 
2182       // Sharpen the value type.
2183       value_type = tjp;
2184 
2185 #ifndef PRODUCT
2186       if (PrintIntrinsics || PrintInlining || PrintOptoInlining) {
2187         tty->print("  from base type:  ");   adr_type->dump();
2188         tty->print("  sharpened value: "); value_type->dump();
2189       }
2190 #endif
2191     }
2192   }
2193 
2194   // Null check on self without removing any arguments.  The argument
2195   // null check technically happens in the wrong place, which can lead to
2196   // invalid stack traces when the primitive is inlined into a method
2197   // which handles NullPointerExceptions.
2198   _sp += nargs;
2199   do_null_check(receiver, T_OBJECT);
2200   _sp -= nargs;
2201   if (stopped()) {
2202     return true;
2203   }
2204   // Heap pointers get a null-check from the interpreter,
2205   // as a courtesy.  However, this is not guaranteed by Unsafe,
2206   // and it is not possible to fully distinguish unintended nulls
2207   // from intended ones in this API.
2208 
2209   if (is_volatile) {
2210     // We need to emit leading and trailing CPU membars (see below) in
2211     // addition to memory membars when is_volatile. This is a little
2212     // too strong, but avoids the need to insert per-alias-type
2213     // volatile membars (for stores; compare Parse::do_put_xxx), which
2214     // we cannot do effectively here because we probably only have a
2215     // rough approximation of type.
2216     need_mem_bar = true;
2217     // For Stores, place a memory ordering barrier now.
2218     if (is_store)
2219       insert_mem_bar(Op_MemBarRelease);
2220   }
2221 
2222   // Memory barrier to prevent normal and 'unsafe' accesses from
2223   // bypassing each other.  Happens after null checks, so the
2224   // exception paths do not take memory state from the memory barrier,
2225   // so there's no problems making a strong assert about mixing users
2226   // of safe & unsafe memory.  Otherwise fails in a CTW of rt.jar
2227   // around 5701, class sun/reflect/UnsafeBooleanFieldAccessorImpl.
2228   if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder);
2229 
2230   if (!is_store) {
2231     Node* p = make_load(control(), adr, value_type, type, adr_type, is_volatile);
2232     // load value and push onto stack
2233     switch (type) {
2234     case T_BOOLEAN:
2235     case T_CHAR:
2236     case T_BYTE:
2237     case T_SHORT:
2238     case T_INT:
2239     case T_FLOAT:
2240     case T_OBJECT:
2241       push( p );
2242       break;
2243     case T_ADDRESS:
2244       // Cast to an int type.
2245       p = _gvn.transform( new (C, 2) CastP2XNode(NULL,p) );
2246       p = ConvX2L(p);
2247       push_pair(p);
2248       break;
2249     case T_DOUBLE:
2250     case T_LONG:
2251       push_pair( p );
2252       break;
2253     default: ShouldNotReachHere();
2254     }
2255   } else {
2256     // place effect of store into memory
2257     switch (type) {
2258     case T_DOUBLE:
2259       val = dstore_rounding(val);
2260       break;
2261     case T_ADDRESS:
2262       // Repackage the long as a pointer.
2263       val = ConvL2X(val);
2264       val = _gvn.transform( new (C, 2) CastX2PNode(val) );
2265       break;
2266     }
2267 
2268     if (type != T_OBJECT ) {
2269       (void) store_to_memory(control(), adr, val, type, adr_type, is_volatile);
2270     } else {
2271       // Possibly an oop being stored to Java heap or native memory
2272       if (!TypePtr::NULL_PTR->higher_equal(_gvn.type(heap_base_oop))) {
2273         // oop to Java heap.
2274         (void) store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type);
2275       } else {
2276         // We can't tell at compile time if we are storing in the Java heap or outside
2277         // of it. So we need to emit code to conditionally do the proper type of
2278         // store.
2279 
2280         IdealKit ideal(gvn(), control(),  merged_memory());
2281 #define __ ideal.
2282         // QQQ who knows what probability is here??
2283         __ if_then(heap_base_oop, BoolTest::ne, null(), PROB_UNLIKELY(0.999)); {
2284           // Sync IdealKit and graphKit.
2285           set_all_memory( __ merged_memory());
2286           set_control(__ ctrl());
2287           Node* st = store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type);
2288           // Update IdealKit memory.
2289           __ set_all_memory(merged_memory());
2290           __ set_ctrl(control());
2291         } __ else_(); {
2292           __ store(__ ctrl(), adr, val, type, alias_type->index(), is_volatile);
2293         } __ end_if();
2294         // Final sync IdealKit and GraphKit.
2295         sync_kit(ideal);
2296 #undef __
2297       }
2298     }
2299   }
2300 
2301   if (is_volatile) {
2302     if (!is_store)
2303       insert_mem_bar(Op_MemBarAcquire);
2304     else
2305       insert_mem_bar(Op_MemBarVolatile);
2306   }
2307 
2308   if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder);
2309 
2310   return true;
2311 }
2312 
2313 //----------------------------inline_unsafe_prefetch----------------------------
2314 
2315 bool LibraryCallKit::inline_unsafe_prefetch(bool is_native_ptr, bool is_store, bool is_static) {
2316 #ifndef PRODUCT
2317   {
2318     ResourceMark rm;
2319     // Check the signatures.
2320     ciSignature* sig = signature();
2321 #ifdef ASSERT
2322     // Object getObject(Object base, int/long offset), etc.
2323     BasicType rtype = sig->return_type()->basic_type();
2324     if (!is_native_ptr) {
2325       assert(sig->count() == 2, "oop prefetch has 2 arguments");
2326       assert(sig->type_at(0)->basic_type() == T_OBJECT, "prefetch base is object");
2327       assert(sig->type_at(1)->basic_type() == T_LONG, "prefetcha offset is correct");
2328     } else {
2329       assert(sig->count() == 1, "native prefetch has 1 argument");
2330       assert(sig->type_at(0)->basic_type() == T_LONG, "prefetch base is long");
2331     }
2332 #endif // ASSERT
2333   }
2334 #endif // !PRODUCT
2335 
2336   C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
2337 
2338   // Argument words:  "this" if not static, plus (oop/offset) or (lo/hi) args
2339   int nargs = (is_static ? 0 : 1) + (is_native_ptr ? 2 : 3);
2340 
2341   debug_only(int saved_sp = _sp);
2342   _sp += nargs;
2343 
2344   // Build address expression.  See the code in inline_unsafe_access.
2345   Node *adr;
2346   if (!is_native_ptr) {
2347     // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
2348     Node* offset = pop_pair();
2349     // The base is either a Java object or a value produced by Unsafe.staticFieldBase
2350     Node* base   = pop();
2351     // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2352     // to be plain byte offsets, which are also the same as those accepted
2353     // by oopDesc::field_base.
2354     assert(Unsafe_field_offset_to_byte_offset(11) == 11,
2355            "fieldOffset must be byte-scaled");
2356     // 32-bit machines ignore the high half!
2357     offset = ConvL2X(offset);
2358     adr = make_unsafe_address(base, offset);
2359   } else {
2360     Node* ptr = pop_pair();
2361     // Adjust Java long to machine word:
2362     ptr = ConvL2X(ptr);
2363     adr = make_unsafe_address(NULL, ptr);
2364   }
2365 
2366   if (is_static) {
2367     assert(saved_sp == _sp, "must have correct argument count");
2368   } else {
2369     // Pop receiver last:  it was pushed first.
2370     Node *receiver = pop();
2371     assert(saved_sp == _sp, "must have correct argument count");
2372 
2373     // Null check on self without removing any arguments.  The argument
2374     // null check technically happens in the wrong place, which can lead to
2375     // invalid stack traces when the primitive is inlined into a method
2376     // which handles NullPointerExceptions.
2377     _sp += nargs;
2378     do_null_check(receiver, T_OBJECT);
2379     _sp -= nargs;
2380     if (stopped()) {
2381       return true;
2382     }
2383   }
2384 
2385   // Generate the read or write prefetch
2386   Node *prefetch;
2387   if (is_store) {
2388     prefetch = new (C, 3) PrefetchWriteNode(i_o(), adr);
2389   } else {
2390     prefetch = new (C, 3) PrefetchReadNode(i_o(), adr);
2391   }
2392   prefetch->init_req(0, control());
2393   set_i_o(_gvn.transform(prefetch));
2394 
2395   return true;
2396 }
2397 
2398 //----------------------------inline_unsafe_CAS----------------------------
2399 
2400 bool LibraryCallKit::inline_unsafe_CAS(BasicType type) {
2401   // This basic scheme here is the same as inline_unsafe_access, but
2402   // differs in enough details that combining them would make the code
2403   // overly confusing.  (This is a true fact! I originally combined
2404   // them, but even I was confused by it!) As much code/comments as
2405   // possible are retained from inline_unsafe_access though to make
2406   // the correspondences clearer. - dl
2407 
2408   if (callee()->is_static())  return false;  // caller must have the capability!
2409 
2410 #ifndef PRODUCT
2411   {
2412     ResourceMark rm;
2413     // Check the signatures.
2414     ciSignature* sig = signature();
2415 #ifdef ASSERT
2416     BasicType rtype = sig->return_type()->basic_type();
2417     assert(rtype == T_BOOLEAN, "CAS must return boolean");
2418     assert(sig->count() == 4, "CAS has 4 arguments");
2419     assert(sig->type_at(0)->basic_type() == T_OBJECT, "CAS base is object");
2420     assert(sig->type_at(1)->basic_type() == T_LONG, "CAS offset is long");
2421 #endif // ASSERT
2422   }
2423 #endif //PRODUCT
2424 
2425   // number of stack slots per value argument (1 or 2)
2426   int type_words = type2size[type];
2427 
2428   // Cannot inline wide CAS on machines that don't support it natively
2429   if (type2aelembytes(type) > BytesPerInt && !VM_Version::supports_cx8())
2430     return false;
2431 
2432   C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
2433 
2434   // Argument words:  "this" plus oop plus offset plus oldvalue plus newvalue;
2435   int nargs = 1 + 1 + 2  + type_words + type_words;
2436 
2437   // pop arguments: newval, oldval, offset, base, and receiver
2438   debug_only(int saved_sp = _sp);
2439   _sp += nargs;
2440   Node* newval   = (type_words == 1) ? pop() : pop_pair();
2441   Node* oldval   = (type_words == 1) ? pop() : pop_pair();
2442   Node *offset   = pop_pair();
2443   Node *base     = pop();
2444   Node *receiver = pop();
2445   assert(saved_sp == _sp, "must have correct argument count");
2446 
2447   //  Null check receiver.
2448   _sp += nargs;
2449   do_null_check(receiver, T_OBJECT);
2450   _sp -= nargs;
2451   if (stopped()) {
2452     return true;
2453   }
2454 
2455   // Build field offset expression.
2456   // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2457   // to be plain byte offsets, which are also the same as those accepted
2458   // by oopDesc::field_base.
2459   assert(Unsafe_field_offset_to_byte_offset(11) == 11, "fieldOffset must be byte-scaled");
2460   // 32-bit machines ignore the high half of long offsets
2461   offset = ConvL2X(offset);
2462   Node* adr = make_unsafe_address(base, offset);
2463   const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
2464 
2465   // (Unlike inline_unsafe_access, there seems no point in trying
2466   // to refine types. Just use the coarse types here.
2467   const Type *value_type = Type::get_const_basic_type(type);
2468   Compile::AliasType* alias_type = C->alias_type(adr_type);
2469   assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2470   int alias_idx = C->get_alias_index(adr_type);
2471 
2472   // Memory-model-wise, a CAS acts like a little synchronized block,
2473   // so needs barriers on each side.  These don't translate into
2474   // actual barriers on most machines, but we still need rest of
2475   // compiler to respect ordering.
2476 
2477   insert_mem_bar(Op_MemBarRelease);
2478   insert_mem_bar(Op_MemBarCPUOrder);
2479 
2480   // 4984716: MemBars must be inserted before this
2481   //          memory node in order to avoid a false
2482   //          dependency which will confuse the scheduler.
2483   Node *mem = memory(alias_idx);
2484 
2485   // For now, we handle only those cases that actually exist: ints,
2486   // longs, and Object. Adding others should be straightforward.
2487   Node* cas;
2488   switch(type) {
2489   case T_INT:
2490     cas = _gvn.transform(new (C, 5) CompareAndSwapINode(control(), mem, adr, newval, oldval));
2491     break;
2492   case T_LONG:
2493     cas = _gvn.transform(new (C, 5) CompareAndSwapLNode(control(), mem, adr, newval, oldval));
2494     break;
2495   case T_OBJECT:
2496      // reference stores need a store barrier.
2497     // (They don't if CAS fails, but it isn't worth checking.)
2498     pre_barrier(control(), base, adr, alias_idx, newval, value_type->make_oopptr(), T_OBJECT);
2499 #ifdef _LP64
2500     if (adr->bottom_type()->is_ptr_to_narrowoop()) {
2501       Node *newval_enc = _gvn.transform(new (C, 2) EncodePNode(newval, newval->bottom_type()->make_narrowoop()));
2502       Node *oldval_enc = _gvn.transform(new (C, 2) EncodePNode(oldval, oldval->bottom_type()->make_narrowoop()));
2503       cas = _gvn.transform(new (C, 5) CompareAndSwapNNode(control(), mem, adr,
2504                                                           newval_enc, oldval_enc));
2505     } else
2506 #endif
2507     {
2508       cas = _gvn.transform(new (C, 5) CompareAndSwapPNode(control(), mem, adr, newval, oldval));
2509     }
2510     post_barrier(control(), cas, base, adr, alias_idx, newval, T_OBJECT, true);
2511     break;
2512   default:
2513     ShouldNotReachHere();
2514     break;
2515   }
2516 
2517   // SCMemProjNodes represent the memory state of CAS. Their main
2518   // role is to prevent CAS nodes from being optimized away when their
2519   // results aren't used.
2520   Node* proj = _gvn.transform( new (C, 1) SCMemProjNode(cas));
2521   set_memory(proj, alias_idx);
2522 
2523   // Add the trailing membar surrounding the access
2524   insert_mem_bar(Op_MemBarCPUOrder);
2525   insert_mem_bar(Op_MemBarAcquire);
2526 
2527   push(cas);
2528   return true;
2529 }
2530 
2531 bool LibraryCallKit::inline_unsafe_ordered_store(BasicType type) {
2532   // This is another variant of inline_unsafe_access, differing in
2533   // that it always issues store-store ("release") barrier and ensures
2534   // store-atomicity (which only matters for "long").
2535 
2536   if (callee()->is_static())  return false;  // caller must have the capability!
2537 
2538 #ifndef PRODUCT
2539   {
2540     ResourceMark rm;
2541     // Check the signatures.
2542     ciSignature* sig = signature();
2543 #ifdef ASSERT
2544     BasicType rtype = sig->return_type()->basic_type();
2545     assert(rtype == T_VOID, "must return void");
2546     assert(sig->count() == 3, "has 3 arguments");
2547     assert(sig->type_at(0)->basic_type() == T_OBJECT, "base is object");
2548     assert(sig->type_at(1)->basic_type() == T_LONG, "offset is long");
2549 #endif // ASSERT
2550   }
2551 #endif //PRODUCT
2552 
2553   // number of stack slots per value argument (1 or 2)
2554   int type_words = type2size[type];
2555 
2556   C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
2557 
2558   // Argument words:  "this" plus oop plus offset plus value;
2559   int nargs = 1 + 1 + 2 + type_words;
2560 
2561   // pop arguments: val, offset, base, and receiver
2562   debug_only(int saved_sp = _sp);
2563   _sp += nargs;
2564   Node* val      = (type_words == 1) ? pop() : pop_pair();
2565   Node *offset   = pop_pair();
2566   Node *base     = pop();
2567   Node *receiver = pop();
2568   assert(saved_sp == _sp, "must have correct argument count");
2569 
2570   //  Null check receiver.
2571   _sp += nargs;
2572   do_null_check(receiver, T_OBJECT);
2573   _sp -= nargs;
2574   if (stopped()) {
2575     return true;
2576   }
2577 
2578   // Build field offset expression.
2579   assert(Unsafe_field_offset_to_byte_offset(11) == 11, "fieldOffset must be byte-scaled");
2580   // 32-bit machines ignore the high half of long offsets
2581   offset = ConvL2X(offset);
2582   Node* adr = make_unsafe_address(base, offset);
2583   const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
2584   const Type *value_type = Type::get_const_basic_type(type);
2585   Compile::AliasType* alias_type = C->alias_type(adr_type);
2586 
2587   insert_mem_bar(Op_MemBarRelease);
2588   insert_mem_bar(Op_MemBarCPUOrder);
2589   // Ensure that the store is atomic for longs:
2590   bool require_atomic_access = true;
2591   Node* store;
2592   if (type == T_OBJECT) // reference stores need a store barrier.
2593     store = store_oop_to_unknown(control(), base, adr, adr_type, val, type);
2594   else {
2595     store = store_to_memory(control(), adr, val, type, adr_type, require_atomic_access);
2596   }
2597   insert_mem_bar(Op_MemBarCPUOrder);
2598   return true;
2599 }
2600 
2601 bool LibraryCallKit::inline_unsafe_allocate() {
2602   if (callee()->is_static())  return false;  // caller must have the capability!
2603   int nargs = 1 + 1;
2604   assert(signature()->size() == nargs-1, "alloc has 1 argument");
2605   null_check_receiver(callee());  // check then ignore argument(0)
2606   _sp += nargs;  // set original stack for use by uncommon_trap
2607   Node* cls = do_null_check(argument(1), T_OBJECT);
2608   _sp -= nargs;
2609   if (stopped())  return true;
2610 
2611   Node* kls = load_klass_from_mirror(cls, false, nargs, NULL, 0);
2612   _sp += nargs;  // set original stack for use by uncommon_trap
2613   kls = do_null_check(kls, T_OBJECT);
2614   _sp -= nargs;
2615   if (stopped())  return true;  // argument was like int.class
2616 
2617   // Note:  The argument might still be an illegal value like
2618   // Serializable.class or Object[].class.   The runtime will handle it.
2619   // But we must make an explicit check for initialization.
2620   Node* insp = basic_plus_adr(kls, instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc));
2621   Node* inst = make_load(NULL, insp, TypeInt::INT, T_INT);
2622   Node* bits = intcon(instanceKlass::fully_initialized);
2623   Node* test = _gvn.transform( new (C, 3) SubINode(inst, bits) );
2624   // The 'test' is non-zero if we need to take a slow path.
2625 
2626   Node* obj = new_instance(kls, test);
2627   push(obj);
2628 
2629   return true;
2630 }
2631 
2632 //------------------------inline_native_time_funcs--------------
2633 // inline code for System.currentTimeMillis() and System.nanoTime()
2634 // these have the same type and signature
2635 bool LibraryCallKit::inline_native_time_funcs(bool isNano) {
2636   address funcAddr = isNano ? CAST_FROM_FN_PTR(address, os::javaTimeNanos) :
2637                               CAST_FROM_FN_PTR(address, os::javaTimeMillis);
2638   const char * funcName = isNano ? "nanoTime" : "currentTimeMillis";
2639   const TypeFunc *tf = OptoRuntime::current_time_millis_Type();
2640   const TypePtr* no_memory_effects = NULL;
2641   Node* time = make_runtime_call(RC_LEAF, tf, funcAddr, funcName, no_memory_effects);
2642   Node* value = _gvn.transform(new (C, 1) ProjNode(time, TypeFunc::Parms+0));
2643 #ifdef ASSERT
2644   Node* value_top = _gvn.transform(new (C, 1) ProjNode(time, TypeFunc::Parms + 1));
2645   assert(value_top == top(), "second value must be top");
2646 #endif
2647   push_pair(value);
2648   return true;
2649 }
2650 
2651 //------------------------inline_native_currentThread------------------
2652 bool LibraryCallKit::inline_native_currentThread() {
2653   Node* junk = NULL;
2654   push(generate_current_thread(junk));
2655   return true;
2656 }
2657 
2658 //------------------------inline_native_isInterrupted------------------
2659 bool LibraryCallKit::inline_native_isInterrupted() {
2660   const int nargs = 1+1;  // receiver + boolean
2661   assert(nargs == arg_size(), "sanity");
2662   // Add a fast path to t.isInterrupted(clear_int):
2663   //   (t == Thread.current() && (!TLS._osthread._interrupted || !clear_int))
2664   //   ? TLS._osthread._interrupted : /*slow path:*/ t.isInterrupted(clear_int)
2665   // So, in the common case that the interrupt bit is false,
2666   // we avoid making a call into the VM.  Even if the interrupt bit
2667   // is true, if the clear_int argument is false, we avoid the VM call.
2668   // However, if the receiver is not currentThread, we must call the VM,
2669   // because there must be some locking done around the operation.
2670 
2671   // We only go to the fast case code if we pass two guards.
2672   // Paths which do not pass are accumulated in the slow_region.
2673   RegionNode* slow_region = new (C, 1) RegionNode(1);
2674   record_for_igvn(slow_region);
2675   RegionNode* result_rgn = new (C, 4) RegionNode(1+3); // fast1, fast2, slow
2676   PhiNode*    result_val = new (C, 4) PhiNode(result_rgn, TypeInt::BOOL);
2677   enum { no_int_result_path   = 1,
2678          no_clear_result_path = 2,
2679          slow_result_path     = 3
2680   };
2681 
2682   // (a) Receiving thread must be the current thread.
2683   Node* rec_thr = argument(0);
2684   Node* tls_ptr = NULL;
2685   Node* cur_thr = generate_current_thread(tls_ptr);
2686   Node* cmp_thr = _gvn.transform( new (C, 3) CmpPNode(cur_thr, rec_thr) );
2687   Node* bol_thr = _gvn.transform( new (C, 2) BoolNode(cmp_thr, BoolTest::ne) );
2688 
2689   bool known_current_thread = (_gvn.type(bol_thr) == TypeInt::ZERO);
2690   if (!known_current_thread)
2691     generate_slow_guard(bol_thr, slow_region);
2692 
2693   // (b) Interrupt bit on TLS must be false.
2694   Node* p = basic_plus_adr(top()/*!oop*/, tls_ptr, in_bytes(JavaThread::osthread_offset()));
2695   Node* osthread = make_load(NULL, p, TypeRawPtr::NOTNULL, T_ADDRESS);
2696   p = basic_plus_adr(top()/*!oop*/, osthread, in_bytes(OSThread::interrupted_offset()));
2697   // Set the control input on the field _interrupted read to prevent it floating up.
2698   Node* int_bit = make_load(control(), p, TypeInt::BOOL, T_INT);
2699   Node* cmp_bit = _gvn.transform( new (C, 3) CmpINode(int_bit, intcon(0)) );
2700   Node* bol_bit = _gvn.transform( new (C, 2) BoolNode(cmp_bit, BoolTest::ne) );
2701 
2702   IfNode* iff_bit = create_and_map_if(control(), bol_bit, PROB_UNLIKELY_MAG(3), COUNT_UNKNOWN);
2703 
2704   // First fast path:  if (!TLS._interrupted) return false;
2705   Node* false_bit = _gvn.transform( new (C, 1) IfFalseNode(iff_bit) );
2706   result_rgn->init_req(no_int_result_path, false_bit);
2707   result_val->init_req(no_int_result_path, intcon(0));
2708 
2709   // drop through to next case
2710   set_control( _gvn.transform(new (C, 1) IfTrueNode(iff_bit)) );
2711 
2712   // (c) Or, if interrupt bit is set and clear_int is false, use 2nd fast path.
2713   Node* clr_arg = argument(1);
2714   Node* cmp_arg = _gvn.transform( new (C, 3) CmpINode(clr_arg, intcon(0)) );
2715   Node* bol_arg = _gvn.transform( new (C, 2) BoolNode(cmp_arg, BoolTest::ne) );
2716   IfNode* iff_arg = create_and_map_if(control(), bol_arg, PROB_FAIR, COUNT_UNKNOWN);
2717 
2718   // Second fast path:  ... else if (!clear_int) return true;
2719   Node* false_arg = _gvn.transform( new (C, 1) IfFalseNode(iff_arg) );
2720   result_rgn->init_req(no_clear_result_path, false_arg);
2721   result_val->init_req(no_clear_result_path, intcon(1));
2722 
2723   // drop through to next case
2724   set_control( _gvn.transform(new (C, 1) IfTrueNode(iff_arg)) );
2725 
2726   // (d) Otherwise, go to the slow path.
2727   slow_region->add_req(control());
2728   set_control( _gvn.transform(slow_region) );
2729 
2730   if (stopped()) {
2731     // There is no slow path.
2732     result_rgn->init_req(slow_result_path, top());
2733     result_val->init_req(slow_result_path, top());
2734   } else {
2735     // non-virtual because it is a private non-static
2736     CallJavaNode* slow_call = generate_method_call(vmIntrinsics::_isInterrupted);
2737 
2738     Node* slow_val = set_results_for_java_call(slow_call);
2739     // this->control() comes from set_results_for_java_call
2740 
2741     // If we know that the result of the slow call will be true, tell the optimizer!
2742     if (known_current_thread)  slow_val = intcon(1);
2743 
2744     Node* fast_io  = slow_call->in(TypeFunc::I_O);
2745     Node* fast_mem = slow_call->in(TypeFunc::Memory);
2746     // These two phis are pre-filled with copies of of the fast IO and Memory
2747     Node* io_phi   = PhiNode::make(result_rgn, fast_io,  Type::ABIO);
2748     Node* mem_phi  = PhiNode::make(result_rgn, fast_mem, Type::MEMORY, TypePtr::BOTTOM);
2749 
2750     result_rgn->init_req(slow_result_path, control());
2751     io_phi    ->init_req(slow_result_path, i_o());
2752     mem_phi   ->init_req(slow_result_path, reset_memory());
2753     result_val->init_req(slow_result_path, slow_val);
2754 
2755     set_all_memory( _gvn.transform(mem_phi) );
2756     set_i_o(        _gvn.transform(io_phi) );
2757   }
2758 
2759   push_result(result_rgn, result_val);
2760   C->set_has_split_ifs(true); // Has chance for split-if optimization
2761 
2762   return true;
2763 }
2764 
2765 //---------------------------load_mirror_from_klass----------------------------
2766 // Given a klass oop, load its java mirror (a java.lang.Class oop).
2767 Node* LibraryCallKit::load_mirror_from_klass(Node* klass) {
2768   Node* p = basic_plus_adr(klass, Klass::java_mirror_offset_in_bytes() + sizeof(oopDesc));
2769   return make_load(NULL, p, TypeInstPtr::MIRROR, T_OBJECT);
2770 }
2771 
2772 //-----------------------load_klass_from_mirror_common-------------------------
2773 // Given a java mirror (a java.lang.Class oop), load its corresponding klass oop.
2774 // Test the klass oop for null (signifying a primitive Class like Integer.TYPE),
2775 // and branch to the given path on the region.
2776 // If never_see_null, take an uncommon trap on null, so we can optimistically
2777 // compile for the non-null case.
2778 // If the region is NULL, force never_see_null = true.
2779 Node* LibraryCallKit::load_klass_from_mirror_common(Node* mirror,
2780                                                     bool never_see_null,
2781                                                     int nargs,
2782                                                     RegionNode* region,
2783                                                     int null_path,
2784                                                     int offset) {
2785   if (region == NULL)  never_see_null = true;
2786   Node* p = basic_plus_adr(mirror, offset);
2787   const TypeKlassPtr*  kls_type = TypeKlassPtr::OBJECT_OR_NULL;
2788   Node* kls = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeRawPtr::BOTTOM, kls_type) );
2789   _sp += nargs; // any deopt will start just before call to enclosing method
2790   Node* null_ctl = top();
2791   kls = null_check_oop(kls, &null_ctl, never_see_null);
2792   if (region != NULL) {
2793     // Set region->in(null_path) if the mirror is a primitive (e.g, int.class).
2794     region->init_req(null_path, null_ctl);
2795   } else {
2796     assert(null_ctl == top(), "no loose ends");
2797   }
2798   _sp -= nargs;
2799   return kls;
2800 }
2801 
2802 //--------------------(inline_native_Class_query helpers)---------------------
2803 // Use this for JVM_ACC_INTERFACE, JVM_ACC_IS_CLONEABLE, JVM_ACC_HAS_FINALIZER.
2804 // Fall through if (mods & mask) == bits, take the guard otherwise.
2805 Node* LibraryCallKit::generate_access_flags_guard(Node* kls, int modifier_mask, int modifier_bits, RegionNode* region) {
2806   // Branch around if the given klass has the given modifier bit set.
2807   // Like generate_guard, adds a new path onto the region.
2808   Node* modp = basic_plus_adr(kls, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc));
2809   Node* mods = make_load(NULL, modp, TypeInt::INT, T_INT);
2810   Node* mask = intcon(modifier_mask);
2811   Node* bits = intcon(modifier_bits);
2812   Node* mbit = _gvn.transform( new (C, 3) AndINode(mods, mask) );
2813   Node* cmp  = _gvn.transform( new (C, 3) CmpINode(mbit, bits) );
2814   Node* bol  = _gvn.transform( new (C, 2) BoolNode(cmp, BoolTest::ne) );
2815   return generate_fair_guard(bol, region);
2816 }
2817 Node* LibraryCallKit::generate_interface_guard(Node* kls, RegionNode* region) {
2818   return generate_access_flags_guard(kls, JVM_ACC_INTERFACE, 0, region);
2819 }
2820 
2821 //-------------------------inline_native_Class_query-------------------
2822 bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) {
2823   int nargs = 1+0;  // just the Class mirror, in most cases
2824   const Type* return_type = TypeInt::BOOL;
2825   Node* prim_return_value = top();  // what happens if it's a primitive class?
2826   bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
2827   bool expect_prim = false;     // most of these guys expect to work on refs
2828 
2829   enum { _normal_path = 1, _prim_path = 2, PATH_LIMIT };
2830 
2831   switch (id) {
2832   case vmIntrinsics::_isInstance:
2833     nargs = 1+1;  // the Class mirror, plus the object getting queried about
2834     // nothing is an instance of a primitive type
2835     prim_return_value = intcon(0);
2836     break;
2837   case vmIntrinsics::_getModifiers:
2838     prim_return_value = intcon(JVM_ACC_ABSTRACT | JVM_ACC_FINAL | JVM_ACC_PUBLIC);
2839     assert(is_power_of_2((int)JVM_ACC_WRITTEN_FLAGS+1), "change next line");
2840     return_type = TypeInt::make(0, JVM_ACC_WRITTEN_FLAGS, Type::WidenMin);
2841     break;
2842   case vmIntrinsics::_isInterface:
2843     prim_return_value = intcon(0);
2844     break;
2845   case vmIntrinsics::_isArray:
2846     prim_return_value = intcon(0);
2847     expect_prim = true;  // cf. ObjectStreamClass.getClassSignature
2848     break;
2849   case vmIntrinsics::_isPrimitive:
2850     prim_return_value = intcon(1);
2851     expect_prim = true;  // obviously
2852     break;
2853   case vmIntrinsics::_getSuperclass:
2854     prim_return_value = null();
2855     return_type = TypeInstPtr::MIRROR->cast_to_ptr_type(TypePtr::BotPTR);
2856     break;
2857   case vmIntrinsics::_getComponentType:
2858     prim_return_value = null();
2859     return_type = TypeInstPtr::MIRROR->cast_to_ptr_type(TypePtr::BotPTR);
2860     break;
2861   case vmIntrinsics::_getClassAccessFlags:
2862     prim_return_value = intcon(JVM_ACC_ABSTRACT | JVM_ACC_FINAL | JVM_ACC_PUBLIC);
2863     return_type = TypeInt::INT;  // not bool!  6297094
2864     break;
2865   default:
2866     ShouldNotReachHere();
2867   }
2868 
2869   Node* mirror =                      argument(0);
2870   Node* obj    = (nargs <= 1)? top(): argument(1);
2871 
2872   const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr();
2873   if (mirror_con == NULL)  return false;  // cannot happen?
2874 
2875 #ifndef PRODUCT
2876   if (PrintIntrinsics || PrintInlining || PrintOptoInlining) {
2877     ciType* k = mirror_con->java_mirror_type();
2878     if (k) {
2879       tty->print("Inlining %s on constant Class ", vmIntrinsics::name_at(intrinsic_id()));
2880       k->print_name();
2881       tty->cr();
2882     }
2883   }
2884 #endif
2885 
2886   // Null-check the mirror, and the mirror's klass ptr (in case it is a primitive).
2887   RegionNode* region = new (C, PATH_LIMIT) RegionNode(PATH_LIMIT);
2888   record_for_igvn(region);
2889   PhiNode* phi = new (C, PATH_LIMIT) PhiNode(region, return_type);
2890 
2891   // The mirror will never be null of Reflection.getClassAccessFlags, however
2892   // it may be null for Class.isInstance or Class.getModifiers. Throw a NPE
2893   // if it is. See bug 4774291.
2894 
2895   // For Reflection.getClassAccessFlags(), the null check occurs in
2896   // the wrong place; see inline_unsafe_access(), above, for a similar
2897   // situation.
2898   _sp += nargs;  // set original stack for use by uncommon_trap
2899   mirror = do_null_check(mirror, T_OBJECT);
2900   _sp -= nargs;
2901   // If mirror or obj is dead, only null-path is taken.
2902   if (stopped())  return true;
2903 
2904   if (expect_prim)  never_see_null = false;  // expect nulls (meaning prims)
2905 
2906   // Now load the mirror's klass metaobject, and null-check it.
2907   // Side-effects region with the control path if the klass is null.
2908   Node* kls = load_klass_from_mirror(mirror, never_see_null, nargs,
2909                                      region, _prim_path);
2910   // If kls is null, we have a primitive mirror.
2911   phi->init_req(_prim_path, prim_return_value);
2912   if (stopped()) { push_result(region, phi); return true; }
2913 
2914   Node* p;  // handy temp
2915   Node* null_ctl;
2916 
2917   // Now that we have the non-null klass, we can perform the real query.
2918   // For constant classes, the query will constant-fold in LoadNode::Value.
2919   Node* query_value = top();
2920   switch (id) {
2921   case vmIntrinsics::_isInstance:
2922     // nothing is an instance of a primitive type
2923     query_value = gen_instanceof(obj, kls);
2924     break;
2925 
2926   case vmIntrinsics::_getModifiers:
2927     p = basic_plus_adr(kls, Klass::modifier_flags_offset_in_bytes() + sizeof(oopDesc));
2928     query_value = make_load(NULL, p, TypeInt::INT, T_INT);
2929     break;
2930 
2931   case vmIntrinsics::_isInterface:
2932     // (To verify this code sequence, check the asserts in JVM_IsInterface.)
2933     if (generate_interface_guard(kls, region) != NULL)
2934       // A guard was added.  If the guard is taken, it was an interface.
2935       phi->add_req(intcon(1));
2936     // If we fall through, it's a plain class.
2937     query_value = intcon(0);
2938     break;
2939 
2940   case vmIntrinsics::_isArray:
2941     // (To verify this code sequence, check the asserts in JVM_IsArrayClass.)
2942     if (generate_array_guard(kls, region) != NULL)
2943       // A guard was added.  If the guard is taken, it was an array.
2944       phi->add_req(intcon(1));
2945     // If we fall through, it's a plain class.
2946     query_value = intcon(0);
2947     break;
2948 
2949   case vmIntrinsics::_isPrimitive:
2950     query_value = intcon(0); // "normal" path produces false
2951     break;
2952 
2953   case vmIntrinsics::_getSuperclass:
2954     // The rules here are somewhat unfortunate, but we can still do better
2955     // with random logic than with a JNI call.
2956     // Interfaces store null or Object as _super, but must report null.
2957     // Arrays store an intermediate super as _super, but must report Object.
2958     // Other types can report the actual _super.
2959     // (To verify this code sequence, check the asserts in JVM_IsInterface.)
2960     if (generate_interface_guard(kls, region) != NULL)
2961       // A guard was added.  If the guard is taken, it was an interface.
2962       phi->add_req(null());
2963     if (generate_array_guard(kls, region) != NULL)
2964       // A guard was added.  If the guard is taken, it was an array.
2965       phi->add_req(makecon(TypeInstPtr::make(env()->Object_klass()->java_mirror())));
2966     // If we fall through, it's a plain class.  Get its _super.
2967     p = basic_plus_adr(kls, Klass::super_offset_in_bytes() + sizeof(oopDesc));
2968     kls = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeRawPtr::BOTTOM, TypeKlassPtr::OBJECT_OR_NULL) );
2969     null_ctl = top();
2970     kls = null_check_oop(kls, &null_ctl);
2971     if (null_ctl != top()) {
2972       // If the guard is taken, Object.superClass is null (both klass and mirror).
2973       region->add_req(null_ctl);
2974       phi   ->add_req(null());
2975     }
2976     if (!stopped()) {
2977       query_value = load_mirror_from_klass(kls);
2978     }
2979     break;
2980 
2981   case vmIntrinsics::_getComponentType:
2982     if (generate_array_guard(kls, region) != NULL) {
2983       // Be sure to pin the oop load to the guard edge just created:
2984       Node* is_array_ctrl = region->in(region->req()-1);
2985       Node* cma = basic_plus_adr(kls, in_bytes(arrayKlass::component_mirror_offset()) + sizeof(oopDesc));
2986       Node* cmo = make_load(is_array_ctrl, cma, TypeInstPtr::MIRROR, T_OBJECT);
2987       phi->add_req(cmo);
2988     }
2989     query_value = null();  // non-array case is null
2990     break;
2991 
2992   case vmIntrinsics::_getClassAccessFlags:
2993     p = basic_plus_adr(kls, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc));
2994     query_value = make_load(NULL, p, TypeInt::INT, T_INT);
2995     break;
2996 
2997   default:
2998     ShouldNotReachHere();
2999   }
3000 
3001   // Fall-through is the normal case of a query to a real class.
3002   phi->init_req(1, query_value);
3003   region->init_req(1, control());
3004 
3005   push_result(region, phi);
3006   C->set_has_split_ifs(true); // Has chance for split-if optimization
3007 
3008   return true;
3009 }
3010 
3011 //--------------------------inline_native_subtype_check------------------------
3012 // This intrinsic takes the JNI calls out of the heart of
3013 // UnsafeFieldAccessorImpl.set, which improves Field.set, readObject, etc.
3014 bool LibraryCallKit::inline_native_subtype_check() {
3015   int nargs = 1+1;  // the Class mirror, plus the other class getting examined
3016 
3017   // Pull both arguments off the stack.
3018   Node* args[2];                // two java.lang.Class mirrors: superc, subc
3019   args[0] = argument(0);
3020   args[1] = argument(1);
3021   Node* klasses[2];             // corresponding Klasses: superk, subk
3022   klasses[0] = klasses[1] = top();
3023 
3024   enum {
3025     // A full decision tree on {superc is prim, subc is prim}:
3026     _prim_0_path = 1,           // {P,N} => false
3027                                 // {P,P} & superc!=subc => false
3028     _prim_same_path,            // {P,P} & superc==subc => true
3029     _prim_1_path,               // {N,P} => false
3030     _ref_subtype_path,          // {N,N} & subtype check wins => true
3031     _both_ref_path,             // {N,N} & subtype check loses => false
3032     PATH_LIMIT
3033   };
3034 
3035   RegionNode* region = new (C, PATH_LIMIT) RegionNode(PATH_LIMIT);
3036   Node*       phi    = new (C, PATH_LIMIT) PhiNode(region, TypeInt::BOOL);
3037   record_for_igvn(region);
3038 
3039   const TypePtr* adr_type = TypeRawPtr::BOTTOM;   // memory type of loads
3040   const TypeKlassPtr* kls_type = TypeKlassPtr::OBJECT_OR_NULL;
3041   int class_klass_offset = java_lang_Class::klass_offset_in_bytes();
3042 
3043   // First null-check both mirrors and load each mirror's klass metaobject.
3044   int which_arg;
3045   for (which_arg = 0; which_arg <= 1; which_arg++) {
3046     Node* arg = args[which_arg];
3047     _sp += nargs;  // set original stack for use by uncommon_trap
3048     arg = do_null_check(arg, T_OBJECT);
3049     _sp -= nargs;
3050     if (stopped())  break;
3051     args[which_arg] = _gvn.transform(arg);
3052 
3053     Node* p = basic_plus_adr(arg, class_klass_offset);
3054     Node* kls = LoadKlassNode::make(_gvn, immutable_memory(), p, adr_type, kls_type);
3055     klasses[which_arg] = _gvn.transform(kls);
3056   }
3057 
3058   // Having loaded both klasses, test each for null.
3059   bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
3060   for (which_arg = 0; which_arg <= 1; which_arg++) {
3061     Node* kls = klasses[which_arg];
3062     Node* null_ctl = top();
3063     _sp += nargs;  // set original stack for use by uncommon_trap
3064     kls = null_check_oop(kls, &null_ctl, never_see_null);
3065     _sp -= nargs;
3066     int prim_path = (which_arg == 0 ? _prim_0_path : _prim_1_path);
3067     region->init_req(prim_path, null_ctl);
3068     if (stopped())  break;
3069     klasses[which_arg] = kls;
3070   }
3071 
3072   if (!stopped()) {
3073     // now we have two reference types, in klasses[0..1]
3074     Node* subk   = klasses[1];  // the argument to isAssignableFrom
3075     Node* superk = klasses[0];  // the receiver
3076     region->set_req(_both_ref_path, gen_subtype_check(subk, superk));
3077     // now we have a successful reference subtype check
3078     region->set_req(_ref_subtype_path, control());
3079   }
3080 
3081   // If both operands are primitive (both klasses null), then
3082   // we must return true when they are identical primitives.
3083   // It is convenient to test this after the first null klass check.
3084   set_control(region->in(_prim_0_path)); // go back to first null check
3085   if (!stopped()) {
3086     // Since superc is primitive, make a guard for the superc==subc case.
3087     Node* cmp_eq = _gvn.transform( new (C, 3) CmpPNode(args[0], args[1]) );
3088     Node* bol_eq = _gvn.transform( new (C, 2) BoolNode(cmp_eq, BoolTest::eq) );
3089     generate_guard(bol_eq, region, PROB_FAIR);
3090     if (region->req() == PATH_LIMIT+1) {
3091       // A guard was added.  If the added guard is taken, superc==subc.
3092       region->swap_edges(PATH_LIMIT, _prim_same_path);
3093       region->del_req(PATH_LIMIT);
3094     }
3095     region->set_req(_prim_0_path, control()); // Not equal after all.
3096   }
3097 
3098   // these are the only paths that produce 'true':
3099   phi->set_req(_prim_same_path,   intcon(1));
3100   phi->set_req(_ref_subtype_path, intcon(1));
3101 
3102   // pull together the cases:
3103   assert(region->req() == PATH_LIMIT, "sane region");
3104   for (uint i = 1; i < region->req(); i++) {
3105     Node* ctl = region->in(i);
3106     if (ctl == NULL || ctl == top()) {
3107       region->set_req(i, top());
3108       phi   ->set_req(i, top());
3109     } else if (phi->in(i) == NULL) {
3110       phi->set_req(i, intcon(0)); // all other paths produce 'false'
3111     }
3112   }
3113 
3114   set_control(_gvn.transform(region));
3115   push(_gvn.transform(phi));
3116 
3117   return true;
3118 }
3119 
3120 //---------------------generate_array_guard_common------------------------
3121 Node* LibraryCallKit::generate_array_guard_common(Node* kls, RegionNode* region,
3122                                                   bool obj_array, bool not_array) {
3123   // If obj_array/non_array==false/false:
3124   // Branch around if the given klass is in fact an array (either obj or prim).
3125   // If obj_array/non_array==false/true:
3126   // Branch around if the given klass is not an array klass of any kind.
3127   // If obj_array/non_array==true/true:
3128   // Branch around if the kls is not an oop array (kls is int[], String, etc.)
3129   // If obj_array/non_array==true/false:
3130   // Branch around if the kls is an oop array (Object[] or subtype)
3131   //
3132   // Like generate_guard, adds a new path onto the region.
3133   jint  layout_con = 0;
3134   Node* layout_val = get_layout_helper(kls, layout_con);
3135   if (layout_val == NULL) {
3136     bool query = (obj_array
3137                   ? Klass::layout_helper_is_objArray(layout_con)
3138                   : Klass::layout_helper_is_javaArray(layout_con));
3139     if (query == not_array) {
3140       return NULL;                       // never a branch
3141     } else {                             // always a branch
3142       Node* always_branch = control();
3143       if (region != NULL)
3144         region->add_req(always_branch);
3145       set_control(top());
3146       return always_branch;
3147     }
3148   }
3149   // Now test the correct condition.
3150   jint  nval = (obj_array
3151                 ? ((jint)Klass::_lh_array_tag_type_value
3152                    <<    Klass::_lh_array_tag_shift)
3153                 : Klass::_lh_neutral_value);
3154   Node* cmp = _gvn.transform( new(C, 3) CmpINode(layout_val, intcon(nval)) );
3155   BoolTest::mask btest = BoolTest::lt;  // correct for testing is_[obj]array
3156   // invert the test if we are looking for a non-array
3157   if (not_array)  btest = BoolTest(btest).negate();
3158   Node* bol = _gvn.transform( new(C, 2) BoolNode(cmp, btest) );
3159   return generate_fair_guard(bol, region);
3160 }
3161 
3162 
3163 //-----------------------inline_native_newArray--------------------------
3164 bool LibraryCallKit::inline_native_newArray() {
3165   int nargs = 2;
3166   Node* mirror    = argument(0);
3167   Node* count_val = argument(1);
3168 
3169   _sp += nargs;  // set original stack for use by uncommon_trap
3170   mirror = do_null_check(mirror, T_OBJECT);
3171   _sp -= nargs;
3172   // If mirror or obj is dead, only null-path is taken.
3173   if (stopped())  return true;
3174 
3175   enum { _normal_path = 1, _slow_path = 2, PATH_LIMIT };
3176   RegionNode* result_reg = new(C, PATH_LIMIT) RegionNode(PATH_LIMIT);
3177   PhiNode*    result_val = new(C, PATH_LIMIT) PhiNode(result_reg,
3178                                                       TypeInstPtr::NOTNULL);
3179   PhiNode*    result_io  = new(C, PATH_LIMIT) PhiNode(result_reg, Type::ABIO);
3180   PhiNode*    result_mem = new(C, PATH_LIMIT) PhiNode(result_reg, Type::MEMORY,
3181                                                       TypePtr::BOTTOM);
3182 
3183   bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
3184   Node* klass_node = load_array_klass_from_mirror(mirror, never_see_null,
3185                                                   nargs,
3186                                                   result_reg, _slow_path);
3187   Node* normal_ctl   = control();
3188   Node* no_array_ctl = result_reg->in(_slow_path);
3189 
3190   // Generate code for the slow case.  We make a call to newArray().
3191   set_control(no_array_ctl);
3192   if (!stopped()) {
3193     // Either the input type is void.class, or else the
3194     // array klass has not yet been cached.  Either the
3195     // ensuing call will throw an exception, or else it
3196     // will cache the array klass for next time.
3197     PreserveJVMState pjvms(this);
3198     CallJavaNode* slow_call = generate_method_call_static(vmIntrinsics::_newArray);
3199     Node* slow_result = set_results_for_java_call(slow_call);
3200     // this->control() comes from set_results_for_java_call
3201     result_reg->set_req(_slow_path, control());
3202     result_val->set_req(_slow_path, slow_result);
3203     result_io ->set_req(_slow_path, i_o());
3204     result_mem->set_req(_slow_path, reset_memory());
3205   }
3206 
3207   set_control(normal_ctl);
3208   if (!stopped()) {
3209     // Normal case:  The array type has been cached in the java.lang.Class.
3210     // The following call works fine even if the array type is polymorphic.
3211     // It could be a dynamic mix of int[], boolean[], Object[], etc.
3212     Node* obj = new_array(klass_node, count_val, nargs);
3213     result_reg->init_req(_normal_path, control());
3214     result_val->init_req(_normal_path, obj);
3215     result_io ->init_req(_normal_path, i_o());
3216     result_mem->init_req(_normal_path, reset_memory());
3217   }
3218 
3219   // Return the combined state.
3220   set_i_o(        _gvn.transform(result_io)  );
3221   set_all_memory( _gvn.transform(result_mem) );
3222   push_result(result_reg, result_val);
3223   C->set_has_split_ifs(true); // Has chance for split-if optimization
3224 
3225   return true;
3226 }
3227 
3228 //----------------------inline_native_getLength--------------------------
3229 bool LibraryCallKit::inline_native_getLength() {
3230   if (too_many_traps(Deoptimization::Reason_intrinsic))  return false;
3231 
3232   int nargs = 1;
3233   Node* array = argument(0);
3234 
3235   _sp += nargs;  // set original stack for use by uncommon_trap
3236   array = do_null_check(array, T_OBJECT);
3237   _sp -= nargs;
3238 
3239   // If array is dead, only null-path is taken.
3240   if (stopped())  return true;
3241 
3242   // Deoptimize if it is a non-array.
3243   Node* non_array = generate_non_array_guard(load_object_klass(array), NULL);
3244 
3245   if (non_array != NULL) {
3246     PreserveJVMState pjvms(this);
3247     set_control(non_array);
3248     _sp += nargs;  // push the arguments back on the stack
3249     uncommon_trap(Deoptimization::Reason_intrinsic,
3250                   Deoptimization::Action_maybe_recompile);
3251   }
3252 
3253   // If control is dead, only non-array-path is taken.
3254   if (stopped())  return true;
3255 
3256   // The works fine even if the array type is polymorphic.
3257   // It could be a dynamic mix of int[], boolean[], Object[], etc.
3258   push( load_array_length(array) );
3259 
3260   C->set_has_split_ifs(true); // Has chance for split-if optimization
3261 
3262   return true;
3263 }
3264 
3265 //------------------------inline_array_copyOf----------------------------
3266 bool LibraryCallKit::inline_array_copyOf(bool is_copyOfRange) {
3267   if (too_many_traps(Deoptimization::Reason_intrinsic))  return false;
3268 
3269   // Restore the stack and pop off the arguments.
3270   int nargs = 3 + (is_copyOfRange? 1: 0);
3271   Node* original          = argument(0);
3272   Node* start             = is_copyOfRange? argument(1): intcon(0);
3273   Node* end               = is_copyOfRange? argument(2): argument(1);
3274   Node* array_type_mirror = is_copyOfRange? argument(3): argument(2);
3275 
3276   Node* newcopy;
3277 
3278   //set the original stack and the reexecute bit for the interpreter to reexecute
3279   //the bytecode that invokes Arrays.copyOf if deoptimization happens
3280   { PreserveReexecuteState preexecs(this);
3281     _sp += nargs;
3282     jvms()->set_should_reexecute(true);
3283 
3284     array_type_mirror = do_null_check(array_type_mirror, T_OBJECT);
3285     original          = do_null_check(original, T_OBJECT);
3286 
3287     // Check if a null path was taken unconditionally.
3288     if (stopped())  return true;
3289 
3290     Node* orig_length = load_array_length(original);
3291 
3292     Node* klass_node = load_klass_from_mirror(array_type_mirror, false, 0,
3293                                               NULL, 0);
3294     klass_node = do_null_check(klass_node, T_OBJECT);
3295 
3296     RegionNode* bailout = new (C, 1) RegionNode(1);
3297     record_for_igvn(bailout);
3298 
3299     // Despite the generic type of Arrays.copyOf, the mirror might be int, int[], etc.
3300     // Bail out if that is so.
3301     Node* not_objArray = generate_non_objArray_guard(klass_node, bailout);
3302     if (not_objArray != NULL) {
3303       // Improve the klass node's type from the new optimistic assumption:
3304       ciKlass* ak = ciArrayKlass::make(env()->Object_klass());
3305       const Type* akls = TypeKlassPtr::make(TypePtr::NotNull, ak, 0/*offset*/);
3306       Node* cast = new (C, 2) CastPPNode(klass_node, akls);
3307       cast->init_req(0, control());
3308       klass_node = _gvn.transform(cast);
3309     }
3310 
3311     // Bail out if either start or end is negative.
3312     generate_negative_guard(start, bailout, &start);
3313     generate_negative_guard(end,   bailout, &end);
3314 
3315     Node* length = end;
3316     if (_gvn.type(start) != TypeInt::ZERO) {
3317       length = _gvn.transform( new (C, 3) SubINode(end, start) );
3318     }
3319 
3320     // Bail out if length is negative.
3321     // ...Not needed, since the new_array will throw the right exception.
3322     //generate_negative_guard(length, bailout, &length);
3323 
3324     if (bailout->req() > 1) {
3325       PreserveJVMState pjvms(this);
3326       set_control( _gvn.transform(bailout) );
3327       uncommon_trap(Deoptimization::Reason_intrinsic,
3328                     Deoptimization::Action_maybe_recompile);
3329     }
3330 
3331     if (!stopped()) {
3332 
3333       // How many elements will we copy from the original?
3334       // The answer is MinI(orig_length - start, length).
3335       Node* orig_tail = _gvn.transform( new(C, 3) SubINode(orig_length, start) );
3336       Node* moved = generate_min_max(vmIntrinsics::_min, orig_tail, length);
3337 
3338       const bool raw_mem_only = true;
3339       newcopy = new_array(klass_node, length, 0, raw_mem_only);
3340 
3341       // Generate a direct call to the right arraycopy function(s).
3342       // We know the copy is disjoint but we might not know if the
3343       // oop stores need checking.
3344       // Extreme case:  Arrays.copyOf((Integer[])x, 10, String[].class).
3345       // This will fail a store-check if x contains any non-nulls.
3346       bool disjoint_bases = true;
3347       bool length_never_negative = true;
3348       generate_arraycopy(TypeAryPtr::OOPS, T_OBJECT,
3349                          original, start, newcopy, intcon(0), moved,
3350                          disjoint_bases, length_never_negative);
3351     }
3352   } //original reexecute and sp are set back here
3353 
3354   if(!stopped()) {
3355     push(newcopy);
3356   }
3357 
3358   C->set_has_split_ifs(true); // Has chance for split-if optimization
3359 
3360   return true;
3361 }
3362 
3363 
3364 //----------------------generate_virtual_guard---------------------------
3365 // Helper for hashCode and clone.  Peeks inside the vtable to avoid a call.
3366 Node* LibraryCallKit::generate_virtual_guard(Node* obj_klass,
3367                                              RegionNode* slow_region) {
3368   ciMethod* method = callee();
3369   int vtable_index = method->vtable_index();
3370   // Get the methodOop out of the appropriate vtable entry.
3371   int entry_offset  = (instanceKlass::vtable_start_offset() +
3372                      vtable_index*vtableEntry::size()) * wordSize +
3373                      vtableEntry::method_offset_in_bytes();
3374   Node* entry_addr  = basic_plus_adr(obj_klass, entry_offset);
3375   Node* target_call = make_load(NULL, entry_addr, TypeInstPtr::NOTNULL, T_OBJECT);
3376 
3377   // Compare the target method with the expected method (e.g., Object.hashCode).
3378   const TypeInstPtr* native_call_addr = TypeInstPtr::make(method);
3379 
3380   Node* native_call = makecon(native_call_addr);
3381   Node* chk_native  = _gvn.transform( new(C, 3) CmpPNode(target_call, native_call) );
3382   Node* test_native = _gvn.transform( new(C, 2) BoolNode(chk_native, BoolTest::ne) );
3383 
3384   return generate_slow_guard(test_native, slow_region);
3385 }
3386 
3387 //-----------------------generate_method_call----------------------------
3388 // Use generate_method_call to make a slow-call to the real
3389 // method if the fast path fails.  An alternative would be to
3390 // use a stub like OptoRuntime::slow_arraycopy_Java.
3391 // This only works for expanding the current library call,
3392 // not another intrinsic.  (E.g., don't use this for making an
3393 // arraycopy call inside of the copyOf intrinsic.)
3394 CallJavaNode*
3395 LibraryCallKit::generate_method_call(vmIntrinsics::ID method_id, bool is_virtual, bool is_static) {
3396   // When compiling the intrinsic method itself, do not use this technique.
3397   guarantee(callee() != C->method(), "cannot make slow-call to self");
3398 
3399   ciMethod* method = callee();
3400   // ensure the JVMS we have will be correct for this call
3401   guarantee(method_id == method->intrinsic_id(), "must match");
3402 
3403   const TypeFunc* tf = TypeFunc::make(method);
3404   int tfdc = tf->domain()->cnt();
3405   CallJavaNode* slow_call;
3406   if (is_static) {
3407     assert(!is_virtual, "");
3408     slow_call = new(C, tfdc) CallStaticJavaNode(tf,
3409                                 SharedRuntime::get_resolve_static_call_stub(),
3410                                 method, bci());
3411   } else if (is_virtual) {
3412     null_check_receiver(method);
3413     int vtable_index = methodOopDesc::invalid_vtable_index;
3414     if (UseInlineCaches) {
3415       // Suppress the vtable call
3416     } else {
3417       // hashCode and clone are not a miranda methods,
3418       // so the vtable index is fixed.
3419       // No need to use the linkResolver to get it.
3420        vtable_index = method->vtable_index();
3421     }
3422     slow_call = new(C, tfdc) CallDynamicJavaNode(tf,
3423                                 SharedRuntime::get_resolve_virtual_call_stub(),
3424                                 method, vtable_index, bci());
3425   } else {  // neither virtual nor static:  opt_virtual
3426     null_check_receiver(method);
3427     slow_call = new(C, tfdc) CallStaticJavaNode(tf,
3428                                 SharedRuntime::get_resolve_opt_virtual_call_stub(),
3429                                 method, bci());
3430     slow_call->set_optimized_virtual(true);
3431   }
3432   set_arguments_for_java_call(slow_call);
3433   set_edges_for_java_call(slow_call);
3434   return slow_call;
3435 }
3436 
3437 
3438 //------------------------------inline_native_hashcode--------------------
3439 // Build special case code for calls to hashCode on an object.
3440 bool LibraryCallKit::inline_native_hashcode(bool is_virtual, bool is_static) {
3441   assert(is_static == callee()->is_static(), "correct intrinsic selection");
3442   assert(!(is_virtual && is_static), "either virtual, special, or static");
3443 
3444   enum { _slow_path = 1, _fast_path, _null_path, PATH_LIMIT };
3445 
3446   RegionNode* result_reg = new(C, PATH_LIMIT) RegionNode(PATH_LIMIT);
3447   PhiNode*    result_val = new(C, PATH_LIMIT) PhiNode(result_reg,
3448                                                       TypeInt::INT);
3449   PhiNode*    result_io  = new(C, PATH_LIMIT) PhiNode(result_reg, Type::ABIO);
3450   PhiNode*    result_mem = new(C, PATH_LIMIT) PhiNode(result_reg, Type::MEMORY,
3451                                                       TypePtr::BOTTOM);
3452   Node* obj = NULL;
3453   if (!is_static) {
3454     // Check for hashing null object
3455     obj = null_check_receiver(callee());
3456     if (stopped())  return true;        // unconditionally null
3457     result_reg->init_req(_null_path, top());
3458     result_val->init_req(_null_path, top());
3459   } else {
3460     // Do a null check, and return zero if null.
3461     // System.identityHashCode(null) == 0
3462     obj = argument(0);
3463     Node* null_ctl = top();
3464     obj = null_check_oop(obj, &null_ctl);
3465     result_reg->init_req(_null_path, null_ctl);
3466     result_val->init_req(_null_path, _gvn.intcon(0));
3467   }
3468 
3469   // Unconditionally null?  Then return right away.
3470   if (stopped()) {
3471     set_control( result_reg->in(_null_path) );
3472     if (!stopped())
3473       push(      result_val ->in(_null_path) );
3474     return true;
3475   }
3476 
3477   // After null check, get the object's klass.
3478   Node* obj_klass = load_object_klass(obj);
3479 
3480   // This call may be virtual (invokevirtual) or bound (invokespecial).
3481   // For each case we generate slightly different code.
3482 
3483   // We only go to the fast case code if we pass a number of guards.  The
3484   // paths which do not pass are accumulated in the slow_region.
3485   RegionNode* slow_region = new (C, 1) RegionNode(1);
3486   record_for_igvn(slow_region);
3487 
3488   // If this is a virtual call, we generate a funny guard.  We pull out
3489   // the vtable entry corresponding to hashCode() from the target object.
3490   // If the target method which we are calling happens to be the native
3491   // Object hashCode() method, we pass the guard.  We do not need this
3492   // guard for non-virtual calls -- the caller is known to be the native
3493   // Object hashCode().
3494   if (is_virtual) {
3495     generate_virtual_guard(obj_klass, slow_region);
3496   }
3497 
3498   // Get the header out of the object, use LoadMarkNode when available
3499   Node* header_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
3500   Node* header = make_load(NULL, header_addr, TypeRawPtr::BOTTOM, T_ADDRESS);
3501   header = _gvn.transform( new (C, 2) CastP2XNode(NULL, header) );
3502 
3503   // Test the header to see if it is unlocked.
3504   Node *lock_mask      = _gvn.MakeConX(markOopDesc::biased_lock_mask_in_place);
3505   Node *lmasked_header = _gvn.transform( new (C, 3) AndXNode(header, lock_mask) );
3506   Node *unlocked_val   = _gvn.MakeConX(markOopDesc::unlocked_value);
3507   Node *chk_unlocked   = _gvn.transform( new (C, 3) CmpXNode( lmasked_header, unlocked_val));
3508   Node *test_unlocked  = _gvn.transform( new (C, 2) BoolNode( chk_unlocked, BoolTest::ne) );
3509 
3510   generate_slow_guard(test_unlocked, slow_region);
3511 
3512   // Get the hash value and check to see that it has been properly assigned.
3513   // We depend on hash_mask being at most 32 bits and avoid the use of
3514   // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
3515   // vm: see markOop.hpp.
3516   Node *hash_mask      = _gvn.intcon(markOopDesc::hash_mask);
3517   Node *hash_shift     = _gvn.intcon(markOopDesc::hash_shift);
3518   Node *hshifted_header= _gvn.transform( new (C, 3) URShiftXNode(header, hash_shift) );
3519   // This hack lets the hash bits live anywhere in the mark object now, as long
3520   // as the shift drops the relevant bits into the low 32 bits.  Note that
3521   // Java spec says that HashCode is an int so there's no point in capturing
3522   // an 'X'-sized hashcode (32 in 32-bit build or 64 in 64-bit build).
3523   hshifted_header      = ConvX2I(hshifted_header);
3524   Node *hash_val       = _gvn.transform( new (C, 3) AndINode(hshifted_header, hash_mask) );
3525 
3526   Node *no_hash_val    = _gvn.intcon(markOopDesc::no_hash);
3527   Node *chk_assigned   = _gvn.transform( new (C, 3) CmpINode( hash_val, no_hash_val));
3528   Node *test_assigned  = _gvn.transform( new (C, 2) BoolNode( chk_assigned, BoolTest::eq) );
3529 
3530   generate_slow_guard(test_assigned, slow_region);
3531 
3532   Node* init_mem = reset_memory();
3533   // fill in the rest of the null path:
3534   result_io ->init_req(_null_path, i_o());
3535   result_mem->init_req(_null_path, init_mem);
3536 
3537   result_val->init_req(_fast_path, hash_val);
3538   result_reg->init_req(_fast_path, control());
3539   result_io ->init_req(_fast_path, i_o());
3540   result_mem->init_req(_fast_path, init_mem);
3541 
3542   // Generate code for the slow case.  We make a call to hashCode().
3543   set_control(_gvn.transform(slow_region));
3544   if (!stopped()) {
3545     // No need for PreserveJVMState, because we're using up the present state.
3546     set_all_memory(init_mem);
3547     vmIntrinsics::ID hashCode_id = vmIntrinsics::_hashCode;
3548     if (is_static)   hashCode_id = vmIntrinsics::_identityHashCode;
3549     CallJavaNode* slow_call = generate_method_call(hashCode_id, is_virtual, is_static);
3550     Node* slow_result = set_results_for_java_call(slow_call);
3551     // this->control() comes from set_results_for_java_call
3552     result_reg->init_req(_slow_path, control());
3553     result_val->init_req(_slow_path, slow_result);
3554     result_io  ->set_req(_slow_path, i_o());
3555     result_mem ->set_req(_slow_path, reset_memory());
3556   }
3557 
3558   // Return the combined state.
3559   set_i_o(        _gvn.transform(result_io)  );
3560   set_all_memory( _gvn.transform(result_mem) );
3561   push_result(result_reg, result_val);
3562 
3563   return true;
3564 }
3565 
3566 //---------------------------inline_native_getClass----------------------------
3567 // Build special case code for calls to getClass on an object.
3568 bool LibraryCallKit::inline_native_getClass() {
3569   Node* obj = null_check_receiver(callee());
3570   if (stopped())  return true;
3571   push( load_mirror_from_klass(load_object_klass(obj)) );
3572   return true;
3573 }
3574 
3575 //-----------------inline_native_Reflection_getCallerClass---------------------
3576 // In the presence of deep enough inlining, getCallerClass() becomes a no-op.
3577 //
3578 // NOTE that this code must perform the same logic as
3579 // vframeStream::security_get_caller_frame in that it must skip
3580 // Method.invoke() and auxiliary frames.
3581 
3582 
3583 
3584 
3585 bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
3586   ciMethod*       method = callee();
3587 
3588 #ifndef PRODUCT
3589   if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
3590     tty->print_cr("Attempting to inline sun.reflect.Reflection.getCallerClass");
3591   }
3592 #endif
3593 
3594   debug_only(int saved_sp = _sp);
3595 
3596   // Argument words:  (int depth)
3597   int nargs = 1;
3598 
3599   _sp += nargs;
3600   Node* caller_depth_node = pop();
3601 
3602   assert(saved_sp == _sp, "must have correct argument count");
3603 
3604   // The depth value must be a constant in order for the runtime call
3605   // to be eliminated.
3606   const TypeInt* caller_depth_type = _gvn.type(caller_depth_node)->isa_int();
3607   if (caller_depth_type == NULL || !caller_depth_type->is_con()) {
3608 #ifndef PRODUCT
3609     if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
3610       tty->print_cr("  Bailing out because caller depth was not a constant");
3611     }
3612 #endif
3613     return false;
3614   }
3615   // Note that the JVM state at this point does not include the
3616   // getCallerClass() frame which we are trying to inline. The
3617   // semantics of getCallerClass(), however, are that the "first"
3618   // frame is the getCallerClass() frame, so we subtract one from the
3619   // requested depth before continuing. We don't inline requests of
3620   // getCallerClass(0).
3621   int caller_depth = caller_depth_type->get_con() - 1;
3622   if (caller_depth < 0) {
3623 #ifndef PRODUCT
3624     if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
3625       tty->print_cr("  Bailing out because caller depth was %d", caller_depth);
3626     }
3627 #endif
3628     return false;
3629   }
3630 
3631   if (!jvms()->has_method()) {
3632 #ifndef PRODUCT
3633     if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
3634       tty->print_cr("  Bailing out because intrinsic was inlined at top level");
3635     }
3636 #endif
3637     return false;
3638   }
3639   int _depth = jvms()->depth();  // cache call chain depth
3640 
3641   // Walk back up the JVM state to find the caller at the required
3642   // depth. NOTE that this code must perform the same logic as
3643   // vframeStream::security_get_caller_frame in that it must skip
3644   // Method.invoke() and auxiliary frames. Note also that depth is
3645   // 1-based (1 is the bottom of the inlining).
3646   int inlining_depth = _depth;
3647   JVMState* caller_jvms = NULL;
3648 
3649   if (inlining_depth > 0) {
3650     caller_jvms = jvms();
3651     assert(caller_jvms = jvms()->of_depth(inlining_depth), "inlining_depth == our depth");
3652     do {
3653       // The following if-tests should be performed in this order
3654       if (is_method_invoke_or_aux_frame(caller_jvms)) {
3655         // Skip a Method.invoke() or auxiliary frame
3656       } else if (caller_depth > 0) {
3657         // Skip real frame
3658         --caller_depth;
3659       } else {
3660         // We're done: reached desired caller after skipping.
3661         break;
3662       }
3663       caller_jvms = caller_jvms->caller();
3664       --inlining_depth;
3665     } while (inlining_depth > 0);
3666   }
3667 
3668   if (inlining_depth == 0) {
3669 #ifndef PRODUCT
3670     if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
3671       tty->print_cr("  Bailing out because caller depth (%d) exceeded inlining depth (%d)", caller_depth_type->get_con(), _depth);
3672       tty->print_cr("  JVM state at this point:");
3673       for (int i = _depth; i >= 1; i--) {
3674         tty->print_cr("   %d) %s", i, jvms()->of_depth(i)->method()->name()->as_utf8());
3675       }
3676     }
3677 #endif
3678     return false; // Reached end of inlining
3679   }
3680 
3681   // Acquire method holder as java.lang.Class
3682   ciInstanceKlass* caller_klass  = caller_jvms->method()->holder();
3683   ciInstance*      caller_mirror = caller_klass->java_mirror();
3684   // Push this as a constant
3685   push(makecon(TypeInstPtr::make(caller_mirror)));
3686 #ifndef PRODUCT
3687   if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
3688     tty->print_cr("  Succeeded: caller = %s.%s, caller depth = %d, depth = %d", caller_klass->name()->as_utf8(), caller_jvms->method()->name()->as_utf8(), caller_depth_type->get_con(), _depth);
3689     tty->print_cr("  JVM state at this point:");
3690     for (int i = _depth; i >= 1; i--) {
3691       tty->print_cr("   %d) %s", i, jvms()->of_depth(i)->method()->name()->as_utf8());
3692     }
3693   }
3694 #endif
3695   return true;
3696 }
3697 
3698 // Helper routine for above
3699 bool LibraryCallKit::is_method_invoke_or_aux_frame(JVMState* jvms) {
3700   // Is this the Method.invoke method itself?
3701   if (jvms->method()->intrinsic_id() == vmIntrinsics::_invoke)
3702     return true;
3703 
3704   // Is this a helper, defined somewhere underneath MethodAccessorImpl.
3705   ciKlass* k = jvms->method()->holder();
3706   if (k->is_instance_klass()) {
3707     ciInstanceKlass* ik = k->as_instance_klass();
3708     for (; ik != NULL; ik = ik->super()) {
3709       if (ik->name() == ciSymbol::sun_reflect_MethodAccessorImpl() &&
3710           ik == env()->find_system_klass(ik->name())) {
3711         return true;
3712       }
3713     }
3714   }
3715 
3716   return false;
3717 }
3718 
3719 static int value_field_offset = -1;  // offset of the "value" field of AtomicLongCSImpl.  This is needed by
3720                                      // inline_native_AtomicLong_attemptUpdate() but it has no way of
3721                                      // computing it since there is no lookup field by name function in the
3722                                      // CI interface.  This is computed and set by inline_native_AtomicLong_get().
3723                                      // Using a static variable here is safe even if we have multiple compilation
3724                                      // threads because the offset is constant.  At worst the same offset will be
3725                                      // computed and  stored multiple
3726 
3727 bool LibraryCallKit::inline_native_AtomicLong_get() {
3728   // Restore the stack and pop off the argument
3729   _sp+=1;
3730   Node *obj = pop();
3731 
3732   // get the offset of the "value" field. Since the CI interfaces
3733   // does not provide a way to look up a field by name, we scan the bytecodes
3734   // to get the field index.  We expect the first 2 instructions of the method
3735   // to be:
3736   //    0 aload_0
3737   //    1 getfield "value"
3738   ciMethod* method = callee();
3739   if (value_field_offset == -1)
3740   {
3741     ciField* value_field;
3742     ciBytecodeStream iter(method);
3743     Bytecodes::Code bc = iter.next();
3744 
3745     if ((bc != Bytecodes::_aload_0) &&
3746               ((bc != Bytecodes::_aload) || (iter.get_index() != 0)))
3747       return false;
3748     bc = iter.next();
3749     if (bc != Bytecodes::_getfield)
3750       return false;
3751     bool ignore;
3752     value_field = iter.get_field(ignore);
3753     value_field_offset = value_field->offset_in_bytes();
3754   }
3755 
3756   // Null check without removing any arguments.
3757   _sp++;
3758   obj = do_null_check(obj, T_OBJECT);
3759   _sp--;
3760   // Check for locking null object
3761   if (stopped()) return true;
3762 
3763   Node *adr = basic_plus_adr(obj, obj, value_field_offset);
3764   const TypePtr *adr_type = _gvn.type(adr)->is_ptr();
3765   int alias_idx = C->get_alias_index(adr_type);
3766 
3767   Node *result = _gvn.transform(new (C, 3) LoadLLockedNode(control(), memory(alias_idx), adr));
3768 
3769   push_pair(result);
3770 
3771   return true;
3772 }
3773 
3774 bool LibraryCallKit::inline_native_AtomicLong_attemptUpdate() {
3775   // Restore the stack and pop off the arguments
3776   _sp+=5;
3777   Node *newVal = pop_pair();
3778   Node *oldVal = pop_pair();
3779   Node *obj = pop();
3780 
3781   // we need the offset of the "value" field which was computed when
3782   // inlining the get() method.  Give up if we don't have it.
3783   if (value_field_offset == -1)
3784     return false;
3785 
3786   // Null check without removing any arguments.
3787   _sp+=5;
3788   obj = do_null_check(obj, T_OBJECT);
3789   _sp-=5;
3790   // Check for locking null object
3791   if (stopped()) return true;
3792 
3793   Node *adr = basic_plus_adr(obj, obj, value_field_offset);
3794   const TypePtr *adr_type = _gvn.type(adr)->is_ptr();
3795   int alias_idx = C->get_alias_index(adr_type);
3796 
3797   Node *cas = _gvn.transform(new (C, 5) StoreLConditionalNode(control(), memory(alias_idx), adr, newVal, oldVal));
3798   Node *store_proj = _gvn.transform( new (C, 1) SCMemProjNode(cas));
3799   set_memory(store_proj, alias_idx);
3800   Node *bol = _gvn.transform( new (C, 2) BoolNode( cas, BoolTest::eq ) );
3801 
3802   Node *result;
3803   // CMove node is not used to be able fold a possible check code
3804   // after attemptUpdate() call. This code could be transformed
3805   // into CMove node by loop optimizations.
3806   {
3807     RegionNode *r = new (C, 3) RegionNode(3);
3808     result = new (C, 3) PhiNode(r, TypeInt::BOOL);
3809 
3810     Node *iff = create_and_xform_if(control(), bol, PROB_FAIR, COUNT_UNKNOWN);
3811     Node *iftrue = opt_iff(r, iff);
3812     r->init_req(1, iftrue);
3813     result->init_req(1, intcon(1));
3814     result->init_req(2, intcon(0));
3815 
3816     set_control(_gvn.transform(r));
3817     record_for_igvn(r);
3818 
3819     C->set_has_split_ifs(true); // Has chance for split-if optimization
3820   }
3821 
3822   push(_gvn.transform(result));
3823   return true;
3824 }
3825 
3826 bool LibraryCallKit::inline_fp_conversions(vmIntrinsics::ID id) {
3827   // restore the arguments
3828   _sp += arg_size();
3829 
3830   switch (id) {
3831   case vmIntrinsics::_floatToRawIntBits:
3832     push(_gvn.transform( new (C, 2) MoveF2INode(pop())));
3833     break;
3834 
3835   case vmIntrinsics::_intBitsToFloat:
3836     push(_gvn.transform( new (C, 2) MoveI2FNode(pop())));
3837     break;
3838 
3839   case vmIntrinsics::_doubleToRawLongBits:
3840     push_pair(_gvn.transform( new (C, 2) MoveD2LNode(pop_pair())));
3841     break;
3842 
3843   case vmIntrinsics::_longBitsToDouble:
3844     push_pair(_gvn.transform( new (C, 2) MoveL2DNode(pop_pair())));
3845     break;
3846 
3847   case vmIntrinsics::_doubleToLongBits: {
3848     Node* value = pop_pair();
3849 
3850     // two paths (plus control) merge in a wood
3851     RegionNode *r = new (C, 3) RegionNode(3);
3852     Node *phi = new (C, 3) PhiNode(r, TypeLong::LONG);
3853 
3854     Node *cmpisnan = _gvn.transform( new (C, 3) CmpDNode(value, value));
3855     // Build the boolean node
3856     Node *bolisnan = _gvn.transform( new (C, 2) BoolNode( cmpisnan, BoolTest::ne ) );
3857 
3858     // Branch either way.
3859     // NaN case is less traveled, which makes all the difference.
3860     IfNode *ifisnan = create_and_xform_if(control(), bolisnan, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
3861     Node *opt_isnan = _gvn.transform(ifisnan);
3862     assert( opt_isnan->is_If(), "Expect an IfNode");
3863     IfNode *opt_ifisnan = (IfNode*)opt_isnan;
3864     Node *iftrue = _gvn.transform( new (C, 1) IfTrueNode(opt_ifisnan) );
3865 
3866     set_control(iftrue);
3867 
3868     static const jlong nan_bits = CONST64(0x7ff8000000000000);
3869     Node *slow_result = longcon(nan_bits); // return NaN
3870     phi->init_req(1, _gvn.transform( slow_result ));
3871     r->init_req(1, iftrue);
3872 
3873     // Else fall through
3874     Node *iffalse = _gvn.transform( new (C, 1) IfFalseNode(opt_ifisnan) );
3875     set_control(iffalse);
3876 
3877     phi->init_req(2, _gvn.transform( new (C, 2) MoveD2LNode(value)));
3878     r->init_req(2, iffalse);
3879 
3880     // Post merge
3881     set_control(_gvn.transform(r));
3882     record_for_igvn(r);
3883 
3884     Node* result = _gvn.transform(phi);
3885     assert(result->bottom_type()->isa_long(), "must be");
3886     push_pair(result);
3887 
3888     C->set_has_split_ifs(true); // Has chance for split-if optimization
3889 
3890     break;
3891   }
3892 
3893   case vmIntrinsics::_floatToIntBits: {
3894     Node* value = pop();
3895 
3896     // two paths (plus control) merge in a wood
3897     RegionNode *r = new (C, 3) RegionNode(3);
3898     Node *phi = new (C, 3) PhiNode(r, TypeInt::INT);
3899 
3900     Node *cmpisnan = _gvn.transform( new (C, 3) CmpFNode(value, value));
3901     // Build the boolean node
3902     Node *bolisnan = _gvn.transform( new (C, 2) BoolNode( cmpisnan, BoolTest::ne ) );
3903 
3904     // Branch either way.
3905     // NaN case is less traveled, which makes all the difference.
3906     IfNode *ifisnan = create_and_xform_if(control(), bolisnan, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
3907     Node *opt_isnan = _gvn.transform(ifisnan);
3908     assert( opt_isnan->is_If(), "Expect an IfNode");
3909     IfNode *opt_ifisnan = (IfNode*)opt_isnan;
3910     Node *iftrue = _gvn.transform( new (C, 1) IfTrueNode(opt_ifisnan) );
3911 
3912     set_control(iftrue);
3913 
3914     static const jint nan_bits = 0x7fc00000;
3915     Node *slow_result = makecon(TypeInt::make(nan_bits)); // return NaN
3916     phi->init_req(1, _gvn.transform( slow_result ));
3917     r->init_req(1, iftrue);
3918 
3919     // Else fall through
3920     Node *iffalse = _gvn.transform( new (C, 1) IfFalseNode(opt_ifisnan) );
3921     set_control(iffalse);
3922 
3923     phi->init_req(2, _gvn.transform( new (C, 2) MoveF2INode(value)));
3924     r->init_req(2, iffalse);
3925 
3926     // Post merge
3927     set_control(_gvn.transform(r));
3928     record_for_igvn(r);
3929 
3930     Node* result = _gvn.transform(phi);
3931     assert(result->bottom_type()->isa_int(), "must be");
3932     push(result);
3933 
3934     C->set_has_split_ifs(true); // Has chance for split-if optimization
3935 
3936     break;
3937   }
3938 
3939   default:
3940     ShouldNotReachHere();
3941   }
3942 
3943   return true;
3944 }
3945 
3946 #ifdef _LP64
3947 #define XTOP ,top() /*additional argument*/
3948 #else  //_LP64
3949 #define XTOP        /*no additional argument*/
3950 #endif //_LP64
3951 
3952 //----------------------inline_unsafe_copyMemory-------------------------
3953 bool LibraryCallKit::inline_unsafe_copyMemory() {
3954   if (callee()->is_static())  return false;  // caller must have the capability!
3955   int nargs = 1 + 5 + 3;  // 5 args:  (src: ptr,off, dst: ptr,off, size)
3956   assert(signature()->size() == nargs-1, "copy has 5 arguments");
3957   null_check_receiver(callee());  // check then ignore argument(0)
3958   if (stopped())  return true;
3959 
3960   C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
3961 
3962   Node* src_ptr = argument(1);
3963   Node* src_off = ConvL2X(argument(2));
3964   assert(argument(3)->is_top(), "2nd half of long");
3965   Node* dst_ptr = argument(4);
3966   Node* dst_off = ConvL2X(argument(5));
3967   assert(argument(6)->is_top(), "2nd half of long");
3968   Node* size    = ConvL2X(argument(7));
3969   assert(argument(8)->is_top(), "2nd half of long");
3970 
3971   assert(Unsafe_field_offset_to_byte_offset(11) == 11,
3972          "fieldOffset must be byte-scaled");
3973 
3974   Node* src = make_unsafe_address(src_ptr, src_off);
3975   Node* dst = make_unsafe_address(dst_ptr, dst_off);
3976 
3977   // Conservatively insert a memory barrier on all memory slices.
3978   // Do not let writes of the copy source or destination float below the copy.
3979   insert_mem_bar(Op_MemBarCPUOrder);
3980 
3981   // Call it.  Note that the length argument is not scaled.
3982   make_runtime_call(RC_LEAF|RC_NO_FP,
3983                     OptoRuntime::fast_arraycopy_Type(),
3984                     StubRoutines::unsafe_arraycopy(),
3985                     "unsafe_arraycopy",
3986                     TypeRawPtr::BOTTOM,
3987                     src, dst, size XTOP);
3988 
3989   // Do not let reads of the copy destination float above the copy.
3990   insert_mem_bar(Op_MemBarCPUOrder);
3991 
3992   return true;
3993 }
3994 
3995 //------------------------clone_coping-----------------------------------
3996 // Helper function for inline_native_clone.
3997 void LibraryCallKit::copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array, bool card_mark) {
3998   assert(obj_size != NULL, "");
3999   Node* raw_obj = alloc_obj->in(1);
4000   assert(alloc_obj->is_CheckCastPP() && raw_obj->is_Proj() && raw_obj->in(0)->is_Allocate(), "");
4001   assert(alloc_obj->as_CheckCastPP()->type() != TypeInstPtr::NOTNULL, "should be more precise than Object");
4002 
4003   if (ReduceBulkZeroing) {
4004     // We will be completely responsible for initializing this object -
4005     // mark Initialize node as complete.
4006     AllocateNode* alloc = AllocateNode::Ideal_allocation(alloc_obj, &_gvn);
4007     // The object was just allocated - there should be no any stores!
4008     guarantee(alloc != NULL && alloc->maybe_set_complete(&_gvn), "");
4009   }
4010 
4011   // Cast to Object for arraycopy.
4012   // We can't use the original CheckCastPP since it should be moved
4013   // after the arraycopy to prevent stores flowing above it.
4014   Node* new_obj = new(C, 2) CheckCastPPNode(alloc_obj->in(0), raw_obj,
4015                                             TypeInstPtr::NOTNULL);
4016   new_obj = _gvn.transform(new_obj);
4017   // Substitute in the locally valid dest_oop.
4018   replace_in_map(alloc_obj, new_obj);
4019 
4020   // Copy the fastest available way.
4021   // TODO: generate fields copies for small objects instead.
4022   Node* src  = obj;
4023   Node* dest = new_obj;
4024   Node* size = _gvn.transform(obj_size);
4025 
4026   // Exclude the header but include array length to copy by 8 bytes words.
4027   // Can't use base_offset_in_bytes(bt) since basic type is unknown.
4028   int base_off = is_array ? arrayOopDesc::length_offset_in_bytes() :
4029                             instanceOopDesc::base_offset_in_bytes();
4030   // base_off:
4031   // 8  - 32-bit VM
4032   // 12 - 64-bit VM, compressed oops
4033   // 16 - 64-bit VM, normal oops
4034   if (base_off % BytesPerLong != 0) {
4035     assert(UseCompressedOops, "");
4036     if (is_array) {
4037       // Exclude length to copy by 8 bytes words.
4038       base_off += sizeof(int);
4039     } else {
4040       // Include klass to copy by 8 bytes words.
4041       base_off = instanceOopDesc::klass_offset_in_bytes();
4042     }
4043     assert(base_off % BytesPerLong == 0, "expect 8 bytes alignment");
4044   }
4045   src  = basic_plus_adr(src,  base_off);
4046   dest = basic_plus_adr(dest, base_off);
4047 
4048   // Compute the length also, if needed:
4049   Node* countx = size;
4050   countx = _gvn.transform( new (C, 3) SubXNode(countx, MakeConX(base_off)) );
4051   countx = _gvn.transform( new (C, 3) URShiftXNode(countx, intcon(LogBytesPerLong) ));
4052 
4053   const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
4054   bool disjoint_bases = true;
4055   generate_unchecked_arraycopy(raw_adr_type, T_LONG, disjoint_bases,
4056                                src, NULL, dest, NULL, countx);
4057 
4058   // If necessary, emit some card marks afterwards.  (Non-arrays only.)
4059   if (card_mark) {
4060     assert(!is_array, "");
4061     // Put in store barrier for any and all oops we are sticking
4062     // into this object.  (We could avoid this if we could prove
4063     // that the object type contains no oop fields at all.)
4064     Node* no_particular_value = NULL;
4065     Node* no_particular_field = NULL;
4066     int raw_adr_idx = Compile::AliasIdxRaw;
4067     post_barrier(control(),
4068                  memory(raw_adr_type),
4069                  new_obj,
4070                  no_particular_field,
4071                  raw_adr_idx,
4072                  no_particular_value,
4073                  T_OBJECT,
4074                  false);
4075   }
4076 
4077   // Move the original CheckCastPP after arraycopy.
4078   _gvn.hash_delete(alloc_obj);
4079   alloc_obj->set_req(0, control());
4080   // Replace raw memory edge with new CheckCastPP to have a live oop
4081   // at safepoints instead of raw value.
4082   assert(new_obj->is_CheckCastPP() && new_obj->in(1) == alloc_obj->in(1), "sanity");
4083   alloc_obj->set_req(1, new_obj);    // cast to the original type
4084   _gvn.hash_find_insert(alloc_obj);  // put back into GVN table
4085   // Restore in the locally valid dest_oop.
4086   replace_in_map(new_obj, alloc_obj);
4087 }
4088 
4089 //------------------------inline_native_clone----------------------------
4090 // Here are the simple edge cases:
4091 //  null receiver => normal trap
4092 //  virtual and clone was overridden => slow path to out-of-line clone
4093 //  not cloneable or finalizer => slow path to out-of-line Object.clone
4094 //
4095 // The general case has two steps, allocation and copying.
4096 // Allocation has two cases, and uses GraphKit::new_instance or new_array.
4097 //
4098 // Copying also has two cases, oop arrays and everything else.
4099 // Oop arrays use arrayof_oop_arraycopy (same as System.arraycopy).
4100 // Everything else uses the tight inline loop supplied by CopyArrayNode.
4101 //
4102 // These steps fold up nicely if and when the cloned object's klass
4103 // can be sharply typed as an object array, a type array, or an instance.
4104 //
4105 bool LibraryCallKit::inline_native_clone(bool is_virtual) {
4106   int nargs = 1;
4107   PhiNode* result_val;
4108 
4109   //set the original stack and the reexecute bit for the interpreter to reexecute
4110   //the bytecode that invokes Object.clone if deoptimization happens
4111   { PreserveReexecuteState preexecs(this);
4112     jvms()->set_should_reexecute(true);
4113 
4114     //null_check_receiver will adjust _sp (push and pop)
4115     Node* obj = null_check_receiver(callee());
4116     if (stopped())  return true;
4117 
4118     _sp += nargs;
4119 
4120     Node* obj_klass = load_object_klass(obj);
4121     const TypeKlassPtr* tklass = _gvn.type(obj_klass)->isa_klassptr();
4122     const TypeOopPtr*   toop   = ((tklass != NULL)
4123                                 ? tklass->as_instance_type()
4124                                 : TypeInstPtr::NOTNULL);
4125 
4126     // Conservatively insert a memory barrier on all memory slices.
4127     // Do not let writes into the original float below the clone.
4128     insert_mem_bar(Op_MemBarCPUOrder);
4129 
4130     // paths into result_reg:
4131     enum {
4132       _slow_path = 1,     // out-of-line call to clone method (virtual or not)
4133       _objArray_path,     // plain array allocation, plus arrayof_oop_arraycopy
4134       _array_path,        // plain array allocation, plus arrayof_long_arraycopy
4135       _instance_path,     // plain instance allocation, plus arrayof_long_arraycopy
4136       PATH_LIMIT
4137     };
4138     RegionNode* result_reg = new(C, PATH_LIMIT) RegionNode(PATH_LIMIT);
4139     result_val             = new(C, PATH_LIMIT) PhiNode(result_reg,
4140                                                         TypeInstPtr::NOTNULL);
4141     PhiNode*    result_i_o = new(C, PATH_LIMIT) PhiNode(result_reg, Type::ABIO);
4142     PhiNode*    result_mem = new(C, PATH_LIMIT) PhiNode(result_reg, Type::MEMORY,
4143                                                         TypePtr::BOTTOM);
4144     record_for_igvn(result_reg);
4145 
4146     const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
4147     int raw_adr_idx = Compile::AliasIdxRaw;
4148     const bool raw_mem_only = true;
4149 
4150 
4151     Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)NULL);
4152     if (array_ctl != NULL) {
4153       // It's an array.
4154       PreserveJVMState pjvms(this);
4155       set_control(array_ctl);
4156       Node* obj_length = load_array_length(obj);
4157       Node* obj_size  = NULL;
4158       Node* alloc_obj = new_array(obj_klass, obj_length, 0,
4159                                   raw_mem_only, &obj_size);
4160 
4161       if (!use_ReduceInitialCardMarks()) {
4162         // If it is an oop array, it requires very special treatment,
4163         // because card marking is required on each card of the array.
4164         Node* is_obja = generate_objArray_guard(obj_klass, (RegionNode*)NULL);
4165         if (is_obja != NULL) {
4166           PreserveJVMState pjvms2(this);
4167           set_control(is_obja);
4168           // Generate a direct call to the right arraycopy function(s).
4169           bool disjoint_bases = true;
4170           bool length_never_negative = true;
4171           generate_arraycopy(TypeAryPtr::OOPS, T_OBJECT,
4172                              obj, intcon(0), alloc_obj, intcon(0),
4173                              obj_length,
4174                              disjoint_bases, length_never_negative);
4175           result_reg->init_req(_objArray_path, control());
4176           result_val->init_req(_objArray_path, alloc_obj);
4177           result_i_o ->set_req(_objArray_path, i_o());
4178           result_mem ->set_req(_objArray_path, reset_memory());
4179         }
4180       }
4181       // We can dispense with card marks if we know the allocation
4182       // comes out of eden (TLAB)...  In fact, ReduceInitialCardMarks
4183       // causes the non-eden paths to simulate a fresh allocation,
4184       // insofar that no further card marks are required to initialize
4185       // the object.
4186 
4187       // Otherwise, there are no card marks to worry about.
4188 
4189       if (!stopped()) {
4190         copy_to_clone(obj, alloc_obj, obj_size, true, false);
4191 
4192         // Present the results of the copy.
4193         result_reg->init_req(_array_path, control());
4194         result_val->init_req(_array_path, alloc_obj);
4195         result_i_o ->set_req(_array_path, i_o());
4196         result_mem ->set_req(_array_path, reset_memory());
4197       }
4198     }
4199 
4200     // We only go to the instance fast case code if we pass a number of guards.
4201     // The paths which do not pass are accumulated in the slow_region.
4202     RegionNode* slow_region = new (C, 1) RegionNode(1);
4203     record_for_igvn(slow_region);
4204     if (!stopped()) {
4205       // It's an instance (we did array above).  Make the slow-path tests.
4206       // If this is a virtual call, we generate a funny guard.  We grab
4207       // the vtable entry corresponding to clone() from the target object.
4208       // If the target method which we are calling happens to be the
4209       // Object clone() method, we pass the guard.  We do not need this
4210       // guard for non-virtual calls; the caller is known to be the native
4211       // Object clone().
4212       if (is_virtual) {
4213         generate_virtual_guard(obj_klass, slow_region);
4214       }
4215 
4216       // The object must be cloneable and must not have a finalizer.
4217       // Both of these conditions may be checked in a single test.
4218       // We could optimize the cloneable test further, but we don't care.
4219       generate_access_flags_guard(obj_klass,
4220                                   // Test both conditions:
4221                                   JVM_ACC_IS_CLONEABLE | JVM_ACC_HAS_FINALIZER,
4222                                   // Must be cloneable but not finalizer:
4223                                   JVM_ACC_IS_CLONEABLE,
4224                                   slow_region);
4225     }
4226 
4227     if (!stopped()) {
4228       // It's an instance, and it passed the slow-path tests.
4229       PreserveJVMState pjvms(this);
4230       Node* obj_size  = NULL;
4231       Node* alloc_obj = new_instance(obj_klass, NULL, raw_mem_only, &obj_size);
4232 
4233       copy_to_clone(obj, alloc_obj, obj_size, false, !use_ReduceInitialCardMarks());
4234 
4235       // Present the results of the slow call.
4236       result_reg->init_req(_instance_path, control());
4237       result_val->init_req(_instance_path, alloc_obj);
4238       result_i_o ->set_req(_instance_path, i_o());
4239       result_mem ->set_req(_instance_path, reset_memory());
4240     }
4241 
4242     // Generate code for the slow case.  We make a call to clone().
4243     set_control(_gvn.transform(slow_region));
4244     if (!stopped()) {
4245       PreserveJVMState pjvms(this);
4246       CallJavaNode* slow_call = generate_method_call(vmIntrinsics::_clone, is_virtual);
4247       Node* slow_result = set_results_for_java_call(slow_call);
4248       // this->control() comes from set_results_for_java_call
4249       result_reg->init_req(_slow_path, control());
4250       result_val->init_req(_slow_path, slow_result);
4251       result_i_o ->set_req(_slow_path, i_o());
4252       result_mem ->set_req(_slow_path, reset_memory());
4253     }
4254 
4255     // Return the combined state.
4256     set_control(    _gvn.transform(result_reg) );
4257     set_i_o(        _gvn.transform(result_i_o) );
4258     set_all_memory( _gvn.transform(result_mem) );
4259   } //original reexecute and sp are set back here
4260 
4261   push(_gvn.transform(result_val));
4262 
4263   return true;
4264 }
4265 
4266 
4267 // constants for computing the copy function
4268 enum {
4269   COPYFUNC_UNALIGNED = 0,
4270   COPYFUNC_ALIGNED = 1,                 // src, dest aligned to HeapWordSize
4271   COPYFUNC_CONJOINT = 0,
4272   COPYFUNC_DISJOINT = 2                 // src != dest, or transfer can descend
4273 };
4274 
4275 // Note:  The condition "disjoint" applies also for overlapping copies
4276 // where an descending copy is permitted (i.e., dest_offset <= src_offset).
4277 static address
4278 select_arraycopy_function(BasicType t, bool aligned, bool disjoint, const char* &name) {
4279   int selector =
4280     (aligned  ? COPYFUNC_ALIGNED  : COPYFUNC_UNALIGNED) +
4281     (disjoint ? COPYFUNC_DISJOINT : COPYFUNC_CONJOINT);
4282 
4283 #define RETURN_STUB(xxx_arraycopy) { \
4284   name = #xxx_arraycopy; \
4285   return StubRoutines::xxx_arraycopy(); }
4286 
4287   switch (t) {
4288   case T_BYTE:
4289   case T_BOOLEAN:
4290     switch (selector) {
4291     case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jbyte_arraycopy);
4292     case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jbyte_arraycopy);
4293     case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jbyte_disjoint_arraycopy);
4294     case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jbyte_disjoint_arraycopy);
4295     }
4296   case T_CHAR:
4297   case T_SHORT:
4298     switch (selector) {
4299     case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jshort_arraycopy);
4300     case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jshort_arraycopy);
4301     case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jshort_disjoint_arraycopy);
4302     case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jshort_disjoint_arraycopy);
4303     }
4304   case T_INT:
4305   case T_FLOAT:
4306     switch (selector) {
4307     case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jint_arraycopy);
4308     case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jint_arraycopy);
4309     case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jint_disjoint_arraycopy);
4310     case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jint_disjoint_arraycopy);
4311     }
4312   case T_DOUBLE:
4313   case T_LONG:
4314     switch (selector) {
4315     case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jlong_arraycopy);
4316     case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jlong_arraycopy);
4317     case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jlong_disjoint_arraycopy);
4318     case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jlong_disjoint_arraycopy);
4319     }
4320   case T_ARRAY:
4321   case T_OBJECT:
4322     switch (selector) {
4323     case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(oop_arraycopy);
4324     case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_oop_arraycopy);
4325     case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(oop_disjoint_arraycopy);
4326     case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_oop_disjoint_arraycopy);
4327     }
4328   default:
4329     ShouldNotReachHere();
4330     return NULL;
4331   }
4332 
4333 #undef RETURN_STUB
4334 }
4335 
4336 //------------------------------basictype2arraycopy----------------------------
4337 address LibraryCallKit::basictype2arraycopy(BasicType t,
4338                                             Node* src_offset,
4339                                             Node* dest_offset,
4340                                             bool disjoint_bases,
4341                                             const char* &name) {
4342   const TypeInt* src_offset_inttype  = gvn().find_int_type(src_offset);;
4343   const TypeInt* dest_offset_inttype = gvn().find_int_type(dest_offset);;
4344 
4345   bool aligned = false;
4346   bool disjoint = disjoint_bases;
4347 
4348   // if the offsets are the same, we can treat the memory regions as
4349   // disjoint, because either the memory regions are in different arrays,
4350   // or they are identical (which we can treat as disjoint.)  We can also
4351   // treat a copy with a destination index  less that the source index
4352   // as disjoint since a low->high copy will work correctly in this case.
4353   if (src_offset_inttype != NULL && src_offset_inttype->is_con() &&
4354       dest_offset_inttype != NULL && dest_offset_inttype->is_con()) {
4355     // both indices are constants
4356     int s_offs = src_offset_inttype->get_con();
4357     int d_offs = dest_offset_inttype->get_con();
4358     int element_size = type2aelembytes(t);
4359     aligned = ((arrayOopDesc::base_offset_in_bytes(t) + s_offs * element_size) % HeapWordSize == 0) &&
4360               ((arrayOopDesc::base_offset_in_bytes(t) + d_offs * element_size) % HeapWordSize == 0);
4361     if (s_offs >= d_offs)  disjoint = true;
4362   } else if (src_offset == dest_offset && src_offset != NULL) {
4363     // This can occur if the offsets are identical non-constants.
4364     disjoint = true;
4365   }
4366 
4367   return select_arraycopy_function(t, aligned, disjoint, name);
4368 }
4369 
4370 
4371 //------------------------------inline_arraycopy-----------------------
4372 bool LibraryCallKit::inline_arraycopy() {
4373   // Restore the stack and pop off the arguments.
4374   int nargs = 5;  // 2 oops, 3 ints, no size_t or long
4375   assert(callee()->signature()->size() == nargs, "copy has 5 arguments");
4376 
4377   Node *src         = argument(0);
4378   Node *src_offset  = argument(1);
4379   Node *dest        = argument(2);
4380   Node *dest_offset = argument(3);
4381   Node *length      = argument(4);
4382 
4383   // Compile time checks.  If any of these checks cannot be verified at compile time,
4384   // we do not make a fast path for this call.  Instead, we let the call remain as it
4385   // is.  The checks we choose to mandate at compile time are:
4386   //
4387   // (1) src and dest are arrays.
4388   const Type* src_type = src->Value(&_gvn);
4389   const Type* dest_type = dest->Value(&_gvn);
4390   const TypeAryPtr* top_src = src_type->isa_aryptr();
4391   const TypeAryPtr* top_dest = dest_type->isa_aryptr();
4392   if (top_src  == NULL || top_src->klass()  == NULL ||
4393       top_dest == NULL || top_dest->klass() == NULL) {
4394     // Conservatively insert a memory barrier on all memory slices.
4395     // Do not let writes into the source float below the arraycopy.
4396     insert_mem_bar(Op_MemBarCPUOrder);
4397 
4398     // Call StubRoutines::generic_arraycopy stub.
4399     generate_arraycopy(TypeRawPtr::BOTTOM, T_CONFLICT,
4400                        src, src_offset, dest, dest_offset, length);
4401 
4402     // Do not let reads from the destination float above the arraycopy.
4403     // Since we cannot type the arrays, we don't know which slices
4404     // might be affected.  We could restrict this barrier only to those
4405     // memory slices which pertain to array elements--but don't bother.
4406     if (!InsertMemBarAfterArraycopy)
4407       // (If InsertMemBarAfterArraycopy, there is already one in place.)
4408       insert_mem_bar(Op_MemBarCPUOrder);
4409     return true;
4410   }
4411 
4412   // (2) src and dest arrays must have elements of the same BasicType
4413   // Figure out the size and type of the elements we will be copying.
4414   BasicType src_elem  =  top_src->klass()->as_array_klass()->element_type()->basic_type();
4415   BasicType dest_elem = top_dest->klass()->as_array_klass()->element_type()->basic_type();
4416   if (src_elem  == T_ARRAY)  src_elem  = T_OBJECT;
4417   if (dest_elem == T_ARRAY)  dest_elem = T_OBJECT;
4418 
4419   if (src_elem != dest_elem || dest_elem == T_VOID) {
4420     // The component types are not the same or are not recognized.  Punt.
4421     // (But, avoid the native method wrapper to JVM_ArrayCopy.)
4422     generate_slow_arraycopy(TypePtr::BOTTOM,
4423                             src, src_offset, dest, dest_offset, length);
4424     return true;
4425   }
4426 
4427   //---------------------------------------------------------------------------
4428   // We will make a fast path for this call to arraycopy.
4429 
4430   // We have the following tests left to perform:
4431   //
4432   // (3) src and dest must not be null.
4433   // (4) src_offset must not be negative.
4434   // (5) dest_offset must not be negative.
4435   // (6) length must not be negative.
4436   // (7) src_offset + length must not exceed length of src.
4437   // (8) dest_offset + length must not exceed length of dest.
4438   // (9) each element of an oop array must be assignable
4439 
4440   RegionNode* slow_region = new (C, 1) RegionNode(1);
4441   record_for_igvn(slow_region);
4442 
4443   // (3) operands must not be null
4444   // We currently perform our null checks with the do_null_check routine.
4445   // This means that the null exceptions will be reported in the caller
4446   // rather than (correctly) reported inside of the native arraycopy call.
4447   // This should be corrected, given time.  We do our null check with the
4448   // stack pointer restored.
4449   _sp += nargs;
4450   src  = do_null_check(src,  T_ARRAY);
4451   dest = do_null_check(dest, T_ARRAY);
4452   _sp -= nargs;
4453 
4454   // (4) src_offset must not be negative.
4455   generate_negative_guard(src_offset, slow_region);
4456 
4457   // (5) dest_offset must not be negative.
4458   generate_negative_guard(dest_offset, slow_region);
4459 
4460   // (6) length must not be negative (moved to generate_arraycopy()).
4461   // generate_negative_guard(length, slow_region);
4462 
4463   // (7) src_offset + length must not exceed length of src.
4464   generate_limit_guard(src_offset, length,
4465                        load_array_length(src),
4466                        slow_region);
4467 
4468   // (8) dest_offset + length must not exceed length of dest.
4469   generate_limit_guard(dest_offset, length,
4470                        load_array_length(dest),
4471                        slow_region);
4472 
4473   // (9) each element of an oop array must be assignable
4474   // The generate_arraycopy subroutine checks this.
4475 
4476   // This is where the memory effects are placed:
4477   const TypePtr* adr_type = TypeAryPtr::get_array_body_type(dest_elem);
4478   generate_arraycopy(adr_type, dest_elem,
4479                      src, src_offset, dest, dest_offset, length,
4480                      false, false, slow_region);
4481 
4482   return true;
4483 }
4484 
4485 //-----------------------------generate_arraycopy----------------------
4486 // Generate an optimized call to arraycopy.
4487 // Caller must guard against non-arrays.
4488 // Caller must determine a common array basic-type for both arrays.
4489 // Caller must validate offsets against array bounds.
4490 // The slow_region has already collected guard failure paths
4491 // (such as out of bounds length or non-conformable array types).
4492 // The generated code has this shape, in general:
4493 //
4494 //     if (length == 0)  return   // via zero_path
4495 //     slowval = -1
4496 //     if (types unknown) {
4497 //       slowval = call generic copy loop
4498 //       if (slowval == 0)  return  // via checked_path
4499 //     } else if (indexes in bounds) {
4500 //       if ((is object array) && !(array type check)) {
4501 //         slowval = call checked copy loop
4502 //         if (slowval == 0)  return  // via checked_path
4503 //       } else {
4504 //         call bulk copy loop
4505 //         return  // via fast_path
4506 //       }
4507 //     }
4508 //     // adjust params for remaining work:
4509 //     if (slowval != -1) {
4510 //       n = -1^slowval; src_offset += n; dest_offset += n; length -= n
4511 //     }
4512 //   slow_region:
4513 //     call slow arraycopy(src, src_offset, dest, dest_offset, length)
4514 //     return  // via slow_call_path
4515 //
4516 // This routine is used from several intrinsics:  System.arraycopy,
4517 // Object.clone (the array subcase), and Arrays.copyOf[Range].
4518 //
4519 void
4520 LibraryCallKit::generate_arraycopy(const TypePtr* adr_type,
4521                                    BasicType basic_elem_type,
4522                                    Node* src,  Node* src_offset,
4523                                    Node* dest, Node* dest_offset,
4524                                    Node* copy_length,
4525                                    bool disjoint_bases,
4526                                    bool length_never_negative,
4527                                    RegionNode* slow_region) {
4528 
4529   if (slow_region == NULL) {
4530     slow_region = new(C,1) RegionNode(1);
4531     record_for_igvn(slow_region);
4532   }
4533 
4534   Node* original_dest      = dest;
4535   AllocateArrayNode* alloc = NULL;  // used for zeroing, if needed
4536   bool  must_clear_dest    = false;
4537 
4538   // See if this is the initialization of a newly-allocated array.
4539   // If so, we will take responsibility here for initializing it to zero.
4540   // (Note:  Because tightly_coupled_allocation performs checks on the
4541   // out-edges of the dest, we need to avoid making derived pointers
4542   // from it until we have checked its uses.)
4543   if (ReduceBulkZeroing
4544       && !ZeroTLAB              // pointless if already zeroed
4545       && basic_elem_type != T_CONFLICT // avoid corner case
4546       && !_gvn.eqv_uncast(src, dest)
4547       && ((alloc = tightly_coupled_allocation(dest, slow_region))
4548           != NULL)
4549       && _gvn.find_int_con(alloc->in(AllocateNode::ALength), 1) > 0
4550       && alloc->maybe_set_complete(&_gvn)) {
4551     // "You break it, you buy it."
4552     InitializeNode* init = alloc->initialization();
4553     assert(init->is_complete(), "we just did this");
4554     assert(dest->is_CheckCastPP(), "sanity");
4555     assert(dest->as_CheckCastPP()->type() != TypeInstPtr::NOTNULL, "type should be more precise than Object");
4556     assert(dest->in(0)->in(0) == init, "dest pinned");
4557 
4558     // Cast to Object for arraycopy.
4559     // We can't use the original CheckCastPP since it should be moved
4560     // after the arraycopy to prevent stores flowing above it.
4561     Node* new_obj = new(C, 2) CheckCastPPNode(dest->in(0), dest->in(1),
4562                                               TypeInstPtr::NOTNULL);
4563     dest = _gvn.transform(new_obj);
4564     // Substitute in the locally valid dest_oop.
4565     replace_in_map(original_dest, dest);
4566     adr_type = TypeRawPtr::BOTTOM;  // all initializations are into raw memory
4567     // From this point on, every exit path is responsible for
4568     // initializing any non-copied parts of the object to zero.
4569     must_clear_dest = true;
4570   } else {
4571     // No zeroing elimination here.
4572     alloc             = NULL;
4573     //original_dest   = dest;
4574     //must_clear_dest = false;
4575   }
4576 
4577   // Results are placed here:
4578   enum { fast_path        = 1,  // normal void-returning assembly stub
4579          checked_path     = 2,  // special assembly stub with cleanup
4580          slow_call_path   = 3,  // something went wrong; call the VM
4581          zero_path        = 4,  // bypass when length of copy is zero
4582          bcopy_path       = 5,  // copy primitive array by 64-bit blocks
4583          PATH_LIMIT       = 6
4584   };
4585   RegionNode* result_region = new(C, PATH_LIMIT) RegionNode(PATH_LIMIT);
4586   PhiNode*    result_i_o    = new(C, PATH_LIMIT) PhiNode(result_region, Type::ABIO);
4587   PhiNode*    result_memory = new(C, PATH_LIMIT) PhiNode(result_region, Type::MEMORY, adr_type);
4588   record_for_igvn(result_region);
4589   _gvn.set_type_bottom(result_i_o);
4590   _gvn.set_type_bottom(result_memory);
4591   assert(adr_type != TypePtr::BOTTOM, "must be RawMem or a T[] slice");
4592 
4593   // The slow_control path:
4594   Node* slow_control;
4595   Node* slow_i_o = i_o();
4596   Node* slow_mem = memory(adr_type);
4597   debug_only(slow_control = (Node*) badAddress);
4598 
4599   // Checked control path:
4600   Node* checked_control = top();
4601   Node* checked_mem     = NULL;
4602   Node* checked_i_o     = NULL;
4603   Node* checked_value   = NULL;
4604 
4605   if (basic_elem_type == T_CONFLICT) {
4606     assert(!must_clear_dest, "");
4607     Node* cv = generate_generic_arraycopy(adr_type,
4608                                           src, src_offset, dest, dest_offset,
4609                                           copy_length);
4610     if (cv == NULL)  cv = intcon(-1);  // failure (no stub available)
4611     checked_control = control();
4612     checked_i_o     = i_o();
4613     checked_mem     = memory(adr_type);
4614     checked_value   = cv;
4615     set_control(top());         // no fast path
4616   }
4617 
4618   Node* not_pos = generate_nonpositive_guard(copy_length, length_never_negative);
4619   if (not_pos != NULL) {
4620     PreserveJVMState pjvms(this);
4621     set_control(not_pos);
4622 
4623     // (6) length must not be negative.
4624     if (!length_never_negative) {
4625       generate_negative_guard(copy_length, slow_region);
4626     }
4627 
4628     // copy_length is 0.
4629     if (!stopped() && must_clear_dest) {
4630       Node* dest_length = alloc->in(AllocateNode::ALength);
4631       if (_gvn.eqv_uncast(copy_length, dest_length)
4632           || _gvn.find_int_con(dest_length, 1) <= 0) {
4633         // There is no zeroing to do. No need for a secondary raw memory barrier.
4634       } else {
4635         // Clear the whole thing since there are no source elements to copy.
4636         generate_clear_array(adr_type, dest, basic_elem_type,
4637                              intcon(0), NULL,
4638                              alloc->in(AllocateNode::AllocSize));
4639         // Use a secondary InitializeNode as raw memory barrier.
4640         // Currently it is needed only on this path since other
4641         // paths have stub or runtime calls as raw memory barriers.
4642         InitializeNode* init = insert_mem_bar_volatile(Op_Initialize,
4643                                                        Compile::AliasIdxRaw,
4644                                                        top())->as_Initialize();
4645         init->set_complete(&_gvn);  // (there is no corresponding AllocateNode)
4646       }
4647     }
4648 
4649     // Present the results of the fast call.
4650     result_region->init_req(zero_path, control());
4651     result_i_o   ->init_req(zero_path, i_o());
4652     result_memory->init_req(zero_path, memory(adr_type));
4653   }
4654 
4655   if (!stopped() && must_clear_dest) {
4656     // We have to initialize the *uncopied* part of the array to zero.
4657     // The copy destination is the slice dest[off..off+len].  The other slices
4658     // are dest_head = dest[0..off] and dest_tail = dest[off+len..dest.length].
4659     Node* dest_size   = alloc->in(AllocateNode::AllocSize);
4660     Node* dest_length = alloc->in(AllocateNode::ALength);
4661     Node* dest_tail   = _gvn.transform( new(C,3) AddINode(dest_offset,
4662                                                           copy_length) );
4663 
4664     // If there is a head section that needs zeroing, do it now.
4665     if (find_int_con(dest_offset, -1) != 0) {
4666       generate_clear_array(adr_type, dest, basic_elem_type,
4667                            intcon(0), dest_offset,
4668                            NULL);
4669     }
4670 
4671     // Next, perform a dynamic check on the tail length.
4672     // It is often zero, and we can win big if we prove this.
4673     // There are two wins:  Avoid generating the ClearArray
4674     // with its attendant messy index arithmetic, and upgrade
4675     // the copy to a more hardware-friendly word size of 64 bits.
4676     Node* tail_ctl = NULL;
4677     if (!stopped() && !_gvn.eqv_uncast(dest_tail, dest_length)) {
4678       Node* cmp_lt   = _gvn.transform( new(C,3) CmpINode(dest_tail, dest_length) );
4679       Node* bol_lt   = _gvn.transform( new(C,2) BoolNode(cmp_lt, BoolTest::lt) );
4680       tail_ctl = generate_slow_guard(bol_lt, NULL);
4681       assert(tail_ctl != NULL || !stopped(), "must be an outcome");
4682     }
4683 
4684     // At this point, let's assume there is no tail.
4685     if (!stopped() && alloc != NULL && basic_elem_type != T_OBJECT) {
4686       // There is no tail.  Try an upgrade to a 64-bit copy.
4687       bool didit = false;
4688       { PreserveJVMState pjvms(this);
4689         didit = generate_block_arraycopy(adr_type, basic_elem_type, alloc,
4690                                          src, src_offset, dest, dest_offset,
4691                                          dest_size);
4692         if (didit) {
4693           // Present the results of the block-copying fast call.
4694           result_region->init_req(bcopy_path, control());
4695           result_i_o   ->init_req(bcopy_path, i_o());
4696           result_memory->init_req(bcopy_path, memory(adr_type));
4697         }
4698       }
4699       if (didit)
4700         set_control(top());     // no regular fast path
4701     }
4702 
4703     // Clear the tail, if any.
4704     if (tail_ctl != NULL) {
4705       Node* notail_ctl = stopped() ? NULL : control();
4706       set_control(tail_ctl);
4707       if (notail_ctl == NULL) {
4708         generate_clear_array(adr_type, dest, basic_elem_type,
4709                              dest_tail, NULL,
4710                              dest_size);
4711       } else {
4712         // Make a local merge.
4713         Node* done_ctl = new(C,3) RegionNode(3);
4714         Node* done_mem = new(C,3) PhiNode(done_ctl, Type::MEMORY, adr_type);
4715         done_ctl->init_req(1, notail_ctl);
4716         done_mem->init_req(1, memory(adr_type));
4717         generate_clear_array(adr_type, dest, basic_elem_type,
4718                              dest_tail, NULL,
4719                              dest_size);
4720         done_ctl->init_req(2, control());
4721         done_mem->init_req(2, memory(adr_type));
4722         set_control( _gvn.transform(done_ctl) );
4723         set_memory(  _gvn.transform(done_mem), adr_type );
4724       }
4725     }
4726   }
4727 
4728   BasicType copy_type = basic_elem_type;
4729   assert(basic_elem_type != T_ARRAY, "caller must fix this");
4730   if (!stopped() && copy_type == T_OBJECT) {
4731     // If src and dest have compatible element types, we can copy bits.
4732     // Types S[] and D[] are compatible if D is a supertype of S.
4733     //
4734     // If they are not, we will use checked_oop_disjoint_arraycopy,
4735     // which performs a fast optimistic per-oop check, and backs off
4736     // further to JVM_ArrayCopy on the first per-oop check that fails.
4737     // (Actually, we don't move raw bits only; the GC requires card marks.)
4738 
4739     // Get the klassOop for both src and dest
4740     Node* src_klass  = load_object_klass(src);
4741     Node* dest_klass = load_object_klass(dest);
4742 
4743     // Generate the subtype check.
4744     // This might fold up statically, or then again it might not.
4745     //
4746     // Non-static example:  Copying List<String>.elements to a new String[].
4747     // The backing store for a List<String> is always an Object[],
4748     // but its elements are always type String, if the generic types
4749     // are correct at the source level.
4750     //
4751     // Test S[] against D[], not S against D, because (probably)
4752     // the secondary supertype cache is less busy for S[] than S.
4753     // This usually only matters when D is an interface.
4754     Node* not_subtype_ctrl = gen_subtype_check(src_klass, dest_klass);
4755     // Plug failing path into checked_oop_disjoint_arraycopy
4756     if (not_subtype_ctrl != top()) {
4757       PreserveJVMState pjvms(this);
4758       set_control(not_subtype_ctrl);
4759       // (At this point we can assume disjoint_bases, since types differ.)
4760       int ek_offset = objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc);
4761       Node* p1 = basic_plus_adr(dest_klass, ek_offset);
4762       Node* n1 = LoadKlassNode::make(_gvn, immutable_memory(), p1, TypeRawPtr::BOTTOM);
4763       Node* dest_elem_klass = _gvn.transform(n1);
4764       Node* cv = generate_checkcast_arraycopy(adr_type,
4765                                               dest_elem_klass,
4766                                               src, src_offset, dest, dest_offset,
4767                                               copy_length);
4768       if (cv == NULL)  cv = intcon(-1);  // failure (no stub available)
4769       checked_control = control();
4770       checked_i_o     = i_o();
4771       checked_mem     = memory(adr_type);
4772       checked_value   = cv;
4773     }
4774     // At this point we know we do not need type checks on oop stores.
4775 
4776     // Let's see if we need card marks:
4777     if (alloc != NULL && use_ReduceInitialCardMarks()) {
4778       // If we do not need card marks, copy using the jint or jlong stub.
4779       copy_type = LP64_ONLY(UseCompressedOops ? T_INT : T_LONG) NOT_LP64(T_INT);
4780       assert(type2aelembytes(basic_elem_type) == type2aelembytes(copy_type),
4781              "sizes agree");
4782     }
4783   }
4784 
4785   if (!stopped()) {
4786     // Generate the fast path, if possible.
4787     PreserveJVMState pjvms(this);
4788     generate_unchecked_arraycopy(adr_type, copy_type, disjoint_bases,
4789                                  src, src_offset, dest, dest_offset,
4790                                  ConvI2X(copy_length));
4791 
4792     // Present the results of the fast call.
4793     result_region->init_req(fast_path, control());
4794     result_i_o   ->init_req(fast_path, i_o());
4795     result_memory->init_req(fast_path, memory(adr_type));
4796   }
4797 
4798   // Here are all the slow paths up to this point, in one bundle:
4799   slow_control = top();
4800   if (slow_region != NULL)
4801     slow_control = _gvn.transform(slow_region);
4802   debug_only(slow_region = (RegionNode*)badAddress);
4803 
4804   set_control(checked_control);
4805   if (!stopped()) {
4806     // Clean up after the checked call.
4807     // The returned value is either 0 or -1^K,
4808     // where K = number of partially transferred array elements.
4809     Node* cmp = _gvn.transform( new(C, 3) CmpINode(checked_value, intcon(0)) );
4810     Node* bol = _gvn.transform( new(C, 2) BoolNode(cmp, BoolTest::eq) );
4811     IfNode* iff = create_and_map_if(control(), bol, PROB_MAX, COUNT_UNKNOWN);
4812 
4813     // If it is 0, we are done, so transfer to the end.
4814     Node* checks_done = _gvn.transform( new(C, 1) IfTrueNode(iff) );
4815     result_region->init_req(checked_path, checks_done);
4816     result_i_o   ->init_req(checked_path, checked_i_o);
4817     result_memory->init_req(checked_path, checked_mem);
4818 
4819     // If it is not zero, merge into the slow call.
4820     set_control( _gvn.transform( new(C, 1) IfFalseNode(iff) ));
4821     RegionNode* slow_reg2 = new(C, 3) RegionNode(3);
4822     PhiNode*    slow_i_o2 = new(C, 3) PhiNode(slow_reg2, Type::ABIO);
4823     PhiNode*    slow_mem2 = new(C, 3) PhiNode(slow_reg2, Type::MEMORY, adr_type);
4824     record_for_igvn(slow_reg2);
4825     slow_reg2  ->init_req(1, slow_control);
4826     slow_i_o2  ->init_req(1, slow_i_o);
4827     slow_mem2  ->init_req(1, slow_mem);
4828     slow_reg2  ->init_req(2, control());
4829     slow_i_o2  ->init_req(2, checked_i_o);
4830     slow_mem2  ->init_req(2, checked_mem);
4831 
4832     slow_control = _gvn.transform(slow_reg2);
4833     slow_i_o     = _gvn.transform(slow_i_o2);
4834     slow_mem     = _gvn.transform(slow_mem2);
4835 
4836     if (alloc != NULL) {
4837       // We'll restart from the very beginning, after zeroing the whole thing.
4838       // This can cause double writes, but that's OK since dest is brand new.
4839       // So we ignore the low 31 bits of the value returned from the stub.
4840     } else {
4841       // We must continue the copy exactly where it failed, or else
4842       // another thread might see the wrong number of writes to dest.
4843       Node* checked_offset = _gvn.transform( new(C, 3) XorINode(checked_value, intcon(-1)) );
4844       Node* slow_offset    = new(C, 3) PhiNode(slow_reg2, TypeInt::INT);
4845       slow_offset->init_req(1, intcon(0));
4846       slow_offset->init_req(2, checked_offset);
4847       slow_offset  = _gvn.transform(slow_offset);
4848 
4849       // Adjust the arguments by the conditionally incoming offset.
4850       Node* src_off_plus  = _gvn.transform( new(C, 3) AddINode(src_offset,  slow_offset) );
4851       Node* dest_off_plus = _gvn.transform( new(C, 3) AddINode(dest_offset, slow_offset) );
4852       Node* length_minus  = _gvn.transform( new(C, 3) SubINode(copy_length, slow_offset) );
4853 
4854       // Tweak the node variables to adjust the code produced below:
4855       src_offset  = src_off_plus;
4856       dest_offset = dest_off_plus;
4857       copy_length = length_minus;
4858     }
4859   }
4860 
4861   set_control(slow_control);
4862   if (!stopped()) {
4863     // Generate the slow path, if needed.
4864     PreserveJVMState pjvms(this);   // replace_in_map may trash the map
4865 
4866     set_memory(slow_mem, adr_type);
4867     set_i_o(slow_i_o);
4868 
4869     if (must_clear_dest) {
4870       generate_clear_array(adr_type, dest, basic_elem_type,
4871                            intcon(0), NULL,
4872                            alloc->in(AllocateNode::AllocSize));
4873     }
4874 
4875     generate_slow_arraycopy(adr_type,
4876                             src, src_offset, dest, dest_offset,
4877                             copy_length);
4878 
4879     result_region->init_req(slow_call_path, control());
4880     result_i_o   ->init_req(slow_call_path, i_o());
4881     result_memory->init_req(slow_call_path, memory(adr_type));
4882   }
4883 
4884   // Remove unused edges.
4885   for (uint i = 1; i < result_region->req(); i++) {
4886     if (result_region->in(i) == NULL)
4887       result_region->init_req(i, top());
4888   }
4889 
4890   // Finished; return the combined state.
4891   set_control( _gvn.transform(result_region) );
4892   set_i_o(     _gvn.transform(result_i_o)    );
4893   set_memory(  _gvn.transform(result_memory), adr_type );
4894 
4895   if (dest != original_dest) {
4896     // Pin the "finished" array node after the arraycopy/zeroing operations.
4897     _gvn.hash_delete(original_dest);
4898     original_dest->set_req(0, control());
4899     // Replace raw memory edge with new CheckCastPP to have a live oop
4900     // at safepoints instead of raw value.
4901     assert(dest->is_CheckCastPP() && dest->in(1) == original_dest->in(1), "sanity");
4902     original_dest->set_req(1, dest);       // cast to the original type
4903     _gvn.hash_find_insert(original_dest);  // put back into GVN table
4904     // Restore in the locally valid dest_oop.
4905     replace_in_map(dest, original_dest);
4906   }
4907   // The memory edges above are precise in order to model effects around
4908   // array copies accurately to allow value numbering of field loads around
4909   // arraycopy.  Such field loads, both before and after, are common in Java
4910   // collections and similar classes involving header/array data structures.
4911   //
4912   // But with low number of register or when some registers are used or killed
4913   // by arraycopy calls it causes registers spilling on stack. See 6544710.
4914   // The next memory barrier is added to avoid it. If the arraycopy can be
4915   // optimized away (which it can, sometimes) then we can manually remove
4916   // the membar also.
4917   if (InsertMemBarAfterArraycopy)
4918     insert_mem_bar(Op_MemBarCPUOrder);
4919 }
4920 
4921 
4922 // Helper function which determines if an arraycopy immediately follows
4923 // an allocation, with no intervening tests or other escapes for the object.
4924 AllocateArrayNode*
4925 LibraryCallKit::tightly_coupled_allocation(Node* ptr,
4926                                            RegionNode* slow_region) {
4927   if (stopped())             return NULL;  // no fast path
4928   if (C->AliasLevel() == 0)  return NULL;  // no MergeMems around
4929 
4930   AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(ptr, &_gvn);
4931   if (alloc == NULL)  return NULL;
4932 
4933   Node* rawmem = memory(Compile::AliasIdxRaw);
4934   // Is the allocation's memory state untouched?
4935   if (!(rawmem->is_Proj() && rawmem->in(0)->is_Initialize())) {
4936     // Bail out if there have been raw-memory effects since the allocation.
4937     // (Example:  There might have been a call or safepoint.)
4938     return NULL;
4939   }
4940   rawmem = rawmem->in(0)->as_Initialize()->memory(Compile::AliasIdxRaw);
4941   if (!(rawmem->is_Proj() && rawmem->in(0) == alloc)) {
4942     return NULL;
4943   }
4944 
4945   // There must be no unexpected observers of this allocation.
4946   for (DUIterator_Fast imax, i = ptr->fast_outs(imax); i < imax; i++) {
4947     Node* obs = ptr->fast_out(i);
4948     if (obs != this->map()) {
4949       return NULL;
4950     }
4951   }
4952 
4953   // This arraycopy must unconditionally follow the allocation of the ptr.
4954   Node* alloc_ctl = ptr->in(0);
4955   assert(just_allocated_object(alloc_ctl) == ptr, "most recent allo");
4956 
4957   Node* ctl = control();
4958   while (ctl != alloc_ctl) {
4959     // There may be guards which feed into the slow_region.
4960     // Any other control flow means that we might not get a chance
4961     // to finish initializing the allocated object.
4962     if ((ctl->is_IfFalse() || ctl->is_IfTrue()) && ctl->in(0)->is_If()) {
4963       IfNode* iff = ctl->in(0)->as_If();
4964       Node* not_ctl = iff->proj_out(1 - ctl->as_Proj()->_con);
4965       assert(not_ctl != NULL && not_ctl != ctl, "found alternate");
4966       if (slow_region != NULL && slow_region->find_edge(not_ctl) >= 1) {
4967         ctl = iff->in(0);       // This test feeds the known slow_region.
4968         continue;
4969       }
4970       // One more try:  Various low-level checks bottom out in
4971       // uncommon traps.  If the debug-info of the trap omits
4972       // any reference to the allocation, as we've already
4973       // observed, then there can be no objection to the trap.
4974       bool found_trap = false;
4975       for (DUIterator_Fast jmax, j = not_ctl->fast_outs(jmax); j < jmax; j++) {
4976         Node* obs = not_ctl->fast_out(j);
4977         if (obs->in(0) == not_ctl && obs->is_Call() &&
4978             (obs->as_Call()->entry_point() ==
4979              SharedRuntime::uncommon_trap_blob()->instructions_begin())) {
4980           found_trap = true; break;
4981         }
4982       }
4983       if (found_trap) {
4984         ctl = iff->in(0);       // This test feeds a harmless uncommon trap.
4985         continue;
4986       }
4987     }
4988     return NULL;
4989   }
4990 
4991   // If we get this far, we have an allocation which immediately
4992   // precedes the arraycopy, and we can take over zeroing the new object.
4993   // The arraycopy will finish the initialization, and provide
4994   // a new control state to which we will anchor the destination pointer.
4995 
4996   return alloc;
4997 }
4998 
4999 // Helper for initialization of arrays, creating a ClearArray.
5000 // It writes zero bits in [start..end), within the body of an array object.
5001 // The memory effects are all chained onto the 'adr_type' alias category.
5002 //
5003 // Since the object is otherwise uninitialized, we are free
5004 // to put a little "slop" around the edges of the cleared area,
5005 // as long as it does not go back into the array's header,
5006 // or beyond the array end within the heap.
5007 //
5008 // The lower edge can be rounded down to the nearest jint and the
5009 // upper edge can be rounded up to the nearest MinObjAlignmentInBytes.
5010 //
5011 // Arguments:
5012 //   adr_type           memory slice where writes are generated
5013 //   dest               oop of the destination array
5014 //   basic_elem_type    element type of the destination
5015 //   slice_idx          array index of first element to store
5016 //   slice_len          number of elements to store (or NULL)
5017 //   dest_size          total size in bytes of the array object
5018 //
5019 // Exactly one of slice_len or dest_size must be non-NULL.
5020 // If dest_size is non-NULL, zeroing extends to the end of the object.
5021 // If slice_len is non-NULL, the slice_idx value must be a constant.
5022 void
5023 LibraryCallKit::generate_clear_array(const TypePtr* adr_type,
5024                                      Node* dest,
5025                                      BasicType basic_elem_type,
5026                                      Node* slice_idx,
5027                                      Node* slice_len,
5028                                      Node* dest_size) {
5029   // one or the other but not both of slice_len and dest_size:
5030   assert((slice_len != NULL? 1: 0) + (dest_size != NULL? 1: 0) == 1, "");
5031   if (slice_len == NULL)  slice_len = top();
5032   if (dest_size == NULL)  dest_size = top();
5033 
5034   // operate on this memory slice:
5035   Node* mem = memory(adr_type); // memory slice to operate on
5036 
5037   // scaling and rounding of indexes:
5038   int scale = exact_log2(type2aelembytes(basic_elem_type));
5039   int abase = arrayOopDesc::base_offset_in_bytes(basic_elem_type);
5040   int clear_low = (-1 << scale) & (BytesPerInt  - 1);
5041   int bump_bit  = (-1 << scale) & BytesPerInt;
5042 
5043   // determine constant starts and ends
5044   const intptr_t BIG_NEG = -128;
5045   assert(BIG_NEG + 2*abase < 0, "neg enough");
5046   intptr_t slice_idx_con = (intptr_t) find_int_con(slice_idx, BIG_NEG);
5047   intptr_t slice_len_con = (intptr_t) find_int_con(slice_len, BIG_NEG);
5048   if (slice_len_con == 0) {
5049     return;                     // nothing to do here
5050   }
5051   intptr_t start_con = (abase + (slice_idx_con << scale)) & ~clear_low;
5052   intptr_t end_con   = find_intptr_t_con(dest_size, -1);
5053   if (slice_idx_con >= 0 && slice_len_con >= 0) {
5054     assert(end_con < 0, "not two cons");
5055     end_con = round_to(abase + ((slice_idx_con + slice_len_con) << scale),
5056                        BytesPerLong);
5057   }
5058 
5059   if (start_con >= 0 && end_con >= 0) {
5060     // Constant start and end.  Simple.
5061     mem = ClearArrayNode::clear_memory(control(), mem, dest,
5062                                        start_con, end_con, &_gvn);
5063   } else if (start_con >= 0 && dest_size != top()) {
5064     // Constant start, pre-rounded end after the tail of the array.
5065     Node* end = dest_size;
5066     mem = ClearArrayNode::clear_memory(control(), mem, dest,
5067                                        start_con, end, &_gvn);
5068   } else if (start_con >= 0 && slice_len != top()) {
5069     // Constant start, non-constant end.  End needs rounding up.
5070     // End offset = round_up(abase + ((slice_idx_con + slice_len) << scale), 8)
5071     intptr_t end_base  = abase + (slice_idx_con << scale);
5072     int      end_round = (-1 << scale) & (BytesPerLong  - 1);
5073     Node*    end       = ConvI2X(slice_len);
5074     if (scale != 0)
5075       end = _gvn.transform( new(C,3) LShiftXNode(end, intcon(scale) ));
5076     end_base += end_round;
5077     end = _gvn.transform( new(C,3) AddXNode(end, MakeConX(end_base)) );
5078     end = _gvn.transform( new(C,3) AndXNode(end, MakeConX(~end_round)) );
5079     mem = ClearArrayNode::clear_memory(control(), mem, dest,
5080                                        start_con, end, &_gvn);
5081   } else if (start_con < 0 && dest_size != top()) {
5082     // Non-constant start, pre-rounded end after the tail of the array.
5083     // This is almost certainly a "round-to-end" operation.
5084     Node* start = slice_idx;
5085     start = ConvI2X(start);
5086     if (scale != 0)
5087       start = _gvn.transform( new(C,3) LShiftXNode( start, intcon(scale) ));
5088     start = _gvn.transform( new(C,3) AddXNode(start, MakeConX(abase)) );
5089     if ((bump_bit | clear_low) != 0) {
5090       int to_clear = (bump_bit | clear_low);
5091       // Align up mod 8, then store a jint zero unconditionally
5092       // just before the mod-8 boundary.
5093       if (((abase + bump_bit) & ~to_clear) - bump_bit
5094           < arrayOopDesc::length_offset_in_bytes() + BytesPerInt) {
5095         bump_bit = 0;
5096         assert((abase & to_clear) == 0, "array base must be long-aligned");
5097       } else {
5098         // Bump 'start' up to (or past) the next jint boundary:
5099         start = _gvn.transform( new(C,3) AddXNode(start, MakeConX(bump_bit)) );
5100         assert((abase & clear_low) == 0, "array base must be int-aligned");
5101       }
5102       // Round bumped 'start' down to jlong boundary in body of array.
5103       start = _gvn.transform( new(C,3) AndXNode(start, MakeConX(~to_clear)) );
5104       if (bump_bit != 0) {
5105         // Store a zero to the immediately preceding jint:
5106         Node* x1 = _gvn.transform( new(C,3) AddXNode(start, MakeConX(-bump_bit)) );
5107         Node* p1 = basic_plus_adr(dest, x1);
5108         mem = StoreNode::make(_gvn, control(), mem, p1, adr_type, intcon(0), T_INT);
5109         mem = _gvn.transform(mem);
5110       }
5111     }
5112     Node* end = dest_size; // pre-rounded
5113     mem = ClearArrayNode::clear_memory(control(), mem, dest,
5114                                        start, end, &_gvn);
5115   } else {
5116     // Non-constant start, unrounded non-constant end.
5117     // (Nobody zeroes a random midsection of an array using this routine.)
5118     ShouldNotReachHere();       // fix caller
5119   }
5120 
5121   // Done.
5122   set_memory(mem, adr_type);
5123 }
5124 
5125 
5126 bool
5127 LibraryCallKit::generate_block_arraycopy(const TypePtr* adr_type,
5128                                          BasicType basic_elem_type,
5129                                          AllocateNode* alloc,
5130                                          Node* src,  Node* src_offset,
5131                                          Node* dest, Node* dest_offset,
5132                                          Node* dest_size) {
5133   // See if there is an advantage from block transfer.
5134   int scale = exact_log2(type2aelembytes(basic_elem_type));
5135   if (scale >= LogBytesPerLong)
5136     return false;               // it is already a block transfer
5137 
5138   // Look at the alignment of the starting offsets.
5139   int abase = arrayOopDesc::base_offset_in_bytes(basic_elem_type);
5140   const intptr_t BIG_NEG = -128;
5141   assert(BIG_NEG + 2*abase < 0, "neg enough");
5142 
5143   intptr_t src_off  = abase + ((intptr_t) find_int_con(src_offset, -1)  << scale);
5144   intptr_t dest_off = abase + ((intptr_t) find_int_con(dest_offset, -1) << scale);
5145   if (src_off < 0 || dest_off < 0)
5146     // At present, we can only understand constants.
5147     return false;
5148 
5149   if (((src_off | dest_off) & (BytesPerLong-1)) != 0) {
5150     // Non-aligned; too bad.
5151     // One more chance:  Pick off an initial 32-bit word.
5152     // This is a common case, since abase can be odd mod 8.
5153     if (((src_off | dest_off) & (BytesPerLong-1)) == BytesPerInt &&
5154         ((src_off ^ dest_off) & (BytesPerLong-1)) == 0) {
5155       Node* sptr = basic_plus_adr(src,  src_off);
5156       Node* dptr = basic_plus_adr(dest, dest_off);
5157       Node* sval = make_load(control(), sptr, TypeInt::INT, T_INT, adr_type);
5158       store_to_memory(control(), dptr, sval, T_INT, adr_type);
5159       src_off += BytesPerInt;
5160       dest_off += BytesPerInt;
5161     } else {
5162       return false;
5163     }
5164   }
5165   assert(src_off % BytesPerLong == 0, "");
5166   assert(dest_off % BytesPerLong == 0, "");
5167 
5168   // Do this copy by giant steps.
5169   Node* sptr  = basic_plus_adr(src,  src_off);
5170   Node* dptr  = basic_plus_adr(dest, dest_off);
5171   Node* countx = dest_size;
5172   countx = _gvn.transform( new (C, 3) SubXNode(countx, MakeConX(dest_off)) );
5173   countx = _gvn.transform( new (C, 3) URShiftXNode(countx, intcon(LogBytesPerLong)) );
5174 
5175   bool disjoint_bases = true;   // since alloc != NULL
5176   generate_unchecked_arraycopy(adr_type, T_LONG, disjoint_bases,
5177                                sptr, NULL, dptr, NULL, countx);
5178 
5179   return true;
5180 }
5181 
5182 
5183 // Helper function; generates code for the slow case.
5184 // We make a call to a runtime method which emulates the native method,
5185 // but without the native wrapper overhead.
5186 void
5187 LibraryCallKit::generate_slow_arraycopy(const TypePtr* adr_type,
5188                                         Node* src,  Node* src_offset,
5189                                         Node* dest, Node* dest_offset,
5190                                         Node* copy_length) {
5191   Node* call = make_runtime_call(RC_NO_LEAF | RC_UNCOMMON,
5192                                  OptoRuntime::slow_arraycopy_Type(),
5193                                  OptoRuntime::slow_arraycopy_Java(),
5194                                  "slow_arraycopy", adr_type,
5195                                  src, src_offset, dest, dest_offset,
5196                                  copy_length);
5197 
5198   // Handle exceptions thrown by this fellow:
5199   make_slow_call_ex(call, env()->Throwable_klass(), false);
5200 }
5201 
5202 // Helper function; generates code for cases requiring runtime checks.
5203 Node*
5204 LibraryCallKit::generate_checkcast_arraycopy(const TypePtr* adr_type,
5205                                              Node* dest_elem_klass,
5206                                              Node* src,  Node* src_offset,
5207                                              Node* dest, Node* dest_offset,
5208                                              Node* copy_length) {
5209   if (stopped())  return NULL;
5210 
5211   address copyfunc_addr = StubRoutines::checkcast_arraycopy();
5212   if (copyfunc_addr == NULL) { // Stub was not generated, go slow path.
5213     return NULL;
5214   }
5215 
5216   // Pick out the parameters required to perform a store-check
5217   // for the target array.  This is an optimistic check.  It will
5218   // look in each non-null element's class, at the desired klass's
5219   // super_check_offset, for the desired klass.
5220   int sco_offset = Klass::super_check_offset_offset_in_bytes() + sizeof(oopDesc);
5221   Node* p3 = basic_plus_adr(dest_elem_klass, sco_offset);
5222   Node* n3 = new(C, 3) LoadINode(NULL, immutable_memory(), p3, TypeRawPtr::BOTTOM);
5223   Node* check_offset = _gvn.transform(n3);
5224   Node* check_value  = dest_elem_klass;
5225 
5226   Node* src_start  = array_element_address(src,  src_offset,  T_OBJECT);
5227   Node* dest_start = array_element_address(dest, dest_offset, T_OBJECT);
5228 
5229   // (We know the arrays are never conjoint, because their types differ.)
5230   Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
5231                                  OptoRuntime::checkcast_arraycopy_Type(),
5232                                  copyfunc_addr, "checkcast_arraycopy", adr_type,
5233                                  // five arguments, of which two are
5234                                  // intptr_t (jlong in LP64)
5235                                  src_start, dest_start,
5236                                  copy_length XTOP,
5237                                  check_offset XTOP,
5238                                  check_value);
5239 
5240   return _gvn.transform(new (C, 1) ProjNode(call, TypeFunc::Parms));
5241 }
5242 
5243 
5244 // Helper function; generates code for cases requiring runtime checks.
5245 Node*
5246 LibraryCallKit::generate_generic_arraycopy(const TypePtr* adr_type,
5247                                            Node* src,  Node* src_offset,
5248                                            Node* dest, Node* dest_offset,
5249                                            Node* copy_length) {
5250   if (stopped())  return NULL;
5251 
5252   address copyfunc_addr = StubRoutines::generic_arraycopy();
5253   if (copyfunc_addr == NULL) { // Stub was not generated, go slow path.
5254     return NULL;
5255   }
5256 
5257   Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
5258                     OptoRuntime::generic_arraycopy_Type(),
5259                     copyfunc_addr, "generic_arraycopy", adr_type,
5260                     src, src_offset, dest, dest_offset, copy_length);
5261 
5262   return _gvn.transform(new (C, 1) ProjNode(call, TypeFunc::Parms));
5263 }
5264 
5265 // Helper function; generates the fast out-of-line call to an arraycopy stub.
5266 void
5267 LibraryCallKit::generate_unchecked_arraycopy(const TypePtr* adr_type,
5268                                              BasicType basic_elem_type,
5269                                              bool disjoint_bases,
5270                                              Node* src,  Node* src_offset,
5271                                              Node* dest, Node* dest_offset,
5272                                              Node* copy_length) {
5273   if (stopped())  return;               // nothing to do
5274 
5275   Node* src_start  = src;
5276   Node* dest_start = dest;
5277   if (src_offset != NULL || dest_offset != NULL) {
5278     assert(src_offset != NULL && dest_offset != NULL, "");
5279     src_start  = array_element_address(src,  src_offset,  basic_elem_type);
5280     dest_start = array_element_address(dest, dest_offset, basic_elem_type);
5281   }
5282 
5283   // Figure out which arraycopy runtime method to call.
5284   const char* copyfunc_name = "arraycopy";
5285   address     copyfunc_addr =
5286       basictype2arraycopy(basic_elem_type, src_offset, dest_offset,
5287                           disjoint_bases, copyfunc_name);
5288 
5289   // Call it.  Note that the count_ix value is not scaled to a byte-size.
5290   make_runtime_call(RC_LEAF|RC_NO_FP,
5291                     OptoRuntime::fast_arraycopy_Type(),
5292                     copyfunc_addr, copyfunc_name, adr_type,
5293                     src_start, dest_start, copy_length XTOP);
5294 }