
src/share/vm/opto/library_call.cpp

  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"
  27 #include "classfile/systemDictionary.hpp"
  28 #include "classfile/vmSymbols.hpp"
  29 #include "compiler/compileBroker.hpp"
  30 #include "compiler/compileLog.hpp"

  31 #include "oops/objArrayKlass.hpp"
  32 #include "opto/addnode.hpp"
  33 #include "opto/arraycopynode.hpp"
  34 #include "opto/c2compiler.hpp"
  35 #include "opto/callGenerator.hpp"
  36 #include "opto/castnode.hpp"
  37 #include "opto/cfgnode.hpp"
  38 #include "opto/convertnode.hpp"
  39 #include "opto/countbitsnode.hpp"
  40 #include "opto/intrinsicnode.hpp"
  41 #include "opto/idealKit.hpp"
  42 #include "opto/mathexactnode.hpp"
  43 #include "opto/movenode.hpp"
  44 #include "opto/mulnode.hpp"
  45 #include "opto/narrowptrnode.hpp"
  46 #include "opto/opaquenode.hpp"
  47 #include "opto/parse.hpp"
  48 #include "opto/runtime.hpp"

  49 #include "opto/subnode.hpp"
  50 #include "prims/nativeLookup.hpp"
  51 #include "runtime/sharedRuntime.hpp"
  52 #include "trace/traceMacros.hpp"
  53 
  54 class LibraryIntrinsic : public InlineCallGenerator {
  55   // Extend the set of intrinsics known to the runtime:
  56  public:
  57  private:
  58   bool             _is_virtual;
  59   bool             _does_virtual_dispatch;
  60   int8_t           _predicates_count;  // Intrinsic is predicated by several conditions
  61   int8_t           _last_predicate; // Last generated predicate
  62   vmIntrinsics::ID _intrinsic_id;
  63 
  64  public:
  65   LibraryIntrinsic(ciMethod* m, bool is_virtual, int predicates_count, bool does_virtual_dispatch, vmIntrinsics::ID id)
  66     : InlineCallGenerator(m),
  67       _is_virtual(is_virtual),
  68       _does_virtual_dispatch(does_virtual_dispatch),


 955   C->set_has_split_ifs(true); // Has chance for split-if optimization
 956 
 957   return _gvn.transform(result);
 958 }
 959 
 960 //------------------------------inline_string_compareTo------------------------
 961 // public int java.lang.String.compareTo(String anotherString);
 962 bool LibraryCallKit::inline_string_compareTo() {
 963   Node* receiver = null_check(argument(0));
 964   Node* arg      = null_check(argument(1));
 965   if (stopped()) {
 966     return true;
 967   }
 968   set_result(make_string_method_node(Op_StrComp, receiver, arg));
 969   return true;
 970 }
 971 
 972 //------------------------------inline_string_equals------------------------
 973 bool LibraryCallKit::inline_string_equals() {
 974   Node* receiver = null_check_receiver();

 975   // NOTE: Do not null check the argument for String.equals() because the
 976   // spec allows NULL to be passed as the argument.
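       // (For example, "abc".equals(null) simply returns false under the
       // String.equals contract rather than throwing, so a NULL argument must
       // be allowed to flow through to the checks below.)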
 977   Node* argument = this->argument(1);

 978   if (stopped()) {
 979     return true;
 980   }
 981 
 982   // paths (plus control) merge
 983   RegionNode* region = new RegionNode(5);
 984   Node* phi = new PhiNode(region, TypeInt::BOOL);
 985 
 986   // does source == target string?
 987   Node* cmp = _gvn.transform(new CmpPNode(receiver, argument));
 988   Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::eq));
 989 
 990   Node* if_eq = generate_slow_guard(bol, NULL);
 991   if (if_eq != NULL) {
 992     // receiver == argument
 993     phi->init_req(2, intcon(1));
 994     region->init_req(2, if_eq);
 995   }
 996 
 997   // get String klass for instanceOf


 1006     // instanceOf == true, fall through
1007 
1008     if (inst_false != NULL) {
1009       phi->init_req(3, intcon(0));
1010       region->init_req(3, inst_false);
1011     }
1012   }
1013 
1014   if (!stopped()) {
1015     const TypeOopPtr* string_type = TypeOopPtr::make_from_klass(klass);
1016 
1017     // Properly cast the argument to String
1018     argument = _gvn.transform(new CheckCastPPNode(control(), argument, string_type));
1019     // This path is taken only when argument's type is String:NotNull.
1020     argument = cast_not_null(argument, false);
1021 
1022     Node* no_ctrl = NULL;
1023 
1024     // Get start addr of receiver
1025     Node* receiver_val    = load_String_value(no_ctrl, receiver);

1026     Node* receiver_offset = load_String_offset(no_ctrl, receiver);
1027     Node* receiver_start = array_element_address(receiver_val, receiver_offset, T_CHAR);
1028 
1029     // Get length of receiver
1030     Node* receiver_cnt  = load_String_length(no_ctrl, receiver);
1031 
1032     // Get start addr of argument
1033     Node* argument_val    = load_String_value(no_ctrl, argument);

1034     Node* argument_offset = load_String_offset(no_ctrl, argument);
1035     Node* argument_start = array_element_address(argument_val, argument_offset, T_CHAR);
1036 
1037     // Get length of argument
1038     Node* argument_cnt  = load_String_length(no_ctrl, argument);
1039 
1040     // Check for receiver count != argument count
1041     Node* cmp = _gvn.transform(new CmpINode(receiver_cnt, argument_cnt));
1042     Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
1043     Node* if_ne = generate_slow_guard(bol, NULL);
1044     if (if_ne != NULL) {
1045       phi->init_req(4, intcon(0));
1046       region->init_req(4, if_ne);
1047     }
1048 
 1049     // The check for count == 0 is done by the StrEquals assembler code.
1050 
1051     if (!stopped()) {
1052       Node* equals = make_string_method_node(Op_StrEquals, receiver_start, receiver_cnt, argument_start, argument_cnt);
1053       phi->init_req(1, equals);
1054       region->init_req(1, control());
1055     }
1056   }
1057 
1058   // post merge
1059   set_control(_gvn.transform(region));
1060   record_for_igvn(region);
1061 
1062   set_result(_gvn.transform(phi));
1063   return true;
1064 }
1065 
1066 //------------------------------inline_array_equals----------------------------
1067 bool LibraryCallKit::inline_array_equals() {
1068   Node* arg1 = argument(0);
1069   Node* arg2 = argument(1);

1070   set_result(_gvn.transform(new AryEqNode(control(), memory(TypeAryPtr::CHARS), arg1, arg2)));
1071   return true;
1072 }
1073 
1074 // Java version of String.indexOf(constant string)
1075 // class StringDecl {
1076 //   StringDecl(char[] ca) {
1077 //     offset = 0;
1078 //     count = ca.length;
1079 //     value = ca;
1080 //   }
1081 //   int offset;
1082 //   int count;
1083 //   char[] value;
1084 // }
1085 //
1086 // static int string_indexOf_J(StringDecl string_object, char[] target_object,
1087 //                             int targetOffset, int cache_i, int md2) {
1088 //   int cache = cache_i;
1089 //   int sourceOffset = string_object.offset;


2135   case vmIntrinsics::_reverseBytes_l:           n = new ReverseBytesLNode( 0,   arg);  break;
2136   default:  fatal_unexpected_iid(id);  break;
2137   }
2138   set_result(_gvn.transform(n));
2139   return true;
2140 }
2141 
2142 //----------------------------inline_unsafe_access----------------------------
2143 
2144 const static BasicType T_ADDRESS_HOLDER = T_LONG;
2145 
2146 // Helper that guards and inserts a pre-barrier.
2147 void LibraryCallKit::insert_pre_barrier(Node* base_oop, Node* offset,
2148                                         Node* pre_val, bool need_mem_bar) {
2149   // We could be accessing the referent field of a reference object. If so, when G1
2150   // is enabled, we need to log the value in the referent field in an SATB buffer.
2151   // This routine performs some compile time filters and generates suitable
2152   // runtime filters that guard the pre-barrier code.
 2153   // Also add a memory barrier for non-volatile loads from the referent field
 2154   // to prevent commoning of loads across a safepoint.
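       // (The canonical case is java.lang.ref.Reference.get(): under G1's SATB
       // concurrent marking, a referent loaded without being logged could be
       // missed by the marker and reclaimed while still reachable from the caller.)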
2155   if (!UseG1GC && !need_mem_bar)
2156     return;
2157 
2158   // Some compile time checks.
2159 
2160   // If offset is a constant, is it java_lang_ref_Reference::_reference_offset?
2161   const TypeX* otype = offset->find_intptr_t_type();
2162   if (otype != NULL && otype->is_con() &&
2163       otype->get_con() != java_lang_ref_Reference::referent_offset) {
2164     // Constant offset but not the reference_offset so just return
2165     return;
2166   }
2167 
2168   // We only need to generate the runtime guards for instances.
2169   const TypeOopPtr* btype = base_oop->bottom_type()->isa_oopptr();
2170   if (btype != NULL) {
2171     if (btype->isa_aryptr()) {
2172       // Array type so nothing to do
2173       return;
2174     }
2175 


2324         vtype = T_ADDRESS;  // it is really a C void*
2325       assert(vtype == type, "putter must accept the expected value");
2326     }
2327 #endif // ASSERT
2328  }
2329 #endif //PRODUCT
2330 
2331   C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
2332 
2333   Node* receiver = argument(0);  // type: oop
2334 
2335   // Build address expression.
2336   Node* adr;
2337   Node* heap_base_oop = top();
2338   Node* offset = top();
2339   Node* val;
2340 
2341   if (!is_native_ptr) {
2342     // The base is either a Java object or a value produced by Unsafe.staticFieldBase
2343     Node* base = argument(1);  // type: oop

2344     // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
2345     offset = argument(2);  // type: long
2346     // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2347     // to be plain byte offsets, which are also the same as those accepted
2348     // by oopDesc::field_base.
2349     assert(Unsafe_field_offset_to_byte_offset(11) == 11,
2350            "fieldOffset must be byte-scaled");
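       // (I.e. the long returned by Unsafe.objectFieldOffset()/staticFieldOffset()
       // is assumed to already be a raw byte offset that can be added to the base
       // oop directly, with no further scaling.)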
2351     // 32-bit machines ignore the high half!
2352     offset = ConvL2X(offset);
2353     adr = make_unsafe_address(base, offset);
2354     heap_base_oop = base;
2355     val = is_store ? argument(4) : NULL;
2356   } else {
2357     Node* ptr = argument(1);  // type: long
2358     ptr = ConvL2X(ptr);  // adjust Java long to machine word
2359     adr = make_unsafe_address(NULL, ptr);
2360     val = is_store ? argument(3) : NULL;
2361   }
2362 
2363   const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();


2475     // the end of this method.  So, pushing the load onto the stack at a later
2476     // point is fine.
2477     set_result(p);
2478   } else {
2479     // place effect of store into memory
2480     switch (type) {
2481     case T_DOUBLE:
2482       val = dstore_rounding(val);
2483       break;
2484     case T_ADDRESS:
2485       // Repackage the long as a pointer.
2486       val = ConvL2X(val);
2487       val = _gvn.transform(new CastX2PNode(val));
2488       break;
2489     }
2490 
2491     MemNode::MemOrd mo = is_volatile ? MemNode::release : MemNode::unordered;
2492     if (type != T_OBJECT ) {
2493       (void) store_to_memory(control(), adr, val, type, adr_type, mo, is_volatile);
2494     } else {

2495       // Possibly an oop being stored to Java heap or native memory
2496       if (!TypePtr::NULL_PTR->higher_equal(_gvn.type(heap_base_oop))) {
2497         // oop to Java heap.
2498         (void) store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type, mo);
2499       } else {
2500         // We can't tell at compile time if we are storing in the Java heap or outside
2501         // of it. So we need to emit code to conditionally do the proper type of
2502         // store.
2503 
2504         IdealKit ideal(this);
2505 #define __ ideal.
2506         // QQQ who knows what probability is here??
2507         __ if_then(heap_base_oop, BoolTest::ne, null(), PROB_UNLIKELY(0.999)); {
2508           // Sync IdealKit and graphKit.
2509           sync_kit(ideal);
2510           Node* st = store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type, mo);
2511           // Update IdealKit memory.
2512           __ sync_kit(this);
2513         } __ else_(); {
2514           __ store(__ ctrl(), adr, val, type, alias_type->index(), mo, is_volatile);


2604     const bool two_slot_type = type2size[type] == 2;
2605     receiver = argument(0);  // type: oop
2606     base     = argument(1);  // type: oop
2607     offset   = argument(2);  // type: long
2608     oldval   = argument(4);  // type: oop, int, or long
2609     newval   = argument(two_slot_type ? 6 : 5);  // type: oop, int, or long
2610   } else if (kind == LS_xadd || kind == LS_xchg){
2611     receiver = argument(0);  // type: oop
2612     base     = argument(1);  // type: oop
2613     offset   = argument(2);  // type: long
2614     oldval   = NULL;
2615     newval   = argument(4);  // type: oop, int, or long
2616   }
2617 
2618   // Null check receiver.
2619   receiver = null_check(receiver);
2620   if (stopped()) {
2621     return true;
2622   }
2623 


2624   // Build field offset expression.
2625   // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2626   // to be plain byte offsets, which are also the same as those accepted
2627   // by oopDesc::field_base.
2628   assert(Unsafe_field_offset_to_byte_offset(11) == 11, "fieldOffset must be byte-scaled");
2629   // 32-bit machines ignore the high half of long offsets
2630   offset = ConvL2X(offset);
2631   Node* adr = make_unsafe_address(base, offset);
2632   const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
2633 
2634   // For CAS, unlike inline_unsafe_access, there seems no point in
2635   // trying to refine types. Just use the coarse types here.
2636   const Type *value_type = Type::get_const_basic_type(type);
2637   Compile::AliasType* alias_type = C->alias_type(adr_type);
2638   assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2639 
2640   if (kind == LS_xchg && type == T_OBJECT) {
2641     const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
2642     if (tjp != NULL) {
2643       value_type = tjp;


2645   }
2646 
2647   int alias_idx = C->get_alias_index(adr_type);
2648 
2649   // Memory-model-wise, a LoadStore acts like a little synchronized
2650   // block, so needs barriers on each side.  These don't translate
2651   // into actual barriers on most machines, but we still need rest of
2652   // compiler to respect ordering.
2653 
2654   insert_mem_bar(Op_MemBarRelease);
2655   insert_mem_bar(Op_MemBarCPUOrder);
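       // (Together with the trailing barriers emitted near the end of this
       // function, the overall shape is:
       //   MemBarRelease; MemBarCPUOrder; <LoadStore>; MemBarCPUOrder; MemBarAcquire.)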
2656 
2657   // 4984716: MemBars must be inserted before this
2658   //          memory node in order to avoid a false
2659   //          dependency which will confuse the scheduler.
2660   Node *mem = memory(alias_idx);
2661 
2662   // For now, we handle only those cases that actually exist: ints,
2663   // longs, and Object. Adding others should be straightforward.
2664   Node* load_store;

2665   switch(type) {
2666   case T_INT:
2667     if (kind == LS_xadd) {
2668       load_store = _gvn.transform(new GetAndAddINode(control(), mem, adr, newval, adr_type));
2669     } else if (kind == LS_xchg) {
2670       load_store = _gvn.transform(new GetAndSetINode(control(), mem, adr, newval, adr_type));
2671     } else if (kind == LS_cmpxchg) {
2672       load_store = _gvn.transform(new CompareAndSwapINode(control(), mem, adr, newval, oldval));
2673     } else {
2674       ShouldNotReachHere();
2675     }

2676     break;
2677   case T_LONG:
2678     if (kind == LS_xadd) {
2679       load_store = _gvn.transform(new GetAndAddLNode(control(), mem, adr, newval, adr_type));
2680     } else if (kind == LS_xchg) {
2681       load_store = _gvn.transform(new GetAndSetLNode(control(), mem, adr, newval, adr_type));
2682     } else if (kind == LS_cmpxchg) {
2683       load_store = _gvn.transform(new CompareAndSwapLNode(control(), mem, adr, newval, oldval));
2684     } else {
2685       ShouldNotReachHere();
2686     }

2687     break;
2688   case T_OBJECT:
 2689     // Transformation of a value which could be a NULL pointer (CastPP #NULL)
 2690     // could be delayed during Parse (for example, in adjust_map_after_if()).
 2691     // Execute the transformation here to avoid barrier generation in such a case.
2692     if (_gvn.type(newval) == TypePtr::NULL_PTR)
2693       newval = _gvn.makecon(TypePtr::NULL_PTR);
2694 


2695     // Reference stores need a store barrier.
2696     if (kind == LS_xchg) {
2697       // If pre-barrier must execute before the oop store, old value will require do_load here.
2698       if (!can_move_pre_barrier()) {
2699         pre_barrier(true /* do_load*/,
2700                     control(), base, adr, alias_idx, newval, value_type->make_oopptr(),
2701                     NULL /* pre_val*/,
2702                     T_OBJECT);
2703       } // Else move pre_barrier to use load_store value, see below.
2704     } else if (kind == LS_cmpxchg) {
2705       // Same as for newval above:
2706       if (_gvn.type(oldval) == TypePtr::NULL_PTR) {
2707         oldval = _gvn.makecon(TypePtr::NULL_PTR);
2708       }
2709       // The only known value which might get overwritten is oldval.
2710       pre_barrier(false /* do_load */,
2711                   control(), NULL, NULL, max_juint, NULL, NULL,
2712                   oldval /* pre_val */,
2713                   T_OBJECT);
2714     } else {
2715       ShouldNotReachHere();
2716     }
2717 
2718 #ifdef _LP64
2719     if (adr->bottom_type()->is_ptr_to_narrowoop()) {
2720       Node *newval_enc = _gvn.transform(new EncodePNode(newval, newval->bottom_type()->make_narrowoop()));
2721       if (kind == LS_xchg) {
2722         load_store = _gvn.transform(new GetAndSetNNode(control(), mem, adr,
2723                                                        newval_enc, adr_type, value_type->make_narrowoop()));
2724       } else {
2725         assert(kind == LS_cmpxchg, "wrong LoadStore operation");
2726         Node *oldval_enc = _gvn.transform(new EncodePNode(oldval, oldval->bottom_type()->make_narrowoop()));
2727         load_store = _gvn.transform(new CompareAndSwapNNode(control(), mem, adr,
2728                                                                 newval_enc, oldval_enc));
2729       }

2730     } else
2731 #endif
2732     {
2733       if (kind == LS_xchg) {
2734         load_store = _gvn.transform(new GetAndSetPNode(control(), mem, adr, newval, adr_type, value_type->is_oopptr()));

2735       } else {
2736         assert(kind == LS_cmpxchg, "wrong LoadStore operation");
2737         load_store = _gvn.transform(new CompareAndSwapPNode(control(), mem, adr, newval, oldval));

2738       }
2739     }
2740     if (kind == LS_cmpxchg) {
 2741       // Emit the post barrier only when the actual store happened.
 2742       // This check makes sense only for compareAndSet, which can fail to set the value.
 2743       // The CAS success path is marked more likely since we anticipate it is the
 2744       // performance-critical path, while the CAS failure path can absorb the penalty of
 2745       // taking the unlikely path as backoff, which is still cheaper than a store barrier there.
2746       IdealKit ideal(this);
2747       ideal.if_then(load_store, BoolTest::ne, ideal.ConI(0), PROB_STATIC_FREQUENT); {
2748         sync_kit(ideal);
2749         post_barrier(ideal.ctrl(), load_store, base, adr, alias_idx, newval, T_OBJECT, true);
2750         ideal.sync_kit(this);
2751       } ideal.end_if();
2752       final_sync(ideal);
2753     } else {
2754       post_barrier(control(), load_store, base, adr, alias_idx, newval, T_OBJECT, true);
2755     }
2756     break;
2757   default:
2758     fatal(err_msg_res("unexpected type %d: %s", type, type2name(type)));
2759     break;
2760   }
2761 
2762   // SCMemProjNodes represent the memory state of a LoadStore. Their
2763   // main role is to prevent LoadStore nodes from being optimized away
2764   // when their results aren't used.
2765   Node* proj = _gvn.transform(new SCMemProjNode(load_store));
2766   set_memory(proj, alias_idx);
2767 
2768   if (type == T_OBJECT && kind == LS_xchg) {
2769 #ifdef _LP64
2770     if (adr->bottom_type()->is_ptr_to_narrowoop()) {
2771       load_store = _gvn.transform(new DecodeNNode(load_store, load_store->get_ptr_type()));
2772     }
2773 #endif
2774     if (can_move_pre_barrier()) {
2775       // Don't need to load pre_val. The old value is returned by load_store.
2776       // The pre_barrier can execute after the xchg as long as no safepoint
2777       // gets inserted between them.
2778       pre_barrier(false /* do_load */,
2779                   control(), NULL, NULL, max_juint, NULL, NULL,
2780                   load_store /* pre_val */,
2781                   T_OBJECT);
2782     }
2783   }
2784 
2785   // Add the trailing membar surrounding the access
2786   insert_mem_bar(Op_MemBarCPUOrder);
2787   insert_mem_bar(Op_MemBarAcquire);
2788 
2789   assert(type2size[load_store->bottom_type()->basic_type()] == type2size[rtype], "result type should match");
2790   set_result(load_store);
2791   return true;
2792 }
2793 
2794 //----------------------------inline_unsafe_ordered_store----------------------
2795 // public native void sun.misc.Unsafe.putOrderedObject(Object o, long offset, Object x);
2796 // public native void sun.misc.Unsafe.putOrderedInt(Object o, long offset, int x);
2797 // public native void sun.misc.Unsafe.putOrderedLong(Object o, long offset, long x);
2798 bool LibraryCallKit::inline_unsafe_ordered_store(BasicType type) {
 2799   // This is another variant of inline_unsafe_access, differing in
 2800   // that it always issues a store-store ("release") barrier and ensures
 2801   // store-atomicity (which only matters for "long").
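   // (Typical callers are the java.util.concurrent atomics: e.g.
   // AtomicInteger.lazySet(int) delegates to Unsafe.putOrderedInt and relies on
   // exactly this release-without-subsequent-acquire fencing.)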
2802 
2803   if (callee()->is_static())  return false;  // caller must have the capability!
2804 
2805 #ifndef PRODUCT
2806   {
2807     ResourceMark rm;
2808     // Check the signatures.
2809     ciSignature* sig = callee()->signature();
2810 #ifdef ASSERT


2814     assert(sig->type_at(0)->basic_type() == T_OBJECT, "base is object");
2815     assert(sig->type_at(1)->basic_type() == T_LONG, "offset is long");
2816 #endif // ASSERT
2817   }
2818 #endif //PRODUCT
2819 
2820   C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
2821 
2822   // Get arguments:
2823   Node* receiver = argument(0);  // type: oop
2824   Node* base     = argument(1);  // type: oop
2825   Node* offset   = argument(2);  // type: long
2826   Node* val      = argument(4);  // type: oop, int, or long
2827 
2828   // Null check receiver.
2829   receiver = null_check(receiver);
2830   if (stopped()) {
2831     return true;
2832   }
2833 


2834   // Build field offset expression.
2835   assert(Unsafe_field_offset_to_byte_offset(11) == 11, "fieldOffset must be byte-scaled");
2836   // 32-bit machines ignore the high half of long offsets
2837   offset = ConvL2X(offset);
2838   Node* adr = make_unsafe_address(base, offset);
2839   const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
2840   const Type *value_type = Type::get_const_basic_type(type);
2841   Compile::AliasType* alias_type = C->alias_type(adr_type);
2842 
2843   insert_mem_bar(Op_MemBarRelease);
2844   insert_mem_bar(Op_MemBarCPUOrder);
2845   // Ensure that the store is atomic for longs:
2846   const bool require_atomic_access = true;
2847   Node* store;
2848   if (type == T_OBJECT) // reference stores need a store barrier.

2849     store = store_oop_to_unknown(control(), base, adr, adr_type, val, type, MemNode::release);

2850   else {
2851     store = store_to_memory(control(), adr, val, type, adr_type, MemNode::release, require_atomic_access);
2852   }
2853   insert_mem_bar(Op_MemBarCPUOrder);
2854   return true;
2855 }
2856 
2857 bool LibraryCallKit::inline_unsafe_fence(vmIntrinsics::ID id) {
2858   // Regardless of form, don't allow previous ld/st to move down,
2859   // then issue acquire, release, or volatile mem_bar.
2860   insert_mem_bar(Op_MemBarCPUOrder);
2861   switch(id) {
2862     case vmIntrinsics::_loadFence:
2863       insert_mem_bar(Op_LoadFence);
2864       return true;
2865     case vmIntrinsics::_storeFence:
2866       insert_mem_bar(Op_StoreFence);
2867       return true;
2868     case vmIntrinsics::_fullFence:
2869       insert_mem_bar(Op_MemBarVolatile);
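       // (In JMM terms: loadFence ~ LoadLoad|LoadStore, storeFence ~
       // StoreStore|LoadStore, and fullFence orders all four combinations,
       // like a volatile store immediately followed by a volatile load.)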


3151   Node* bits = intcon(modifier_bits);
3152   Node* mbit = _gvn.transform(new AndINode(mods, mask));
3153   Node* cmp  = _gvn.transform(new CmpINode(mbit, bits));
3154   Node* bol  = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
3155   return generate_fair_guard(bol, region);
3156 }
3157 Node* LibraryCallKit::generate_interface_guard(Node* kls, RegionNode* region) {
3158   return generate_access_flags_guard(kls, JVM_ACC_INTERFACE, 0, region);
3159 }
3160 
3161 //-------------------------inline_native_Class_query-------------------
3162 bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) {
3163   const Type* return_type = TypeInt::BOOL;
3164   Node* prim_return_value = top();  // what happens if it's a primitive class?
3165   bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
3166   bool expect_prim = false;     // most of these guys expect to work on refs
3167 
3168   enum { _normal_path = 1, _prim_path = 2, PATH_LIMIT };
3169 
3170   Node* mirror = argument(0);

3171   Node* obj    = top();
3172 
3173   switch (id) {
3174   case vmIntrinsics::_isInstance:
3175     // nothing is an instance of a primitive type
3176     prim_return_value = intcon(0);
3177     obj = argument(1);



3178     break;
3179   case vmIntrinsics::_getModifiers:
3180     prim_return_value = intcon(JVM_ACC_ABSTRACT | JVM_ACC_FINAL | JVM_ACC_PUBLIC);
3181     assert(is_power_of_2((int)JVM_ACC_WRITTEN_FLAGS+1), "change next line");
3182     return_type = TypeInt::make(0, JVM_ACC_WRITTEN_FLAGS, Type::WidenMin);
3183     break;
3184   case vmIntrinsics::_isInterface:
3185     prim_return_value = intcon(0);
3186     break;
3187   case vmIntrinsics::_isArray:
3188     prim_return_value = intcon(0);
3189     expect_prim = true;  // cf. ObjectStreamClass.getClassSignature
3190     break;
3191   case vmIntrinsics::_isPrimitive:
3192     prim_return_value = intcon(1);
3193     expect_prim = true;  // obviously
3194     break;
3195   case vmIntrinsics::_getSuperclass:
3196     prim_return_value = null();
3197     return_type = TypeInstPtr::MIRROR->cast_to_ptr_type(TypePtr::BotPTR);


3405     PreserveJVMState pjvms(this);
3406     set_control(_gvn.transform(region));
3407     uncommon_trap(Deoptimization::Reason_intrinsic,
3408                   Deoptimization::Action_maybe_recompile);
3409   }
3410   if (!stopped()) {
3411     set_result(res);
3412   }
3413   return true;
3414 }
3415 
3416 
3417 //--------------------------inline_native_subtype_check------------------------
3418 // This intrinsic takes the JNI calls out of the heart of
3419 // UnsafeFieldAccessorImpl.set, which improves Field.set, readObject, etc.
3420 bool LibraryCallKit::inline_native_subtype_check() {
3421   // Pull both arguments off the stack.
3422   Node* args[2];                // two java.lang.Class mirrors: superc, subc
3423   args[0] = argument(0);
3424   args[1] = argument(1);

3425   Node* klasses[2];             // corresponding Klasses: superk, subk
3426   klasses[0] = klasses[1] = top();
3427 
3428   enum {
3429     // A full decision tree on {superc is prim, subc is prim}:
3430     _prim_0_path = 1,           // {P,N} => false
3431                                 // {P,P} & superc!=subc => false
3432     _prim_same_path,            // {P,P} & superc==subc => true
3433     _prim_1_path,               // {N,P} => false
3434     _ref_subtype_path,          // {N,N} & subtype check wins => true
3435     _both_ref_path,             // {N,N} & subtype check loses => false
3436     PATH_LIMIT
3437   };
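   // (E.g. int.class.isAssignableFrom(int.class) takes _prim_same_path => true,
   // while Number.class.isAssignableFrom(Integer.class) ends up taking
   // _ref_subtype_path => true; all other shapes produce false.)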
3438 
3439   RegionNode* region = new RegionNode(PATH_LIMIT);
3440   Node*       phi    = new PhiNode(region, TypeInt::BOOL);
3441   record_for_igvn(region);
3442 
3443   const TypePtr* adr_type = TypeRawPtr::BOTTOM;   // memory type of loads
3444   const TypeKlassPtr* kls_type = TypeKlassPtr::OBJECT_OR_NULL;


3467     region->init_req(prim_path, null_ctl);
3468     if (stopped())  break;
3469     klasses[which_arg] = kls;
3470   }
3471 
3472   if (!stopped()) {
3473     // now we have two reference types, in klasses[0..1]
3474     Node* subk   = klasses[1];  // the argument to isAssignableFrom
3475     Node* superk = klasses[0];  // the receiver
3476     region->set_req(_both_ref_path, gen_subtype_check(subk, superk));
3477     // now we have a successful reference subtype check
3478     region->set_req(_ref_subtype_path, control());
3479   }
3480 
3481   // If both operands are primitive (both klasses null), then
3482   // we must return true when they are identical primitives.
3483   // It is convenient to test this after the first null klass check.
3484   set_control(region->in(_prim_0_path)); // go back to first null check
3485   if (!stopped()) {
3486     // Since superc is primitive, make a guard for the superc==subc case.

3487     Node* cmp_eq = _gvn.transform(new CmpPNode(args[0], args[1]));
3488     Node* bol_eq = _gvn.transform(new BoolNode(cmp_eq, BoolTest::eq));
3489     generate_guard(bol_eq, region, PROB_FAIR);
3490     if (region->req() == PATH_LIMIT+1) {
3491       // A guard was added.  If the added guard is taken, superc==subc.
3492       region->swap_edges(PATH_LIMIT, _prim_same_path);
3493       region->del_req(PATH_LIMIT);
3494     }
3495     region->set_req(_prim_0_path, control()); // Not equal after all.
3496   }
3497 
3498   // these are the only paths that produce 'true':
3499   phi->set_req(_prim_same_path,   intcon(1));
3500   phi->set_req(_ref_subtype_path, intcon(1));
3501 
3502   // pull together the cases:
3503   assert(region->req() == PATH_LIMIT, "sane region");
3504   for (uint i = 1; i < region->req(); i++) {
3505     Node* ctl = region->in(i);
3506     if (ctl == NULL || ctl == top()) {


3711 
 3712     // Bail out if length is negative.
 3713     // Without this, new_array would throw a
 3714     // NegativeArraySizeException, but an IllegalArgumentException is what
 3715     // should be thrown.
3716     generate_negative_guard(length, bailout, &length);
3717 
3718     if (bailout->req() > 1) {
3719       PreserveJVMState pjvms(this);
3720       set_control(_gvn.transform(bailout));
3721       uncommon_trap(Deoptimization::Reason_intrinsic,
3722                     Deoptimization::Action_maybe_recompile);
3723     }
3724 
3725     if (!stopped()) {
3726       // How many elements will we copy from the original?
3727       // The answer is MinI(orig_length - start, length).
3728       Node* orig_tail = _gvn.transform(new SubINode(orig_length, start));
3729       Node* moved = generate_min_max(vmIntrinsics::_min, orig_tail, length);
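       // (Worked example: copying 5 elements starting at index 7 out of a
       // 10-element array moves min(10 - 7, 5) = 3 elements; the remaining 2
       // slots of the new array keep their default zero/null values.)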
3730 


3731       // Generate a direct call to the right arraycopy function(s).
3732       // We know the copy is disjoint but we might not know if the
3733       // oop stores need checking.
3734       // Extreme case:  Arrays.copyOf((Integer[])x, 10, String[].class).
3735       // This will fail a store-check if x contains any non-nulls.
3736 
3737       // ArrayCopyNode:Ideal may transform the ArrayCopyNode to
3738       // loads/stores but it is legal only if we're sure the
3739       // Arrays.copyOf would succeed. So we need all input arguments
3740       // to the copyOf to be validated, including that the copy to the
3741       // new array won't trigger an ArrayStoreException. That subtype
3742       // check can be optimized if we know something on the type of
3743       // the input array from type speculation.
3744       if (_gvn.type(klass_node)->singleton()) {
3745         ciKlass* subk   = _gvn.type(load_object_klass(original))->is_klassptr()->klass();
3746         ciKlass* superk = _gvn.type(klass_node)->is_klassptr()->klass();
3747 
3748         int test = C->static_subtype_check(superk, subk);
3749         if (test != Compile::SSC_always_true && test != Compile::SSC_always_false) {
3750           const TypeOopPtr* t_original = _gvn.type(original)->is_oopptr();


3892   PhiNode*    result_val = new PhiNode(result_reg, TypeInt::INT);
3893   PhiNode*    result_io  = new PhiNode(result_reg, Type::ABIO);
3894   PhiNode*    result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
3895   Node* obj = NULL;
3896   if (!is_static) {
3897     // Check for hashing null object
3898     obj = null_check_receiver();
3899     if (stopped())  return true;        // unconditionally null
3900     result_reg->init_req(_null_path, top());
3901     result_val->init_req(_null_path, top());
3902   } else {
3903     // Do a null check, and return zero if null.
3904     // System.identityHashCode(null) == 0
3905     obj = argument(0);
3906     Node* null_ctl = top();
3907     obj = null_check_oop(obj, &null_ctl);
3908     result_reg->init_req(_null_path, null_ctl);
3909     result_val->init_req(_null_path, _gvn.intcon(0));
3910   }
3911 

3912   // Unconditionally null?  Then return right away.
3913   if (stopped()) {
3914     set_control( result_reg->in(_null_path));
3915     if (!stopped())
3916       set_result(result_val->in(_null_path));
3917     return true;
3918   }
3919 
3920   // We only go to the fast case code if we pass a number of guards.  The
3921   // paths which do not pass are accumulated in the slow_region.
3922   RegionNode* slow_region = new RegionNode(1);
3923   record_for_igvn(slow_region);
3924 
3925   // If this is a virtual call, we generate a funny guard.  We pull out
3926   // the vtable entry corresponding to hashCode() from the target object.
3927   // If the target method which we are calling happens to be the native
3928   // Object hashCode() method, we pass the guard.  We do not need this
3929   // guard for non-virtual calls -- the caller is known to be the native
3930   // Object hashCode().
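   // (In other words, if a subclass overrides hashCode(), the vtable probe
   // fails the guard at runtime and the call falls into slow_region instead of
   // using the identity-hash fast path.)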
3931   if (is_virtual) {


4207 #endif //_LP64
4208 
4209 //----------------------inline_unsafe_copyMemory-------------------------
4210 // public native void sun.misc.Unsafe.copyMemory(Object srcBase, long srcOffset, Object destBase, long destOffset, long bytes);
4211 bool LibraryCallKit::inline_unsafe_copyMemory() {
4212   if (callee()->is_static())  return false;  // caller must have the capability!
4213   null_check_receiver();  // null-check receiver
4214   if (stopped())  return true;
4215 
4216   C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
4217 
4218   Node* src_ptr =         argument(1);   // type: oop
4219   Node* src_off = ConvL2X(argument(2));  // type: long
4220   Node* dst_ptr =         argument(4);   // type: oop
4221   Node* dst_off = ConvL2X(argument(5));  // type: long
4222   Node* size    = ConvL2X(argument(7));  // type: long
4223 
4224   assert(Unsafe_field_offset_to_byte_offset(11) == 11,
4225          "fieldOffset must be byte-scaled");
4226 



4227   Node* src = make_unsafe_address(src_ptr, src_off);
4228   Node* dst = make_unsafe_address(dst_ptr, dst_off);
4229 
4230   // Conservatively insert a memory barrier on all memory slices.
4231   // Do not let writes of the copy source or destination float below the copy.
4232   insert_mem_bar(Op_MemBarCPUOrder);
4233 
4234   // Call it.  Note that the length argument is not scaled.
4235   make_runtime_call(RC_LEAF|RC_NO_FP,
4236                     OptoRuntime::fast_arraycopy_Type(),
4237                     StubRoutines::unsafe_arraycopy(),
4238                     "unsafe_arraycopy",
4239                     TypeRawPtr::BOTTOM,
4240                     src, dst, size XTOP);
4241 
4242   // Do not let reads of the copy destination float above the copy.
4243   insert_mem_bar(Op_MemBarCPUOrder);
4244 
4245   return true;
4246 }
4247 
 4248 //------------------------copy_to_clone-----------------------------------
4249 // Helper function for inline_native_clone.
4250 void LibraryCallKit::copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array, bool card_mark) {
4251   assert(obj_size != NULL, "");
4252   Node* raw_obj = alloc_obj->in(1);
4253   assert(alloc_obj->is_CheckCastPP() && raw_obj->is_Proj() && raw_obj->in(0)->is_Allocate(), "");
4254 


4255   AllocateNode* alloc = NULL;
4256   if (ReduceBulkZeroing) {
4257     // We will be completely responsible for initializing this object -
4258     // mark Initialize node as complete.
4259     alloc = AllocateNode::Ideal_allocation(alloc_obj, &_gvn);
 4260     // The object was just allocated - there should be no stores!
4261     guarantee(alloc != NULL && alloc->maybe_set_complete(&_gvn), "");
4262     // Mark as complete_with_arraycopy so that on AllocateNode
4263     // expansion, we know this AllocateNode is initialized by an array
4264     // copy and a StoreStore barrier exists after the array copy.
4265     alloc->initialization()->set_complete_with_arraycopy();
4266   }
4267 
4268   // Copy the fastest available way.
 4269   // TODO: generate field copies for small objects instead.
4270   Node* src  = obj;
4271   Node* dest = alloc_obj;
4272   Node* size = _gvn.transform(obj_size);
4273 
 4274   // Exclude the header but include the array length, to copy in 8-byte words.


4292   }
4293   src  = basic_plus_adr(src,  base_off);
4294   dest = basic_plus_adr(dest, base_off);
4295 
4296   // Compute the length also, if needed:
4297   Node* countx = size;
4298   countx = _gvn.transform(new SubXNode(countx, MakeConX(base_off)));
4299   countx = _gvn.transform(new URShiftXNode(countx, intcon(LogBytesPerLong) ));
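   // (Example, assuming base_off == 16 on a 64-bit VM: a 40-byte instance gives
   // countx = (40 - 16) >> LogBytesPerLong = 3 eight-byte words to copy.)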
4300 
4301   const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
4302 
4303   ArrayCopyNode* ac = ArrayCopyNode::make(this, false, src, NULL, dest, NULL, countx, false);
4304   ac->set_clonebasic();
4305   Node* n = _gvn.transform(ac);
4306   if (n == ac) {
4307     set_predefined_output_for_runtime_call(ac, ac->in(TypeFunc::Memory), raw_adr_type);
4308   } else {
4309     set_all_memory(n);
4310   }
4311 

4312   // If necessary, emit some card marks afterwards.  (Non-arrays only.)
4313   if (card_mark) {
4314     assert(!is_array, "");
4315     // Put in store barrier for any and all oops we are sticking
4316     // into this object.  (We could avoid this if we could prove
4317     // that the object type contains no oop fields at all.)
4318     Node* no_particular_value = NULL;
4319     Node* no_particular_field = NULL;
4320     int raw_adr_idx = Compile::AliasIdxRaw;
4321     post_barrier(control(),
4322                  memory(raw_adr_type),
4323                  alloc_obj,
4324                  no_particular_field,
4325                  raw_adr_idx,
4326                  no_particular_value,
4327                  T_OBJECT,
4328                  false);
4329   }
4330 
4331   // Do not let reads from the cloned object float above the arraycopy.


4418 
4419     const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
4420     int raw_adr_idx = Compile::AliasIdxRaw;
4421 
4422     Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)NULL);
4423     if (array_ctl != NULL) {
4424       // It's an array.
4425       PreserveJVMState pjvms(this);
4426       set_control(array_ctl);
4427       Node* obj_length = load_array_length(obj);
4428       Node* obj_size  = NULL;
4429       Node* alloc_obj = new_array(obj_klass, obj_length, 0, &obj_size);  // no arguments to push
4430 
4431       if (!use_ReduceInitialCardMarks()) {
4432         // If it is an oop array, it requires very special treatment,
4433         // because card marking is required on each card of the array.
4434         Node* is_obja = generate_objArray_guard(obj_klass, (RegionNode*)NULL);
4435         if (is_obja != NULL) {
4436           PreserveJVMState pjvms2(this);
4437           set_control(is_obja);



4438           // Generate a direct call to the right arraycopy function(s).
4439           Node* alloc = tightly_coupled_allocation(alloc_obj, NULL);
4440           ArrayCopyNode* ac = ArrayCopyNode::make(this, true, obj, intcon(0), alloc_obj, intcon(0), obj_length, alloc != NULL);
4441           ac->set_cloneoop();
4442           Node* n = _gvn.transform(ac);
4443           assert(n == ac, "cannot disappear");
4444           ac->connect_outputs(this);
4445 
4446           result_reg->init_req(_objArray_path, control());
4447           result_val->init_req(_objArray_path, alloc_obj);
4448           result_i_o ->set_req(_objArray_path, i_o());
4449           result_mem ->set_req(_objArray_path, reset_memory());
4450         }
4451       }
4452       // Otherwise, there are no card marks to worry about.
4453       // (We can dispense with card marks if we know the allocation
4454       //  comes out of eden (TLAB)...  In fact, ReduceInitialCardMarks
4455       //  causes the non-eden paths to take compensating steps to
4456       //  simulate a fresh allocation, so that no further
4457       //  card marks are required in compiled code to initialize


4666     _gvn.hash_delete(dest);
4667     dest->set_req(0, control());
4668     Node* destx = _gvn.transform(dest);
4669     assert(destx == dest, "where has the allocation result gone?");
4670   }
4671 }
4672 
4673 
4674 //------------------------------inline_arraycopy-----------------------
4675 // public static native void java.lang.System.arraycopy(Object src,  int  srcPos,
4676 //                                                      Object dest, int destPos,
4677 //                                                      int length);
4678 bool LibraryCallKit::inline_arraycopy() {
4679   // Get the arguments.
4680   Node* src         = argument(0);  // type: oop
4681   Node* src_offset  = argument(1);  // type: int
4682   Node* dest        = argument(2);  // type: oop
4683   Node* dest_offset = argument(3);  // type: int
4684   Node* length      = argument(4);  // type: int
4685 
4686 
4687   // Check for allocation before we add nodes that would confuse
4688   // tightly_coupled_allocation()
4689   AllocateArrayNode* alloc = tightly_coupled_allocation(dest, NULL);
4690 
4691   int saved_reexecute_sp = -1;
4692   JVMState* saved_jvms = arraycopy_restore_alloc_state(alloc, saved_reexecute_sp);
4693   // See arraycopy_restore_alloc_state() comment
4694   // if alloc == NULL we don't have to worry about a tightly coupled allocation so we can emit all needed guards
4695   // if saved_jvms != NULL (then alloc != NULL) then we can handle guards and a tightly coupled allocation
 4696   // if saved_jvms == NULL and alloc != NULL, we can't emit any guards
4697   bool can_emit_guards = (alloc == NULL || saved_jvms != NULL);
4698 
4699   // The following tests must be performed
4700   // (1) src and dest are arrays.
4701   // (2) src and dest arrays must have elements of the same BasicType
4702   // (3) src and dest must not be null.
4703   // (4) src_offset must not be negative.
4704   // (5) dest_offset must not be negative.
4705   // (6) length must not be negative.
4706   // (7) src_offset + length must not exceed length of src.


4876       set_control(not_subtype_ctrl);
4877       uncommon_trap(Deoptimization::Reason_intrinsic,
4878                     Deoptimization::Action_make_not_entrant);
4879       assert(stopped(), "Should be stopped");
4880     }
4881     {
4882       PreserveJVMState pjvms(this);
4883       set_control(_gvn.transform(slow_region));
4884       uncommon_trap(Deoptimization::Reason_intrinsic,
4885                     Deoptimization::Action_make_not_entrant);
4886       assert(stopped(), "Should be stopped");
4887     }
4888   }
4889 
4890   arraycopy_move_allocation_here(alloc, dest, saved_jvms, saved_reexecute_sp);
4891 
4892   if (stopped()) {
4893     return true;
4894   }
4895 



4896   ArrayCopyNode* ac = ArrayCopyNode::make(this, true, src, src_offset, dest, dest_offset, length, alloc != NULL,
4897                                           // Create LoadRange and LoadKlass nodes for use during macro expansion here
4898                                           // so the compiler has a chance to eliminate them: during macro expansion,
4899                                           // we have to set their control (CastPP nodes are eliminated).
4900                                           load_object_klass(src), load_object_klass(dest),
4901                                           load_array_length(src), load_array_length(dest));
4902 
4903   ac->set_arraycopy(validated);
4904 
4905   Node* n = _gvn.transform(ac);
4906   if (n == ac) {
4907     ac->connect_outputs(this);
4908   } else {
4909     assert(validated, "shouldn't transform if all arguments not validated");
4910     set_all_memory(n);
4911   }
4912 
4913   return true;
4914 }
4915 
4916 
4917 // Helper function which determines if an arraycopy immediately follows
4918 // an allocation, with no intervening tests or other escapes for the object.
4919 AllocateArrayNode*
4920 LibraryCallKit::tightly_coupled_allocation(Node* ptr,
4921                                            RegionNode* slow_region) {
4922   if (stopped())             return NULL;  // no fast path
4923   if (C->AliasLevel() == 0)  return NULL;  // no MergeMems around
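   // (The shape being recognized is, informally:
   //    Object[] a = new Object[n]; System.arraycopy(src, 0, a, 0, n);
   //  with no safepoint, store, or other observer of 'a' in between.)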
4924 


4925   AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(ptr, &_gvn);
4926   if (alloc == NULL)  return NULL;
4927 
4928   Node* rawmem = memory(Compile::AliasIdxRaw);
4929   // Is the allocation's memory state untouched?
4930   if (!(rawmem->is_Proj() && rawmem->in(0)->is_Initialize())) {
4931     // Bail out if there have been raw-memory effects since the allocation.
4932     // (Example:  There might have been a call or safepoint.)
4933     return NULL;
4934   }
4935   rawmem = rawmem->in(0)->as_Initialize()->memory(Compile::AliasIdxRaw);
4936   if (!(rawmem->is_Proj() && rawmem->in(0) == alloc)) {
4937     return NULL;
4938   }
4939 
4940   // There must be no unexpected observers of this allocation.
4941   for (DUIterator_Fast imax, i = ptr->fast_outs(imax); i < imax; i++) {
4942     Node* obs = ptr->fast_out(i);
4943     if (obs != this->map()) {
4944       return NULL;


4984 
4985   // If we get this far, we have an allocation which immediately
4986   // precedes the arraycopy, and we can take over zeroing the new object.
4987   // The arraycopy will finish the initialization, and provide
4988   // a new control state to which we will anchor the destination pointer.
4989 
4990   return alloc;
4991 }
4992 
4993 //-------------inline_encodeISOArray-----------------------------------
4994 // encode char[] to byte[] in ISO_8859_1
4995 bool LibraryCallKit::inline_encodeISOArray() {
4996   assert(callee()->signature()->size() == 5, "encodeISOArray has 5 parameters");
 4997   // no receiver since it is a static method
4998   Node *src         = argument(0);
4999   Node *src_offset  = argument(1);
5000   Node *dst         = argument(2);
5001   Node *dst_offset  = argument(3);
5002   Node *length      = argument(4);
5003 



5004   const Type* src_type = src->Value(&_gvn);
5005   const Type* dst_type = dst->Value(&_gvn);
5006   const TypeAryPtr* top_src = src_type->isa_aryptr();
5007   const TypeAryPtr* top_dest = dst_type->isa_aryptr();
5008   if (top_src  == NULL || top_src->klass()  == NULL ||
5009       top_dest == NULL || top_dest->klass() == NULL) {
5010     // failed array check
5011     return false;
5012   }
5013 
5014   // Figure out the size and type of the elements we will be copying.
5015   BasicType src_elem = src_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5016   BasicType dst_elem = dst_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5017   if (src_elem != T_CHAR || dst_elem != T_BYTE) {
5018     return false;
5019   }
5020   Node* src_start = array_element_address(src, src_offset, src_elem);
5021   Node* dst_start = array_element_address(dst, dst_offset, dst_elem);
5022   // 'src_start' points to src array + scaled offset
5023   // 'dst_start' points to dst array + scaled offset


5033 
5034 //-------------inline_multiplyToLen-----------------------------------
5035 bool LibraryCallKit::inline_multiplyToLen() {
5036   assert(UseMultiplyToLenIntrinsic, "not implemented on this platform");
5037 
5038   address stubAddr = StubRoutines::multiplyToLen();
5039   if (stubAddr == NULL) {
5040     return false; // Intrinsic's stub is not implemented on this platform
5041   }
5042   const char* stubName = "multiplyToLen";
5043 
5044   assert(callee()->signature()->size() == 5, "multiplyToLen has 5 parameters");
5045 
5046   // no receiver because it is a static method
5047   Node* x    = argument(0);
5048   Node* xlen = argument(1);
5049   Node* y    = argument(2);
5050   Node* ylen = argument(3);
5051   Node* z    = argument(4);
5052 

5053   const Type* x_type = x->Value(&_gvn);
5054   const Type* y_type = y->Value(&_gvn);
5055   const TypeAryPtr* top_x = x_type->isa_aryptr();
5056   const TypeAryPtr* top_y = y_type->isa_aryptr();
5057   if (top_x  == NULL || top_x->klass()  == NULL ||
5058       top_y == NULL || top_y->klass() == NULL) {
5059     // failed array check
5060     return false;
5061   }
5062 
5063   BasicType x_elem = x_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5064   BasicType y_elem = y_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5065   if (x_elem != T_INT || y_elem != T_INT) {
5066     return false;
5067   }
5068 
5069   // Set the original stack and the reexecute bit for the interpreter to reexecute
5070   // the bytecode that invokes BigInteger.multiplyToLen() if deoptimization happens
5071   // on the return from z array allocation in runtime.
5072   { PreserveReexecuteState preexecs(this);


5133   return true;
5134 }
5135 
5136 //-------------inline_squareToLen------------------------------------
5137 bool LibraryCallKit::inline_squareToLen() {
 5138   assert(UseSquareToLenIntrinsic, "not implemented on this platform");
5139 
5140   address stubAddr = StubRoutines::squareToLen();
5141   if (stubAddr == NULL) {
5142     return false; // Intrinsic's stub is not implemented on this platform
5143   }
5144   const char* stubName = "squareToLen";
5145 
5146   assert(callee()->signature()->size() == 4, "implSquareToLen has 4 parameters");
5147 
5148   Node* x    = argument(0);
5149   Node* len  = argument(1);
5150   Node* z    = argument(2);
5151   Node* zlen = argument(3);
5152 



5153   const Type* x_type = x->Value(&_gvn);
5154   const Type* z_type = z->Value(&_gvn);
5155   const TypeAryPtr* top_x = x_type->isa_aryptr();
5156   const TypeAryPtr* top_z = z_type->isa_aryptr();
5157   if (top_x  == NULL || top_x->klass()  == NULL ||
5158       top_z  == NULL || top_z->klass()  == NULL) {
5159     // failed array check
5160     return false;
5161   }
5162 
5163   BasicType x_elem = x_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5164   BasicType z_elem = z_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5165   if (x_elem != T_INT || z_elem != T_INT) {
5166     return false;
5167   }
5168 
5169 
5170   Node* x_start = array_element_address(x, intcon(0), x_elem);
5171   Node* z_start = array_element_address(z, intcon(0), z_elem);
5172 


5180 }
5181 
5182 //-------------inline_mulAdd------------------------------------------
5183 bool LibraryCallKit::inline_mulAdd() {
 5184   assert(UseMulAddIntrinsic, "not implemented on this platform");
5185 
5186   address stubAddr = StubRoutines::mulAdd();
5187   if (stubAddr == NULL) {
5188     return false; // Intrinsic's stub is not implemented on this platform
5189   }
5190   const char* stubName = "mulAdd";
5191 
5192   assert(callee()->signature()->size() == 5, "mulAdd has 5 parameters");
5193 
5194   Node* out      = argument(0);
5195   Node* in       = argument(1);
5196   Node* offset   = argument(2);
5197   Node* len      = argument(3);
5198   Node* k        = argument(4);
5199 



5200   const Type* out_type = out->Value(&_gvn);
5201   const Type* in_type = in->Value(&_gvn);
5202   const TypeAryPtr* top_out = out_type->isa_aryptr();
5203   const TypeAryPtr* top_in = in_type->isa_aryptr();
5204   if (top_out  == NULL || top_out->klass()  == NULL ||
5205       top_in == NULL || top_in->klass() == NULL) {
5206     // failed array check
5207     return false;
5208   }
5209 
5210   BasicType out_elem = out_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5211   BasicType in_elem = in_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5212   if (out_elem != T_INT || in_elem != T_INT) {
5213     return false;
5214   }
5215 
5216   Node* outlen = load_array_length(out);
5217   Node* new_offset = _gvn.transform(new SubINode(outlen, offset));
5218   Node* out_start = array_element_address(out, intcon(0), out_elem);
5219   Node* in_start = array_element_address(in, intcon(0), in_elem);


5229 
5230 //-------------inline_montgomeryMultiply-----------------------------------
5231 bool LibraryCallKit::inline_montgomeryMultiply() {
5232   address stubAddr = StubRoutines::montgomeryMultiply();
5233   if (stubAddr == NULL) {
5234     return false; // Intrinsic's stub is not implemented on this platform
5235   }
5236 
5237   assert(UseMontgomeryMultiplyIntrinsic, "not implemented on this platform");
 5238   const char* stubName = "montgomery_multiply";
5239 
5240   assert(callee()->signature()->size() == 7, "montgomeryMultiply has 7 parameters");
5241 
5242   Node* a    = argument(0);
5243   Node* b    = argument(1);
5244   Node* n    = argument(2);
5245   Node* len  = argument(3);
5246   Node* inv  = argument(4);
5247   Node* m    = argument(6);
5248 

5249   const Type* a_type = a->Value(&_gvn);
5250   const TypeAryPtr* top_a = a_type->isa_aryptr();
5251   const Type* b_type = b->Value(&_gvn);
5252   const TypeAryPtr* top_b = b_type->isa_aryptr();
 5253   const Type* n_type = n->Value(&_gvn);
5254   const TypeAryPtr* top_n = n_type->isa_aryptr();
 5255   const Type* m_type = m->Value(&_gvn);
5256   const TypeAryPtr* top_m = m_type->isa_aryptr();
5257   if (top_a  == NULL || top_a->klass()  == NULL ||
5258       top_b == NULL || top_b->klass()  == NULL ||
5259       top_n == NULL || top_n->klass()  == NULL ||
5260       top_m == NULL || top_m->klass()  == NULL) {
5261     // failed array check
5262     return false;
5263   }
5264 
5265   BasicType a_elem = a_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5266   BasicType b_elem = b_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5267   BasicType n_elem = n_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5268   BasicType m_elem = m_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();


5288   return true;
5289 }
5290 
5291 bool LibraryCallKit::inline_montgomerySquare() {
5292   address stubAddr = StubRoutines::montgomerySquare();
5293   if (stubAddr == NULL) {
5294     return false; // Intrinsic's stub is not implemented on this platform
5295   }
5296 
5297   assert(UseMontgomerySquareIntrinsic, "not implemented on this platform");
5298   const char* stubName = "montgomery_square";
5299 
5300   assert(callee()->signature()->size() == 6, "montgomerySquare has 6 parameters");
5301 
5302   Node* a    = argument(0);
5303   Node* n    = argument(1);
5304   Node* len  = argument(2);
5305   Node* inv  = argument(3);
5306   Node* m    = argument(5);
5307 

5308   const Type* a_type = a->Value(&_gvn);
5309   const TypeAryPtr* top_a = a_type->isa_aryptr();
 5310   const Type* n_type = n->Value(&_gvn);
5311   const TypeAryPtr* top_n = n_type->isa_aryptr();
 5312   const Type* m_type = m->Value(&_gvn);
5313   const TypeAryPtr* top_m = m_type->isa_aryptr();
5314   if (top_a  == NULL || top_a->klass()  == NULL ||
5315       top_n == NULL || top_n->klass()  == NULL ||
5316       top_m == NULL || top_m->klass()  == NULL) {
5317     // failed array check
5318     return false;
5319   }
5320 
5321   BasicType a_elem = a_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5322   BasicType n_elem = n_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5323   BasicType m_elem = m_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5324   if (a_elem != T_INT || n_elem != T_INT || m_elem != T_INT) {
5325     return false;
5326   }
5327 


5374   crc = _gvn.transform(new URShiftINode(crc, intcon(8)));
5375   result = _gvn.transform(new XorINode(crc, result));
5376   result = _gvn.transform(new XorINode(result, M1));
5377   set_result(result);
5378   return true;
5379 }
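
The node chain above is the tail of the classic one-byte, table-driven CRC32 update: XOR the (already complemented) crc with the input byte, index the 256-entry table, shift, and re-complement via M1 == -1. The same computation in plain C++, as a sketch only (the leading complement happens in the elided lines of this method):

#include <cstdint>

static uint32_t crc32_update_byte(const uint32_t table[256],
                                  uint32_t crc, uint8_t b) {
  crc = ~crc;                                  // the leading XOR with M1
  crc = table[(crc ^ b) & 0xFF] ^ (crc >> 8);  // table load, URShiftI, XorI
  return ~crc;                                 // the trailing XOR with M1
}
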
5380 
5381 /**
5382  * Calculate CRC32 for byte[] array.
5383  * int java.util.zip.CRC32.updateBytes(int crc, byte[] buf, int off, int len)
5384  */
5385 bool LibraryCallKit::inline_updateBytesCRC32() {
5386   assert(UseCRC32Intrinsics, "need AVX and CLMUL instruction support");
5387   assert(callee()->signature()->size() == 4, "updateBytes has 4 parameters");
5388   // no receiver since it is static method
5389   Node* crc     = argument(0); // type: int
5390   Node* src     = argument(1); // type: oop
5391   Node* offset  = argument(2); // type: int
5392   Node* length  = argument(3); // type: int
5393 


5394   const Type* src_type = src->Value(&_gvn);
5395   const TypeAryPtr* top_src = src_type->isa_aryptr();
5396   if (top_src  == NULL || top_src->klass()  == NULL) {
5397     // failed array check
5398     return false;
5399   }
5400 
5401   // Figure out the size and type of the elements we will be copying.
5402   BasicType src_elem = src_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5403   if (src_elem != T_BYTE) {
5404     return false;
5405   }
5406 
5407   // 'src_start' points to src array + scaled offset
5408   Node* src_start = array_element_address(src, offset, src_elem);
5409 
5410   // We assume that range check is done by caller.
5411   // TODO: generate range check (offset+length < src.length) in debug VM.
5412 
5413   // Call the stub.


5476   Node* src     = argument(1); // type: oop
5477   Node* offset  = argument(2); // type: int
5478   Node* end     = argument(3); // type: int
5479 
5480   Node* length = _gvn.transform(new SubINode(end, offset));
5481 
5482   const Type* src_type = src->Value(&_gvn);
5483   const TypeAryPtr* top_src = src_type->isa_aryptr();
5484   if (top_src  == NULL || top_src->klass()  == NULL) {
5485     // failed array check
5486     return false;
5487   }
5488 
5489   // Figure out the size and type of the elements we will be copying.
5490   BasicType src_elem = src_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5491   if (src_elem != T_BYTE) {
5492     return false;
5493   }
5494 
5495   // 'src_start' points to src array + scaled offset

5496   Node* src_start = array_element_address(src, offset, src_elem);
5497 
5498   // static final int[] byteTable in class CRC32C
5499   Node* table = get_table_from_crc32c_class(callee()->holder());

5500   Node* table_start = array_element_address(table, intcon(0), T_INT);
5501 
5502   // We assume that range check is done by caller.
5503   // TODO: generate range check (offset+length < src.length) in debug VM.
5504 
5505   // Call the stub.
5506   address stubAddr = StubRoutines::updateBytesCRC32C();
5507   const char *stubName = "updateBytesCRC32C";
5508 
5509   Node* call = make_runtime_call(RC_LEAF, OptoRuntime::updateBytesCRC32C_Type(),
5510                                  stubAddr, stubName, TypePtr::BOTTOM,
5511                                  crc, src_start, length, table_start);
5512   Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
5513   set_result(result);
5514   return true;
5515 }
5516 
5517 //------------------------------inline_updateDirectByteBufferCRC32C-----------------------
5518 //
5519 // Calculate CRC32C for DirectByteBuffer.


5523   assert(UseCRC32CIntrinsics, "need CRC32C instruction support");
5524   assert(callee()->signature()->size() == 5, "updateDirectByteBuffer has 4 parameters and one is long");
5525   assert(callee()->holder()->is_loaded(), "CRC32C class must be loaded");
5526   // no receiver since it is a static method
5527   Node* crc     = argument(0); // type: int
5528   Node* src     = argument(1); // type: long
5529   Node* offset  = argument(3); // type: int
5530   Node* end     = argument(4); // type: int
5531 
5532   Node* length = _gvn.transform(new SubINode(end, offset));
5533 
5534   src = ConvL2X(src);  // adjust Java long to machine word
5535   Node* base = _gvn.transform(new CastX2PNode(src));
5536   offset = ConvI2X(offset);
5537 
5538   // 'src_start' points to src array + scaled offset
5539   Node* src_start = basic_plus_adr(top(), base, offset);
5540 
5541   // static final int[] byteTable in class CRC32C
5542   Node* table = get_table_from_crc32c_class(callee()->holder());

5543   Node* table_start = array_element_address(table, intcon(0), T_INT);
5544 
5545   // Call the stub.
5546   address stubAddr = StubRoutines::updateBytesCRC32C();
5547   const char *stubName = "updateBytesCRC32C";
5548 
5549   Node* call = make_runtime_call(RC_LEAF, OptoRuntime::updateBytesCRC32C_Type(),
5550                                  stubAddr, stubName, TypePtr::BOTTOM,
5551                                  crc, src_start, length, table_start);
5552   Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
5553   set_result(result);
5554   return true;
5555 }
5556 
5557 //------------------------------inline_updateBytesAdler32----------------------
5558 //
5559 // Calculate Adler32 checksum for byte[] array.
5560 // int java.util.zip.Adler32.updateBytes(int crc, byte[] buf, int off, int len)
5561 //
5562 bool LibraryCallKit::inline_updateBytesAdler32() {


5566   // no receiver since it is static method
5567   Node* crc     = argument(0); // type: int
5568   Node* src     = argument(1); // type: oop
5569   Node* offset  = argument(2); // type: int
5570   Node* length  = argument(3); // type: int
5571 
5572   const Type* src_type = src->Value(&_gvn);
5573   const TypeAryPtr* top_src = src_type->isa_aryptr();
5574   if (top_src  == NULL || top_src->klass()  == NULL) {
5575     // failed array check
5576     return false;
5577   }
5578 
5579   // Figure out the size and type of the elements we will be copying.
5580   BasicType src_elem = src_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5581   if (src_elem != T_BYTE) {
5582     return false;
5583   }
5584 
5585   // 'src_start' points to src array + scaled offset

5586   Node* src_start = array_element_address(src, offset, src_elem);
5587 
5588   // We assume that range check is done by caller.
5589   // TODO: generate range check (offset+length < src.length) in debug VM.
5590 
5591   // Call the stub.
5592   address stubAddr = StubRoutines::updateBytesAdler32();
5593   const char *stubName = "updateBytesAdler32";
5594 
5595   Node* call = make_runtime_call(RC_LEAF, OptoRuntime::updateBytesAdler32_Type(),
5596                                  stubAddr, stubName, TypePtr::BOTTOM,
5597                                  crc, src_start, length);
5598   Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
5599   set_result(result);
5600   return true;
5601 }
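
For reference, the stub invoked above computes the RFC 1950 Adler-32 checksum; a minimal, unoptimized rendering follows (illustration only, not the vectorized stub):

#include <cstdint>
#include <cstddef>

static uint32_t adler32_update(uint32_t adler, const uint8_t* buf, size_t len) {
  const uint32_t MOD = 65521;            // largest prime below 2^16
  uint32_t a = adler & 0xFFFF;           // running sum of bytes
  uint32_t b = (adler >> 16) & 0xFFFF;   // running sum of the a values
  for (size_t i = 0; i < len; i++) {
    a = (a + buf[i]) % MOD;
    b = (b + a) % MOD;
  }
  return (b << 16) | a;
}
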
5602 
5603 //------------------------------inline_updateByteBufferAdler32---------------
5604 //
5605 // Calculate Adler32 checksum for DirectByteBuffer.


5628 
5629   Node* call = make_runtime_call(RC_LEAF, OptoRuntime::updateBytesAdler32_Type(),
5630                                  stubAddr, stubName, TypePtr::BOTTOM,
5631                                  crc, src_start, length);
5632 
5633   Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
5634   set_result(result);
5635   return true;
5636 }
5637 
5638 //----------------------------inline_reference_get----------------------------
5639 // public T java.lang.ref.Reference.get();
5640 bool LibraryCallKit::inline_reference_get() {
5641   const int referent_offset = java_lang_ref_Reference::referent_offset;
5642   guarantee(referent_offset > 0, "should have already been set");
5643 
5644   // Get the argument:
5645   Node* reference_obj = null_check_receiver();
5646   if (stopped()) return true;
5647 




5648   Node* adr = basic_plus_adr(reference_obj, reference_obj, referent_offset);
5649 
5650   ciInstanceKlass* klass = env()->Object_klass();
5651   const TypeOopPtr* object_type = TypeOopPtr::make_from_klass(klass);
5652 
5653   Node* no_ctrl = NULL;
5654   Node* result = make_load(no_ctrl, adr, object_type, T_OBJECT, MemNode::unordered);
5655 
5656   // Use the pre-barrier to record the value in the referent field
5657   pre_barrier(false /* do_load */,
5658               control(),
5659               NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
5660               result /* pre_val */,
5661               T_OBJECT);
5662 
5663   // Add memory barrier to prevent commoning reads from this field
5664   // across safepoint since GC can change its value.
5665   insert_mem_bar(Op_MemBarCPUOrder);
5666 
5667   set_result(result);


5676     const TypeInstPtr* tinst = _gvn.type(fromObj)->isa_instptr();
5677     assert(tinst != NULL, "obj is null");
5678     assert(tinst->klass()->is_loaded(), "obj is not loaded");
5679     assert(!is_exact || tinst->klass_is_exact(), "klass not exact");
5680     fromKls = tinst->klass()->as_instance_klass();
5681   } else {
5682     assert(is_static, "only for static field access");
5683   }
5684   ciField* field = fromKls->get_field_by_name(ciSymbol::make(fieldName),
5685                                               ciSymbol::make(fieldTypeString),
5686                                               is_static);
5687 
5688   assert (field != NULL, "undefined field");
5689   if (field == NULL) return (Node *) NULL;
5690 
5691   if (is_static) {
5692     const TypeInstPtr* tip = TypeInstPtr::make(fromKls->java_mirror());
5693     fromObj = makecon(tip);
5694   }
5695 


5696   // Next code copied from Parse::do_get_xxx():
5697 
5698   // Compute address and memory type.
5699   int offset  = field->offset_in_bytes();
5700   bool is_vol = field->is_volatile();
5701   ciType* field_klass = field->type();
5702   assert(field_klass->is_loaded(), "should be loaded");
5703   const TypePtr* adr_type = C->alias_type(field)->adr_type();
5704   Node *adr = basic_plus_adr(fromObj, fromObj, offset);
5705   BasicType bt = field->layout_type();
5706 
5707   // Build the resultant type of the load
5708   const Type *type;
5709   if (bt == T_OBJECT) {
5710     type = TypeOopPtr::make_from_klass(field_klass->as_klass());
5711   } else {
5712     type = Type::get_const_basic_type(bt);
5713   }
5714 
5715   if (support_IRIW_for_not_multiple_copy_atomic_cpu && is_vol) {


5736   assert(UseAES, "need AES instruction support");
5737 
5738   switch(id) {
5739   case vmIntrinsics::_aescrypt_encryptBlock:
5740     stubAddr = StubRoutines::aescrypt_encryptBlock();
5741     stubName = "aescrypt_encryptBlock";
5742     break;
5743   case vmIntrinsics::_aescrypt_decryptBlock:
5744     stubAddr = StubRoutines::aescrypt_decryptBlock();
5745     stubName = "aescrypt_decryptBlock";
5746     break;
5747   }
5748   if (stubAddr == NULL) return false;
5749 
5750   Node* aescrypt_object = argument(0);
5751   Node* src             = argument(1);
5752   Node* src_offset      = argument(2);
5753   Node* dest            = argument(3);
5754   Node* dest_offset     = argument(4);
5755 




5756   // (1) src and dest are arrays.
5757   const Type* src_type = src->Value(&_gvn);
5758   const Type* dest_type = dest->Value(&_gvn);
5759   const TypeAryPtr* top_src = src_type->isa_aryptr();
5760   const TypeAryPtr* top_dest = dest_type->isa_aryptr();
5761   assert (top_src  != NULL && top_src->klass()  != NULL &&  top_dest != NULL && top_dest->klass() != NULL, "args are strange");
5762 
5763   // for the quick and dirty code we will skip all the checks.
5764   // we are just trying to get the call to be generated.
5765   Node* src_start  = src;
5766   Node* dest_start = dest;
5767   if (src_offset != NULL || dest_offset != NULL) {
5768     assert(src_offset != NULL && dest_offset != NULL, "");
5769     src_start  = array_element_address(src,  src_offset,  T_BYTE);
5770     dest_start = array_element_address(dest, dest_offset, T_BYTE);
5771   }
5772 
5773   // now need to get the start of its expanded key array
5774   // this requires a newer class file that has this array as littleEndian ints, otherwise we revert to java
5775   Node* k_start = get_key_start_from_aescrypt_object(aescrypt_object);


5804 
5805   switch(id) {
5806   case vmIntrinsics::_cipherBlockChaining_encryptAESCrypt:
5807     stubAddr = StubRoutines::cipherBlockChaining_encryptAESCrypt();
5808     stubName = "cipherBlockChaining_encryptAESCrypt";
5809     break;
5810   case vmIntrinsics::_cipherBlockChaining_decryptAESCrypt:
5811     stubAddr = StubRoutines::cipherBlockChaining_decryptAESCrypt();
5812     stubName = "cipherBlockChaining_decryptAESCrypt";
5813     break;
5814   }
5815   if (stubAddr == NULL) return false;
5816 
5817   Node* cipherBlockChaining_object = argument(0);
5818   Node* src                        = argument(1);
5819   Node* src_offset                 = argument(2);
5820   Node* len                        = argument(3);
5821   Node* dest                       = argument(4);
5822   Node* dest_offset                = argument(5);
5823 




5824   // (1) src and dest are arrays.
5825   const Type* src_type = src->Value(&_gvn);
5826   const Type* dest_type = dest->Value(&_gvn);
5827   const TypeAryPtr* top_src = src_type->isa_aryptr();
5828   const TypeAryPtr* top_dest = dest_type->isa_aryptr();
5829   assert (top_src  != NULL && top_src->klass()  != NULL
5830           &&  top_dest != NULL && top_dest->klass() != NULL, "args are strange");
5831 
5832   // checks are the responsibility of the caller
5833   Node* src_start  = src;
5834   Node* dest_start = dest;
5835   if (src_offset != NULL || dest_offset != NULL) {
5836     assert(src_offset != NULL && dest_offset != NULL, "");
5837     src_start  = array_element_address(src,  src_offset,  T_BYTE);
5838     dest_start = array_element_address(dest, dest_offset, T_BYTE);
5839   }
5840 
5841   // if we are in this set of code, we "know" the embeddedCipher is an AESCrypt object
5842   // (because of the predicated logic executed earlier).
5843   // so we cast it here safely.


5848 
5849   // cast it to what we know it will be at runtime
5850   const TypeInstPtr* tinst = _gvn.type(cipherBlockChaining_object)->isa_instptr();
5851   assert(tinst != NULL, "CBC obj is null");
5852   assert(tinst->klass()->is_loaded(), "CBC obj is not loaded");
5853   ciKlass* klass_AESCrypt = tinst->klass()->as_instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AESCrypt"));
5854   assert(klass_AESCrypt->is_loaded(), "predicate checks that this class is loaded");
5855 
5856   ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass();
5857   const TypeKlassPtr* aklass = TypeKlassPtr::make(instklass_AESCrypt);
5858   const TypeOopPtr* xtype = aklass->as_instance_type();
5859   Node* aescrypt_object = new CheckCastPPNode(control(), embeddedCipherObj, xtype);
5860   aescrypt_object = _gvn.transform(aescrypt_object);
5861 
5862   // we need to get the start of the aescrypt_object's expanded key array
5863   Node* k_start = get_key_start_from_aescrypt_object(aescrypt_object);
5864   if (k_start == NULL) return false;
5865 
5866   // similarly, get the start address of the r vector
5867   Node* objRvec = load_field_from_object(cipherBlockChaining_object, "r", "[B", /*is_exact*/ false);



5868   if (objRvec == NULL) return false;
5869   Node* r_start = array_element_address(objRvec, intcon(0), T_BYTE);
5870 
5871   Node* cbcCrypt;
5872   if (Matcher::pass_original_key_for_aes()) {
5873     // on SPARC we need to pass the original key since key expansion needs to happen in intrinsics due to
5874     // compatibility issues between Java key expansion and SPARC crypto instructions
5875     Node* original_k_start = get_original_key_start_from_aescrypt_object(aescrypt_object);
5876     if (original_k_start == NULL) return false;
5877 
5878     // Call the stub, passing src_start, dest_start, k_start, r_start, src_len and original_k_start
5879     cbcCrypt = make_runtime_call(RC_LEAF|RC_NO_FP,
5880                                  OptoRuntime::cipherBlockChaining_aescrypt_Type(),
5881                                  stubAddr, stubName, TypePtr::BOTTOM,
5882                                  src_start, dest_start, k_start, r_start, len, original_k_start);
5883   } else {
5884     // Call the stub, passing src_start, dest_start, k_start, r_start and src_len
5885     cbcCrypt = make_runtime_call(RC_LEAF|RC_NO_FP,
5886                                  OptoRuntime::cipherBlockChaining_aescrypt_Type(),
5887                                  stubAddr, stubName, TypePtr::BOTTOM,
5888                                  src_start, dest_start, k_start, r_start, len);
5889   }
5890 
5891   // return cipher length (int)
5892   Node* retvalue = _gvn.transform(new ProjNode(cbcCrypt, TypeFunc::Parms));
5893   set_result(retvalue);
5894   return true;
5895 }
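
For orientation, the stub called above performs standard CBC chaining over 16-byte AES blocks, with the object's r field carrying the chaining vector across calls. A hedged sketch of the encrypt direction with the block cipher abstracted away (illustration only; `encrypt_block` is a placeholder, not a HotSpot function):

#include <cstring>

enum { AES_BLK = 16 };
typedef void (*encrypt_block_fn)(const unsigned char in[AES_BLK],
                                 unsigned char out[AES_BLK],
                                 const int* k_start);

static int cbc_encrypt(encrypt_block_fn encrypt_block, const int* k_start,
                       const unsigned char* src, unsigned char* dst,
                       int len, unsigned char r[AES_BLK]) {
  for (int off = 0; off + AES_BLK <= len; off += AES_BLK) {
    unsigned char x[AES_BLK];
    for (int i = 0; i < AES_BLK; i++) x[i] = src[off + i] ^ r[i]; // chain in
    encrypt_block(x, dst + off, k_start);
    memcpy(r, dst + off, AES_BLK);       // ciphertext becomes the next IV
  }
  return len;                            // cipher length, like retvalue above
}
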
5896 
5897 //------------------------------get_key_start_from_aescrypt_object-----------------------
5898 Node * LibraryCallKit::get_key_start_from_aescrypt_object(Node *aescrypt_object) {
5899   Node* objAESCryptKey = load_field_from_object(aescrypt_object, "K", "[I", /*is_exact*/ false);
5900   assert (objAESCryptKey != NULL, "wrong version of com.sun.crypto.provider.AESCrypt");
5901   if (objAESCryptKey == NULL) return (Node *) NULL;


5902 
5903   // now have the array, need to get the start address of the K array
5904   Node* k_start = array_element_address(objAESCryptKey, intcon(0), T_INT);
5905   return k_start;
5906 }
5907 
5908 //------------------------------get_original_key_start_from_aescrypt_object-----------------------
5909 Node * LibraryCallKit::get_original_key_start_from_aescrypt_object(Node *aescrypt_object) {
5910   Node* objAESCryptKey = load_field_from_object(aescrypt_object, "lastKey", "[B", /*is_exact*/ false);
5911   assert (objAESCryptKey != NULL, "wrong version of com.sun.crypto.provider.AESCrypt");
5912   if (objAESCryptKey == NULL) return (Node *) NULL;
5913 
5914   // now have the array, need to get the start address of the lastKey array
5915   Node* original_k_start = array_element_address(objAESCryptKey, intcon(0), T_BYTE);
5916   return original_k_start;
5917 }
5918 
5919 //----------------------------inline_cipherBlockChaining_AESCrypt_predicate----------------------------
5920 // Return node representing slow path of predicate check.
5921 // the pseudo code we want to emulate with this predicate is:




  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"
  27 #include "classfile/systemDictionary.hpp"
  28 #include "classfile/vmSymbols.hpp"
  29 #include "compiler/compileBroker.hpp"
  30 #include "compiler/compileLog.hpp"
  31 #include "gc/shenandoah/shenandoahRuntime.hpp"
  32 #include "oops/objArrayKlass.hpp"
  33 #include "opto/addnode.hpp"
  34 #include "opto/arraycopynode.hpp"
  35 #include "opto/c2compiler.hpp"
  36 #include "opto/callGenerator.hpp"
  37 #include "opto/castnode.hpp"
  38 #include "opto/cfgnode.hpp"
  39 #include "opto/convertnode.hpp"
  40 #include "opto/countbitsnode.hpp"
  41 #include "opto/intrinsicnode.hpp"
  42 #include "opto/idealKit.hpp"
  43 #include "opto/mathexactnode.hpp"
  44 #include "opto/movenode.hpp"
  45 #include "opto/mulnode.hpp"
  46 #include "opto/narrowptrnode.hpp"
  47 #include "opto/opaquenode.hpp"
  48 #include "opto/parse.hpp"
  49 #include "opto/runtime.hpp"
  50 #include "opto/shenandoahSupport.hpp"
  51 #include "opto/subnode.hpp"
  52 #include "prims/nativeLookup.hpp"
  53 #include "runtime/sharedRuntime.hpp"
  54 #include "trace/traceMacros.hpp"
  55 
  56 class LibraryIntrinsic : public InlineCallGenerator {
  57   // Extend the set of intrinsics known to the runtime:
  58  public:
  59  private:
  60   bool             _is_virtual;
  61   bool             _does_virtual_dispatch;
  62   int8_t           _predicates_count;  // Intrinsic is predicated by several conditions
  63   int8_t           _last_predicate; // Last generated predicate
  64   vmIntrinsics::ID _intrinsic_id;
  65 
  66  public:
  67   LibraryIntrinsic(ciMethod* m, bool is_virtual, int predicates_count, bool does_virtual_dispatch, vmIntrinsics::ID id)
  68     : InlineCallGenerator(m),
  69       _is_virtual(is_virtual),
  70       _does_virtual_dispatch(does_virtual_dispatch),


 957   C->set_has_split_ifs(true); // Has chance for split-if optimization
 958 
 959   return _gvn.transform(result);
 960 }
 961 
 962 //------------------------------inline_string_compareTo------------------------
 963 // public int java.lang.String.compareTo(String anotherString);
 964 bool LibraryCallKit::inline_string_compareTo() {
 965   Node* receiver = null_check(argument(0));
 966   Node* arg      = null_check(argument(1));
 967   if (stopped()) {
 968     return true;
 969   }
 970   set_result(make_string_method_node(Op_StrComp, receiver, arg));
 971   return true;
 972 }
 973 
 974 //------------------------------inline_string_equals------------------------
 975 bool LibraryCallKit::inline_string_equals() {
 976   Node* receiver = null_check_receiver();
 977 
 978   if (ShenandoahVerifyReadsToFromSpace) {
 979     receiver = shenandoah_read_barrier(receiver);
 980   }
 981 
 982   // NOTE: Do not null check the argument for String.equals() because the
 983   // spec allows NULL as an argument.
 984   Node* argument = this->argument(1);
 985 
 986   if (ShenandoahVerifyReadsToFromSpace) {
 987     argument = shenandoah_read_barrier(argument);
 988   }
 989 
 990   if (stopped()) {
 991     return true;
 992   }
 993 
 994   // paths (plus control) merge
 995   RegionNode* region = new RegionNode(5);
 996   Node* phi = new PhiNode(region, TypeInt::BOOL);
 997 
 998   // does source == target string?
 999   Node* cmp = _gvn.transform(new CmpPNode(receiver, argument));
1000   Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::eq));
1001 
1002   Node* if_eq = generate_slow_guard(bol, NULL);
1003   if (if_eq != NULL) {
1004     // receiver == argument
1005     phi->init_req(2, intcon(1));
1006     region->init_req(2, if_eq);
1007   }
1008 
1009   // get String klass for instanceOf


1018     //instanceOf == true, fallthrough
1019 
1020     if (inst_false != NULL) {
1021       phi->init_req(3, intcon(0));
1022       region->init_req(3, inst_false);
1023     }
1024   }
1025 
1026   if (!stopped()) {
1027     const TypeOopPtr* string_type = TypeOopPtr::make_from_klass(klass);
1028 
1029     // Properly cast the argument to String
1030     argument = _gvn.transform(new CheckCastPPNode(control(), argument, string_type));
1031     // This path is taken only when argument's type is String:NotNull.
1032     argument = cast_not_null(argument, false);
1033 
1034     Node* no_ctrl = NULL;
1035 
1036     // Get start addr of receiver
1037     Node* receiver_val    = load_String_value(no_ctrl, receiver);
1038 
1039     if (ShenandoahVerifyReadsToFromSpace) {
1040       receiver_val = shenandoah_read_barrier(receiver_val);
1041     }
1042 
1043     Node* receiver_offset = load_String_offset(no_ctrl, receiver);
1044     Node* receiver_start = array_element_address(receiver_val, receiver_offset, T_CHAR);
1045 
1046     // Get length of receiver
1047     Node* receiver_cnt  = load_String_length(no_ctrl, receiver);
1048 
1049     // Get start addr of argument
1050     Node* argument_val    = load_String_value(no_ctrl, argument);
1051 
1052     if (ShenandoahVerifyReadsToFromSpace) {
1053       argument_val = shenandoah_read_barrier(argument_val);
1054     }
1055 
1056     Node* argument_offset = load_String_offset(no_ctrl, argument);
1057     Node* argument_start = array_element_address(argument_val, argument_offset, T_CHAR);
1058 
1059     // Get length of argument
1060     Node* argument_cnt  = load_String_length(no_ctrl, argument);
1061 
1062     // Check for receiver count != argument count
1063     Node* cmp = _gvn.transform(new CmpINode(receiver_cnt, argument_cnt));
1064     Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
1065     Node* if_ne = generate_slow_guard(bol, NULL);
1066     if (if_ne != NULL) {
1067       phi->init_req(4, intcon(0));
1068       region->init_req(4, if_ne);
1069     }
1070 
1071     // Check for count == 0 is done by assembler code for StrEquals.
1072 
1073     if (!stopped()) {
1074       Node* equals = make_string_method_node(Op_StrEquals, receiver_start, receiver_cnt, argument_start, argument_cnt);
1075       phi->init_req(1, equals);
1076       region->init_req(1, control());
1077     }
1078   }
1079 
1080   // post merge
1081   set_control(_gvn.transform(region));
1082   record_for_igvn(region);
1083 
1084   set_result(_gvn.transform(phi));
1085   return true;
1086 }
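
Read back as straight-line logic, the region/phi merge above encodes the following (annotation only; each path number is the phi input of the same index):

//   if (receiver == argument)                    return true;   // path 2
//   if (!(argument instanceof String))           return false;  // path 3
//   if (receiver.length() != argument.length())  return false;  // path 4
//   return StrEquals(receiver.value, argument.value);           // path 1
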
1087 
1088 //------------------------------inline_array_equals----------------------------
1089 bool LibraryCallKit::inline_array_equals() {
1090   Node* arg1 = argument(0);
1091   Node* arg2 = argument(1);
1092 
1093   arg1 = shenandoah_read_barrier(arg1);
1094   arg2 = shenandoah_read_barrier(arg2);
1095 
1096   set_result(_gvn.transform(new AryEqNode(control(), memory(TypeAryPtr::CHARS), arg1, arg2)));
1097   return true;
1098 }
1099 
1100 // Java version of String.indexOf(constant string)
1101 // class StringDecl {
1102 //   StringDecl(char[] ca) {
1103 //     offset = 0;
1104 //     count = ca.length;
1105 //     value = ca;
1106 //   }
1107 //   int offset;
1108 //   int count;
1109 //   char[] value;
1110 // }
1111 //
1112 // static int string_indexOf_J(StringDecl string_object, char[] target_object,
1113 //                             int targetOffset, int cache_i, int md2) {
1114 //   int cache = cache_i;
1115 //   int sourceOffset = string_object.offset;


2161   case vmIntrinsics::_reverseBytes_l:           n = new ReverseBytesLNode( 0,   arg);  break;
2162   default:  fatal_unexpected_iid(id);  break;
2163   }
2164   set_result(_gvn.transform(n));
2165   return true;
2166 }
2167 
2168 //----------------------------inline_unsafe_access----------------------------
2169 
2170 const static BasicType T_ADDRESS_HOLDER = T_LONG;
2171 
2172 // Helper that guards and inserts a pre-barrier.
2173 void LibraryCallKit::insert_pre_barrier(Node* base_oop, Node* offset,
2174                                         Node* pre_val, bool need_mem_bar) {
2175   // We could be accessing the referent field of a reference object. If so, when G1
2176   // is enabled, we need to log the value in the referent field in an SATB buffer.
2177   // This routine performs some compile time filters and generates suitable
2178   // runtime filters that guard the pre-barrier code.
2179   // Also add memory barrier for non volatile load from the referent field
2180   // to prevent commoning of loads across safepoint.
2181   if (!(UseG1GC || UseShenandoahGC) && !need_mem_bar)
2182     return;
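  // (Shenandoah also marks concurrently with SATB-style barriers, which is
  // why UseShenandoahGC takes the same pre-barrier path as G1 here.)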
2183 
2184   // Some compile time checks.
2185 
2186   // If offset is a constant, is it java_lang_ref_Reference::_reference_offset?
2187   const TypeX* otype = offset->find_intptr_t_type();
2188   if (otype != NULL && otype->is_con() &&
2189       otype->get_con() != java_lang_ref_Reference::referent_offset) {
2190     // Constant offset but not the reference_offset so just return
2191     return;
2192   }
2193 
2194   // We only need to generate the runtime guards for instances.
2195   const TypeOopPtr* btype = base_oop->bottom_type()->isa_oopptr();
2196   if (btype != NULL) {
2197     if (btype->isa_aryptr()) {
2198       // Array type so nothing to do
2199       return;
2200     }
2201 


2350         vtype = T_ADDRESS;  // it is really a C void*
2351       assert(vtype == type, "putter must accept the expected value");
2352     }
2353 #endif // ASSERT
2354  }
2355 #endif //PRODUCT
2356 
2357   C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
2358 
2359   Node* receiver = argument(0);  // type: oop
2360 
2361   // Build address expression.
2362   Node* adr;
2363   Node* heap_base_oop = top();
2364   Node* offset = top();
2365   Node* val;
2366 
2367   if (!is_native_ptr) {
2368     // The base is either a Java object or a value produced by Unsafe.staticFieldBase
2369     Node* base = argument(1);  // type: oop
2370     if (is_store) {
2371       base = shenandoah_write_barrier(base);
2372     } else {
2373       base = shenandoah_read_barrier(base);
2374     }
2375     // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
2376     offset = argument(2);  // type: long
2377     // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2378     // to be plain byte offsets, which are also the same as those accepted
2379     // by oopDesc::field_base.
2380     assert(Unsafe_field_offset_to_byte_offset(11) == 11,
2381            "fieldOffset must be byte-scaled");
2382     // 32-bit machines ignore the high half!
2383     offset = ConvL2X(offset);
2384     adr = make_unsafe_address(base, offset);
2385     heap_base_oop = base;
2386     val = is_store ? argument(4) : NULL;
2387   } else {
2388     Node* ptr = argument(1);  // type: long
2389     ptr = ConvL2X(ptr);  // adjust Java long to machine word
2390     adr = make_unsafe_address(NULL, ptr);
2391     val = is_store ? argument(3) : NULL;
2392   }
2393 
2394   const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();


2506     // the end of this method.  So, pushing the load onto the stack at a later
2507     // point is fine.
2508     set_result(p);
2509   } else {
2510     // place effect of store into memory
2511     switch (type) {
2512     case T_DOUBLE:
2513       val = dstore_rounding(val);
2514       break;
2515     case T_ADDRESS:
2516       // Repackage the long as a pointer.
2517       val = ConvL2X(val);
2518       val = _gvn.transform(new CastX2PNode(val));
2519       break;
2520     }
2521 
2522     MemNode::MemOrd mo = is_volatile ? MemNode::release : MemNode::unordered;
2523     if (type != T_OBJECT ) {
2524       (void) store_to_memory(control(), adr, val, type, adr_type, mo, is_volatile);
2525     } else {
2526       val = shenandoah_read_barrier_nomem(val);
2527       // Possibly an oop being stored to Java heap or native memory
2528       if (!TypePtr::NULL_PTR->higher_equal(_gvn.type(heap_base_oop))) {
2529         // oop to Java heap.
2530         (void) store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type, mo);
2531       } else {
2532         // We can't tell at compile time if we are storing in the Java heap or outside
2533         // of it. So we need to emit code to conditionally do the proper type of
2534         // store.
2535 
2536         IdealKit ideal(this);
2537 #define __ ideal.
2538         // QQQ who knows what probability is here??
2539         __ if_then(heap_base_oop, BoolTest::ne, null(), PROB_UNLIKELY(0.999)); {
2540           // Sync IdealKit and graphKit.
2541           sync_kit(ideal);
2542           Node* st = store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type, mo);
2543           // Update IdealKit memory.
2544           __ sync_kit(this);
2545         } __ else_(); {
2546           __ store(__ ctrl(), adr, val, type, alias_type->index(), mo, is_volatile);


2636     const bool two_slot_type = type2size[type] == 2;
2637     receiver = argument(0);  // type: oop
2638     base     = argument(1);  // type: oop
2639     offset   = argument(2);  // type: long
2640     oldval   = argument(4);  // type: oop, int, or long
2641     newval   = argument(two_slot_type ? 6 : 5);  // type: oop, int, or long
2642   } else if (kind == LS_xadd || kind == LS_xchg){
2643     receiver = argument(0);  // type: oop
2644     base     = argument(1);  // type: oop
2645     offset   = argument(2);  // type: long
2646     oldval   = NULL;
2647     newval   = argument(4);  // type: oop, int, or long
2648   }
2649 
2650   // Null check receiver.
2651   receiver = null_check(receiver);
2652   if (stopped()) {
2653     return true;
2654   }
2655 
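  // The CAS must operate on the object's to-space copy, so resolve 'base'
  // through the write barrier before the field address is computed.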
2656   base = shenandoah_write_barrier(base);
2657 
2658   // Build field offset expression.
2659   // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2660   // to be plain byte offsets, which are also the same as those accepted
2661   // by oopDesc::field_base.
2662   assert(Unsafe_field_offset_to_byte_offset(11) == 11, "fieldOffset must be byte-scaled");
2663   // 32-bit machines ignore the high half of long offsets
2664   offset = ConvL2X(offset);
2665   Node* adr = make_unsafe_address(base, offset);
2666   const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
2667 
2668   // For CAS, unlike inline_unsafe_access, there seems no point in
2669   // trying to refine types. Just use the coarse types here.
2670   const Type *value_type = Type::get_const_basic_type(type);
2671   Compile::AliasType* alias_type = C->alias_type(adr_type);
2672   assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2673 
2674   if (kind == LS_xchg && type == T_OBJECT) {
2675     const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
2676     if (tjp != NULL) {
2677       value_type = tjp;


2679   }
2680 
2681   int alias_idx = C->get_alias_index(adr_type);
2682 
2683   // Memory-model-wise, a LoadStore acts like a little synchronized
2684   // block, so needs barriers on each side.  These don't translate
2685   // into actual barriers on most machines, but we still need rest of
2686   // compiler to respect ordering.
2687 
2688   insert_mem_bar(Op_MemBarRelease);
2689   insert_mem_bar(Op_MemBarCPUOrder);
2690 
2691   // 4984716: MemBars must be inserted before this
2692   //          memory node in order to avoid a false
2693   //          dependency which will confuse the scheduler.
2694   Node *mem = memory(alias_idx);
2695 
2696   // For now, we handle only those cases that actually exist: ints,
2697   // longs, and Object. Adding others should be straightforward.
2698   Node* load_store;
2699   Node* result;
2700   switch(type) {
2701   case T_INT:
2702     if (kind == LS_xadd) {
2703       load_store = _gvn.transform(new GetAndAddINode(control(), mem, adr, newval, adr_type));
2704     } else if (kind == LS_xchg) {
2705       load_store = _gvn.transform(new GetAndSetINode(control(), mem, adr, newval, adr_type));
2706     } else if (kind == LS_cmpxchg) {
2707       load_store = _gvn.transform(new CompareAndSwapINode(control(), mem, adr, newval, oldval));
2708     } else {
2709       ShouldNotReachHere();
2710     }
2711     result = load_store;
2712     break;
2713   case T_LONG:
2714     if (kind == LS_xadd) {
2715       load_store = _gvn.transform(new GetAndAddLNode(control(), mem, adr, newval, adr_type));
2716     } else if (kind == LS_xchg) {
2717       load_store = _gvn.transform(new GetAndSetLNode(control(), mem, adr, newval, adr_type));
2718     } else if (kind == LS_cmpxchg) {
2719       load_store = _gvn.transform(new CompareAndSwapLNode(control(), mem, adr, newval, oldval));
2720     } else {
2721       ShouldNotReachHere();
2722     }
2723     result = load_store;
2724     break;
2725   case T_OBJECT:
2726     // Transformation of a value which could be NULL pointer (CastPP #NULL)
2727     // could be delayed during Parse (for example, in adjust_map_after_if()).
2728     // Execute transformation here to avoid barrier generation in such case.
2729     if (_gvn.type(newval) == TypePtr::NULL_PTR)
2730       newval = _gvn.makecon(TypePtr::NULL_PTR);
2731 
2732     newval = shenandoah_read_barrier_nomem(newval);
2733 
2734     // Reference stores need a store barrier.
2735     if (kind == LS_xchg) {
2736       // If pre-barrier must execute before the oop store, old value will require do_load here.
2737       if (!can_move_pre_barrier()) {
2738         pre_barrier(true /* do_load*/,
2739                     control(), base, adr, alias_idx, newval, value_type->make_oopptr(),
2740                     NULL /* pre_val*/,
2741                     T_OBJECT);
2742       } // Else move pre_barrier to use load_store value, see below.
2743     } else if (kind == LS_cmpxchg) {
2744       // Same as for newval above:
2745       if (_gvn.type(oldval) == TypePtr::NULL_PTR) {
2746         oldval = _gvn.makecon(TypePtr::NULL_PTR);
2747       }
2748       // The only known value which might get overwritten is oldval.
2749       pre_barrier(false /* do_load */,
2750                   control(), NULL, NULL, max_juint, NULL, NULL,
2751                   oldval /* pre_val */,
2752                   T_OBJECT);
2753     } else {
2754       ShouldNotReachHere();
2755     }
2756 
2757 #ifdef _LP64
2758     if (adr->bottom_type()->is_ptr_to_narrowoop()) {
2759       Node *newval_enc = _gvn.transform(new EncodePNode(newval, newval->bottom_type()->make_narrowoop()));
2760       if (kind == LS_xchg) {
2761         load_store = _gvn.transform(new GetAndSetNNode(control(), mem, adr,
2762                                                        newval_enc, adr_type, value_type->make_narrowoop()));
2763       } else {
2764         assert(kind == LS_cmpxchg, "wrong LoadStore operation");
2765         Node *oldval_enc = _gvn.transform(new EncodePNode(oldval, oldval->bottom_type()->make_narrowoop()));
2766         load_store = _gvn.transform(new CompareAndSwapNNode(control(), mem, adr,
2767                                                                 newval_enc, oldval_enc));
2768       }
2769       result = load_store;
2770     } else
2771 #endif
2772     {
2773       if (kind == LS_xchg) {
2774         load_store = _gvn.transform(new GetAndSetPNode(control(), mem, adr, newval, adr_type, value_type->is_oopptr()));
2775         result = load_store;
2776       } else {
2777         assert(kind == LS_cmpxchg, "wrong LoadStore operation");
2778         load_store = _gvn.transform(new CompareAndSwapPNode(control(), mem, adr, newval, oldval));
2779         result = load_store;
2780 
2781         if (UseShenandoahGC) {
2782           // if (! success)
2783           Node* cmp_true = _gvn.transform(new CmpINode(load_store, intcon(1)));
2784           Node* tst_true = _gvn.transform(new BoolNode(cmp_true, BoolTest::eq));
2785           IfNode* iff = create_and_map_if(control(), tst_true, PROB_LIKELY_MAG(2), COUNT_UNKNOWN);
2786           Node* iftrue = _gvn.transform(new IfTrueNode(iff));
2787           Node* iffalse = _gvn.transform(new IfFalseNode(iff));
2788 
2789           enum { _success_path = 1, _fail_path, _shenandoah_path, PATH_LIMIT };
2790           RegionNode* region = new RegionNode(PATH_LIMIT);
2791           Node*       phi    = new PhiNode(region, TypeInt::BOOL);
2792           // success -> return result of CAS1.
2793           region->init_req(_success_path, iftrue);
2794           phi   ->init_req(_success_path, load_store);
2795 
2796           // failure
2797           set_control(iffalse);
2798 
2799           // if (read_barrier(expected) == read_barrier(old))
2800           oldval = shenandoah_read_barrier(oldval);
2801 
2802           // Load old value from memory. We should really use what we get back from the CAS,
2803           // if we can.
2804           Node* current = make_load(control(), adr, TypeInstPtr::BOTTOM, type, MemNode::unordered);
2805           // read_barrier(old)
2806           Node* new_current = shenandoah_read_barrier(current);
2807 
2808           Node* chk = _gvn.transform(new CmpPNode(new_current, oldval));
2809           Node* test = _gvn.transform(new BoolNode(chk, BoolTest::eq));
2810 
2811           IfNode* iff2 = create_and_map_if(control(), test, PROB_UNLIKELY_MAG(2), COUNT_UNKNOWN);
2812           Node* iftrue2 = _gvn.transform(new IfTrueNode(iff2));
2813           Node* iffalse2 = _gvn.transform(new IfFalseNode(iff2));
2814 
2815           // If they are not equal, it's a legitimate failure and we return the result of CAS1.
2816           region->init_req(_fail_path, iffalse2);
2817           phi   ->init_req(_fail_path, load_store);
2818 
2819           // Otherwise we retry with old.
2820           set_control(iftrue2);
2821 
2822           Node *call = make_runtime_call(RC_LEAF | RC_NO_IO,
2823                                          OptoRuntime::shenandoah_cas_obj_Type(),
2824                                          CAST_FROM_FN_PTR(address, ShenandoahRuntime::compare_and_swap_object),
2825                                          "shenandoah_cas_obj",
2826                                          NULL,
2827                                          adr, newval, current);
2828 
2829           Node* retval = _gvn.transform(new ProjNode(call, TypeFunc::Parms + 0));
2830 
2831           region->init_req(_shenandoah_path, control());
2832           phi   ->init_req(_shenandoah_path, retval);
2833 
2834           set_control(_gvn.transform(region));
2835           record_for_igvn(region);
2836           phi = _gvn.transform(phi);
2837           result = phi;
2838         }
2839 
2840       }
2841     }
2842     if (kind == LS_cmpxchg) {
2843       // Emit the post barrier only when the actual store happened.
2844       // This makes sense to check only for compareAndSet that can fail to set the value.
2845       // CAS success path is marked more likely since we anticipate this is a performance
2846       // critical path, while CAS failure path can use the penalty for going through unlikely
2847       // path as backoff. Which is still better than doing a store barrier there.
2848       IdealKit ideal(this);
2849       ideal.if_then(result, BoolTest::ne, ideal.ConI(0), PROB_STATIC_FREQUENT); {
2850         sync_kit(ideal);
2851         post_barrier(ideal.ctrl(), result, base, adr, alias_idx, newval, T_OBJECT, true);
2852         ideal.sync_kit(this);
2853       } ideal.end_if();
2854       final_sync(ideal);
2855     } else {
2856       post_barrier(control(), result, base, adr, alias_idx, newval, T_OBJECT, true);
2857     }
2858     break;
2859   default:
2860     fatal(err_msg_res("unexpected type %d: %s", type, type2name(type)));
2861     break;
2862   }
2863 
2864   // SCMemProjNodes represent the memory state of a LoadStore. Their
2865   // main role is to prevent LoadStore nodes from being optimized away
2866   // when their results aren't used.
2867   Node* proj = _gvn.transform(new SCMemProjNode(load_store));
2868   set_memory(proj, alias_idx);
2869 
2870   if (type == T_OBJECT && kind == LS_xchg) {
2871 #ifdef _LP64
2872     if (adr->bottom_type()->is_ptr_to_narrowoop()) {
2873       result = _gvn.transform(new DecodeNNode(result, result->get_ptr_type()));
2874     }
2875 #endif
2876     if (can_move_pre_barrier()) {
2877       // Don't need to load pre_val. The old value is returned by load_store.
2878       // The pre_barrier can execute after the xchg as long as no safepoint
2879       // gets inserted between them.
2880       pre_barrier(false /* do_load */,
2881                   control(), NULL, NULL, max_juint, NULL, NULL,
2882                   result /* pre_val */,
2883                   T_OBJECT);
2884     }
2885   }
2886 
2887   // Add the trailing membar surrounding the access
2888   insert_mem_bar(Op_MemBarCPUOrder);
2889   insert_mem_bar(Op_MemBarAcquire);
2890 
2891   assert(type2size[result->bottom_type()->basic_type()] == type2size[rtype], "result type should match");
2892   set_result(result);
2893   return true;
2894 }
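
The Shenandoah region above exists because a CAS on an oop field can fail spuriously when the field still holds the from-space copy of the expected object. A toy model of that control flow, assuming a simple forwarding-pointer stand-in for the read barrier (illustration only, not VM code):

#include <atomic>
#include <cstddef>

struct Obj { Obj* fwd; };                // fwd == this when not forwarded
static Obj* resolve(Obj* p) { return p == NULL ? NULL : p->fwd; }

static bool cas_oop_field(std::atomic<Obj*>& field, Obj* expected, Obj* newval) {
  Obj* cur = expected;
  if (field.compare_exchange_strong(cur, newval)) {
    return true;                         // CAS1 succeeded (_success_path)
  }
  // 'cur' now holds the value actually found in memory.
  if (resolve(cur) != resolve(expected)) {
    return false;                        // genuinely different object (_fail_path)
  }
  // Same logical object behind a stale pointer: retry against the value in
  // memory, as the shenandoah_cas_obj runtime call does (_shenandoah_path).
  return field.compare_exchange_strong(cur, newval);
}
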
2895 
2896 //----------------------------inline_unsafe_ordered_store----------------------
2897 // public native void sun.misc.Unsafe.putOrderedObject(Object o, long offset, Object x);
2898 // public native void sun.misc.Unsafe.putOrderedInt(Object o, long offset, int x);
2899 // public native void sun.misc.Unsafe.putOrderedLong(Object o, long offset, long x);
2900 bool LibraryCallKit::inline_unsafe_ordered_store(BasicType type) {
2901   // This is another variant of inline_unsafe_access, differing in
2902   // that it always issues store-store ("release") barrier and ensures
2903   // store-atomicity (which only matters for "long").
2904 
2905   if (callee()->is_static())  return false;  // caller must have the capability!
2906 
2907 #ifndef PRODUCT
2908   {
2909     ResourceMark rm;
2910     // Check the signatures.
2911     ciSignature* sig = callee()->signature();
2912 #ifdef ASSERT


2916     assert(sig->type_at(0)->basic_type() == T_OBJECT, "base is object");
2917     assert(sig->type_at(1)->basic_type() == T_LONG, "offset is long");
2918 #endif // ASSERT
2919   }
2920 #endif //PRODUCT
2921 
2922   C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
2923 
2924   // Get arguments:
2925   Node* receiver = argument(0);  // type: oop
2926   Node* base     = argument(1);  // type: oop
2927   Node* offset   = argument(2);  // type: long
2928   Node* val      = argument(4);  // type: oop, int, or long
2929 
2930   // Null check receiver.
2931   receiver = null_check(receiver);
2932   if (stopped()) {
2933     return true;
2934   }
2935 
2936   base = shenandoah_write_barrier(base);
2937 
2938   // Build field offset expression.
2939   assert(Unsafe_field_offset_to_byte_offset(11) == 11, "fieldOffset must be byte-scaled");
2940   // 32-bit machines ignore the high half of long offsets
2941   offset = ConvL2X(offset);
2942   Node* adr = make_unsafe_address(base, offset);
2943   const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
2944   const Type *value_type = Type::get_const_basic_type(type);
2945   Compile::AliasType* alias_type = C->alias_type(adr_type);
2946 
2947   insert_mem_bar(Op_MemBarRelease);
2948   insert_mem_bar(Op_MemBarCPUOrder);
2949   // Ensure that the store is atomic for longs:
2950   const bool require_atomic_access = true;
2951   Node* store;
2952   if (type == T_OBJECT) { // reference stores need a store barrier.
2953     val = shenandoah_read_barrier_nomem(val);
2954     store = store_oop_to_unknown(control(), base, adr, adr_type, val, type, MemNode::release);
2955   }
2956   else {
2957     store = store_to_memory(control(), adr, val, type, adr_type, MemNode::release, require_atomic_access);
2958   }
2959   insert_mem_bar(Op_MemBarCPUOrder);
2960   return true;
2961 }
2962 
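
As a rough analogy only (the IR membars below are not literally these fences), the three Unsafe fences map onto C++11 atomics as follows:

//   Unsafe.loadFence()  ~ std::atomic_thread_fence(std::memory_order_acquire)
//   Unsafe.storeFence() ~ std::atomic_thread_fence(std::memory_order_release)
//   Unsafe.fullFence()  ~ std::atomic_thread_fence(std::memory_order_seq_cst)
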
2963 bool LibraryCallKit::inline_unsafe_fence(vmIntrinsics::ID id) {
2964   // Regardless of form, don't allow previous ld/st to move down,
2965   // then issue acquire, release, or volatile mem_bar.
2966   insert_mem_bar(Op_MemBarCPUOrder);
2967   switch(id) {
2968     case vmIntrinsics::_loadFence:
2969       insert_mem_bar(Op_LoadFence);
2970       return true;
2971     case vmIntrinsics::_storeFence:
2972       insert_mem_bar(Op_StoreFence);
2973       return true;
2974     case vmIntrinsics::_fullFence:
2975       insert_mem_bar(Op_MemBarVolatile);


3257   Node* bits = intcon(modifier_bits);
3258   Node* mbit = _gvn.transform(new AndINode(mods, mask));
3259   Node* cmp  = _gvn.transform(new CmpINode(mbit, bits));
3260   Node* bol  = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
3261   return generate_fair_guard(bol, region);
3262 }
3263 Node* LibraryCallKit::generate_interface_guard(Node* kls, RegionNode* region) {
3264   return generate_access_flags_guard(kls, JVM_ACC_INTERFACE, 0, region);
3265 }
3266 
3267 //-------------------------inline_native_Class_query-------------------
3268 bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) {
3269   const Type* return_type = TypeInt::BOOL;
3270   Node* prim_return_value = top();  // what happens if it's a primitive class?
3271   bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
3272   bool expect_prim = false;     // most of these guys expect to work on refs
3273 
3274   enum { _normal_path = 1, _prim_path = 2, PATH_LIMIT };
3275 
3276   Node* mirror = argument(0);
3277 
3278   if (ShenandoahVerifyReadsToFromSpace) {
3279     mirror = shenandoah_read_barrier(mirror);
3280   }
3281 
3282   Node* obj    = top();
3283 
3284   switch (id) {
3285   case vmIntrinsics::_isInstance:
3286     // nothing is an instance of a primitive type
3287     prim_return_value = intcon(0);
3288     obj = argument(1);
3289     if (ShenandoahVerifyReadsToFromSpace) {
3290       obj = shenandoah_read_barrier(obj);
3291     }
3292     break;
3293   case vmIntrinsics::_getModifiers:
3294     prim_return_value = intcon(JVM_ACC_ABSTRACT | JVM_ACC_FINAL | JVM_ACC_PUBLIC);
3295     assert(is_power_of_2((int)JVM_ACC_WRITTEN_FLAGS+1), "change next line");
3296     return_type = TypeInt::make(0, JVM_ACC_WRITTEN_FLAGS, Type::WidenMin);
3297     break;
3298   case vmIntrinsics::_isInterface:
3299     prim_return_value = intcon(0);
3300     break;
3301   case vmIntrinsics::_isArray:
3302     prim_return_value = intcon(0);
3303     expect_prim = true;  // cf. ObjectStreamClass.getClassSignature
3304     break;
3305   case vmIntrinsics::_isPrimitive:
3306     prim_return_value = intcon(1);
3307     expect_prim = true;  // obviously
3308     break;
3309   case vmIntrinsics::_getSuperclass:
3310     prim_return_value = null();
3311     return_type = TypeInstPtr::MIRROR->cast_to_ptr_type(TypePtr::BotPTR);


3519     PreserveJVMState pjvms(this);
3520     set_control(_gvn.transform(region));
3521     uncommon_trap(Deoptimization::Reason_intrinsic,
3522                   Deoptimization::Action_maybe_recompile);
3523   }
3524   if (!stopped()) {
3525     set_result(res);
3526   }
3527   return true;
3528 }
3529 
3530 
3531 //--------------------------inline_native_subtype_check------------------------
3532 // This intrinsic takes the JNI calls out of the heart of
3533 // UnsafeFieldAccessorImpl.set, which improves Field.set, readObject, etc.
3534 bool LibraryCallKit::inline_native_subtype_check() {
3535   // Pull both arguments off the stack.
3536   Node* args[2];                // two java.lang.Class mirrors: superc, subc
3537   args[0] = argument(0);
3538   args[1] = argument(1);
3539 
3540   Node* klasses[2];             // corresponding Klasses: superk, subk
3541   klasses[0] = klasses[1] = top();
3542 
3543   enum {
3544     // A full decision tree on {superc is prim, subc is prim}:
3545     _prim_0_path = 1,           // {P,N} => false
3546                                 // {P,P} & superc!=subc => false
3547     _prim_same_path,            // {P,P} & superc==subc => true
3548     _prim_1_path,               // {N,P} => false
3549     _ref_subtype_path,          // {N,N} & subtype check wins => true
3550     _both_ref_path,             // {N,N} & subtype check loses => false
3551     PATH_LIMIT
3552   };
3553 
3554   RegionNode* region = new RegionNode(PATH_LIMIT);
3555   Node*       phi    = new PhiNode(region, TypeInt::BOOL);
3556   record_for_igvn(region);
3557 
3558   const TypePtr* adr_type = TypeRawPtr::BOTTOM;   // memory type of loads
3559   const TypeKlassPtr* kls_type = TypeKlassPtr::OBJECT_OR_NULL;


3582     region->init_req(prim_path, null_ctl);
3583     if (stopped())  break;
3584     klasses[which_arg] = kls;
3585   }
3586 
3587   if (!stopped()) {
3588     // now we have two reference types, in klasses[0..1]
3589     Node* subk   = klasses[1];  // the argument to isAssignableFrom
3590     Node* superk = klasses[0];  // the receiver
3591     region->set_req(_both_ref_path, gen_subtype_check(subk, superk));
3592     // now we have a successful reference subtype check
3593     region->set_req(_ref_subtype_path, control());
3594   }
3595 
3596   // If both operands are primitive (both klasses null), then
3597   // we must return true when they are identical primitives.
3598   // It is convenient to test this after the first null klass check.
3599   set_control(region->in(_prim_0_path)); // go back to first null check
3600   if (!stopped()) {
3601     // Since superc is primitive, make a guard for the superc==subc case.
3602     shenandoah_acmp_barrier(args[0], args[1]);
3603     Node* cmp_eq = _gvn.transform(new CmpPNode(args[0], args[1]));
3604     Node* bol_eq = _gvn.transform(new BoolNode(cmp_eq, BoolTest::eq));
3605     generate_guard(bol_eq, region, PROB_FAIR);
3606     if (region->req() == PATH_LIMIT+1) {
3607       // A guard was added.  If the added guard is taken, superc==subc.
3608       region->swap_edges(PATH_LIMIT, _prim_same_path);
3609       region->del_req(PATH_LIMIT);
3610     }
3611     region->set_req(_prim_0_path, control()); // Not equal after all.
3612   }
3613 
3614   // these are the only paths that produce 'true':
3615   phi->set_req(_prim_same_path,   intcon(1));
3616   phi->set_req(_ref_subtype_path, intcon(1));
3617 
3618   // pull together the cases:
3619   assert(region->req() == PATH_LIMIT, "sane region");
3620   for (uint i = 1; i < region->req(); i++) {
3621     Node* ctl = region->in(i);
3622     if (ctl == NULL || ctl == top()) {


3827 
3828     // Bail out if length is negative.
3829     // Without this the new_array would throw
3830     // NegativeArraySizeException but IllegalArgumentException is what
3831     // should be thrown
3832     generate_negative_guard(length, bailout, &length);
3833 
3834     if (bailout->req() > 1) {
3835       PreserveJVMState pjvms(this);
3836       set_control(_gvn.transform(bailout));
3837       uncommon_trap(Deoptimization::Reason_intrinsic,
3838                     Deoptimization::Action_maybe_recompile);
3839     }
3840 
3841     if (!stopped()) {
3842       // How many elements will we copy from the original?
3843       // The answer is MinI(orig_length - start, length).
3844       Node* orig_tail = _gvn.transform(new SubINode(orig_length, start));
3845       Node* moved = generate_min_max(vmIntrinsics::_min, orig_tail, length);
3846 
3847       original = shenandoah_read_barrier(original);
3848 
3849       // Generate a direct call to the right arraycopy function(s).
3850       // We know the copy is disjoint but we might not know if the
3851       // oop stores need checking.
3852       // Extreme case:  Arrays.copyOf((Integer[])x, 10, String[].class).
3853       // This will fail a store-check if x contains any non-nulls.
3854 
3855       // ArrayCopyNode:Ideal may transform the ArrayCopyNode to
3856       // loads/stores but it is legal only if we're sure the
3857       // Arrays.copyOf would succeed. So we need all input arguments
3858       // to the copyOf to be validated, including that the copy to the
3859       // new array won't trigger an ArrayStoreException. That subtype
3860       // check can be optimized if we know something on the type of
3861       // the input array from type speculation.
3862       if (_gvn.type(klass_node)->singleton()) {
3863         ciKlass* subk   = _gvn.type(load_object_klass(original))->is_klassptr()->klass();
3864         ciKlass* superk = _gvn.type(klass_node)->is_klassptr()->klass();
3865 
3866         int test = C->static_subtype_check(superk, subk);
3867         if (test != Compile::SSC_always_true && test != Compile::SSC_always_false) {
3868           const TypeOopPtr* t_original = _gvn.type(original)->is_oopptr();


4010   PhiNode*    result_val = new PhiNode(result_reg, TypeInt::INT);
4011   PhiNode*    result_io  = new PhiNode(result_reg, Type::ABIO);
4012   PhiNode*    result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
4013   Node* obj = NULL;
4014   if (!is_static) {
4015     // Check for hashing null object
4016     obj = null_check_receiver();
4017     if (stopped())  return true;        // unconditionally null
4018     result_reg->init_req(_null_path, top());
4019     result_val->init_req(_null_path, top());
4020   } else {
4021     // Do a null check, and return zero if null.
4022     // System.identityHashCode(null) == 0
4023     obj = argument(0);
4024     Node* null_ctl = top();
4025     obj = null_check_oop(obj, &null_ctl);
4026     result_reg->init_req(_null_path, null_ctl);
4027     result_val->init_req(_null_path, _gvn.intcon(0));
4028   }
4029 
4030   if (ShenandoahVerifyReadsToFromSpace) {
4031     obj = shenandoah_read_barrier(obj);
4032   }
4033 
4034   // Unconditionally null?  Then return right away.
4035   if (stopped()) {
4036     set_control( result_reg->in(_null_path));
4037     if (!stopped())
4038       set_result(result_val->in(_null_path));
4039     return true;
4040   }
4041 
4042   // We only go to the fast case code if we pass a number of guards.  The
4043   // paths which do not pass are accumulated in the slow_region.
4044   RegionNode* slow_region = new RegionNode(1);
4045   record_for_igvn(slow_region);
4046 
4047   // If this is a virtual call, we generate a funny guard.  We pull out
4048   // the vtable entry corresponding to hashCode() from the target object.
4049   // If the target method which we are calling happens to be the native
4050   // Object hashCode() method, we pass the guard.  We do not need this
4051   // guard for non-virtual calls -- the caller is known to be the native
4052   // Object hashCode().
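  // Sketch of the guard shape (assumed pseudo-code, not the actual IR):
  // load the vtable slot for hashCode() and compare it against the
  // native Object.hashCode() entry point:
  //   if (receiver->klass()->vtable[hashCode_index] != Object::hashCode)
  //     goto slow_region;   // some subclass overrides hashCode()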
4053   if (is_virtual) {


4329 #endif //_LP64
4330 
4331 //----------------------inline_unsafe_copyMemory-------------------------
4332 // public native void sun.misc.Unsafe.copyMemory(Object srcBase, long srcOffset, Object destBase, long destOffset, long bytes);
4333 bool LibraryCallKit::inline_unsafe_copyMemory() {
4334   if (callee()->is_static())  return false;  // caller must have the capability!
4335   null_check_receiver();  // null-check receiver
4336   if (stopped())  return true;
4337 
4338   C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
4339 
4340   Node* src_ptr =         argument(1);   // type: oop
4341   Node* src_off = ConvL2X(argument(2));  // type: long
4342   Node* dst_ptr =         argument(4);   // type: oop
4343   Node* dst_off = ConvL2X(argument(5));  // type: long
4344   Node* size    = ConvL2X(argument(7));  // type: long
4345 
4346   assert(Unsafe_field_offset_to_byte_offset(11) == 11,
4347          "fieldOffset must be byte-scaled");
4348 
4349   src_ptr = shenandoah_read_barrier(src_ptr);
4350   dst_ptr = shenandoah_write_barrier(dst_ptr);
4351 
4352   Node* src = make_unsafe_address(src_ptr, src_off);
4353   Node* dst = make_unsafe_address(dst_ptr, dst_off);
4354 
4355   // Conservatively insert a memory barrier on all memory slices.
4356   // Do not let writes of the copy source or destination float below the copy.
4357   insert_mem_bar(Op_MemBarCPUOrder);
4358 
4359   // Call it.  Note that the length argument is not scaled.
4360   make_runtime_call(RC_LEAF|RC_NO_FP,
4361                     OptoRuntime::fast_arraycopy_Type(),
4362                     StubRoutines::unsafe_arraycopy(),
4363                     "unsafe_arraycopy",
4364                     TypeRawPtr::BOTTOM,
4365                     src, dst, size XTOP);
4366 
4367   // Do not let reads of the copy destination float above the copy.
4368   insert_mem_bar(Op_MemBarCPUOrder);
4369 
4370   return true;
4371 }
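// Illustrative caller shape (assumed, not part of this file):
//   unsafe.copyMemory(srcBase, srcOff, destBase, destOff, nBytes);
// compiles to the single unsafe_arraycopy leaf call above, bracketed by
// the two CPUOrder membars so surrounding memory accesses cannot be
// reordered across the copy.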
4372 
4373 //------------------------copy_to_clone-----------------------------------
4374 // Helper function for inline_native_clone.
4375 void LibraryCallKit::copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array, bool card_mark) {
4376   assert(obj_size != NULL, "");
4377   Node* raw_obj = alloc_obj->in(1);
4378   assert(alloc_obj->is_CheckCastPP() && raw_obj->is_Proj() && raw_obj->in(0)->is_Allocate(), "");
4379 
4380   obj = shenandoah_read_barrier(obj);
4381 
4382   AllocateNode* alloc = NULL;
4383   if (ReduceBulkZeroing) {
4384     // We will be completely responsible for initializing this object -
4385     // mark Initialize node as complete.
4386     alloc = AllocateNode::Ideal_allocation(alloc_obj, &_gvn);
4387     // The object was just allocated - there should not be any stores!
4388     guarantee(alloc != NULL && alloc->maybe_set_complete(&_gvn), "");
4389     // Mark as complete_with_arraycopy so that on AllocateNode
4390     // expansion, we know this AllocateNode is initialized by an array
4391     // copy and a StoreStore barrier exists after the array copy.
4392     alloc->initialization()->set_complete_with_arraycopy();
4393   }
4394 
4395   // Copy the fastest available way.
4396   // TODO: generate field copies for small objects instead.
4397   Node* src  = obj;
4398   Node* dest = alloc_obj;
4399   Node* size = _gvn.transform(obj_size);
4400 
4401   // Exclude the header but include the array length, to copy by 8-byte words.


4419   }
4420   src  = basic_plus_adr(src,  base_off);
4421   dest = basic_plus_adr(dest, base_off);
4422 
4423   // Compute the length also, if needed:
4424   Node* countx = size;
4425   countx = _gvn.transform(new SubXNode(countx, MakeConX(base_off)));
4426   countx = _gvn.transform(new URShiftXNode(countx, intcon(LogBytesPerLong) ));
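  // Worked example (assuming a 64-bit VM with base_off == 16): for an
  // instance of size == 40 bytes,
  //   countx == (40 - 16) >> LogBytesPerLong == 24 >> 3 == 3 eight-byte words.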
4427 
4428   const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
4429 
4430   ArrayCopyNode* ac = ArrayCopyNode::make(this, false, src, NULL, dest, NULL, countx, false);
4431   ac->set_clonebasic();
4432   Node* n = _gvn.transform(ac);
4433   if (n == ac) {
4434     set_predefined_output_for_runtime_call(ac, ac->in(TypeFunc::Memory), raw_adr_type);
4435   } else {
4436     set_all_memory(n);
4437   }
4438 
4439   if (UseShenandoahGC) {
4440     // Make sure that references in the cloned object are updated for Shenandoah.
4441     make_runtime_call(RC_LEAF|RC_NO_FP,
4442                       OptoRuntime::shenandoah_clone_barrier_Type(),
4443                       CAST_FROM_FN_PTR(address, SharedRuntime::shenandoah_clone_barrier),
4444                       "shenandoah_clone_barrier", TypePtr::BOTTOM,
4445                       alloc_obj);
4446   }
4447 
4448   // If necessary, emit some card marks afterwards.  (Non-arrays only.)
4449   if (card_mark) {
4450     assert(!is_array, "");
4451     // Put in store barrier for any and all oops we are sticking
4452     // into this object.  (We could avoid this if we could prove
4453     // that the object type contains no oop fields at all.)
4454     Node* no_particular_value = NULL;
4455     Node* no_particular_field = NULL;
4456     int raw_adr_idx = Compile::AliasIdxRaw;
4457     post_barrier(control(),
4458                  memory(raw_adr_type),
4459                  alloc_obj,
4460                  no_particular_field,
4461                  raw_adr_idx,
4462                  no_particular_value,
4463                  T_OBJECT,
4464                  false);
4465   }
4466 
4467   // Do not let reads from the cloned object float above the arraycopy.


4554 
4555     const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
4556     int raw_adr_idx = Compile::AliasIdxRaw;
4557 
4558     Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)NULL);
4559     if (array_ctl != NULL) {
4560       // It's an array.
4561       PreserveJVMState pjvms(this);
4562       set_control(array_ctl);
4563       Node* obj_length = load_array_length(obj);
4564       Node* obj_size  = NULL;
4565       Node* alloc_obj = new_array(obj_klass, obj_length, 0, &obj_size);  // no arguments to push
4566 
4567       if (!use_ReduceInitialCardMarks()) {
4568         // If it is an oop array, it requires very special treatment,
4569         // because card marking is required on each card of the array.
4570         Node* is_obja = generate_objArray_guard(obj_klass, (RegionNode*)NULL);
4571         if (is_obja != NULL) {
4572           PreserveJVMState pjvms2(this);
4573           set_control(is_obja);
4574 
4575           obj = shenandoah_read_barrier(obj);
4576 
4577           // Generate a direct call to the right arraycopy function(s).
4578           Node* alloc = tightly_coupled_allocation(alloc_obj, NULL);
4579           ArrayCopyNode* ac = ArrayCopyNode::make(this, true, obj, intcon(0), alloc_obj, intcon(0), obj_length, alloc != NULL);
4580           ac->set_cloneoop();
4581           Node* n = _gvn.transform(ac);
4582           assert(n == ac, "cannot disappear");
4583           ac->connect_outputs(this);
4584 
4585           result_reg->init_req(_objArray_path, control());
4586           result_val->init_req(_objArray_path, alloc_obj);
4587           result_i_o ->set_req(_objArray_path, i_o());
4588           result_mem ->set_req(_objArray_path, reset_memory());
4589         }
4590       }
4591       // Otherwise, there are no card marks to worry about.
4592       // (We can dispense with card marks if we know the allocation
4593       //  comes out of eden (TLAB)...  In fact, ReduceInitialCardMarks
4594       //  causes the non-eden paths to take compensating steps to
4595       //  simulate a fresh allocation, so that no further
4596       //  card marks are required in compiled code to initialize


4805     _gvn.hash_delete(dest);
4806     dest->set_req(0, control());
4807     Node* destx = _gvn.transform(dest);
4808     assert(destx == dest, "where has the allocation result gone?");
4809   }
4810 }
4811 
4812 
4813 //------------------------------inline_arraycopy-----------------------
4814 // public static native void java.lang.System.arraycopy(Object src,  int  srcPos,
4815 //                                                      Object dest, int destPos,
4816 //                                                      int length);
4817 bool LibraryCallKit::inline_arraycopy() {
4818   // Get the arguments.
4819   Node* src         = argument(0);  // type: oop
4820   Node* src_offset  = argument(1);  // type: int
4821   Node* dest        = argument(2);  // type: oop
4822   Node* dest_offset = argument(3);  // type: int
4823   Node* length      = argument(4);  // type: int
4824 

4825   // Check for allocation before we add nodes that would confuse
4826   // tightly_coupled_allocation()
4827   AllocateArrayNode* alloc = tightly_coupled_allocation(dest, NULL);
4828 
4829   int saved_reexecute_sp = -1;
4830   JVMState* saved_jvms = arraycopy_restore_alloc_state(alloc, saved_reexecute_sp);
4831   // See arraycopy_restore_alloc_state() comment
4832   // if alloc == NULL we don't have to worry about a tightly coupled allocation so we can emit all needed guards
4833   // if saved_jvms != NULL (then alloc != NULL) then we can handle guards and a tightly coupled allocation
4834   // if saved_jvms == NULL and alloc != NULL, we can't emit any guards
4835   bool can_emit_guards = (alloc == NULL || saved_jvms != NULL);
4836 
4837   // The following tests must be performed
4838   // (1) src and dest are arrays.
4839   // (2) src and dest arrays must have elements of the same BasicType
4840   // (3) src and dest must not be null.
4841   // (4) src_offset must not be negative.
4842   // (5) dest_offset must not be negative.
4843   // (6) length must not be negative.
4844   // (7) src_offset + length must not exceed length of src.


5014       set_control(not_subtype_ctrl);
5015       uncommon_trap(Deoptimization::Reason_intrinsic,
5016                     Deoptimization::Action_make_not_entrant);
5017       assert(stopped(), "Should be stopped");
5018     }
5019     {
5020       PreserveJVMState pjvms(this);
5021       set_control(_gvn.transform(slow_region));
5022       uncommon_trap(Deoptimization::Reason_intrinsic,
5023                     Deoptimization::Action_make_not_entrant);
5024       assert(stopped(), "Should be stopped");
5025     }
5026   }
5027 
5028   arraycopy_move_allocation_here(alloc, dest, saved_jvms, saved_reexecute_sp);
5029 
5030   if (stopped()) {
5031     return true;
5032   }
5033 
5034   src = shenandoah_read_barrier(src);
5035   dest = shenandoah_write_barrier(dest);
5036 
5037   ArrayCopyNode* ac = ArrayCopyNode::make(this, true, src, src_offset, dest, dest_offset, length, alloc != NULL,
5038                                           // Create LoadRange and LoadKlass nodes for use during macro expansion here
5039                                           // so the compiler has a chance to eliminate them: during macro expansion,
5040                                           // we have to set their control (CastPP nodes are eliminated).
5041                                           load_object_klass(src), load_object_klass(dest),
5042                                           load_array_length(src), load_array_length(dest));
5043 
5044   ac->set_arraycopy(validated);
5045 
5046   Node* n = _gvn.transform(ac);
5047   if (n == ac) {
5048     ac->connect_outputs(this);
5049   } else {
5050     assert(validated, "shouldn't transform unless all arguments are validated");
5051     set_all_memory(n);
5052   }
5053 
5054   return true;
5055 }
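// Illustrative outcome (assumed): for a fully validated copy such as
//   System.arraycopy(src, 0, dst, 0, len);   // both known to be int[]
// ArrayCopyNode::Ideal may replace the node with direct loads/stores;
// otherwise the node survives to macro expansion and becomes a call to
// the appropriate arraycopy stub.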
5056 
5057 
5058 // Helper function which determines if an arraycopy immediately follows
5059 // an allocation, with no intervening tests or other escapes for the object.
5060 AllocateArrayNode*
5061 LibraryCallKit::tightly_coupled_allocation(Node* ptr,
5062                                            RegionNode* slow_region) {
5063   if (stopped())             return NULL;  // no fast path
5064   if (C->AliasLevel() == 0)  return NULL;  // no MergeMems around
5065 
5066   ptr = ShenandoahBarrierNode::skip_through_barrier(ptr);
5067 
5068   AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(ptr, &_gvn);
5069   if (alloc == NULL)  return NULL;
5070 
5071   Node* rawmem = memory(Compile::AliasIdxRaw);
5072   // Is the allocation's memory state untouched?
5073   if (!(rawmem->is_Proj() && rawmem->in(0)->is_Initialize())) {
5074     // Bail out if there have been raw-memory effects since the allocation.
5075     // (Example:  There might have been a call or safepoint.)
5076     return NULL;
5077   }
5078   rawmem = rawmem->in(0)->as_Initialize()->memory(Compile::AliasIdxRaw);
5079   if (!(rawmem->is_Proj() && rawmem->in(0) == alloc)) {
5080     return NULL;
5081   }
5082 
5083   // There must be no unexpected observers of this allocation.
5084   for (DUIterator_Fast imax, i = ptr->fast_outs(imax); i < imax; i++) {
5085     Node* obs = ptr->fast_out(i);
5086     if (obs != this->map()) {
5087       return NULL;


5127 
5128   // If we get this far, we have an allocation which immediately
5129   // precedes the arraycopy, and we can take over zeroing the new object.
5130   // The arraycopy will finish the initialization, and provide
5131   // a new control state to which we will anchor the destination pointer.
5132 
5133   return alloc;
5134 }
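// The canonical tightly coupled shape (illustrative Java source):
//   int[] a = new int[n];                // AllocateArrayNode
//   System.arraycopy(src, 0, a, 0, n);   // immediately follows, no escapes
// Here the arraycopy takes over zeroing of 'a', so the allocation's
// Initialize node does not need to clear the array first.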
5135 
5136 //-------------inline_encodeISOArray-----------------------------------
5137 // encode char[] to byte[] in ISO_8859_1
5138 bool LibraryCallKit::inline_encodeISOArray() {
5139   assert(callee()->signature()->size() == 5, "encodeISOArray has 5 parameters");
5140   // no receiver since it is static method
5141   Node *src         = argument(0);
5142   Node *src_offset  = argument(1);
5143   Node *dst         = argument(2);
5144   Node *dst_offset  = argument(3);
5145   Node *length      = argument(4);
5146 
5147   src = shenandoah_read_barrier(src);
5148   dst = shenandoah_write_barrier(dst);
5149 
5150   const Type* src_type = src->Value(&_gvn);
5151   const Type* dst_type = dst->Value(&_gvn);
5152   const TypeAryPtr* top_src = src_type->isa_aryptr();
5153   const TypeAryPtr* top_dest = dst_type->isa_aryptr();
5154   if (top_src  == NULL || top_src->klass()  == NULL ||
5155       top_dest == NULL || top_dest->klass() == NULL) {
5156     // failed array check
5157     return false;
5158   }
5159 
5160   // Figure out the size and type of the elements we will be copying.
5161   BasicType src_elem = src_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5162   BasicType dst_elem = dst_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5163   if (src_elem != T_CHAR || dst_elem != T_BYTE) {
5164     return false;
5165   }
5166   Node* src_start = array_element_address(src, src_offset, src_elem);
5167   Node* dst_start = array_element_address(dst, dst_offset, dst_elem);
5168   // 'src_start' points to src array + scaled offset
5169   // 'dst_start' points to dst array + scaled offset


5179 
5180 //-------------inline_multiplyToLen-----------------------------------
5181 bool LibraryCallKit::inline_multiplyToLen() {
5182   assert(UseMultiplyToLenIntrinsic, "not implemented on this platform");
5183 
5184   address stubAddr = StubRoutines::multiplyToLen();
5185   if (stubAddr == NULL) {
5186     return false; // Intrinsic's stub is not implemented on this platform
5187   }
5188   const char* stubName = "multiplyToLen";
5189 
5190   assert(callee()->signature()->size() == 5, "multiplyToLen has 5 parameters");
5191 
5192   // no receiver because it is a static method
5193   Node* x    = argument(0);
5194   Node* xlen = argument(1);
5195   Node* y    = argument(2);
5196   Node* ylen = argument(3);
5197   Node* z    = argument(4);
5198 
5199   x = shenandoah_read_barrier(x);
5200   y = shenandoah_read_barrier(y);
5201   z = shenandoah_write_barrier(z);
5202 
5203   const Type* x_type = x->Value(&_gvn);
5204   const Type* y_type = y->Value(&_gvn);
5205   const TypeAryPtr* top_x = x_type->isa_aryptr();
5206   const TypeAryPtr* top_y = y_type->isa_aryptr();
5207   if (top_x  == NULL || top_x->klass()  == NULL ||
5208       top_y == NULL || top_y->klass() == NULL) {
5209     // failed array check
5210     return false;
5211   }
5212 
5213   BasicType x_elem = x_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5214   BasicType y_elem = y_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5215   if (x_elem != T_INT || y_elem != T_INT) {
5216     return false;
5217   }
5218 
5219   // Set the original stack and the reexecute bit for the interpreter to reexecute
5220   // the bytecode that invokes BigInteger.multiplyToLen() if deoptimization happens
5221   // on the return from z array allocation in runtime.
5222   { PreserveReexecuteState preexecs(this);


5283   return true;
5284 }
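// Java-level contract being intrinsified (for reference, assumed from the
// BigInteger sources): multiplyToLen(x, xlen, y, ylen, z) multiplies two
// int[] magnitudes and returns a product of xlen + ylen int limbs in z.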
5285 
5286 //-------------inline_squareToLen------------------------------------
5287 bool LibraryCallKit::inline_squareToLen() {
5288   assert(UseSquareToLenIntrinsic, "not implemented on this platform");
5289 
5290   address stubAddr = StubRoutines::squareToLen();
5291   if (stubAddr == NULL) {
5292     return false; // Intrinsic's stub is not implemented on this platform
5293   }
5294   const char* stubName = "squareToLen";
5295 
5296   assert(callee()->signature()->size() == 4, "implSquareToLen has 4 parameters");
5297 
5298   Node* x    = argument(0);
5299   Node* len  = argument(1);
5300   Node* z    = argument(2);
5301   Node* zlen = argument(3);
5302 
5303   x = shenandoah_read_barrier(x);
5304   z = shenandoah_write_barrier(z);
5305 
5306   const Type* x_type = x->Value(&_gvn);
5307   const Type* z_type = z->Value(&_gvn);
5308   const TypeAryPtr* top_x = x_type->isa_aryptr();
5309   const TypeAryPtr* top_z = z_type->isa_aryptr();
5310   if (top_x  == NULL || top_x->klass()  == NULL ||
5311       top_z  == NULL || top_z->klass()  == NULL) {
5312     // failed array check
5313     return false;
5314   }
5315 
5316   BasicType x_elem = x_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5317   BasicType z_elem = z_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5318   if (x_elem != T_INT || z_elem != T_INT) {
5319     return false;
5320   }
5321 
5322 
5323   Node* x_start = array_element_address(x, intcon(0), x_elem);
5324   Node* z_start = array_element_address(z, intcon(0), z_elem);
5325 


5333 }
5334 
5335 //-------------inline_mulAdd------------------------------------------
5336 bool LibraryCallKit::inline_mulAdd() {
5337   assert(UseMulAddIntrinsic, "not implemented on this platform");
5338 
5339   address stubAddr = StubRoutines::mulAdd();
5340   if (stubAddr == NULL) {
5341     return false; // Intrinsic's stub is not implemented on this platform
5342   }
5343   const char* stubName = "mulAdd";
5344 
5345   assert(callee()->signature()->size() == 5, "mulAdd has 5 parameters");
5346 
5347   Node* out      = argument(0);
5348   Node* in       = argument(1);
5349   Node* offset   = argument(2);
5350   Node* len      = argument(3);
5351   Node* k        = argument(4);
5352 
5353   in = shenandoah_read_barrier(in);
5354   out = shenandoah_write_barrier(out);
5355 
5356   const Type* out_type = out->Value(&_gvn);
5357   const Type* in_type = in->Value(&_gvn);
5358   const TypeAryPtr* top_out = out_type->isa_aryptr();
5359   const TypeAryPtr* top_in = in_type->isa_aryptr();
5360   if (top_out  == NULL || top_out->klass()  == NULL ||
5361       top_in == NULL || top_in->klass() == NULL) {
5362     // failed array check
5363     return false;
5364   }
5365 
5366   BasicType out_elem = out_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5367   BasicType in_elem = in_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5368   if (out_elem != T_INT || in_elem != T_INT) {
5369     return false;
5370   }
5371 
5372   Node* outlen = load_array_length(out);
5373   Node* new_offset = _gvn.transform(new SubINode(outlen, offset));
5374   Node* out_start = array_element_address(out, intcon(0), out_elem);
5375   Node* in_start = array_element_address(in, intcon(0), in_elem);


5385 
5386 //-------------inline_montgomeryMultiply-----------------------------------
5387 bool LibraryCallKit::inline_montgomeryMultiply() {
5388   address stubAddr = StubRoutines::montgomeryMultiply();
5389   if (stubAddr == NULL) {
5390     return false; // Intrinsic's stub is not implemented on this platform
5391   }
5392 
5393   assert(UseMontgomeryMultiplyIntrinsic, "not implemented on this platform");
5394   const char* stubName = "montgomery_multiply";
5395 
5396   assert(callee()->signature()->size() == 7, "montgomeryMultiply has 7 parameters");
5397 
5398   Node* a    = argument(0);
5399   Node* b    = argument(1);
5400   Node* n    = argument(2);
5401   Node* len  = argument(3);
5402   Node* inv  = argument(4);
5403   Node* m    = argument(6);
5404 
5405   a = shenandoah_read_barrier(a);
5406   b = shenandoah_read_barrier(b);
5407   n = shenandoah_read_barrier(n);
5408   m = shenandoah_write_barrier(m);
5409 
5410   const Type* a_type = a->Value(&_gvn);
5411   const TypeAryPtr* top_a = a_type->isa_aryptr();
5412   const Type* b_type = b->Value(&_gvn);
5413   const TypeAryPtr* top_b = b_type->isa_aryptr();
5414   const Type* n_type = n->Value(&_gvn);
5415   const TypeAryPtr* top_n = n_type->isa_aryptr();
5416   const Type* m_type = m->Value(&_gvn);
5417   const TypeAryPtr* top_m = m_type->isa_aryptr();
5418   if (top_a  == NULL || top_a->klass()  == NULL ||
5419       top_b == NULL || top_b->klass()  == NULL ||
5420       top_n == NULL || top_n->klass()  == NULL ||
5421       top_m == NULL || top_m->klass()  == NULL) {
5422     // failed array check
5423     return false;
5424   }
5425 
5426   BasicType a_elem = a_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5427   BasicType b_elem = b_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5428   BasicType n_elem = n_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5429   BasicType m_elem = m_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();


5449   return true;
5450 }
5451 
5452 bool LibraryCallKit::inline_montgomerySquare() {
5453   address stubAddr = StubRoutines::montgomerySquare();
5454   if (stubAddr == NULL) {
5455     return false; // Intrinsic's stub is not implemented on this platform
5456   }
5457 
5458   assert(UseMontgomerySquareIntrinsic, "not implemented on this platform");
5459   const char* stubName = "montgomery_square";
5460 
5461   assert(callee()->signature()->size() == 6, "montgomerySquare has 6 parameters");
5462 
5463   Node* a    = argument(0);
5464   Node* n    = argument(1);
5465   Node* len  = argument(2);
5466   Node* inv  = argument(3);
5467   Node* m    = argument(5);
5468 
5469   a = shenandoah_read_barrier(a);
5470   n = shenandoah_read_barrier(n);
5471   m = shenandoah_write_barrier(m);
5472 
5473   const Type* a_type = a->Value(&_gvn);
5474   const TypeAryPtr* top_a = a_type->isa_aryptr();
5475   const Type* n_type = n->Value(&_gvn);
5476   const TypeAryPtr* top_n = n_type->isa_aryptr();
5477   const Type* m_type = m->Value(&_gvn);
5478   const TypeAryPtr* top_m = m_type->isa_aryptr();
5479   if (top_a  == NULL || top_a->klass()  == NULL ||
5480       top_n == NULL || top_n->klass()  == NULL ||
5481       top_m == NULL || top_m->klass()  == NULL) {
5482     // failed array check
5483     return false;
5484   }
5485 
5486   BasicType a_elem = a_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5487   BasicType n_elem = n_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5488   BasicType m_elem = m_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5489   if (a_elem != T_INT || n_elem != T_INT || m_elem != T_INT) {
5490     return false;
5491   }
5492 


5539   crc = _gvn.transform(new URShiftINode(crc, intcon(8)));
5540   result = _gvn.transform(new XorINode(crc, result));
5541   result = _gvn.transform(new XorINode(result, M1));
5542   set_result(result);
5543   return true;
5544 }
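// For reference (illustrative, standard table-driven CRC32): the byte
// update composed by the nodes above is equivalent to the Java expression
//   crc = table[(crc ^ b) & 0xff] ^ (crc >>> 8);
// followed by the final complement (the XorINode with M1).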
5545 
5546 /**
5547  * Calculate CRC32 for byte[] array.
5548  * int java.util.zip.CRC32.updateBytes(int crc, byte[] buf, int off, int len)
5549  */
5550 bool LibraryCallKit::inline_updateBytesCRC32() {
5551   assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions support");
5552   assert(callee()->signature()->size() == 4, "updateBytes has 4 parameters");
5553   // no receiver since it is a static method
5554   Node* crc     = argument(0); // type: int
5555   Node* src     = argument(1); // type: oop
5556   Node* offset  = argument(2); // type: int
5557   Node* length  = argument(3); // type: int
5558 
5559   src = shenandoah_read_barrier(src);
5560 
5561   const Type* src_type = src->Value(&_gvn);
5562   const TypeAryPtr* top_src = src_type->isa_aryptr();
5563   if (top_src  == NULL || top_src->klass()  == NULL) {
5564     // failed array check
5565     return false;
5566   }
5567 
5568   // Figure out the size and type of the elements we will be copying.
5569   BasicType src_elem = src_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5570   if (src_elem != T_BYTE) {
5571     return false;
5572   }
5573 
5574   // 'src_start' points to src array + scaled offset
5575   Node* src_start = array_element_address(src, offset, src_elem);
5576 
5577   // We assume that the range check is done by the caller.
5578   // TODO: generate range check (offset+length < src.length) in debug VM.
5579 
5580   // Call the stub.


5643   Node* src     = argument(1); // type: oop
5644   Node* offset  = argument(2); // type: int
5645   Node* end     = argument(3); // type: int
5646 
5647   Node* length = _gvn.transform(new SubINode(end, offset));
5648 
5649   const Type* src_type = src->Value(&_gvn);
5650   const TypeAryPtr* top_src = src_type->isa_aryptr();
5651   if (top_src  == NULL || top_src->klass()  == NULL) {
5652     // failed array check
5653     return false;
5654   }
5655 
5656   // Figure out the size and type of the elements we will be copying.
5657   BasicType src_elem = src_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5658   if (src_elem != T_BYTE) {
5659     return false;
5660   }
5661 
5662   // 'src_start' points to src array + scaled offset
5663   src = shenandoah_read_barrier(src);
5664   Node* src_start = array_element_address(src, offset, src_elem);
5665 
5666   // static final int[] byteTable in class CRC32C
5667   Node* table = get_table_from_crc32c_class(callee()->holder());
5668   table = shenandoah_read_barrier(table);
5669   Node* table_start = array_element_address(table, intcon(0), T_INT);
5670 
5671   // We assume that the range check is done by the caller.
5672   // TODO: generate range check (offset+length < src.length) in debug VM.
5673 
5674   // Call the stub.
5675   address stubAddr = StubRoutines::updateBytesCRC32C();
5676   const char *stubName = "updateBytesCRC32C";
5677 
5678   Node* call = make_runtime_call(RC_LEAF, OptoRuntime::updateBytesCRC32C_Type(),
5679                                  stubAddr, stubName, TypePtr::BOTTOM,
5680                                  crc, src_start, length, table_start);
5681   Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
5682   set_result(result);
5683   return true;
5684 }
5685 
5686 //------------------------------inline_updateDirectByteBufferCRC32C-----------------------
5687 //
5688 // Calculate CRC32C for DirectByteBuffer.


5692   assert(UseCRC32CIntrinsics, "need CRC32C instruction support");
5693   assert(callee()->signature()->size() == 5, "updateDirectByteBuffer has 4 parameters and one is long");
5694   assert(callee()->holder()->is_loaded(), "CRC32C class must be loaded");
5695   // no receiver since it is a static method
5696   Node* crc     = argument(0); // type: int
5697   Node* src     = argument(1); // type: long
5698   Node* offset  = argument(3); // type: int
5699   Node* end     = argument(4); // type: int
5700 
5701   Node* length = _gvn.transform(new SubINode(end, offset));
5702 
5703   src = ConvL2X(src);  // adjust Java long to machine word
5704   Node* base = _gvn.transform(new CastX2PNode(src));
5705   offset = ConvI2X(offset);
5706 
5707   // 'src_start' points to src array + scaled offset
5708   Node* src_start = basic_plus_adr(top(), base, offset);
5709 
5710   // static final int[] byteTable in class CRC32C
5711   Node* table = get_table_from_crc32c_class(callee()->holder());
5712   table = shenandoah_read_barrier(table);
5713   Node* table_start = array_element_address(table, intcon(0), T_INT);
5714 
5715   // Call the stub.
5716   address stubAddr = StubRoutines::updateBytesCRC32C();
5717   const char *stubName = "updateBytesCRC32C";
5718 
5719   Node* call = make_runtime_call(RC_LEAF, OptoRuntime::updateBytesCRC32C_Type(),
5720                                  stubAddr, stubName, TypePtr::BOTTOM,
5721                                  crc, src_start, length, table_start);
5722   Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
5723   set_result(result);
5724   return true;
5725 }
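// Note (illustrative): unlike the byte[] flavor above, the buffer is
// addressed directly off-heap, roughly
//   src_start = (address)srcAddr + offset
// which is why no array read barrier is applied to src here, only to the
// on-heap CRC32C table.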
5726 
5727 //------------------------------inline_updateBytesAdler32----------------------
5728 //
5729 // Calculate Adler32 checksum for byte[] array.
5730 // int java.util.zip.Adler32.updateBytes(int crc, byte[] buf, int off, int len)
5731 //
5732 bool LibraryCallKit::inline_updateBytesAdler32() {


5736   // no receiver since it is a static method
5737   Node* crc     = argument(0); // type: int
5738   Node* src     = argument(1); // type: oop
5739   Node* offset  = argument(2); // type: int
5740   Node* length  = argument(3); // type: int
5741 
5742   const Type* src_type = src->Value(&_gvn);
5743   const TypeAryPtr* top_src = src_type->isa_aryptr();
5744   if (top_src  == NULL || top_src->klass()  == NULL) {
5745     // failed array check
5746     return false;
5747   }
5748 
5749   // Figure out the size and type of the elements we will be copying.
5750   BasicType src_elem = src_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5751   if (src_elem != T_BYTE) {
5752     return false;
5753   }
5754 
5755   // 'src_start' points to src array + scaled offset
5756   src = shenandoah_read_barrier(src);
5757   Node* src_start = array_element_address(src, offset, src_elem);
5758 
5759   // We assume that the range check is done by the caller.
5760   // TODO: generate range check (offset+length < src.length) in debug VM.
5761 
5762   // Call the stub.
5763   address stubAddr = StubRoutines::updateBytesAdler32();
5764   const char *stubName = "updateBytesAdler32";
5765 
5766   Node* call = make_runtime_call(RC_LEAF, OptoRuntime::updateBytesAdler32_Type(),
5767                                  stubAddr, stubName, TypePtr::BOTTOM,
5768                                  crc, src_start, length);
5769   Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
5770   set_result(result);
5771   return true;
5772 }
5773 
5774 //------------------------------inline_updateByteBufferAdler32---------------
5775 //
5776 // Calculate Adler32 checksum for DirectByteBuffer.


5799 
5800   Node* call = make_runtime_call(RC_LEAF, OptoRuntime::updateBytesAdler32_Type(),
5801                                  stubAddr, stubName, TypePtr::BOTTOM,
5802                                  crc, src_start, length);
5803 
5804   Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
5805   set_result(result);
5806   return true;
5807 }
5808 
5809 //----------------------------inline_reference_get----------------------------
5810 // public T java.lang.ref.Reference.get();
5811 bool LibraryCallKit::inline_reference_get() {
5812   const int referent_offset = java_lang_ref_Reference::referent_offset;
5813   guarantee(referent_offset > 0, "should have already been set");
5814 
5815   // Get the argument:
5816   Node* reference_obj = null_check_receiver();
5817   if (stopped()) return true;
5818 
5819   if (ShenandoahVerifyReadsToFromSpace) {
5820     reference_obj = shenandoah_read_barrier(reference_obj);
5821   }
5822 
5823   Node* adr = basic_plus_adr(reference_obj, reference_obj, referent_offset);
5824 
5825   ciInstanceKlass* klass = env()->Object_klass();
5826   const TypeOopPtr* object_type = TypeOopPtr::make_from_klass(klass);
5827 
5828   Node* no_ctrl = NULL;
5829   Node* result = make_load(no_ctrl, adr, object_type, T_OBJECT, MemNode::unordered);
5830 
5831   // Use the pre-barrier to record the value in the referent field
5832   pre_barrier(false /* do_load */,
5833               control(),
5834               NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
5835               result /* pre_val */,
5836               T_OBJECT);
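  // Rationale (illustrative): once the referent escapes via get(), e.g.
  //   T t = ref.get();    // 't' can now be stored anywhere
  // a SATB collector must treat it as live, so the loaded value is fed to
  // the pre-barrier as pre_val even though no field is being overwritten.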
5837 
5838   // Add memory barrier to prevent commoning reads from this field
5839   // across safepoint since GC can change its value.
5840   insert_mem_bar(Op_MemBarCPUOrder);
5841 
5842   set_result(result);


5851     const TypeInstPtr* tinst = _gvn.type(fromObj)->isa_instptr();
5852     assert(tinst != NULL, "obj is null");
5853     assert(tinst->klass()->is_loaded(), "obj is not loaded");
5854     assert(!is_exact || tinst->klass_is_exact(), "klass not exact");
5855     fromKls = tinst->klass()->as_instance_klass();
5856   } else {
5857     assert(is_static, "only for static field access");
5858   }
5859   ciField* field = fromKls->get_field_by_name(ciSymbol::make(fieldName),
5860                                               ciSymbol::make(fieldTypeString),
5861                                               is_static);
5862 
5863   assert (field != NULL, "undefined field");
5864   if (field == NULL) return (Node *) NULL;
5865 
5866   if (is_static) {
5867     const TypeInstPtr* tip = TypeInstPtr::make(fromKls->java_mirror());
5868     fromObj = makecon(tip);
5869   }
5870 
5871   fromObj = shenandoah_read_barrier(fromObj);
5872 
5873   // Next code copied from Parse::do_get_xxx():
5874 
5875   // Compute address and memory type.
5876   int offset  = field->offset_in_bytes();
5877   bool is_vol = field->is_volatile();
5878   ciType* field_klass = field->type();
5879   assert(field_klass->is_loaded(), "should be loaded");
5880   const TypePtr* adr_type = C->alias_type(field)->adr_type();
5881   Node *adr = basic_plus_adr(fromObj, fromObj, offset);
5882   BasicType bt = field->layout_type();
5883 
5884   // Build the resultant type of the load
5885   const Type *type;
5886   if (bt == T_OBJECT) {
5887     type = TypeOopPtr::make_from_klass(field_klass->as_klass());
5888   } else {
5889     type = Type::get_const_basic_type(bt);
5890   }
5891 
5892   if (support_IRIW_for_not_multiple_copy_atomic_cpu && is_vol) {


5913   assert(UseAES, "need AES instruction support");
5914 
5915   switch(id) {
5916   case vmIntrinsics::_aescrypt_encryptBlock:
5917     stubAddr = StubRoutines::aescrypt_encryptBlock();
5918     stubName = "aescrypt_encryptBlock";
5919     break;
5920   case vmIntrinsics::_aescrypt_decryptBlock:
5921     stubAddr = StubRoutines::aescrypt_decryptBlock();
5922     stubName = "aescrypt_decryptBlock";
5923     break;
5924   }
5925   if (stubAddr == NULL) return false;
5926 
5927   Node* aescrypt_object = argument(0);
5928   Node* src             = argument(1);
5929   Node* src_offset      = argument(2);
5930   Node* dest            = argument(3);
5931   Node* dest_offset     = argument(4);
5932 
5933   // Resolve src and dest arrays for ShenandoahGC.
5934   src = shenandoah_read_barrier(src);
5935   dest = shenandoah_write_barrier(dest);
5936 
5937   // (1) src and dest are arrays.
5938   const Type* src_type = src->Value(&_gvn);
5939   const Type* dest_type = dest->Value(&_gvn);
5940   const TypeAryPtr* top_src = src_type->isa_aryptr();
5941   const TypeAryPtr* top_dest = dest_type->isa_aryptr();
5942   assert (top_src  != NULL && top_src->klass()  != NULL &&  top_dest != NULL && top_dest->klass() != NULL, "args are strange");
5943 
5944   // for the quick and dirty code we will skip all the checks.
5945   // we are just trying to get the call to be generated.
5946   Node* src_start  = src;
5947   Node* dest_start = dest;
5948   if (src_offset != NULL || dest_offset != NULL) {
5949     assert(src_offset != NULL && dest_offset != NULL, "");
5950     src_start  = array_element_address(src,  src_offset,  T_BYTE);
5951     dest_start = array_element_address(dest, dest_offset, T_BYTE);
5952   }
5953 
5954   // now need to get the start of its expanded key array
5955   // this requires a newer class file that has this array as littleEndian ints, otherwise we revert to java
5956   Node* k_start = get_key_start_from_aescrypt_object(aescrypt_object);


5985 
5986   switch(id) {
5987   case vmIntrinsics::_cipherBlockChaining_encryptAESCrypt:
5988     stubAddr = StubRoutines::cipherBlockChaining_encryptAESCrypt();
5989     stubName = "cipherBlockChaining_encryptAESCrypt";
5990     break;
5991   case vmIntrinsics::_cipherBlockChaining_decryptAESCrypt:
5992     stubAddr = StubRoutines::cipherBlockChaining_decryptAESCrypt();
5993     stubName = "cipherBlockChaining_decryptAESCrypt";
5994     break;
5995   }
5996   if (stubAddr == NULL) return false;
5997 
5998   Node* cipherBlockChaining_object = argument(0);
5999   Node* src                        = argument(1);
6000   Node* src_offset                 = argument(2);
6001   Node* len                        = argument(3);
6002   Node* dest                       = argument(4);
6003   Node* dest_offset                = argument(5);
6004 
6005   // Resolve src and dest arrays for ShenandoahGC.
6006   src = shenandoah_read_barrier(src);
6007   dest = shenandoah_write_barrier(dest);
6008 
6009   // (1) src and dest are arrays.
6010   const Type* src_type = src->Value(&_gvn);
6011   const Type* dest_type = dest->Value(&_gvn);
6012   const TypeAryPtr* top_src = src_type->isa_aryptr();
6013   const TypeAryPtr* top_dest = dest_type->isa_aryptr();
6014   assert (top_src  != NULL && top_src->klass()  != NULL
6015           &&  top_dest != NULL && top_dest->klass() != NULL, "args are strange");
6016 
6017   // checks are the responsibility of the caller
6018   Node* src_start  = src;
6019   Node* dest_start = dest;
6020   if (src_offset != NULL || dest_offset != NULL) {
6021     assert(src_offset != NULL && dest_offset != NULL, "");
6022     src_start  = array_element_address(src,  src_offset,  T_BYTE);
6023     dest_start = array_element_address(dest, dest_offset, T_BYTE);
6024   }
6025 
6026   // if we are in this set of code, we "know" the embeddedCipher is an AESCrypt object
6027   // (because of the predicated logic executed earlier).
6028   // so we cast it here safely.


6033 
6034   // cast it to what we know it will be at runtime
6035   const TypeInstPtr* tinst = _gvn.type(cipherBlockChaining_object)->isa_instptr();
6036   assert(tinst != NULL, "CBC obj is null");
6037   assert(tinst->klass()->is_loaded(), "CBC obj is not loaded");
6038   ciKlass* klass_AESCrypt = tinst->klass()->as_instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AESCrypt"));
6039   assert(klass_AESCrypt->is_loaded(), "predicate checks that this class is loaded");
6040 
6041   ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass();
6042   const TypeKlassPtr* aklass = TypeKlassPtr::make(instklass_AESCrypt);
6043   const TypeOopPtr* xtype = aklass->as_instance_type();
6044   Node* aescrypt_object = new CheckCastPPNode(control(), embeddedCipherObj, xtype);
6045   aescrypt_object = _gvn.transform(aescrypt_object);
6046 
6047   // we need to get the start of the aescrypt_object's expanded key array
6048   Node* k_start = get_key_start_from_aescrypt_object(aescrypt_object);
6049   if (k_start == NULL) return false;
6050 
6051   // similarly, get the start address of the r vector
6052   Node* objRvec = load_field_from_object(cipherBlockChaining_object, "r", "[B", /*is_exact*/ false);
6053   if (objRvec == NULL) return false;
6054 
6055   objRvec = shenandoah_write_barrier(objRvec);
6056 
6057   Node* r_start = array_element_address(objRvec, intcon(0), T_BYTE);
6058 
6059   Node* cbcCrypt;
6060   if (Matcher::pass_original_key_for_aes()) {
6061     // on SPARC we need to pass the original key since key expansion needs to happen in intrinsics due to
6062     // compatibility issues between Java key expansion and SPARC crypto instructions
6063     Node* original_k_start = get_original_key_start_from_aescrypt_object(aescrypt_object);
6064     if (original_k_start == NULL) return false;
6065 
6066     // Call the stub, passing src_start, dest_start, k_start, r_start, src_len and original_k_start
6067     cbcCrypt = make_runtime_call(RC_LEAF|RC_NO_FP,
6068                                  OptoRuntime::cipherBlockChaining_aescrypt_Type(),
6069                                  stubAddr, stubName, TypePtr::BOTTOM,
6070                                  src_start, dest_start, k_start, r_start, len, original_k_start);
6071   } else {
6072     // Call the stub, passing src_start, dest_start, k_start, r_start and src_len
6073     cbcCrypt = make_runtime_call(RC_LEAF|RC_NO_FP,
6074                                  OptoRuntime::cipherBlockChaining_aescrypt_Type(),
6075                                  stubAddr, stubName, TypePtr::BOTTOM,
6076                                  src_start, dest_start, k_start, r_start, len);
6077   }
6078 
6079   // return cipher length (int)
6080   Node* retvalue = _gvn.transform(new ProjNode(cbcCrypt, TypeFunc::Parms));
6081   set_result(retvalue);
6082   return true;
6083 }
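// Illustrative caller shape (assumed): this intrinsic stands in for
//   com.sun.crypto.provider.CipherBlockChaining.{encrypt,decrypt}(
//       byte[] in, int inOff, int len, byte[] out, int outOff)
// and, like the Java code, returns the number of bytes processed.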
6084 
6085 //------------------------------get_key_start_from_aescrypt_object-----------------------
6086 Node * LibraryCallKit::get_key_start_from_aescrypt_object(Node *aescrypt_object) {
6087   Node* objAESCryptKey = load_field_from_object(aescrypt_object, "K", "[I", /*is_exact*/ false);
6088   assert (objAESCryptKey != NULL, "wrong version of com.sun.crypto.provider.AESCrypt");
6089   if (objAESCryptKey == NULL) return (Node *) NULL;
6090 
6091   objAESCryptKey = shenandoah_read_barrier(objAESCryptKey);
6092 
6093   // now have the array, need to get the start address of the K array
6094   Node* k_start = array_element_address(objAESCryptKey, intcon(0), T_INT);
6095   return k_start;
6096 }
6097 
6098 //------------------------------get_original_key_start_from_aescrypt_object-----------------------
6099 Node * LibraryCallKit::get_original_key_start_from_aescrypt_object(Node *aescrypt_object) {
6100   Node* objAESCryptKey = load_field_from_object(aescrypt_object, "lastKey", "[B", /*is_exact*/ false);
6101   assert (objAESCryptKey != NULL, "wrong version of com.sun.crypto.provider.AESCrypt");
6102   if (objAESCryptKey == NULL) return (Node *) NULL;
6103 
6104   // now have the array, need to get the start address of the lastKey array
6105   Node* original_k_start = array_element_address(objAESCryptKey, intcon(0), T_BYTE);
6106   return original_k_start;
6107 }
6108 
6109 //----------------------------inline_cipherBlockChaining_AESCrypt_predicate----------------------------
6110 // Return node representing slow path of predicate check.
6111 // the pseudo code we want to emulate with this predicate is:

