src/share/vm/opto/library_call.cpp (old)

2695   // from this field across safepoint since GC can change its value.
2696   bool need_read_barrier = !is_native_ptr && !is_store &&
2697                            offset != top() && heap_base_oop != top();
2698 
2699   if (!is_store && type == T_OBJECT) {
2700     const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type, is_native_ptr);
2701     if (tjp != NULL) {
2702       value_type = tjp;
2703     }
2704   }
2705 
2706   receiver = null_check(receiver);
2707   if (stopped()) {
2708     return true;
2709   }
2710   // Heap pointers get a null-check from the interpreter,
2711   // as a courtesy.  However, this is not guaranteed by Unsafe,
2712   // and it is not possible to fully distinguish unintended nulls
2713   // from intended ones in this API.
2714 



2715   if (is_volatile) {
2716     // We need to emit leading and trailing CPU membars (see below) in
2717     // addition to memory membars when is_volatile. This is a little
2718     // too strong, but avoids the need to insert per-alias-type
2719     // volatile membars (for stores; compare Parse::do_put_xxx), which
2720     // we cannot do effectively here because we probably only have a
2721     // rough approximation of type.
2722     need_mem_bar = true;
2723     // For Stores, place a memory ordering barrier now.
2724     if (is_store) {
2725       insert_mem_bar(Op_MemBarRelease);
2726     } else {
2727       if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
2728         insert_mem_bar(Op_MemBarVolatile);
2729       }
2730     }
2731   }
2732 
2733   // Memory barrier to prevent normal and 'unsafe' accesses from
2734   // bypassing each other.  Happens after null checks, so the
2735   // exception paths do not take memory state from the memory barrier,
2736   // so there is no problem making a strong assert about mixing users
2737   // of safe & unsafe memory.  Otherwise fails in a CTW of rt.jar
2738   // around 5701, class sun/reflect/UnsafeBooleanFieldAccessorImpl.
2739   if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder);
2740 
2741   if (!is_store) {
2742     MemNode::MemOrd mo = is_volatile ? MemNode::acquire : MemNode::unordered;
2743     // To be valid, unsafe loads may depend on conditions other than
2744     // the one that guards them: pin the Load node
2745     Node* p = make_load(control(), adr, value_type, type, adr_type, mo, LoadNode::Pinned, is_volatile, unaligned, mismatched);
2746     // load value
2747     switch (type) {
2748     case T_BOOLEAN:
2749     case T_CHAR:
2750     case T_BYTE:
2751     case T_SHORT:
2752     case T_INT:
2753     case T_LONG:
2754     case T_FLOAT:
2755     case T_DOUBLE:
2756       break;
2757     case T_OBJECT:
2758       if (need_read_barrier) {
2759         insert_pre_barrier(heap_base_oop, offset, p, !(is_volatile || need_mem_bar));
2760       }
2761       break;
2762     case T_ADDRESS:
2763       // Cast to an int type.
2764       p = _gvn.transform(new (C) CastP2XNode(NULL, p));
2765       p = ConvX2UL(p);
2766       break;
2767     default:
2768       fatal(err_msg_res("unexpected type %d: %s", type, type2name(type)));
2769       break;
2770     }
2771     // The load node has the control of the preceding MemBarCPUOrder.  All
2772     // following nodes will have the control of the MemBarCPUOrder inserted at
2773     // the end of this method.  So, pushing the load onto the stack at a later
2774     // point is fine.
2775     set_result(p);
2776   } else {
2777     // place effect of store into memory
2778     switch (type) {
2779     case T_DOUBLE:
2780       val = dstore_rounding(val);
2781       break;
2782     case T_ADDRESS:
2783       // Repackage the long as a pointer.
2784       val = ConvL2X(val);
2785       val = _gvn.transform(new (C) CastX2PNode(val));
2786       break;
2787     }
2788 
2789     MemNode::MemOrd mo = is_volatile ? MemNode::release : MemNode::unordered;
2790     if (type == T_OBJECT ) {
2791       (void) store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type, mo, mismatched);
2792     } else {
2793       (void) store_to_memory(control(), adr, val, type, adr_type, mo, is_volatile, unaligned, mismatched);
2794     }
2795   }
2796 
2797   if (is_volatile) {
2798     if (!is_store) {
2799       insert_mem_bar(Op_MemBarAcquire);

2800     } else {
2801       if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2802         insert_mem_bar(Op_MemBarVolatile);

2803       }
2804     }
2805   }
2806 
2807   if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder);
2808 
2809   return true;
2810 }
2811 
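As a reading aid for the volatile path in the method above (the common case without IRIW support): the leading release barrier before a volatile store, the trailing acquire barrier after a volatile load, and the trailing full barrier after a volatile store can be approximated with portable C++11 fences. The sketch below is illustrative only; it is not HotSpot code, and the names (guard, volatile_store_shape, volatile_load_shape) are made up.

    #include <atomic>

    std::atomic<int> guard{0};   // stands in for the volatile slot being accessed

    void volatile_store_shape(int v) {
      std::atomic_thread_fence(std::memory_order_release);  // leading MemBarRelease
      guard.store(v, std::memory_order_relaxed);             // the store itself
      std::atomic_thread_fence(std::memory_order_seq_cst);   // trailing MemBarVolatile (full/StoreLoad)
    }

    int volatile_load_shape() {
      int v = guard.load(std::memory_order_relaxed);         // the load itself
      std::atomic_thread_fence(std::memory_order_acquire);   // trailing MemBarAcquire
      return v;
    }

The MemBarCPUOrder nodes have no counterpart in the sketch: they emit no machine barrier and only order normal and unsafe accesses within the compiler, as the comment above explains.
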
2812 //----------------------------inline_unsafe_prefetch----------------------------
2813 
2814 bool LibraryCallKit::inline_unsafe_prefetch(bool is_native_ptr, bool is_store, bool is_static) {
2815 #ifndef PRODUCT
2816   {
2817     ResourceMark rm;
2818     // Check the signatures.
2819     ciSignature* sig = callee()->signature();
2820 #ifdef ASSERT
2821     // Object getObject(Object base, int/long offset), etc.
2822     BasicType rtype = sig->return_type()->basic_type();


2982   if (kind == LS_xchg && type == T_OBJECT) {
2983     const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
2984     if (tjp != NULL) {
2985       value_type = tjp;
2986     }
2987   }
2988 
2989   // Null check receiver.
2990   receiver = null_check(receiver);
2991   if (stopped()) {
2992     return true;
2993   }
2994 
2995   int alias_idx = C->get_alias_index(adr_type);
2996 
2997   // Memory-model-wise, a LoadStore acts like a little synchronized
2998   // block, so it needs barriers on each side.  These don't translate
2999   // into actual barriers on most machines, but we still need the rest
3000   // of the compiler to respect ordering.
3001 
3002   insert_mem_bar(Op_MemBarRelease);
3003   insert_mem_bar(Op_MemBarCPUOrder);
3004 
3005   // 4984716: MemBars must be inserted before this
3006   //          memory node in order to avoid a false
3007   //          dependency which will confuse the scheduler.
3008   Node *mem = memory(alias_idx);
3009 
3010   // For now, we handle only those cases that actually exist: ints,
3011   // longs, and Object. Adding others should be straightforward.
3012   Node* load_store = NULL;
3013   switch(type) {
3014   case T_INT:
3015     if (kind == LS_xadd) {
3016       load_store = _gvn.transform(new (C) GetAndAddINode(control(), mem, adr, newval, adr_type));
3017     } else if (kind == LS_xchg) {
3018       load_store = _gvn.transform(new (C) GetAndSetINode(control(), mem, adr, newval, adr_type));
3019     } else if (kind == LS_cmpxchg) {
3020       load_store = _gvn.transform(new (C) CompareAndSwapINode(control(), mem, adr, newval, oldval));
3021     } else {
3022       ShouldNotReachHere();


3081       if (kind == LS_xchg) {
3082         load_store = _gvn.transform(new (C) GetAndSetPNode(control(), mem, adr, newval, adr_type, value_type->is_oopptr()));
3083       } else {
3084         assert(kind == LS_cmpxchg, "wrong LoadStore operation");
3085         load_store = _gvn.transform(new (C) CompareAndSwapPNode(control(), mem, adr, newval, oldval));
3086       }
3087     }
3088     post_barrier(control(), load_store, base, adr, alias_idx, newval, T_OBJECT, true);
3089     break;
3090   default:
3091     fatal(err_msg_res("unexpected type %d: %s", type, type2name(type)));
3092     break;
3093   }
3094 
3095   // SCMemProjNodes represent the memory state of a LoadStore. Their
3096   // main role is to prevent LoadStore nodes from being optimized away
3097   // when their results aren't used.
3098   Node* proj = _gvn.transform(new (C) SCMemProjNode(load_store));
3099   set_memory(proj, alias_idx);
3100 


3101   if (type == T_OBJECT && kind == LS_xchg) {
3102 #ifdef _LP64
3103     if (adr->bottom_type()->is_ptr_to_narrowoop()) {
3104       load_store = _gvn.transform(new (C) DecodeNNode(load_store, load_store->get_ptr_type()));
3105     }
3106 #endif
3107     if (can_move_pre_barrier()) {
3108       // Don't need to load pre_val. The old value is returned by load_store.
3109       // The pre_barrier can execute after the xchg as long as no safepoint
3110       // gets inserted between them.
3111       pre_barrier(false /* do_load */,
3112                   control(), NULL, NULL, max_juint, NULL, NULL,
3113                   load_store /* pre_val */,
3114                   T_OBJECT);
3115     }
3116   }
3117 
3118   // Add the trailing membar surrounding the access
3119   insert_mem_bar(Op_MemBarCPUOrder);
3120   insert_mem_bar(Op_MemBarAcquire);

3121 
3122   assert(type2size[load_store->bottom_type()->basic_type()] == type2size[rtype], "result type should match");
3123   set_result(load_store);
3124   return true;
3125 }
3126 
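The "little synchronized block" shape described in the comments above (a barrier on each side of the atomic read-modify-write) has a direct portable analogue. A minimal sketch, assuming nothing about HotSpot internals; the names cell and xadd_shape are made up:

    #include <atomic>

    std::atomic<int> cell{0};

    int xadd_shape(int delta) {
      std::atomic_thread_fence(std::memory_order_release);   // leading MemBarRelease
      int old = cell.fetch_add(delta, std::memory_order_relaxed);
      std::atomic_thread_fence(std::memory_order_acquire);   // trailing MemBarAcquire
      return old;
    }

On x86 both fences compile to nothing extra (the locked RMW is already a full barrier), which matches the comment that these don't translate into actual barriers on most machines.
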
3127 //----------------------------inline_unsafe_ordered_store----------------------
3128 // public native void sun.misc.Unsafe.putOrderedObject(Object o, long offset, Object x);
3129 // public native void sun.misc.Unsafe.putOrderedInt(Object o, long offset, int x);
3130 // public native void sun.misc.Unsafe.putOrderedLong(Object o, long offset, long x);
3131 bool LibraryCallKit::inline_unsafe_ordered_store(BasicType type) {
3132   // This is another variant of inline_unsafe_access, differing in
3133   // that it always issues a store-store ("release") barrier and ensures
3134   // store-atomicity (which only matters for "long").
3135 
3136   if (callee()->is_static())  return false;  // caller must have the capability!
3137 
3138 #ifndef PRODUCT
3139   {
3140     ResourceMark rm;
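
The putOrdered* semantics being intrinsified here (release-only ordering plus store atomicity, the Java-level "lazySet") correspond to a plain release store in portable C++11. A small sketch under that assumption; it is not HotSpot code, and slot/ordered_store_shape are made-up names:

    #include <atomic>

    std::atomic<long> slot{0};

    void ordered_store_shape(long v) {
      // Earlier writes become visible before this store (store-store/"release"),
      // but the store may still reorder with later loads: no trailing StoreLoad
      // barrier, unlike a volatile store.  The std::atomic store is also atomic,
      // which is the store-atomicity point that matters for long.
      slot.store(v, std::memory_order_release);
    }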


6340 
6341   // The following code is copied from Parse::do_get_xxx():
6342 
6343   // Compute address and memory type.
6344   int offset  = field->offset_in_bytes();
6345   bool is_vol = field->is_volatile();
6346   ciType* field_klass = field->type();
6347   assert(field_klass->is_loaded(), "should be loaded");
6348   const TypePtr* adr_type = C->alias_type(field)->adr_type();
6349   Node *adr = basic_plus_adr(fromObj, fromObj, offset);
6350   BasicType bt = field->layout_type();
6351 
6352   // Build the resultant type of the load
6353   const Type *type;
6354   if (bt == T_OBJECT) {
6355     type = TypeOopPtr::make_from_klass(field_klass->as_klass());
6356   } else {
6357     type = Type::get_const_basic_type(bt);
6358   }
6359 

6360   if (support_IRIW_for_not_multiple_copy_atomic_cpu && is_vol) {
6361     insert_mem_bar(Op_MemBarVolatile);   // StoreLoad barrier
6362   }
6363   // Build the load.
6364   MemNode::MemOrd mo = is_vol ? MemNode::acquire : MemNode::unordered;
6365   Node* loadedField = make_load(NULL, adr, type, bt, adr_type, mo, LoadNode::DependsOnlyOnTest, is_vol);
6366   // If reference is volatile, prevent following memory ops from
6367   // floating up past the volatile read.  Also prevents commoning
6368   // another volatile read.
6369   if (is_vol) {
6370     // The memory barrier includes a bogus read of the value to force the load BEFORE the membar
6371     insert_mem_bar(Op_MemBarAcquire, loadedField);

6372   }
6373   return loadedField;
6374 }
6375 
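The support_IRIW_for_not_multiple_copy_atomic_cpu path above exists because of the IRIW (independent reads of independent writes) litmus test: on CPUs that are not multiple-copy atomic, two readers may otherwise disagree about the order of two independent volatile writes, which Java volatile forbids. A self-contained C++11 illustration of the test (all names are made up; this is not HotSpot code):

    #include <atomic>
    #include <thread>

    std::atomic<int> x{0}, y{0};
    int r1, r2, r3, r4;

    int main() {
      std::thread wx ([]{ x.store(1, std::memory_order_seq_cst); });
      std::thread wy ([]{ y.store(1, std::memory_order_seq_cst); });
      std::thread rxy([]{ r1 = x.load(std::memory_order_seq_cst);
                          r2 = y.load(std::memory_order_seq_cst); });
      std::thread ryx([]{ r3 = y.load(std::memory_order_seq_cst);
                          r4 = x.load(std::memory_order_seq_cst); });
      wx.join(); wy.join(); rxy.join(); ryx.join();
      // With seq_cst the outcome r1==1,r2==0,r3==1,r4==0 is forbidden: both
      // readers agree on the order of the two writes.  With acquire loads alone
      // it would be allowed, which is why the volatile load gets a leading full
      // barrier (MemBarVolatile) on such CPUs.
      return 0;
    }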
6376 
6377 //------------------------------inline_aescrypt_Block-----------------------
6378 bool LibraryCallKit::inline_aescrypt_Block(vmIntrinsics::ID id) {
6379   address stubAddr = NULL;
6380   const char *stubName;
6381   assert(UseAES, "need AES instruction support");
6382 
6383   switch(id) {
6384   case vmIntrinsics::_aescrypt_encryptBlock:
6385     stubAddr = StubRoutines::aescrypt_encryptBlock();
6386     stubName = "aescrypt_encryptBlock";
6387     break;
6388   case vmIntrinsics::_aescrypt_decryptBlock:
6389     stubAddr = StubRoutines::aescrypt_decryptBlock();
6390     stubName = "aescrypt_decryptBlock";
6391     break;

src/share/vm/opto/library_call.cpp (new)

2695   // from this field across safepoint since GC can change its value.
2696   bool need_read_barrier = !is_native_ptr && !is_store &&
2697                            offset != top() && heap_base_oop != top();
2698 
2699   if (!is_store && type == T_OBJECT) {
2700     const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type, is_native_ptr);
2701     if (tjp != NULL) {
2702       value_type = tjp;
2703     }
2704   }
2705 
2706   receiver = null_check(receiver);
2707   if (stopped()) {
2708     return true;
2709   }
2710   // Heap pointers get a null-check from the interpreter,
2711   // as a courtesy.  However, this is not guaranteed by Unsafe,
2712   // and it is not possible to fully distinguish unintended nulls
2713   // from intended ones in this API.
2714 
2715   Node* load = NULL;
2716   Node* store = NULL;
2717   Node* leading_membar = NULL;
2718   if (is_volatile) {
2719     // We need to emit leading and trailing CPU membars (see below) in
2720     // addition to memory membars when is_volatile. This is a little
2721     // too strong, but avoids the need to insert per-alias-type
2722     // volatile membars (for stores; compare Parse::do_put_xxx), which
2723     // we cannot do effectively here because we probably only have a
2724     // rough approximation of type.
2725     need_mem_bar = true;
2726     // For Stores, place a memory ordering barrier now.
2727     if (is_store) {
2728       leading_membar = insert_mem_bar(Op_MemBarRelease);
2729     } else {
2730       if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
2731         leading_membar = insert_mem_bar(Op_MemBarVolatile);
2732       }
2733     }
2734   }
2735 
2736   // Memory barrier to prevent normal and 'unsafe' accesses from
2737   // bypassing each other.  Happens after null checks, so the
2738   // exception paths do not take memory state from the memory barrier,
2739   // so there is no problem making a strong assert about mixing users
2740   // of safe & unsafe memory.  Otherwise fails in a CTW of rt.jar
2741   // around 5701, class sun/reflect/UnsafeBooleanFieldAccessorImpl.
2742   if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder);
2743 
2744   if (!is_store) {
2745     MemNode::MemOrd mo = is_volatile ? MemNode::acquire : MemNode::unordered;
2746     // To be valid, unsafe loads may depend on conditions other than
2747     // the one that guards them: pin the Load node
2748     load = make_load(control(), adr, value_type, type, adr_type, mo, LoadNode::Pinned, is_volatile, unaligned, mismatched);
2749     // load value
2750     switch (type) {
2751     case T_BOOLEAN:
2752     case T_CHAR:
2753     case T_BYTE:
2754     case T_SHORT:
2755     case T_INT:
2756     case T_LONG:
2757     case T_FLOAT:
2758     case T_DOUBLE:
2759       break;
2760     case T_OBJECT:
2761       if (need_read_barrier) {
2762         insert_pre_barrier(heap_base_oop, offset, load, !(is_volatile || need_mem_bar));
2763       }
2764       break;
2765     case T_ADDRESS:
2766       // Cast to an int type.
2767       load = _gvn.transform(new (C) CastP2XNode(NULL, load));
2768       load = ConvX2UL(load);
2769       break;
2770     default:
2771       fatal(err_msg_res("unexpected type %d: %s", type, type2name(type)));
2772       break;
2773     }
2774     // The load node has the control of the preceding MemBarCPUOrder.  All
2775     // following nodes will have the control of the MemBarCPUOrder inserted at
2776     // the end of this method.  So, pushing the load onto the stack at a later
2777     // point is fine.
2778     set_result(load);
2779   } else {
2780     // place effect of store into memory
2781     switch (type) {
2782     case T_DOUBLE:
2783       val = dstore_rounding(val);
2784       break;
2785     case T_ADDRESS:
2786       // Repackage the long as a pointer.
2787       val = ConvL2X(val);
2788       val = _gvn.transform(new (C) CastX2PNode(val));
2789       break;
2790     }
2791 
2792     MemNode::MemOrd mo = is_volatile ? MemNode::release : MemNode::unordered;
2793     if (type == T_OBJECT ) {
2794       store = store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type, mo, mismatched);
2795     } else {
2796       store = store_to_memory(control(), adr, val, type, adr_type, mo, is_volatile, unaligned, mismatched);
2797     }
2798   }
2799 
2800   if (is_volatile) {
2801     if (!is_store) {
2802       Node* mb = insert_mem_bar(Op_MemBarAcquire, load);
2803       mb->as_MemBar()->set_trailing_load();
2804     } else {
2805       if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2806         Node* mb = insert_mem_bar(Op_MemBarVolatile, store);
2807         MemBarNode::set_store_pair(leading_membar->as_MemBar(), mb->as_MemBar());
2808       }
2809     }
2810   }
2811 
2812   if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder);
2813 
2814   return true;
2815 }
2816 
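The new calls above (set_trailing_load() and MemBarNode::set_store_pair()) record on the trailing barrier which access and which leading barrier it belongs to, so that later passes can treat the leading barrier, the access and the trailing barrier as one unit rather than three unrelated nodes. A stand-alone sketch of that kind of pairing bookkeeping, with made-up names (this is deliberately not the MemBarNode API):

    // Pair a leading and a trailing barrier so each can find the other.
    struct Barrier {
      Barrier* pair    = nullptr;  // the matching barrier, if any
      bool     leading = false;    // true for the leading half of a pair
    };

    void pair_barriers(Barrier* leading, Barrier* trailing) {
      leading->leading = true;
      leading->pair    = trailing;  // leading half points at its trailing half...
      trailing->pair   = leading;   // ...and the trailing half points back
    }
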
2817 //----------------------------inline_unsafe_prefetch----------------------------
2818 
2819 bool LibraryCallKit::inline_unsafe_prefetch(bool is_native_ptr, bool is_store, bool is_static) {
2820 #ifndef PRODUCT
2821   {
2822     ResourceMark rm;
2823     // Check the signatures.
2824     ciSignature* sig = callee()->signature();
2825 #ifdef ASSERT
2826     // Object getObject(Object base, int/long offset), etc.
2827     BasicType rtype = sig->return_type()->basic_type();


2987   if (kind == LS_xchg && type == T_OBJECT) {
2988     const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
2989     if (tjp != NULL) {
2990       value_type = tjp;
2991     }
2992   }
2993 
2994   // Null check receiver.
2995   receiver = null_check(receiver);
2996   if (stopped()) {
2997     return true;
2998   }
2999 
3000   int alias_idx = C->get_alias_index(adr_type);
3001 
3002   // Memory-model-wise, a LoadStore acts like a little synchronized
3003   // block, so it needs barriers on each side.  These don't translate
3004   // into actual barriers on most machines, but we still need the rest
3005   // of the compiler to respect ordering.
3006 
3007   Node* leading_membar = insert_mem_bar(Op_MemBarRelease);
3008   insert_mem_bar(Op_MemBarCPUOrder);
3009 
3010   // 4984716: MemBars must be inserted before this
3011   //          memory node in order to avoid a false
3012   //          dependency which will confuse the scheduler.
3013   Node *mem = memory(alias_idx);
3014 
3015   // For now, we handle only those cases that actually exist: ints,
3016   // longs, and Object. Adding others should be straightforward.
3017   Node* load_store = NULL;
3018   switch(type) {
3019   case T_INT:
3020     if (kind == LS_xadd) {
3021       load_store = _gvn.transform(new (C) GetAndAddINode(control(), mem, adr, newval, adr_type));
3022     } else if (kind == LS_xchg) {
3023       load_store = _gvn.transform(new (C) GetAndSetINode(control(), mem, adr, newval, adr_type));
3024     } else if (kind == LS_cmpxchg) {
3025       load_store = _gvn.transform(new (C) CompareAndSwapINode(control(), mem, adr, newval, oldval));
3026     } else {
3027       ShouldNotReachHere();


3086       if (kind == LS_xchg) {
3087         load_store = _gvn.transform(new (C) GetAndSetPNode(control(), mem, adr, newval, adr_type, value_type->is_oopptr()));
3088       } else {
3089         assert(kind == LS_cmpxchg, "wrong LoadStore operation");
3090         load_store = _gvn.transform(new (C) CompareAndSwapPNode(control(), mem, adr, newval, oldval));
3091       }
3092     }
3093     post_barrier(control(), load_store, base, adr, alias_idx, newval, T_OBJECT, true);
3094     break;
3095   default:
3096     fatal(err_msg_res("unexpected type %d: %s", type, type2name(type)));
3097     break;
3098   }
3099 
3100   // SCMemProjNodes represent the memory state of a LoadStore. Their
3101   // main role is to prevent LoadStore nodes from being optimized away
3102   // when their results aren't used.
3103   Node* proj = _gvn.transform(new (C) SCMemProjNode(load_store));
3104   set_memory(proj, alias_idx);
3105 
3106   Node* access = load_store;
3107 
3108   if (type == T_OBJECT && kind == LS_xchg) {
3109 #ifdef _LP64
3110     if (adr->bottom_type()->is_ptr_to_narrowoop()) {
3111       load_store = _gvn.transform(new (C) DecodeNNode(load_store, load_store->get_ptr_type()));
3112     }
3113 #endif
3114     if (can_move_pre_barrier()) {
3115       // Don't need to load pre_val. The old value is returned by load_store.
3116       // The pre_barrier can execute after the xchg as long as no safepoint
3117       // gets inserted between them.
3118       pre_barrier(false /* do_load */,
3119                   control(), NULL, NULL, max_juint, NULL, NULL,
3120                   load_store /* pre_val */,
3121                   T_OBJECT);
3122     }
3123   }
3124 
3125   // Add the trailing membar surrounding the access
3126   insert_mem_bar(Op_MemBarCPUOrder);
3127   Node* mb = insert_mem_bar(Op_MemBarAcquire, access);
3128   MemBarNode::set_load_store_pair(leading_membar->as_MemBar(), mb->as_MemBar());
3129 
3130   assert(type2size[load_store->bottom_type()->basic_type()] == type2size[rtype], "result type should match");
3131   set_result(load_store);
3132   return true;
3133 }
3134 
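For the LS_cmpxchg case in the method above, the same leading-release / trailing-acquire shape applies around the compare-and-swap itself. A portable C++11 sketch, again with made-up names and no claim about HotSpot internals:

    #include <atomic>

    std::atomic<int> cell2{0};

    bool cmpxchg_shape(int expected, int newval) {
      std::atomic_thread_fence(std::memory_order_release);   // leading MemBarRelease
      bool ok = cell2.compare_exchange_strong(expected, newval,
                                              std::memory_order_relaxed,
                                              std::memory_order_relaxed);
      std::atomic_thread_fence(std::memory_order_acquire);   // trailing MemBarAcquire
      return ok;
    }

compare_exchange_strong reports success as a boolean, which matches the result of the CompareAndSwap nodes above, while the GetAndSet/GetAndAdd nodes return the old value instead.
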
3135 //----------------------------inline_unsafe_ordered_store----------------------
3136 // public native void sun.misc.Unsafe.putOrderedObject(Object o, long offset, Object x);
3137 // public native void sun.misc.Unsafe.putOrderedInt(Object o, long offset, int x);
3138 // public native void sun.misc.Unsafe.putOrderedLong(Object o, long offset, long x);
3139 bool LibraryCallKit::inline_unsafe_ordered_store(BasicType type) {
3140   // This is another variant of inline_unsafe_access, differing in
3141   // that it always issues a store-store ("release") barrier and ensures
3142   // store-atomicity (which only matters for "long").
3143 
3144   if (callee()->is_static())  return false;  // caller must have the capability!
3145 
3146 #ifndef PRODUCT
3147   {
3148     ResourceMark rm;


6348 
6349   // The following code is copied from Parse::do_get_xxx():
6350 
6351   // Compute address and memory type.
6352   int offset  = field->offset_in_bytes();
6353   bool is_vol = field->is_volatile();
6354   ciType* field_klass = field->type();
6355   assert(field_klass->is_loaded(), "should be loaded");
6356   const TypePtr* adr_type = C->alias_type(field)->adr_type();
6357   Node *adr = basic_plus_adr(fromObj, fromObj, offset);
6358   BasicType bt = field->layout_type();
6359 
6360   // Build the resultant type of the load
6361   const Type *type;
6362   if (bt == T_OBJECT) {
6363     type = TypeOopPtr::make_from_klass(field_klass->as_klass());
6364   } else {
6365     type = Type::get_const_basic_type(bt);
6366   }
6367 
6368   Node* leading_membar = NULL;
6369   if (support_IRIW_for_not_multiple_copy_atomic_cpu && is_vol) {
6370     leading_membar = insert_mem_bar(Op_MemBarVolatile);   // StoreLoad barrier
6371   }
6372   // Build the load.
6373   MemNode::MemOrd mo = is_vol ? MemNode::acquire : MemNode::unordered;
6374   Node* loadedField = make_load(NULL, adr, type, bt, adr_type, mo, LoadNode::DependsOnlyOnTest, is_vol);
6375   // If reference is volatile, prevent following memory ops from
6376   // floating up past the volatile read.  Also prevents commoning
6377   // another volatile read.
6378   if (is_vol) {
6379     // The memory barrier includes a bogus read of the value to force the load BEFORE the membar
6380     Node* mb = insert_mem_bar(Op_MemBarAcquire, loadedField);
6381     mb->as_MemBar()->set_trailing_load();
6382   }
6383   return loadedField;
6384 }
6385 
6386 
6387 //------------------------------inline_aescrypt_Block-----------------------
6388 bool LibraryCallKit::inline_aescrypt_Block(vmIntrinsics::ID id) {
6389   address stubAddr = NULL;
6390   const char *stubName;
6391   assert(UseAES, "need AES instruction support");
6392 
6393   switch(id) {
6394   case vmIntrinsics::_aescrypt_encryptBlock:
6395     stubAddr = StubRoutines::aescrypt_encryptBlock();
6396     stubName = "aescrypt_encryptBlock";
6397     break;
6398   case vmIntrinsics::_aescrypt_decryptBlock:
6399     stubAddr = StubRoutines::aescrypt_decryptBlock();
6400     stubName = "aescrypt_decryptBlock";
6401     break;

