
src/share/vm/opto/library_call.cpp

rev 9088 : 8139040: Fix initializations before ShouldNotReachHere() etc. and enable -Wuninitialized on linux.
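The pattern this change addresses: several intrinsic helpers declare a local such as Node* n and assign it in every switch case, with the default arm calling fatal_unexpected_iid() or ShouldNotReachHere(). Those calls never return, but GCC's -Wuninitialized cannot prove that, so it warns about the later use of the variable. The fix initializes such locals to NULL at declaration so the warning can be enabled build-wide. A condensed sketch of the before/after shape (illustrative, condensed from the hunks below, not a literal excerpt):

  // Before: 'n' is assigned on every reachable path, but the compiler
  // cannot see that fatal_unexpected_iid() does not return, so
  // -Wuninitialized may warn at the use below.
  Node* n;
  switch (id) {
  case vmIntrinsics::_dabs: n = new AbsDNode(arg);  break;
  default:                  fatal_unexpected_iid(id);  break;
  }
  set_result(_gvn.transform(n));

  // After: initialize to NULL so every path has a defined value.
  Node* n = NULL;
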


Old version (before the fix):

1348   }
1349   set_result(result);
1350   return true;
1351 }
1352 
1353 //--------------------------round_double_node--------------------------------
1354 // Round a double node if necessary.
1355 Node* LibraryCallKit::round_double_node(Node* n) {
1356   if (Matcher::strict_fp_requires_explicit_rounding && UseSSE <= 1)
1357     n = _gvn.transform(new RoundDoubleNode(0, n));
1358   return n;
1359 }
1360 
1361 //------------------------------inline_math-----------------------------------
1362 // public static double Math.abs(double)
1363 // public static double Math.sqrt(double)
1364 // public static double Math.log(double)
1365 // public static double Math.log10(double)
1366 bool LibraryCallKit::inline_math(vmIntrinsics::ID id) {
1367   Node* arg = round_double_node(argument(0));
1368   Node* n;
1369   switch (id) {
1370   case vmIntrinsics::_dabs:   n = new AbsDNode(                arg);  break;
1371   case vmIntrinsics::_dsqrt:  n = new SqrtDNode(C, control(),  arg);  break;
1372   case vmIntrinsics::_dlog:   n = new LogDNode(C, control(),   arg);  break;
1373   case vmIntrinsics::_dlog10: n = new Log10DNode(C, control(), arg);  break;
1374   default:  fatal_unexpected_iid(id);  break;
1375   }
1376   set_result(_gvn.transform(n));
1377   return true;
1378 }
1379 
1380 //------------------------------inline_trig----------------------------------
1381 // Inline sin/cos/tan instructions, if possible.  If rounding is required, do
1382 // argument reduction which will turn into a fast/slow diamond.
1383 bool LibraryCallKit::inline_trig(vmIntrinsics::ID id) {
1384   Node* arg = round_double_node(argument(0));
1385   Node* n = NULL;
1386 
1387   switch (id) {
1388   case vmIntrinsics::_dsin:  n = new SinDNode(C, control(), arg);  break;


2104     return basic_plus_adr(base, offset);
2105   }
2106 }
2107 
2108 //--------------------------inline_number_methods-----------------------------
2109 // inline int     Integer.numberOfLeadingZeros(int)
2110 // inline int        Long.numberOfLeadingZeros(long)
2111 //
2112 // inline int     Integer.numberOfTrailingZeros(int)
2113 // inline int        Long.numberOfTrailingZeros(long)
2114 //
2115 // inline int     Integer.bitCount(int)
2116 // inline int        Long.bitCount(long)
2117 //
2118 // inline char  Character.reverseBytes(char)
2119 // inline short     Short.reverseBytes(short)
2120 // inline int     Integer.reverseBytes(int)
2121 // inline long       Long.reverseBytes(long)
2122 bool LibraryCallKit::inline_number_methods(vmIntrinsics::ID id) {
2123   Node* arg = argument(0);
2124   Node* n;
2125   switch (id) {
2126   case vmIntrinsics::_numberOfLeadingZeros_i:   n = new CountLeadingZerosINode( arg);  break;
2127   case vmIntrinsics::_numberOfLeadingZeros_l:   n = new CountLeadingZerosLNode( arg);  break;
2128   case vmIntrinsics::_numberOfTrailingZeros_i:  n = new CountTrailingZerosINode(arg);  break;
2129   case vmIntrinsics::_numberOfTrailingZeros_l:  n = new CountTrailingZerosLNode(arg);  break;
2130   case vmIntrinsics::_bitCount_i:               n = new PopCountINode(          arg);  break;
2131   case vmIntrinsics::_bitCount_l:               n = new PopCountLNode(          arg);  break;
2132   case vmIntrinsics::_reverseBytes_c:           n = new ReverseBytesUSNode(0,   arg);  break;
2133   case vmIntrinsics::_reverseBytes_s:           n = new ReverseBytesSNode( 0,   arg);  break;
2134   case vmIntrinsics::_reverseBytes_i:           n = new ReverseBytesINode( 0,   arg);  break;
2135   case vmIntrinsics::_reverseBytes_l:           n = new ReverseBytesLNode( 0,   arg);  break;
2136   default:  fatal_unexpected_iid(id);  break;
2137   }
2138   set_result(_gvn.transform(n));
2139   return true;
2140 }
2141 
2142 //----------------------------inline_unsafe_access----------------------------
2143 
2144 const static BasicType T_ADDRESS_HOLDER = T_LONG;


2644     }
2645   }
2646 
2647   int alias_idx = C->get_alias_index(adr_type);
2648 
2649   // Memory-model-wise, a LoadStore acts like a little synchronized
2650   // block, so needs barriers on each side.  These don't translate
2651   // into actual barriers on most machines, but we still need rest of
2652   // compiler to respect ordering.
2653 
2654   insert_mem_bar(Op_MemBarRelease);
2655   insert_mem_bar(Op_MemBarCPUOrder);
2656 
2657   // 4984716: MemBars must be inserted before this
2658   //          memory node in order to avoid a false
2659   //          dependency which will confuse the scheduler.
2660   Node *mem = memory(alias_idx);
2661 
2662   // For now, we handle only those cases that actually exist: ints,
2663   // longs, and Object. Adding others should be straightforward.
2664   Node* load_store;
2665   switch(type) {
2666   case T_INT:
2667     if (kind == LS_xadd) {
2668       load_store = _gvn.transform(new GetAndAddINode(control(), mem, adr, newval, adr_type));
2669     } else if (kind == LS_xchg) {
2670       load_store = _gvn.transform(new GetAndSetINode(control(), mem, adr, newval, adr_type));
2671     } else if (kind == LS_cmpxchg) {
2672       load_store = _gvn.transform(new CompareAndSwapINode(control(), mem, adr, newval, oldval));
2673     } else {
2674       ShouldNotReachHere();
2675     }
2676     break;
2677   case T_LONG:
2678     if (kind == LS_xadd) {
2679       load_store = _gvn.transform(new GetAndAddLNode(control(), mem, adr, newval, adr_type));
2680     } else if (kind == LS_xchg) {
2681       load_store = _gvn.transform(new GetAndSetLNode(control(), mem, adr, newval, adr_type));
2682     } else if (kind == LS_cmpxchg) {
2683       load_store = _gvn.transform(new CompareAndSwapLNode(control(), mem, adr, newval, oldval));
2684     } else {


3650   // It could be a dynamic mix of int[], boolean[], Object[], etc.
3651   Node* result = load_array_length(array);
3652 
3653   C->set_has_split_ifs(true);  // Has chance for split-if optimization
3654   set_result(result);
3655   return true;
3656 }
3657 
3658 //------------------------inline_array_copyOf----------------------------
3659 // public static <T,U> T[] java.util.Arrays.copyOf(     U[] original, int newLength,         Class<? extends T[]> newType);
3660 // public static <T,U> T[] java.util.Arrays.copyOfRange(U[] original, int from,      int to, Class<? extends T[]> newType);
3661 bool LibraryCallKit::inline_array_copyOf(bool is_copyOfRange) {
3662   if (too_many_traps(Deoptimization::Reason_intrinsic))  return false;
3663 
3664   // Get the arguments.
3665   Node* original          = argument(0);
3666   Node* start             = is_copyOfRange? argument(1): intcon(0);
3667   Node* end               = is_copyOfRange? argument(2): argument(1);
3668   Node* array_type_mirror = is_copyOfRange? argument(3): argument(2);
3669 
3670   Node* newcopy;
3671 
3672   // Set the original stack and the reexecute bit for the interpreter to reexecute
3673   // the bytecode that invokes Arrays.copyOf if deoptimization happens.
3674   { PreserveReexecuteState preexecs(this);
3675     jvms()->set_should_reexecute(true);
3676 
3677     array_type_mirror = null_check(array_type_mirror);
3678     original          = null_check(original);
3679 
3680     // Check if a null path was taken unconditionally.
3681     if (stopped())  return true;
3682 
3683     Node* orig_length = load_array_length(original);
3684 
3685     Node* klass_node = load_klass_from_mirror(array_type_mirror, false, NULL, 0);
3686     klass_node = null_check(klass_node);
3687 
3688     RegionNode* bailout = new RegionNode(1);
3689     record_for_igvn(bailout);
3690 


4085       break;
4086     }
4087   }
4088 
4089 #ifndef PRODUCT
4090   if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
4091     tty->print_cr("  Bailing out because caller depth exceeded inlining depth = %d", jvms()->depth());
4092     tty->print_cr("  JVM state at this point:");
4093     for (int i = jvms()->depth(), n = 1; i >= 1; i--, n++) {
4094       ciMethod* m = jvms()->of_depth(i)->method();
4095       tty->print_cr("   %d) %s.%s", n, m->holder()->name()->as_utf8(), m->name()->as_utf8());
4096     }
4097   }
4098 #endif
4099 
4100   return false;  // bail-out; let JVM_GetCallerClass do the work
4101 }
4102 
4103 bool LibraryCallKit::inline_fp_conversions(vmIntrinsics::ID id) {
4104   Node* arg = argument(0);
4105   Node* result;
4106 
4107   switch (id) {
4108   case vmIntrinsics::_floatToRawIntBits:    result = new MoveF2INode(arg);  break;
4109   case vmIntrinsics::_intBitsToFloat:       result = new MoveI2FNode(arg);  break;
4110   case vmIntrinsics::_doubleToRawLongBits:  result = new MoveD2LNode(arg);  break;
4111   case vmIntrinsics::_longBitsToDouble:     result = new MoveL2DNode(arg);  break;
4112 
4113   case vmIntrinsics::_doubleToLongBits: {
4114     // two paths (plus control) merge in a wood
4115     RegionNode *r = new RegionNode(3);
4116     Node *phi = new PhiNode(r, TypeLong::LONG);
4117 
4118     Node *cmpisnan = _gvn.transform(new CmpDNode(arg, arg));
4119     // Build the boolean node
4120     Node *bolisnan = _gvn.transform(new BoolNode(cmpisnan, BoolTest::ne));
4121 
4122     // Branch either way.
4123     // NaN case is less traveled, which makes all the difference.
4124     IfNode *ifisnan = create_and_xform_if(control(), bolisnan, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
4125     Node *opt_isnan = _gvn.transform(ifisnan);


5714 
5715   if (support_IRIW_for_not_multiple_copy_atomic_cpu && is_vol) {
5716     insert_mem_bar(Op_MemBarVolatile);   // StoreLoad barrier
5717   }
5718   // Build the load.
5719   MemNode::MemOrd mo = is_vol ? MemNode::acquire : MemNode::unordered;
5720   Node* loadedField = make_load(NULL, adr, type, bt, adr_type, mo, LoadNode::DependsOnlyOnTest, is_vol);
5721   // If reference is volatile, prevent following memory ops from
5722   // floating up past the volatile read.  Also prevents commoning
5723   // another volatile read.
5724   if (is_vol) {
5725     // Memory barrier includes bogus read of value to force load BEFORE membar
5726     insert_mem_bar(Op_MemBarAcquire, loadedField);
5727   }
5728   return loadedField;
5729 }
5730 
5731 
5732 //------------------------------inline_aescrypt_Block-----------------------
5733 bool LibraryCallKit::inline_aescrypt_Block(vmIntrinsics::ID id) {
5734   address stubAddr;
5735   const char *stubName;
5736   assert(UseAES, "need AES instruction support");
5737 
5738   switch(id) {
5739   case vmIntrinsics::_aescrypt_encryptBlock:
5740     stubAddr = StubRoutines::aescrypt_encryptBlock();
5741     stubName = "aescrypt_encryptBlock";
5742     break;
5743   case vmIntrinsics::_aescrypt_decryptBlock:
5744     stubAddr = StubRoutines::aescrypt_decryptBlock();
5745     stubName = "aescrypt_decryptBlock";
5746     break;
5747   }
5748   if (stubAddr == NULL) return false;
5749 
5750   Node* aescrypt_object = argument(0);
5751   Node* src             = argument(1);
5752   Node* src_offset      = argument(2);
5753   Node* dest            = argument(3);
5754   Node* dest_offset     = argument(4);


5780     // compatibility issues between Java key expansion and SPARC crypto instructions
5781     Node* original_k_start = get_original_key_start_from_aescrypt_object(aescrypt_object);
5782     if (original_k_start == NULL) return false;
5783 
5784     // Call the stub.
5785     make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::aescrypt_block_Type(),
5786                       stubAddr, stubName, TypePtr::BOTTOM,
5787                       src_start, dest_start, k_start, original_k_start);
5788   } else {
5789     // Call the stub.
5790     make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::aescrypt_block_Type(),
5791                       stubAddr, stubName, TypePtr::BOTTOM,
5792                       src_start, dest_start, k_start);
5793   }
5794 
5795   return true;
5796 }
5797 
5798 //------------------------------inline_cipherBlockChaining_AESCrypt-----------------------
5799 bool LibraryCallKit::inline_cipherBlockChaining_AESCrypt(vmIntrinsics::ID id) {
5800   address stubAddr;
5801   const char *stubName;
5802 
5803   assert(UseAES, "need AES instruction support");
5804 
5805   switch(id) {
5806   case vmIntrinsics::_cipherBlockChaining_encryptAESCrypt:
5807     stubAddr = StubRoutines::cipherBlockChaining_encryptAESCrypt();
5808     stubName = "cipherBlockChaining_encryptAESCrypt";
5809     break;
5810   case vmIntrinsics::_cipherBlockChaining_decryptAESCrypt:
5811     stubAddr = StubRoutines::cipherBlockChaining_decryptAESCrypt();
5812     stubName = "cipherBlockChaining_decryptAESCrypt";
5813     break;
5814   }
5815   if (stubAddr == NULL) return false;
5816 
5817   Node* cipherBlockChaining_object = argument(0);
5818   Node* src                        = argument(1);
5819   Node* src_offset                 = argument(2);
5820   Node* len                        = argument(3);
5821   Node* dest                       = argument(4);




New version (rev 9088, with the initializations added):

1348   }
1349   set_result(result);
1350   return true;
1351 }
1352 
1353 //--------------------------round_double_node--------------------------------
1354 // Round a double node if necessary.
1355 Node* LibraryCallKit::round_double_node(Node* n) {
1356   if (Matcher::strict_fp_requires_explicit_rounding && UseSSE <= 1)
1357     n = _gvn.transform(new RoundDoubleNode(0, n));
1358   return n;
1359 }
1360 
1361 //------------------------------inline_math-----------------------------------
1362 // public static double Math.abs(double)
1363 // public static double Math.sqrt(double)
1364 // public static double Math.log(double)
1365 // public static double Math.log10(double)
1366 bool LibraryCallKit::inline_math(vmIntrinsics::ID id) {
1367   Node* arg = round_double_node(argument(0));
1368   Node* n = NULL;
1369   switch (id) {
1370   case vmIntrinsics::_dabs:   n = new AbsDNode(                arg);  break;
1371   case vmIntrinsics::_dsqrt:  n = new SqrtDNode(C, control(),  arg);  break;
1372   case vmIntrinsics::_dlog:   n = new LogDNode(C, control(),   arg);  break;
1373   case vmIntrinsics::_dlog10: n = new Log10DNode(C, control(), arg);  break;
1374   default:  fatal_unexpected_iid(id);  break;
1375   }
1376   set_result(_gvn.transform(n));
1377   return true;
1378 }
1379 
1380 //------------------------------inline_trig----------------------------------
1381 // Inline sin/cos/tan instructions, if possible.  If rounding is required, do
1382 // argument reduction which will turn into a fast/slow diamond.
1383 bool LibraryCallKit::inline_trig(vmIntrinsics::ID id) {
1384   Node* arg = round_double_node(argument(0));
1385   Node* n = NULL;
1386 
1387   switch (id) {
1388   case vmIntrinsics::_dsin:  n = new SinDNode(C, control(), arg);  break;


2104     return basic_plus_adr(base, offset);
2105   }
2106 }
2107 
2108 //--------------------------inline_number_methods-----------------------------
2109 // inline int     Integer.numberOfLeadingZeros(int)
2110 // inline int        Long.numberOfLeadingZeros(long)
2111 //
2112 // inline int     Integer.numberOfTrailingZeros(int)
2113 // inline int        Long.numberOfTrailingZeros(long)
2114 //
2115 // inline int     Integer.bitCount(int)
2116 // inline int        Long.bitCount(long)
2117 //
2118 // inline char  Character.reverseBytes(char)
2119 // inline short     Short.reverseBytes(short)
2120 // inline int     Integer.reverseBytes(int)
2121 // inline long       Long.reverseBytes(long)
2122 bool LibraryCallKit::inline_number_methods(vmIntrinsics::ID id) {
2123   Node* arg = argument(0);
2124   Node* n = NULL;
2125   switch (id) {
2126   case vmIntrinsics::_numberOfLeadingZeros_i:   n = new CountLeadingZerosINode( arg);  break;
2127   case vmIntrinsics::_numberOfLeadingZeros_l:   n = new CountLeadingZerosLNode( arg);  break;
2128   case vmIntrinsics::_numberOfTrailingZeros_i:  n = new CountTrailingZerosINode(arg);  break;
2129   case vmIntrinsics::_numberOfTrailingZeros_l:  n = new CountTrailingZerosLNode(arg);  break;
2130   case vmIntrinsics::_bitCount_i:               n = new PopCountINode(          arg);  break;
2131   case vmIntrinsics::_bitCount_l:               n = new PopCountLNode(          arg);  break;
2132   case vmIntrinsics::_reverseBytes_c:           n = new ReverseBytesUSNode(0,   arg);  break;
2133   case vmIntrinsics::_reverseBytes_s:           n = new ReverseBytesSNode( 0,   arg);  break;
2134   case vmIntrinsics::_reverseBytes_i:           n = new ReverseBytesINode( 0,   arg);  break;
2135   case vmIntrinsics::_reverseBytes_l:           n = new ReverseBytesLNode( 0,   arg);  break;
2136   default:  fatal_unexpected_iid(id);  break;
2137   }
2138   set_result(_gvn.transform(n));
2139   return true;
2140 }
2141 
2142 //----------------------------inline_unsafe_access----------------------------
2143 
2144 const static BasicType T_ADDRESS_HOLDER = T_LONG;


2644     }
2645   }
2646 
2647   int alias_idx = C->get_alias_index(adr_type);
2648 
2649   // Memory-model-wise, a LoadStore acts like a little synchronized
2650   // block, so needs barriers on each side.  These don't translate
2651   // into actual barriers on most machines, but we still need rest of
2652   // compiler to respect ordering.
2653 
2654   insert_mem_bar(Op_MemBarRelease);
2655   insert_mem_bar(Op_MemBarCPUOrder);
2656 
2657   // 4984716: MemBars must be inserted before this
2658   //          memory node in order to avoid a false
2659   //          dependency which will confuse the scheduler.
2660   Node *mem = memory(alias_idx);
2661 
2662   // For now, we handle only those cases that actually exist: ints,
2663   // longs, and Object. Adding others should be straightforward.
2664   Node* load_store = NULL;
2665   switch(type) {
2666   case T_INT:
2667     if (kind == LS_xadd) {
2668       load_store = _gvn.transform(new GetAndAddINode(control(), mem, adr, newval, adr_type));
2669     } else if (kind == LS_xchg) {
2670       load_store = _gvn.transform(new GetAndSetINode(control(), mem, adr, newval, adr_type));
2671     } else if (kind == LS_cmpxchg) {
2672       load_store = _gvn.transform(new CompareAndSwapINode(control(), mem, adr, newval, oldval));
2673     } else {
2674       ShouldNotReachHere();
2675     }
2676     break;
2677   case T_LONG:
2678     if (kind == LS_xadd) {
2679       load_store = _gvn.transform(new GetAndAddLNode(control(), mem, adr, newval, adr_type));
2680     } else if (kind == LS_xchg) {
2681       load_store = _gvn.transform(new GetAndSetLNode(control(), mem, adr, newval, adr_type));
2682     } else if (kind == LS_cmpxchg) {
2683       load_store = _gvn.transform(new CompareAndSwapLNode(control(), mem, adr, newval, oldval));
2684     } else {


3650   // It could be a dynamic mix of int[], boolean[], Object[], etc.
3651   Node* result = load_array_length(array);
3652 
3653   C->set_has_split_ifs(true);  // Has chance for split-if optimization
3654   set_result(result);
3655   return true;
3656 }
3657 
3658 //------------------------inline_array_copyOf----------------------------
3659 // public static <T,U> T[] java.util.Arrays.copyOf(     U[] original, int newLength,         Class<? extends T[]> newType);
3660 // public static <T,U> T[] java.util.Arrays.copyOfRange(U[] original, int from,      int to, Class<? extends T[]> newType);
3661 bool LibraryCallKit::inline_array_copyOf(bool is_copyOfRange) {
3662   if (too_many_traps(Deoptimization::Reason_intrinsic))  return false;
3663 
3664   // Get the arguments.
3665   Node* original          = argument(0);
3666   Node* start             = is_copyOfRange? argument(1): intcon(0);
3667   Node* end               = is_copyOfRange? argument(2): argument(1);
3668   Node* array_type_mirror = is_copyOfRange? argument(3): argument(2);
3669 
3670   Node* newcopy = NULL;
3671 
3672   // Set the original stack and the reexecute bit for the interpreter to reexecute
3673   // the bytecode that invokes Arrays.copyOf if deoptimization happens.
3674   { PreserveReexecuteState preexecs(this);
3675     jvms()->set_should_reexecute(true);
3676 
3677     array_type_mirror = null_check(array_type_mirror);
3678     original          = null_check(original);
3679 
3680     // Check if a null path was taken unconditionally.
3681     if (stopped())  return true;
3682 
3683     Node* orig_length = load_array_length(original);
3684 
3685     Node* klass_node = load_klass_from_mirror(array_type_mirror, false, NULL, 0);
3686     klass_node = null_check(klass_node);
3687 
3688     RegionNode* bailout = new RegionNode(1);
3689     record_for_igvn(bailout);
3690 


4085       break;
4086     }
4087   }
4088 
4089 #ifndef PRODUCT
4090   if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
4091     tty->print_cr("  Bailing out because caller depth exceeded inlining depth = %d", jvms()->depth());
4092     tty->print_cr("  JVM state at this point:");
4093     for (int i = jvms()->depth(), n = 1; i >= 1; i--, n++) {
4094       ciMethod* m = jvms()->of_depth(i)->method();
4095       tty->print_cr("   %d) %s.%s", n, m->holder()->name()->as_utf8(), m->name()->as_utf8());
4096     }
4097   }
4098 #endif
4099 
4100   return false;  // bail-out; let JVM_GetCallerClass do the work
4101 }
4102 
4103 bool LibraryCallKit::inline_fp_conversions(vmIntrinsics::ID id) {
4104   Node* arg = argument(0);
4105   Node* result = NULL;
4106 
4107   switch (id) {
4108   case vmIntrinsics::_floatToRawIntBits:    result = new MoveF2INode(arg);  break;
4109   case vmIntrinsics::_intBitsToFloat:       result = new MoveI2FNode(arg);  break;
4110   case vmIntrinsics::_doubleToRawLongBits:  result = new MoveD2LNode(arg);  break;
4111   case vmIntrinsics::_longBitsToDouble:     result = new MoveL2DNode(arg);  break;
4112 
4113   case vmIntrinsics::_doubleToLongBits: {
4114     // two paths (plus control) merge in a wood
4115     RegionNode *r = new RegionNode(3);
4116     Node *phi = new PhiNode(r, TypeLong::LONG);
4117 
4118     Node *cmpisnan = _gvn.transform(new CmpDNode(arg, arg));
4119     // Build the boolean node
4120     Node *bolisnan = _gvn.transform(new BoolNode(cmpisnan, BoolTest::ne));
4121 
4122     // Branch either way.
4123     // NaN case is less traveled, which makes all the difference.
4124     IfNode *ifisnan = create_and_xform_if(control(), bolisnan, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
4125     Node *opt_isnan = _gvn.transform(ifisnan);


5714 
5715   if (support_IRIW_for_not_multiple_copy_atomic_cpu && is_vol) {
5716     insert_mem_bar(Op_MemBarVolatile);   // StoreLoad barrier
5717   }
5718   // Build the load.
5719   MemNode::MemOrd mo = is_vol ? MemNode::acquire : MemNode::unordered;
5720   Node* loadedField = make_load(NULL, adr, type, bt, adr_type, mo, LoadNode::DependsOnlyOnTest, is_vol);
5721   // If reference is volatile, prevent following memory ops from
5722   // floating up past the volatile read.  Also prevents commoning
5723   // another volatile read.
5724   if (is_vol) {
5725     // Memory barrier includes bogus read of value to force load BEFORE membar
5726     insert_mem_bar(Op_MemBarAcquire, loadedField);
5727   }
5728   return loadedField;
5729 }
5730 
5731 
5732 //------------------------------inline_aescrypt_Block-----------------------
5733 bool LibraryCallKit::inline_aescrypt_Block(vmIntrinsics::ID id) {
5734   address stubAddr = NULL;
5735   const char *stubName;
5736   assert(UseAES, "need AES instruction support");
5737 
5738   switch(id) {
5739   case vmIntrinsics::_aescrypt_encryptBlock:
5740     stubAddr = StubRoutines::aescrypt_encryptBlock();
5741     stubName = "aescrypt_encryptBlock";
5742     break;
5743   case vmIntrinsics::_aescrypt_decryptBlock:
5744     stubAddr = StubRoutines::aescrypt_decryptBlock();
5745     stubName = "aescrypt_decryptBlock";
5746     break;
5747   }
5748   if (stubAddr == NULL) return false;
5749 
5750   Node* aescrypt_object = argument(0);
5751   Node* src             = argument(1);
5752   Node* src_offset      = argument(2);
5753   Node* dest            = argument(3);
5754   Node* dest_offset     = argument(4);


5780     // compatibility issues between Java key expansion and SPARC crypto instructions
5781     Node* original_k_start = get_original_key_start_from_aescrypt_object(aescrypt_object);
5782     if (original_k_start == NULL) return false;
5783 
5784     // Call the stub.
5785     make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::aescrypt_block_Type(),
5786                       stubAddr, stubName, TypePtr::BOTTOM,
5787                       src_start, dest_start, k_start, original_k_start);
5788   } else {
5789     // Call the stub.
5790     make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::aescrypt_block_Type(),
5791                       stubAddr, stubName, TypePtr::BOTTOM,
5792                       src_start, dest_start, k_start);
5793   }
5794 
5795   return true;
5796 }
5797 
5798 //------------------------------inline_cipherBlockChaining_AESCrypt-----------------------
5799 bool LibraryCallKit::inline_cipherBlockChaining_AESCrypt(vmIntrinsics::ID id) {
5800   address stubAddr = NULL;
5801   const char *stubName = NULL;
5802 
5803   assert(UseAES, "need AES instruction support");
5804 
5805   switch(id) {
5806   case vmIntrinsics::_cipherBlockChaining_encryptAESCrypt:
5807     stubAddr = StubRoutines::cipherBlockChaining_encryptAESCrypt();
5808     stubName = "cipherBlockChaining_encryptAESCrypt";
5809     break;
5810   case vmIntrinsics::_cipherBlockChaining_decryptAESCrypt:
5811     stubAddr = StubRoutines::cipherBlockChaining_decryptAESCrypt();
5812     stubName = "cipherBlockChaining_decryptAESCrypt";
5813     break;
5814   }
5815   if (stubAddr == NULL) return false;
5816 
5817   Node* cipherBlockChaining_object = argument(0);
5818   Node* src                        = argument(1);
5819   Node* src_offset                 = argument(2);
5820   Node* len                        = argument(3);
5821   Node* dest                       = argument(4);

