src/share/vm/opto/library_call.cpp

rev 7390 : 8028595: WhiteBox API for stress testing of TieredCompilation
Reviewed-by:


 270   bool inline_reference_get();
 271   bool inline_Class_cast();
 272   bool inline_aescrypt_Block(vmIntrinsics::ID id);
 273   bool inline_cipherBlockChaining_AESCrypt(vmIntrinsics::ID id);
 274   Node* inline_cipherBlockChaining_AESCrypt_predicate(bool decrypting);
 275   Node* get_key_start_from_aescrypt_object(Node* aescrypt_object);
 276   Node* get_original_key_start_from_aescrypt_object(Node* aescrypt_object);
 277   bool inline_sha_implCompress(vmIntrinsics::ID id);
 278   bool inline_digestBase_implCompressMB(int predicate);
 279   bool inline_sha_implCompressMB(Node* digestBaseObj, ciInstanceKlass* instklass_SHA,
 280                                  bool long_state, address stubAddr, const char *stubName,
 281                                  Node* src_start, Node* ofs, Node* limit);
 282   Node* get_state_from_sha_object(Node *sha_object);
 283   Node* get_state_from_sha5_object(Node *sha_object);
 284   Node* inline_digestBase_implCompressMB_predicate(int predicate);
 285   bool inline_encodeISOArray();
 286   bool inline_updateCRC32();
 287   bool inline_updateBytesCRC32();
 288   bool inline_updateByteBufferCRC32();
 289   bool inline_multiplyToLen();


 290 };
 291 
 292 
 293 //---------------------------make_vm_intrinsic----------------------------
 294 CallGenerator* Compile::make_vm_intrinsic(ciMethod* m, bool is_virtual) {
 295   vmIntrinsics::ID id = m->intrinsic_id();
 296   assert(id != vmIntrinsics::_none, "must be a VM intrinsic");
 297 
 298   ccstr disable_intr = NULL;
 299 
 300   if ((DisableIntrinsic[0] != '\0'
 301        && strstr(DisableIntrinsic, vmIntrinsics::name_at(id)) != NULL) ||
 302       (method_has_option_value("DisableIntrinsic", disable_intr)
 303        && strstr(disable_intr, vmIntrinsics::name_at(id)) != NULL)) {
 304     // disabled by a user request on the command line:
 305     // example: -XX:DisableIntrinsic=_hashCode,_getClass
 306     return NULL;
 307   }
 308 
 309   if (!m->is_loaded()) {

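Note on the check above: the intrinsic name is matched by plain substring search over the flag's comma-separated value, so -XX:DisableIntrinsic=_hashCode,_getClass disables those intrinsics globally, and the second clause applies the same matching to a per-method DisableIntrinsic value supplied via CompileCommand. A minimal standalone sketch of just that matching (ordinary C++, not HotSpot code; intrinsic_disabled is a made-up helper for illustration):

    #include <cstdio>
    #include <cstring>

    // Returns true when the intrinsic name occurs anywhere in the flag value,
    // mirroring the strstr()-based test in make_vm_intrinsic above.
    static bool intrinsic_disabled(const char* flag_value, const char* intrinsic_name) {
      return flag_value[0] != '\0' && std::strstr(flag_value, intrinsic_name) != NULL;
    }

    int main() {
      const char* flag = "_hashCode,_getClass";                   // -XX:DisableIntrinsic=_hashCode,_getClass
      std::printf("%d\n", intrinsic_disabled(flag, "_hashCode")); // 1: intrinsic disabled
      std::printf("%d\n", intrinsic_disabled(flag, "_clone"));    // 0: intrinsic still allowed
      return 0;
    }
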

 802   case vmIntrinsics::_prefetchWriteStatic:      return inline_unsafe_prefetch(!is_native_ptr,  is_store,  is_static);
 803 
 804   case vmIntrinsics::_compareAndSwapObject:     return inline_unsafe_load_store(T_OBJECT, LS_cmpxchg);
 805   case vmIntrinsics::_compareAndSwapInt:        return inline_unsafe_load_store(T_INT,    LS_cmpxchg);
 806   case vmIntrinsics::_compareAndSwapLong:       return inline_unsafe_load_store(T_LONG,   LS_cmpxchg);
 807 
 808   case vmIntrinsics::_putOrderedObject:         return inline_unsafe_ordered_store(T_OBJECT);
 809   case vmIntrinsics::_putOrderedInt:            return inline_unsafe_ordered_store(T_INT);
 810   case vmIntrinsics::_putOrderedLong:           return inline_unsafe_ordered_store(T_LONG);
 811 
 812   case vmIntrinsics::_getAndAddInt:             return inline_unsafe_load_store(T_INT,    LS_xadd);
 813   case vmIntrinsics::_getAndAddLong:            return inline_unsafe_load_store(T_LONG,   LS_xadd);
 814   case vmIntrinsics::_getAndSetInt:             return inline_unsafe_load_store(T_INT,    LS_xchg);
 815   case vmIntrinsics::_getAndSetLong:            return inline_unsafe_load_store(T_LONG,   LS_xchg);
 816   case vmIntrinsics::_getAndSetObject:          return inline_unsafe_load_store(T_OBJECT, LS_xchg);
 817 
 818   case vmIntrinsics::_loadFence:
 819   case vmIntrinsics::_storeFence:
 820   case vmIntrinsics::_fullFence:                return inline_unsafe_fence(intrinsic_id());
 821 


 822   case vmIntrinsics::_currentThread:            return inline_native_currentThread();
 823   case vmIntrinsics::_isInterrupted:            return inline_native_isInterrupted();
 824 
 825 #ifdef TRACE_HAVE_INTRINSICS
 826   case vmIntrinsics::_classID:                  return inline_native_classID();
 827   case vmIntrinsics::_threadID:                 return inline_native_threadID();
 828   case vmIntrinsics::_counterTime:              return inline_native_time_funcs(CAST_FROM_FN_PTR(address, TRACE_TIME_METHOD), "counterTime");
 829 #endif
 830   case vmIntrinsics::_currentTimeMillis:        return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeMillis), "currentTimeMillis");
 831   case vmIntrinsics::_nanoTime:                 return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeNanos), "nanoTime");
 832   case vmIntrinsics::_allocateInstance:         return inline_unsafe_allocate();
 833   case vmIntrinsics::_copyMemory:               return inline_unsafe_copyMemory();
 834   case vmIntrinsics::_newArray:                 return inline_native_newArray();
 835   case vmIntrinsics::_getLength:                return inline_native_getLength();
 836   case vmIntrinsics::_copyOf:                   return inline_array_copyOf(false);
 837   case vmIntrinsics::_copyOfRange:              return inline_array_copyOf(true);
 838   case vmIntrinsics::_equalsC:                  return inline_array_equals();
 839   case vmIntrinsics::_clone:                    return inline_native_clone(intrinsic()->is_virtual());
 840 
 841   case vmIntrinsics::_isAssignableFrom:         return inline_native_subtype_check();


3179     ShouldNotReachHere();
3180   }
3181   set_result(threadid);
3182   return true;
3183 }
3184 #endif
3185 
3186 //------------------------inline_native_time_funcs--------------
3187 // inline code for System.currentTimeMillis() and System.nanoTime()
3188 // these have the same type and signature
3189 bool LibraryCallKit::inline_native_time_funcs(address funcAddr, const char* funcName) {
3190   const TypeFunc* tf = OptoRuntime::void_long_Type();
3191   const TypePtr* no_memory_effects = NULL;
3192   Node* time = make_runtime_call(RC_LEAF, tf, funcAddr, funcName, no_memory_effects);
3193   Node* value = _gvn.transform(new ProjNode(time, TypeFunc::Parms+0));
3194 #ifdef ASSERT
3195   Node* value_top = _gvn.transform(new ProjNode(time, TypeFunc::Parms+1));
3196   assert(value_top == top(), "second value must be top");
3197 #endif
3198   set_result(value);





3199   return true;
3200 }
3201 
3202 //------------------------inline_native_currentThread------------------
3203 bool LibraryCallKit::inline_native_currentThread() {
3204   Node* junk = NULL;
3205   set_result(generate_current_thread(junk));
3206   return true;
3207 }
3208 
3209 //------------------------inline_native_isInterrupted------------------
3210 // private native boolean java.lang.Thread.isInterrupted(boolean ClearInterrupted);
3211 bool LibraryCallKit::inline_native_isInterrupted() {
3212   // Add a fast path to t.isInterrupted(clear_int):
3213   //   (t == Thread.current() &&
3214   //    (!TLS._osthread._interrupted || WINDOWS_ONLY(false) NOT_WINDOWS(!clear_int)))
3215   //   ? TLS._osthread._interrupted : /*slow path:*/ t.isInterrupted(clear_int)
3216   // So, in the common case that the interrupt bit is false,
3217   // we avoid making a call into the VM.  Even if the interrupt bit
3218   // is true, if the clear_int argument is false, we avoid the VM call.




 270   bool inline_reference_get();
 271   bool inline_Class_cast();
 272   bool inline_aescrypt_Block(vmIntrinsics::ID id);
 273   bool inline_cipherBlockChaining_AESCrypt(vmIntrinsics::ID id);
 274   Node* inline_cipherBlockChaining_AESCrypt_predicate(bool decrypting);
 275   Node* get_key_start_from_aescrypt_object(Node* aescrypt_object);
 276   Node* get_original_key_start_from_aescrypt_object(Node* aescrypt_object);
 277   bool inline_sha_implCompress(vmIntrinsics::ID id);
 278   bool inline_digestBase_implCompressMB(int predicate);
 279   bool inline_sha_implCompressMB(Node* digestBaseObj, ciInstanceKlass* instklass_SHA,
 280                                  bool long_state, address stubAddr, const char *stubName,
 281                                  Node* src_start, Node* ofs, Node* limit);
 282   Node* get_state_from_sha_object(Node *sha_object);
 283   Node* get_state_from_sha5_object(Node *sha_object);
 284   Node* inline_digestBase_implCompressMB_predicate(int predicate);
 285   bool inline_encodeISOArray();
 286   bool inline_updateCRC32();
 287   bool inline_updateBytesCRC32();
 288   bool inline_updateByteBufferCRC32();
 289   bool inline_multiplyToLen();
 290 
 291   bool inline_getCompiler();
 292 };
 293 
 294 
 295 //---------------------------make_vm_intrinsic----------------------------
 296 CallGenerator* Compile::make_vm_intrinsic(ciMethod* m, bool is_virtual) {
 297   vmIntrinsics::ID id = m->intrinsic_id();
 298   assert(id != vmIntrinsics::_none, "must be a VM intrinsic");
 299 
 300   ccstr disable_intr = NULL;
 301 
 302   if ((DisableIntrinsic[0] != '\0'
 303        && strstr(DisableIntrinsic, vmIntrinsics::name_at(id)) != NULL) ||
 304       (method_has_option_value("DisableIntrinsic", disable_intr)
 305        && strstr(disable_intr, vmIntrinsics::name_at(id)) != NULL)) {
 306     // disabled by a user request on the command line:
 307     // example: -XX:DisableIntrinsic=_hashCode,_getClass
 308     return NULL;
 309   }
 310 
 311   if (!m->is_loaded()) {


 804   case vmIntrinsics::_prefetchWriteStatic:      return inline_unsafe_prefetch(!is_native_ptr,  is_store,  is_static);
 805 
 806   case vmIntrinsics::_compareAndSwapObject:     return inline_unsafe_load_store(T_OBJECT, LS_cmpxchg);
 807   case vmIntrinsics::_compareAndSwapInt:        return inline_unsafe_load_store(T_INT,    LS_cmpxchg);
 808   case vmIntrinsics::_compareAndSwapLong:       return inline_unsafe_load_store(T_LONG,   LS_cmpxchg);
 809 
 810   case vmIntrinsics::_putOrderedObject:         return inline_unsafe_ordered_store(T_OBJECT);
 811   case vmIntrinsics::_putOrderedInt:            return inline_unsafe_ordered_store(T_INT);
 812   case vmIntrinsics::_putOrderedLong:           return inline_unsafe_ordered_store(T_LONG);
 813 
 814   case vmIntrinsics::_getAndAddInt:             return inline_unsafe_load_store(T_INT,    LS_xadd);
 815   case vmIntrinsics::_getAndAddLong:            return inline_unsafe_load_store(T_LONG,   LS_xadd);
 816   case vmIntrinsics::_getAndSetInt:             return inline_unsafe_load_store(T_INT,    LS_xchg);
 817   case vmIntrinsics::_getAndSetLong:            return inline_unsafe_load_store(T_LONG,   LS_xchg);
 818   case vmIntrinsics::_getAndSetObject:          return inline_unsafe_load_store(T_OBJECT, LS_xchg);
 819 
 820   case vmIntrinsics::_loadFence:
 821   case vmIntrinsics::_storeFence:
 822   case vmIntrinsics::_fullFence:                return inline_unsafe_fence(intrinsic_id());
 823 
 824   case vmIntrinsics::_getCompiler:              return inline_getCompiler();
 825 
 826   case vmIntrinsics::_currentThread:            return inline_native_currentThread();
 827   case vmIntrinsics::_isInterrupted:            return inline_native_isInterrupted();
 828 
 829 #ifdef TRACE_HAVE_INTRINSICS
 830   case vmIntrinsics::_classID:                  return inline_native_classID();
 831   case vmIntrinsics::_threadID:                 return inline_native_threadID();
 832   case vmIntrinsics::_counterTime:              return inline_native_time_funcs(CAST_FROM_FN_PTR(address, TRACE_TIME_METHOD), "counterTime");
 833 #endif
 834   case vmIntrinsics::_currentTimeMillis:        return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeMillis), "currentTimeMillis");
 835   case vmIntrinsics::_nanoTime:                 return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeNanos), "nanoTime");
 836   case vmIntrinsics::_allocateInstance:         return inline_unsafe_allocate();
 837   case vmIntrinsics::_copyMemory:               return inline_unsafe_copyMemory();
 838   case vmIntrinsics::_newArray:                 return inline_native_newArray();
 839   case vmIntrinsics::_getLength:                return inline_native_getLength();
 840   case vmIntrinsics::_copyOf:                   return inline_array_copyOf(false);
 841   case vmIntrinsics::_copyOfRange:              return inline_array_copyOf(true);
 842   case vmIntrinsics::_equalsC:                  return inline_array_equals();
 843   case vmIntrinsics::_clone:                    return inline_native_clone(intrinsic()->is_virtual());
 844 
 845   case vmIntrinsics::_isAssignableFrom:         return inline_native_subtype_check();


3183     ShouldNotReachHere();
3184   }
3185   set_result(threadid);
3186   return true;
3187 }
3188 #endif
3189 
3190 //------------------------inline_native_time_funcs--------------
3191 // inline code for System.currentTimeMillis() and System.nanoTime()
3192 // these have the same type and signature
3193 bool LibraryCallKit::inline_native_time_funcs(address funcAddr, const char* funcName) {
3194   const TypeFunc* tf = OptoRuntime::void_long_Type();
3195   const TypePtr* no_memory_effects = NULL;
3196   Node* time = make_runtime_call(RC_LEAF, tf, funcAddr, funcName, no_memory_effects);
3197   Node* value = _gvn.transform(new ProjNode(time, TypeFunc::Parms+0));
3198 #ifdef ASSERT
3199   Node* value_top = _gvn.transform(new ProjNode(time, TypeFunc::Parms+1));
3200   assert(value_top == top(), "second value must be top");
3201 #endif
3202   set_result(value);
3203   return true;
3204 }
3205 
3206 bool LibraryCallKit::inline_getCompiler() {
3207   set_result(_gvn.transform(intcon(2)));
3208   return true;
3209 }
3210 
3211 //------------------------inline_native_currentThread------------------
3212 bool LibraryCallKit::inline_native_currentThread() {
3213   Node* junk = NULL;
3214   set_result(generate_current_thread(junk));
3215   return true;
3216 }
3217 
3218 //------------------------inline_native_isInterrupted------------------
3219 // private native boolean java.lang.Thread.isInterrupted(boolean ClearInterrupted);
3220 bool LibraryCallKit::inline_native_isInterrupted() {
3221   // Add a fast path to t.isInterrupted(clear_int):
3222   //   (t == Thread.current() &&
3223   //    (!TLS._osthread._interrupted || WINDOWS_ONLY(false) NOT_WINDOWS(!clear_int)))
3224   //   ? TLS._osthread._interrupted : /*slow path:*/ t.isInterrupted(clear_int)
3225   // So, in the common case that the interrupt bit is false,
3226   // we avoid making a call into the VM.  Even if the interrupt bit
3227   // is true, if the clear_int argument is false, we avoid the VM call.

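For reference, a small standalone sketch of the fast-path decision described in the comment above (ordinary C++, not the Ideal graph the intrinsic actually builds; FakeThread and slow_path_isInterrupted are stand-ins for the real thread state and the VM call):

    #include <cstdio>

    struct FakeThread { bool interrupted; };

    // Stand-in for the VM call taken on the slow path: report the interrupt
    // bit and optionally clear it.
    static bool slow_path_isInterrupted(FakeThread* t, bool clear_int) {
      bool was_set = t->interrupted;
      if (clear_int) t->interrupted = false;
      return was_set;
    }

    static bool isInterrupted(FakeThread* t, FakeThread* current, bool clear_int) {
      if (t == current) {
        if (!t->interrupted) return false;   // common case: bit clear, no VM call
    #ifndef _WIN32
        if (!clear_int) return true;         // bit set but not clearing it: still no VM call
    #endif
      }
      return slow_path_isInterrupted(t, clear_int);  // everything else goes to the VM
    }

    int main() {
      FakeThread self = { false };
      std::printf("%d\n", isInterrupted(&self, &self, /*clear_int=*/ false));  // prints 0 via the fast path
      return 0;
    }
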
