src/share/vm/opto/library_call.cpp
Sdiff for 8003135

2935 bool LibraryCallKit::inline_native_currentThread() {
2936   Node* junk = NULL;
2937   set_result(generate_current_thread(junk));
2938   return true;
2939 }
2940 
2941 //------------------------inline_native_isInterrupted------------------
2942 // private native boolean java.lang.Thread.isInterrupted(boolean ClearInterrupted);
2943 bool LibraryCallKit::inline_native_isInterrupted() {
2944   // Add a fast path to t.isInterrupted(clear_int):
2945   //   (t == Thread.current() && (!TLS._osthread._interrupted || !clear_int))
2946   //   ? TLS._osthread._interrupted : /*slow path:*/ t.isInterrupted(clear_int)
2947   // So, in the common case that the interrupt bit is false,
2948   // we avoid making a call into the VM.  Even if the interrupt bit
2949   // is true, if the clear_int argument is false, we avoid the VM call.
2950   // However, if the receiver is not currentThread, we must call the VM,
2951   // because there must be some locking done around the operation.
2952 
2953   // We only go to the fast case code if we pass two guards.
2954   // Paths which do not pass are accumulated in the slow_region.











2955   RegionNode* slow_region = new (C) RegionNode(1);
2956   record_for_igvn(slow_region);
2957   RegionNode* result_rgn = new (C) RegionNode(1+3); // fast1, fast2, slow
2958   PhiNode*    result_val = new (C) PhiNode(result_rgn, TypeInt::BOOL);
2959   enum { no_int_result_path   = 1,
2960          no_clear_result_path = 2,
2961          slow_result_path     = 3
2962   };
2963 
2964   // (a) Receiving thread must be the current thread.
2965   Node* rec_thr = argument(0);
2966   Node* tls_ptr = NULL;
2967   Node* cur_thr = generate_current_thread(tls_ptr);
2968   Node* cmp_thr = _gvn.transform( new (C) CmpPNode(cur_thr, rec_thr) );
2969   Node* bol_thr = _gvn.transform( new (C) BoolNode(cmp_thr, BoolTest::ne) );
2970 
2971   bool known_current_thread = (_gvn.type(bol_thr) == TypeInt::ZERO);
2972   if (!known_current_thread)
2973     generate_slow_guard(bol_thr, slow_region);
2974 
2975   // (b) Interrupt bit on TLS must be false.
2976   Node* p = basic_plus_adr(top()/*!oop*/, tls_ptr, in_bytes(JavaThread::osthread_offset()));
2977   Node* osthread = make_load(NULL, p, TypeRawPtr::NOTNULL, T_ADDRESS);
2978   p = basic_plus_adr(top()/*!oop*/, osthread, in_bytes(OSThread::interrupted_offset()));







2979   // Set the control input on the field _interrupted read to prevent it floating up.
2980   Node* int_bit = make_load(control(), p, TypeInt::BOOL, T_INT);
2981   Node* cmp_bit = _gvn.transform( new (C) CmpINode(int_bit, intcon(0)) );
2982   Node* bol_bit = _gvn.transform( new (C) BoolNode(cmp_bit, BoolTest::ne) );
2983 
2984   IfNode* iff_bit = create_and_map_if(control(), bol_bit, PROB_UNLIKELY_MAG(3), COUNT_UNKNOWN);
2985 
2986   // First fast path:  if (!TLS._interrupted) return false;
2987   Node* false_bit = _gvn.transform( new (C) IfFalseNode(iff_bit) );
2988   result_rgn->init_req(no_int_result_path, false_bit);
2989   result_val->init_req(no_int_result_path, intcon(0));
2990 
2991   // drop through to next case
2992   set_control( _gvn.transform(new (C) IfTrueNode(iff_bit)) );
2993 
2994   // (c) Or, if interrupt bit is set and clear_int is false, use 2nd fast path.
2995   Node* clr_arg = argument(1);
2996   Node* cmp_arg = _gvn.transform( new (C) CmpINode(clr_arg, intcon(0)) );
2997   Node* bol_arg = _gvn.transform( new (C) BoolNode(cmp_arg, BoolTest::ne) );
2998   IfNode* iff_arg = create_and_map_if(control(), bol_arg, PROB_FAIR, COUNT_UNKNOWN);
2999 
3000   // Second fast path:  ... else if (!clear_int) return true;
3001   Node* false_arg = _gvn.transform( new (C) IfFalseNode(iff_arg) );
3002   result_rgn->init_req(no_clear_result_path, false_arg);
3003   result_val->init_req(no_clear_result_path, intcon(1));
3004 
3005   // drop through to next case
3006   set_control( _gvn.transform(new (C) IfTrueNode(iff_arg)) );
3007 
3008   // (d) Otherwise, go to the slow path.
3009   slow_region->add_req(control());
3010   set_control( _gvn.transform(slow_region) );
3011 
3012   if (stopped()) {
3013     // There is no slow path.
3014     result_rgn->init_req(slow_result_path, top());
3015     result_val->init_req(slow_result_path, top());
3016   } else {


3017     // non-virtual because it is a private non-static
3018     CallJavaNode* slow_call = generate_method_call(vmIntrinsics::_isInterrupted);
3019 
3020     Node* slow_val = set_results_for_java_call(slow_call);
3021     // this->control() comes from set_results_for_java_call
3022 
3023     // If we know that the result of the slow call will be true, tell the optimizer!
3024     if (known_current_thread)  slow_val = intcon(1);

3025 
3026     Node* fast_io  = slow_call->in(TypeFunc::I_O);
3027     Node* fast_mem = slow_call->in(TypeFunc::Memory);
3028     // These two phis are pre-filled with copies of the fast IO and Memory
3029     Node* io_phi   = PhiNode::make(result_rgn, fast_io,  Type::ABIO);
3030     Node* mem_phi  = PhiNode::make(result_rgn, fast_mem, Type::MEMORY, TypePtr::BOTTOM);
3031 
3032     result_rgn->init_req(slow_result_path, control());
3033     io_phi    ->init_req(slow_result_path, i_o());
3034     mem_phi   ->init_req(slow_result_path, reset_memory());
3035     result_val->init_req(slow_result_path, slow_val);
3036 
3037     set_all_memory( _gvn.transform(mem_phi) );
3038     set_i_o(        _gvn.transform(io_phi) );
3039   }
3040 
3041   C->set_has_split_ifs(true); // Has chance for split-if optimization
3042   set_result(result_rgn, result_val);
3043   return true;
3044 }
3045 
3046 //---------------------------load_mirror_from_klass----------------------------
3047 // Given a klass oop, load its java mirror (a java.lang.Class oop).
3048 Node* LibraryCallKit::load_mirror_from_klass(Node* klass) {
3049   Node* p = basic_plus_adr(klass, in_bytes(Klass::java_mirror_offset()));
3050   return make_load(NULL, p, TypeInstPtr::MIRROR, T_OBJECT);
3051 }
3052 
3053 //-----------------------load_klass_from_mirror_common-------------------------
3054 // Given a java mirror (a java.lang.Class oop), load its corresponding klass oop.
3055 // Test the klass oop for null (signifying a primitive Class like Integer.TYPE),
3056 // and branch to the given path on the region.
3057 // If never_see_null, take an uncommon trap on null, so we can optimistically
3058 // compile for the non-null case.




2935 bool LibraryCallKit::inline_native_currentThread() {
2936   Node* junk = NULL;
2937   set_result(generate_current_thread(junk));
2938   return true;
2939 }
2940 
2941 //------------------------inline_native_isInterrupted------------------
2942 // private native boolean java.lang.Thread.isInterrupted(boolean ClearInterrupted);
2943 bool LibraryCallKit::inline_native_isInterrupted() {
2944   // Add a fast path to t.isInterrupted(clear_int):
2945   //   (t == Thread.current() && (!TLS._osthread._interrupted || !clear_int))
2946   //   ? TLS._osthread._interrupted : /*slow path:*/ t.isInterrupted(clear_int)
2947   // So, in the common case that the interrupt bit is false,
2948   // we avoid making a call into the VM.  Even if the interrupt bit
2949   // is true, if the clear_int argument is false, we avoid the VM call.
2950   // However, if the receiver is not currentThread, we must call the VM,
2951   // because there must be some locking done around the operation.
2952 
2953   // We only go to the fast case code if we pass two guards.
2954   // Paths which do not pass are accumulated in the slow_region.
2955 
2956   enum {
2957     no_int_result_path   = 1, // t == Thread.current() && !TLS._osthread._interrupted
2958     no_clear_result_path = 2, // t == Thread.current() && TLS._osthread._interrupted && !clear_int
2959     slow_result_path     = 3, // slow path: t.isInterrupted(clear_int)
2960     PATH_LIMIT
2961   };
2962 
2963   RegionNode* result_rgn = new (C) RegionNode(PATH_LIMIT);
2964   PhiNode*    result_val = new (C) PhiNode(result_rgn, TypeInt::BOOL);
2965 
2966   RegionNode* slow_region = new (C) RegionNode(1);
2967   record_for_igvn(slow_region);






2968 
2969   // (a) Receiving thread must be the current thread.
2970   Node* rec_thr = argument(0);
2971   Node* tls_ptr = NULL;
2972   Node* cur_thr = generate_current_thread(tls_ptr);
2973   Node* cmp_thr = _gvn.transform( new (C) CmpPNode(cur_thr, rec_thr) );
2974   Node* bol_thr = _gvn.transform( new (C) BoolNode(cmp_thr, BoolTest::ne) );
2975 
2976   bool known_current_thread = (_gvn.type(bol_thr) == TypeInt::ZERO);
2977 
2978   generate_slow_guard(bol_thr, slow_region);
2979 
2980   // (b) Interrupt bit on TLS must be false.
2981   Node* p = basic_plus_adr(top()/*!oop*/, tls_ptr, in_bytes(JavaThread::osthread_offset()));
2982   Node* osthread = make_load(NULL, p, TypeRawPtr::NOTNULL, T_ADDRESS);
2983   p = basic_plus_adr(top()/*!oop*/, osthread, in_bytes(OSThread::interrupted_offset()));
2984 
2985   Node* init_mem = map()->memory();    // memory state before the barrier; restored below before the slow call
2986 
2987   insert_mem_bar(Op_MemBarCPUOrder);   // compiler-only ordering barrier ahead of the _interrupted load
2988 
2989   Node* fast_mem = map()->memory();    // memory state seen by the fast paths; pre-fills the result memory phi
2990 
2991   // Set the control input on the field _interrupted read to prevent it floating up.
2992   Node* int_bit = make_load(control(), p, TypeInt::BOOL, T_INT);
2993   Node* cmp_bit = _gvn.transform( new (C) CmpINode(int_bit, intcon(0)) );
2994   Node* bol_bit = _gvn.transform( new (C) BoolNode(cmp_bit, BoolTest::ne) );
2995 
2996   IfNode* iff_bit = create_and_map_if(control(), bol_bit, PROB_UNLIKELY_MAG(3), COUNT_UNKNOWN);
2997 
2998   // First fast path:  if (!TLS._interrupted) return false;
2999   Node* false_bit = _gvn.transform( new (C) IfFalseNode(iff_bit) );
3000   result_rgn->init_req(no_int_result_path, false_bit);
3001   result_val->init_req(no_int_result_path, intcon(0));
3002 
3003   // drop through to next case
3004   set_control( _gvn.transform(new (C) IfTrueNode(iff_bit)) );
3005 
3006   // (c) Or, if interrupt bit is set and clear_int is false, use 2nd fast path.
3007   Node* clr_arg = argument(1);
3008   Node* cmp_arg = _gvn.transform( new (C) CmpINode(clr_arg, intcon(0)) );
3009   Node* bol_arg = _gvn.transform( new (C) BoolNode(cmp_arg, BoolTest::ne) );
3010   IfNode* iff_arg = create_and_map_if(control(), bol_arg, PROB_FAIR, COUNT_UNKNOWN);
3011 
3012   // Second fast path:  ... else if (!clear_int) return true;
3013   Node* false_arg = _gvn.transform( new (C) IfFalseNode(iff_arg) );
3014   result_rgn->init_req(no_clear_result_path, false_arg);
3015   result_val->init_req(no_clear_result_path, intcon(1));
3016 
3017   // drop through to next case
3018   set_control( _gvn.transform(new (C) IfTrueNode(iff_arg)) );
3019 
3020   // (d) Otherwise, go to the slow path.
3021   slow_region->add_req(control());
3022   set_control( _gvn.transform(slow_region) );
3023 
3024   if (stopped()) {
3025     // There is no slow path.
3026     result_rgn->init_req(slow_result_path, top());
3027     result_val->init_req(slow_result_path, top());
3028   } else {
3029     set_all_memory(init_mem);
3030 
3031     // non-virtual because it is a private non-static
3032     CallJavaNode* slow_call = generate_method_call(vmIntrinsics::_isInterrupted);
3033 
3034     Node* slow_val = set_results_for_java_call(slow_call);
3035     // this->control() comes from set_results_for_java_call
3036 
3037     // If we know that the result of the slow call will be true, tell the optimizer!
3038     if (known_current_thread)
3039       slow_val = intcon(1);
3040 
3041     Node* fast_io  = slow_call->in(TypeFunc::I_O);
3042 
3043     // These two phis are pre-filled with copies of the fast IO and Memory
3044     PhiNode* result_mem  = PhiNode::make(result_rgn, fast_mem, Type::MEMORY, TypePtr::BOTTOM);
3045     PhiNode* result_io   = PhiNode::make(result_rgn, fast_io,  Type::ABIO);
3046 
3047     result_rgn->init_req(slow_result_path, control());
3048     result_io ->init_req(slow_result_path, i_o());
3049     result_mem->init_req(slow_result_path, reset_memory());
3050     result_val->init_req(slow_result_path, slow_val);
3051 
3052     set_all_memory(_gvn.transform(result_mem));
3053     set_i_o(       _gvn.transform(result_io));
3054   }
3055 
3056   C->set_has_split_ifs(true); // Has chance for split-if optimization
3057   set_result(result_rgn, result_val);
3058   return true;
3059 }
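
A minimal, self-contained C++ sketch of the decision tree that inline_native_isInterrupted() emits may help when reading the graph-building code above. This is not HotSpot code: the Thread struct, its interrupted_bit field, and the vm_is_interrupted() helper are hypothetical stand-ins for the receiver, the TLS._osthread._interrupted bit, and the slow-path call into the VM.

#include <cstdio>

// Hypothetical model types -- not HotSpot code.
struct Thread {
  bool interrupted_bit;            // stands in for TLS._osthread._interrupted
  static Thread* current();        // stands in for Thread.current()
};

static Thread the_current_thread = { false };
Thread* Thread::current() { return &the_current_thread; }

// Stand-in for the slow path, the out-of-line VM call t.isInterrupted(clear_int),
// which may clear the bit and does the required synchronization.
static bool vm_is_interrupted(Thread* t, bool clear_int) {
  bool was_set = t->interrupted_bit;
  if (was_set && clear_int)  t->interrupted_bit = false;
  return was_set;
}

// Shape of the code the intrinsic generates.
static bool is_interrupted(Thread* t, bool clear_int) {
  if (t == Thread::current()) {              // guard (a): receiver is the current thread
    if (!t->interrupted_bit)                 // first fast path: bit not set -> false
      return false;
    if (!clear_int)                          // second fast path: bit set, no clear requested -> true
      return true;
  }
  return vm_is_interrupted(t, clear_int);    // otherwise take the slow path
}

int main() {
  Thread* t = Thread::current();
  printf("%d\n", is_interrupted(t, false)); // 0: first fast path
  t->interrupted_bit = true;
  printf("%d\n", is_interrupted(t, false)); // 1: second fast path
  printf("%d\n", is_interrupted(t, true));  // 1: slow path, clears the bit
  printf("%d\n", is_interrupted(t, false)); // 0: bit was cleared
  return 0;
}

The control flow is the same on both sides of this sdiff; what the new version changes is the memory-state plumbing: a MemBarCPUOrder is inserted ahead of the _interrupted load, the pre-barrier state (init_mem) is restored before the slow call, and the post-barrier state (fast_mem) pre-fills the result memory phi, so the paths merged at result_rgn carry consistent memory.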
3060 
3061 //---------------------------load_mirror_from_klass----------------------------
3062 // Given a klass oop, load its java mirror (a java.lang.Class oop).
3063 Node* LibraryCallKit::load_mirror_from_klass(Node* klass) {
3064   Node* p = basic_plus_adr(klass, in_bytes(Klass::java_mirror_offset()));
3065   return make_load(NULL, p, TypeInstPtr::MIRROR, T_OBJECT);
3066 }
3067 
3068 //-----------------------load_klass_from_mirror_common-------------------------
3069 // Given a java mirror (a java.lang.Class oop), load its corresponding klass oop.
3070 // Test the klass oop for null (signifying a primitive Class like Integer.TYPE),
3071 // and branch to the given path on the region.
3072 // If never_see_null, take an uncommon trap on null, so we can optimistically
3073 // compile for the non-null case.

