2839
// NOTE(review): this is the TAIL of a lock-emitting routine (presumably
// GraphKit::shared_lock) — its signature and the definitions of obj, box,
// flock and mem are above this view; confirm against the full file.
// The LockNode (monitor-enter) is wired like a runtime call: the fixed
// TypeFunc control slots first, then the actual parameters.
2840 const TypeFunc *tf = LockNode::lock_type();
2841 LockNode *lock = new (C, tf->domain()->cnt()) LockNode(C, tf);
2842
2843 lock->init_req( TypeFunc::Control, control() );
2844 lock->init_req( TypeFunc::Memory , mem );
2845 lock->init_req( TypeFunc::I_O , top() ) ; // does no i/o
2846 lock->init_req( TypeFunc::FramePtr, frameptr() );
2847 lock->init_req( TypeFunc::ReturnAdr, top() );
2848
// Call parameters: the object being locked, its stack box slot, and the
// fast-lock node (flock is presumably a FastLockNode built earlier — verify).
2849 lock->init_req(TypeFunc::Parms + 0, obj);
2850 lock->init_req(TypeFunc::Parms + 1, box);
2851 lock->init_req(TypeFunc::Parms + 2, flock);
// Locks are safepoints: attach the JVM state edges so deoptimization can
// reconstruct the interpreter frame at this point.
2852 add_safepoint_edges(lock);
2853
2854 lock = _gvn.transform( lock )->as_Lock();
2855
2856 // lock has no side-effects, sets few values
2857 set_predefined_output_for_runtime_call(lock, mem, TypeRawPtr::BOTTOM);
2858
// Acquire barrier after the lock: keeps memory accesses inside the critical
// section from floating above the monitor acquisition.
// NOTE(review): the second copy of this chunk (below in this file) uses
// Op_MemBarCPUOrder here instead — confirm which barrier is intended.
2859 insert_mem_bar(Op_MemBarAcquire);
2860
2861 // Add this to the worklist so that the lock can be eliminated
2862 record_for_igvn(lock);
2863
2864 #ifndef PRODUCT
2865 if (PrintLockStatistics) {
2866 // Update the counter for this lock. Don't bother using an atomic
2867 // operation since we don't require absolute accuracy.
2868 lock->create_lock_counter(map()->jvms());
2869 increment_counter(lock->counter()->addr());
2870 }
2871 #endif
2872
// Hand the fast-lock node back to the caller.
2873 return flock;
2874 }
2875
2876
2877 //------------------------------shared_unlock----------------------------------
2878 // Emit unlocking code.
// NOTE(review): this definition is TRUNCATED at the chunk boundary below —
// the pop_monitor()/epilogue presumably follows; confirm against the full file.
2879 void GraphKit::shared_unlock(Node* box, Node* obj) {
2880 // bci is either a monitorenter bc or InvocationEntryBci
2881 // %%% SynchronizationEntryBCI is redundant; use InvocationEntryBci in interfaces
2882 assert(SynchronizationEntryBCI == InvocationEntryBci, "");
2883
// Compile-time switch: synchronization emission can be disabled entirely.
2884 if( !GenerateSynchronizationCode )
2885 return;
2886 if (stopped()) { // Dead monitor?
2887 map()->pop_monitor(); // Kill monitor from debug info
2888 return;
2889 }
2890
2891 // Memory barrier to avoid floating things down past the locked region
// (release semantics: critical-section accesses may not sink below the unlock).
2892 insert_mem_bar(Op_MemBarRelease);
2893
// Build the UnlockNode (monitor-exit), wired like a runtime call to
// complete_monitor_exit: fixed TypeFunc slots, then obj and its box slot.
// Only raw memory is an input; the unlock touches no Java-visible state.
2894 const TypeFunc *tf = OptoRuntime::complete_monitor_exit_Type();
2895 UnlockNode *unlock = new (C, tf->domain()->cnt()) UnlockNode(C, tf);
2896 uint raw_idx = Compile::AliasIdxRaw;
2897 unlock->init_req( TypeFunc::Control, control() );
2898 unlock->init_req( TypeFunc::Memory , memory(raw_idx) );
2899 unlock->init_req( TypeFunc::I_O , top() ) ; // does no i/o
2900 unlock->init_req( TypeFunc::FramePtr, frameptr() );
2901 unlock->init_req( TypeFunc::ReturnAdr, top() );
2902
2903 unlock->init_req(TypeFunc::Parms + 0, obj);
2904 unlock->init_req(TypeFunc::Parms + 1, box);
2905 unlock = _gvn.transform(unlock)->as_Unlock();
2906
// Capture the whole merged memory state so the unlock's projections can be
// installed as the new memory.
2907 Node* mem = reset_memory();
2908
2909 // unlock has no side-effects, sets few values
2910 set_predefined_output_for_runtime_call(unlock, mem, TypeRawPtr::BOTTOM);
2911
2912 // Kill monitor from debug info
|
2839
// NOTE(review): this is a SECOND copy of the lock-emission tail above
// (presumably GraphKit::shared_lock); obj, box, flock and mem are defined
// before this view. It differs from the first copy only in the barrier
// opcode at line 2859 (Op_MemBarCPUOrder here vs Op_MemBarAcquire there) —
// looks like a before/after pair of a barrier change; confirm which is current.
2840 const TypeFunc *tf = LockNode::lock_type();
2841 LockNode *lock = new (C, tf->domain()->cnt()) LockNode(C, tf);
2842
2843 lock->init_req( TypeFunc::Control, control() );
2844 lock->init_req( TypeFunc::Memory , mem );
2845 lock->init_req( TypeFunc::I_O , top() ) ; // does no i/o
2846 lock->init_req( TypeFunc::FramePtr, frameptr() );
2847 lock->init_req( TypeFunc::ReturnAdr, top() );
2848
// Call parameters: locked object, its stack box slot, and the fast-lock node.
2849 lock->init_req(TypeFunc::Parms + 0, obj);
2850 lock->init_req(TypeFunc::Parms + 1, box);
2851 lock->init_req(TypeFunc::Parms + 2, flock);
// Locks are safepoints: attach JVM state edges for deoptimization.
2852 add_safepoint_edges(lock);
2853
2854 lock = _gvn.transform( lock )->as_Lock();
2855
2856 // lock has no side-effects, sets few values
2857 set_predefined_output_for_runtime_call(lock, mem, TypeRawPtr::BOTTOM);
2858
// CPU-order barrier after the lock (pins compiler scheduling only — weaker
// than the acquire barrier used in the other copy of this chunk).
2859 insert_mem_bar(Op_MemBarCPUOrder);
2860
2861 // Add this to the worklist so that the lock can be eliminated
2862 record_for_igvn(lock);
2863
2864 #ifndef PRODUCT
2865 if (PrintLockStatistics) {
2866 // Update the counter for this lock. Don't bother using an atomic
2867 // operation since we don't require absolute accuracy.
2868 lock->create_lock_counter(map()->jvms());
2869 increment_counter(lock->counter()->addr());
2870 }
2871 #endif
2872
// Hand the fast-lock node back to the caller.
2873 return flock;
2874 }
2875
2876
2877 //------------------------------shared_unlock----------------------------------
2878 // Emit unlocking code.
// NOTE(review): SECOND copy of shared_unlock in this chunk, truncated at the
// end of the visible source. Differs from the first copy only at line 2892
// (Op_MemBarCPUOrder here vs Op_MemBarRelease there) — confirm which is current.
2879 void GraphKit::shared_unlock(Node* box, Node* obj) {
2880 // bci is either a monitorenter bc or InvocationEntryBci
2881 // %%% SynchronizationEntryBCI is redundant; use InvocationEntryBci in interfaces
2882 assert(SynchronizationEntryBCI == InvocationEntryBci, "");
2883
// Compile-time switch: synchronization emission can be disabled entirely.
2884 if( !GenerateSynchronizationCode )
2885 return;
2886 if (stopped()) { // Dead monitor?
2887 map()->pop_monitor(); // Kill monitor from debug info
2888 return;
2889 }
2890
2891 // Memory barrier to avoid floating things down past the locked region
// (CPU-order barrier: constrains compiler scheduling; weaker than release).
2892 insert_mem_bar(Op_MemBarCPUOrder);
2893
// Build the UnlockNode (monitor-exit), wired like a runtime call to
// complete_monitor_exit: fixed TypeFunc slots, then obj and its box slot.
// Only raw memory is an input; the unlock touches no Java-visible state.
2894 const TypeFunc *tf = OptoRuntime::complete_monitor_exit_Type();
2895 UnlockNode *unlock = new (C, tf->domain()->cnt()) UnlockNode(C, tf);
2896 uint raw_idx = Compile::AliasIdxRaw;
2897 unlock->init_req( TypeFunc::Control, control() );
2898 unlock->init_req( TypeFunc::Memory , memory(raw_idx) );
2899 unlock->init_req( TypeFunc::I_O , top() ) ; // does no i/o
2900 unlock->init_req( TypeFunc::FramePtr, frameptr() );
2901 unlock->init_req( TypeFunc::ReturnAdr, top() );
2902
2903 unlock->init_req(TypeFunc::Parms + 0, obj);
2904 unlock->init_req(TypeFunc::Parms + 1, box);
2905 unlock = _gvn.transform(unlock)->as_Unlock();
2906
// Capture the whole merged memory state so the unlock's projections can be
// installed as the new memory.
2907 Node* mem = reset_memory();
2908
2909 // unlock has no side-effects, sets few values
2910 set_predefined_output_for_runtime_call(unlock, mem, TypeRawPtr::BOTTOM);
2911
2912 // Kill monitor from debug info
|