< prev index next >

src/hotspot/share/runtime/serviceThread.cpp

Print this page
rev 56044 : imported patch 8230184.patch
rev 56046 : v2.00 -> v2.05 (CR5/v2.05/8-for-jdk13) patches combined into one; merge with 8229212.patch; merge with jdk-14+11; merge with 8230184.patch.
rev 56049 : Merge the remainder of the lock-free monitor list changes from v2.06 with v2.06a and v2.06b after running the changes through the edit scripts; merge pieces from dcubed.monitor_deflate_conc.v2.06d in dcubed.monitor_deflate_conc.v2.06[ac]; merge pieces from dcubed.monitor_deflate_conc.v2.06e into dcubed.monitor_deflate_conc.v2.06c; merge with jdk-14+11; the test work-around for test/jdk/tools/jlink/multireleasejar/JLinkMultiReleaseJarTest.java should no longer be needed.


  92 void ServiceThread::service_thread_entry(JavaThread* jt, TRAPS) {
  93   OopStorage* const oopstorages[] = {
  94     JNIHandles::global_handles(),
  95     JNIHandles::weak_global_handles(),
  96     StringTable::weak_storage(),
  97     SystemDictionary::vm_global_oop_storage(),
  98     SystemDictionary::vm_weak_oop_storage()
  99   };
 100   const size_t oopstorage_count = ARRAY_SIZE(oopstorages);
 101 
 102   while (true) {
 103     bool sensors_changed = false;
 104     bool has_jvmti_events = false;
 105     bool has_gc_notification_event = false;
 106     bool has_dcmd_notification_event = false;
 107     bool stringtable_work = false;
 108     bool symboltable_work = false;
 109     bool resolved_method_table_work = false;
 110     bool protection_domain_table_work = false;
 111     bool oopstorage_work = false;

 112     JvmtiDeferredEvent jvmti_event;
 113     {
 114       // Need state transition ThreadBlockInVM so that this thread
 115       // will be handled by safepoint correctly when this thread is
 116       // notified at a safepoint.
 117 
 118       // This ThreadBlockInVM object is not also considered to be
 119       // suspend-equivalent because ServiceThread is not visible to
 120       // external suspension.
 121 
 122       ThreadBlockInVM tbivm(jt);
 123 
 124       MonitorLocker ml(Service_lock, Mutex::_no_safepoint_check_flag);
 125       // Process all available work on each (outer) iteration, rather than
 126       // only the first recognized bit of work, to avoid frequently true early
 127       // tests from potentially starving later work.  Hence the use of
 128       // arithmetic-or to combine results; we don't want short-circuiting.
 129       while (((sensors_changed = LowMemoryDetector::has_pending_requests()) |
 130               (has_jvmti_events = JvmtiDeferredEventQueue::has_events()) |
 131               (has_gc_notification_event = GCNotifier::has_event()) |
 132               (has_dcmd_notification_event = DCmdFactory::has_pending_jmx_notification()) |
 133               (stringtable_work = StringTable::has_work()) |
 134               (symboltable_work = SymbolTable::has_work()) |
 135               (resolved_method_table_work = ResolvedMethodTable::has_work()) |
 136               (protection_domain_table_work = SystemDictionary::pd_cache_table()->has_work()) |
 137               (oopstorage_work = OopStorage::has_cleanup_work_and_reset())

 138              ) == 0) {
 139         // Wait until notified that there is some work to do.
 140         ml.wait();



 141       }
 142 
 143       if (has_jvmti_events) {
 144         jvmti_event = JvmtiDeferredEventQueue::dequeue();
 145       }
 146     }
 147 
 148     if (stringtable_work) {
 149       StringTable::do_concurrent_work(jt);
 150     }
 151 
 152     if (symboltable_work) {
 153       SymbolTable::do_concurrent_work(jt);
 154     }
 155 
 156     if (has_jvmti_events) {
 157       jvmti_event.post();
 158     }
 159 
 160     if (sensors_changed) {


 162     }
 163 
 164     if(has_gc_notification_event) {
 165       GCNotifier::sendNotification(CHECK);
 166     }
 167 
 168     if(has_dcmd_notification_event) {
 169       DCmdFactory::send_notification(CHECK);
 170     }
 171 
 172     if (resolved_method_table_work) {
 173       ResolvedMethodTable::do_concurrent_work(jt);
 174     }
 175 
 176     if (protection_domain_table_work) {
 177       SystemDictionary::pd_cache_table()->unlink();
 178     }
 179 
 180     if (oopstorage_work) {
 181       cleanup_oopstorages(oopstorages, oopstorage_count);





















 182     }
 183   }
 184 }
 185 
 186 bool ServiceThread::is_service_thread(Thread* thread) {
 187   return thread == _instance;
 188 }


  92 void ServiceThread::service_thread_entry(JavaThread* jt, TRAPS) {
  93   OopStorage* const oopstorages[] = {
  94     JNIHandles::global_handles(),
  95     JNIHandles::weak_global_handles(),
  96     StringTable::weak_storage(),
  97     SystemDictionary::vm_global_oop_storage(),
  98     SystemDictionary::vm_weak_oop_storage()
  99   };
 100   const size_t oopstorage_count = ARRAY_SIZE(oopstorages);
 101 
 102   while (true) {
 103     bool sensors_changed = false;
 104     bool has_jvmti_events = false;
 105     bool has_gc_notification_event = false;
 106     bool has_dcmd_notification_event = false;
 107     bool stringtable_work = false;
 108     bool symboltable_work = false;
 109     bool resolved_method_table_work = false;
 110     bool protection_domain_table_work = false;
 111     bool oopstorage_work = false;
 112     bool deflate_idle_monitors = false;
 113     JvmtiDeferredEvent jvmti_event;
 114     {
 115       // Need state transition ThreadBlockInVM so that this thread
 116       // will be handled by safepoint correctly when this thread is
 117       // notified at a safepoint.
 118 
 119       // This ThreadBlockInVM object is not also considered to be
 120       // suspend-equivalent because ServiceThread is not visible to
 121       // external suspension.
 122 
 123       ThreadBlockInVM tbivm(jt);
 124 
 125       MonitorLocker ml(Service_lock, Mutex::_no_safepoint_check_flag);
 126       // Process all available work on each (outer) iteration, rather than
 127       // only the first recognized bit of work, to avoid frequently true early
 128       // tests from potentially starving later work.  Hence the use of
 129       // arithmetic-or to combine results; we don't want short-circuiting.
 130       while (((sensors_changed = LowMemoryDetector::has_pending_requests()) |
 131               (has_jvmti_events = JvmtiDeferredEventQueue::has_events()) |
 132               (has_gc_notification_event = GCNotifier::has_event()) |
 133               (has_dcmd_notification_event = DCmdFactory::has_pending_jmx_notification()) |
 134               (stringtable_work = StringTable::has_work()) |
 135               (symboltable_work = SymbolTable::has_work()) |
 136               (resolved_method_table_work = ResolvedMethodTable::has_work()) |
 137               (protection_domain_table_work = SystemDictionary::pd_cache_table()->has_work()) |
 138               (oopstorage_work = OopStorage::has_cleanup_work_and_reset()) |
 139               (deflate_idle_monitors = ObjectSynchronizer::is_async_deflation_needed())
 140              ) == 0) {
 141         // Wait until notified that there is some work to do.
 142         // If AsyncDeflateIdleMonitors, then we wait for
 143         // GuaranteedSafepointInterval so that is_async_deflation_needed()
 144         // is checked at the same interval.
 145         ml.wait(AsyncDeflateIdleMonitors ? GuaranteedSafepointInterval : 0);
 146       }
 147 
 148       if (has_jvmti_events) {
 149         jvmti_event = JvmtiDeferredEventQueue::dequeue();
 150       }
 151     }
 152 
 153     if (stringtable_work) {
 154       StringTable::do_concurrent_work(jt);
 155     }
 156 
 157     if (symboltable_work) {
 158       SymbolTable::do_concurrent_work(jt);
 159     }
 160 
 161     if (has_jvmti_events) {
 162       jvmti_event.post();
 163     }
 164 
 165     if (sensors_changed) {


 167     }
 168 
 169     if(has_gc_notification_event) {
 170       GCNotifier::sendNotification(CHECK);
 171     }
 172 
 173     if(has_dcmd_notification_event) {
 174       DCmdFactory::send_notification(CHECK);
 175     }
 176 
 177     if (resolved_method_table_work) {
 178       ResolvedMethodTable::do_concurrent_work(jt);
 179     }
 180 
 181     if (protection_domain_table_work) {
 182       SystemDictionary::pd_cache_table()->unlink();
 183     }
 184 
 185     if (oopstorage_work) {
 186       cleanup_oopstorages(oopstorages, oopstorage_count);
 187     }
 188 
 189     if (deflate_idle_monitors) {
 190       // Deflate any global idle monitors.
 191       ObjectSynchronizer::deflate_global_idle_monitors_using_JT();
 192 
 193       int count = 0;
 194       for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
 195         if (jt->om_in_use_count > 0 && !jt->is_exiting()) {
 196           // This JavaThread is using ObjectMonitors so deflate any that
 197           // are idle unless this JavaThread is exiting; do not race with
 198           // ObjectSynchronizer::om_flush().
 199           ObjectSynchronizer::deflate_per_thread_idle_monitors_using_JT(jt);
 200           count++;
 201         }
 202       }
 203       if (count > 0) {
 204         log_debug(monitorinflation)("did async deflation of idle monitors for %d thread(s).", count);
 205       }
 206       // The ServiceThread's async deflation request has been processed.
 207       ObjectSynchronizer::set_is_async_deflation_requested(false);
 208     }
 209   }
 210 }
 211 
 212 bool ServiceThread::is_service_thread(Thread* thread) {
 213   return thread == _instance;
 214 }
< prev index next >