1 /* 2 * Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #ifndef SHARE_VM_GC_PARALLEL_GCTASKMANAGER_HPP 26 #define SHARE_VM_GC_PARALLEL_GCTASKMANAGER_HPP 27 28 #include "runtime/mutex.hpp" 29 #include "utilities/growableArray.hpp" 30 31 // 32 // The GCTaskManager is a queue of GCTasks, and accessors 33 // to allow the queue to be accessed from many threads. 34 // 35 36 // Forward declarations of types defined in this file. 37 class GCTask; 38 class GCTaskQueue; 39 class SynchronizedGCTaskQueue; 40 class GCTaskManager; 41 // Some useful subclasses of GCTask. You can also make up your own. 42 class NoopGCTask; 43 class WaitForBarrierGCTask; 44 class IdleGCTask; 45 // A free list of Monitor*'s. 46 class MonitorSupply; 47 48 // Forward declarations of classes referenced in this file via pointer. 49 class GCTaskThread; 50 class Mutex; 51 class Monitor; 52 class ThreadClosure; 53 54 // The abstract base GCTask. 
class GCTask : public ResourceObj {
public:
  // Known kinds of GCTasks, for predicates.
  class Kind : AllStatic {
  public:
    enum kind {
      unknown_task,
      ordinary_task,
      wait_for_barrier_task,
      noop_task,
      idle_task
    };
    // Human-readable name of a kind, for logging/printing.
    static const char* to_string(kind value);
  };
private:
  // Instance state.
  Kind::kind _kind;               // For runtime type checking.
  uint       _affinity;           // Which worker should run task.
  GCTask*    _newer;              // Tasks are on doubly-linked ...
  GCTask*    _older;              // ... lists.
  uint       _gc_id;              // GC Id to use for the thread that executes this task
public:
  virtual char* name() { return (char *)"task"; }

  uint gc_id() { return _gc_id; }

  // Abstract do_it method: perform this task's work on behalf of
  // worker "which" under the given manager.
  virtual void do_it(GCTaskManager* manager, uint which) = 0;
  // Accessors.
  Kind::kind kind() const {
    return _kind;
  }
  uint affinity() const {
    return _affinity;
  }
  // Doubly-linked list maintenance (used by GCTaskQueue).
  GCTask* newer() const {
    return _newer;
  }
  void set_newer(GCTask* n) {
    _newer = n;
  }
  GCTask* older() const {
    return _older;
  }
  void set_older(GCTask* p) {
    _older = p;
  }
  // Predicates over the runtime kind tag.
  bool is_ordinary_task() const {
    return kind()==Kind::ordinary_task;
  }
  bool is_barrier_task() const {
    return kind()==Kind::wait_for_barrier_task;
  }
  bool is_noop_task() const {
    return kind()==Kind::noop_task;
  }
  bool is_idle_task() const {
    return kind()==Kind::idle_task;
  }
  // Debug printing; compiled out in product builds.
  void print(const char* message) const PRODUCT_RETURN;
protected:
  // Constructors: Only create subclasses.
  //     An ordinary GCTask.
  GCTask();
  //     A GCTask of a particular kind, usually barrier or noop.
  GCTask(Kind::kind kind);
  GCTask(Kind::kind kind, uint gc_id);
  // We want a virtual destructor because virtual methods,
  // but since ResourceObj's don't have their destructors
  // called, we don't have one at all.  Instead we have
  // this method, which gets called by subclasses to clean up.
  virtual void destruct();
  // Methods.
  void initialize(Kind::kind kind, uint gc_id);
};

// A doubly-linked list of GCTasks.
// The list is not synchronized, because sometimes we want to
// build up a list and then make it available to other threads.
// See also: SynchronizedGCTaskQueue.
class GCTaskQueue : public ResourceObj {
private:
  // Instance state.
  GCTask*    _insert_end;         // Tasks are enqueued at this end.
  GCTask*    _remove_end;         // Tasks are dequeued from this end.
  uint       _length;             // The current length of the queue.
  const bool _is_c_heap_obj;      // Is this a CHeapObj?
public:
  // Factory create and destroy methods.
  //     Create as ResourceObj.
  static GCTaskQueue* create();
  //     Create as CHeapObj.
  static GCTaskQueue* create_on_c_heap();
  //     Destroyer.
  static void destroy(GCTaskQueue* that);
  // Accessors.
  //     These just examine the state of the queue.
  bool is_empty() const {
    // Invariant: both ends are NULL (empty) or both are non-NULL.
    assert(((insert_end() == NULL && remove_end() == NULL) ||
            (insert_end() != NULL && remove_end() != NULL)),
           "insert_end and remove_end don't match");
    assert((insert_end() != NULL) || (_length == 0), "Not empty");
    return insert_end() == NULL;
  }
  uint length() const {
    return _length;
  }
  // Methods.
  //     Enqueue one task.
  void enqueue(GCTask* task);
  //     Enqueue a list of tasks.  Empties the argument list.
  void enqueue(GCTaskQueue* list);
  //     Dequeue one task.
  GCTask* dequeue();
  //     Dequeue one task, preferring one with affinity.
  GCTask* dequeue(uint affinity);
protected:
  // Constructor. Clients use factory, but there might be subclasses.
  GCTaskQueue(bool on_c_heap);
  // Destructor-like method.
  // Because ResourceMark doesn't call destructors.
  // This method cleans up like one.
  virtual void destruct();
  // Accessors.
  GCTask* insert_end() const {
    return _insert_end;
  }
  void set_insert_end(GCTask* value) {
    _insert_end = value;
  }
  GCTask* remove_end() const {
    return _remove_end;
  }
  void set_remove_end(GCTask* value) {
    _remove_end = value;
  }
  void increment_length() {
    _length += 1;
  }
  void decrement_length() {
    _length -= 1;
  }
  void set_length(uint value) {
    _length = value;
  }
  bool is_c_heap_obj() const {
    return _is_c_heap_obj;
  }
  // Methods.
  void initialize();
  GCTask* remove();                     // Remove from remove end.
  GCTask* remove(GCTask* task);         // Remove from the middle.
  // Debug printing; compiled out in product builds.
  void print(const char* message) const PRODUCT_RETURN;
  // Debug support: check that _length matches the list's actual length.
  void verify_length() const PRODUCT_RETURN;
};

// A GCTaskQueue that can be synchronized.
// This "has-a" GCTaskQueue and a mutex to do the exclusion.
class SynchronizedGCTaskQueue : public CHeapObj<mtGC> {
private:
  // Instance state.
  GCTaskQueue* _unsynchronized_queue;   // Has-a unsynchronized queue.
  Monitor *    _lock;                   // Lock to control access.
public:
  // Factory create and destroy methods.
  static SynchronizedGCTaskQueue* create(GCTaskQueue* queue, Monitor * lock) {
    return new SynchronizedGCTaskQueue(queue, lock);
  }
  static void destroy(SynchronizedGCTaskQueue* that) {
    if (that != NULL) {
      delete that;
    }
  }
  // Accessors
  GCTaskQueue* unsynchronized_queue() const {
    return _unsynchronized_queue;
  }
  Monitor * lock() const {
    return _lock;
  }
  // GCTaskQueue wrapper methods.
  // These check that you hold the lock
  // and then call the method on the queue.
  bool is_empty() const {
    guarantee(own_lock(), "don't own the lock");
    return unsynchronized_queue()->is_empty();
  }
  void enqueue(GCTask* task) {
    guarantee(own_lock(), "don't own the lock");
    unsynchronized_queue()->enqueue(task);
  }
  void enqueue(GCTaskQueue* list) {
    guarantee(own_lock(), "don't own the lock");
    unsynchronized_queue()->enqueue(list);
  }
  GCTask* dequeue() {
    guarantee(own_lock(), "don't own the lock");
    return unsynchronized_queue()->dequeue();
  }
  GCTask* dequeue(uint affinity) {
    guarantee(own_lock(), "don't own the lock");
    return unsynchronized_queue()->dequeue(affinity);
  }
  uint length() const {
    guarantee(own_lock(), "don't own the lock");
    return unsynchronized_queue()->length();
  }
  // For guarantees.
  bool own_lock() const {
    return lock()->owned_by_self();
  }
protected:
  // Constructor.  Clients use factory, but there might be subclasses.
  SynchronizedGCTaskQueue(GCTaskQueue* queue, Monitor * lock);
  // Destructor.  Not virtual because no virtuals.
  ~SynchronizedGCTaskQueue();
};

// Helper that lets a thread block on a monitor until notified.
// Used to park inactive GC workers and to wait for barrier tasks.
class WaitHelper VALUE_OBJ_CLASS_SPEC {
private:
  Monitor*      _monitor;
  volatile bool _should_wait;
public:
  WaitHelper();
  ~WaitHelper();
  // Block while should_wait() is true; optionally reset the flag on wakeup.
  void wait_for(bool reset);
  // Wake up the waiter.
  void notify();
  void set_should_wait(bool value) {
    _should_wait = value;
  }

  Monitor* monitor() const {
    return _monitor;
  }
  bool should_wait() const {
    return _should_wait;
  }
  void release_monitor();
};

// Dynamic number of GC threads
//
//  GC threads wait in get_task() for work (i.e., a task) to perform.
// When the number of GC threads was static, the number of tasks
// created to do a job was equal to or greater than the maximum
// number of GC threads (ParallelGCThreads).  The job might be divided
// into a number of tasks greater than the number of GC threads for
// load balancing (i.e., over partitioning).  The last task to be
// executed by a GC thread in a job is a work stealing task.  A
// GC  thread that gets a work stealing task continues to execute
// that task until the job is done.  In the static number of GC threads
// case, tasks are added to a queue (FIFO).  The work stealing tasks are
// the last to be added.  Once the tasks are added, the GC threads grab
// a task and go.  A single thread can do all the non-work stealing tasks
// and then execute a work stealing and wait for all the other GC threads
// to execute their work stealing task.
//  In the dynamic number of GC threads implementation, idle-tasks are
// created to occupy the non-participating or "inactive" threads.  An
// idle-task makes the GC thread wait on a barrier that is part of the
// GCTaskManager.  The GC threads that have been "idled" in a IdleGCTask
// are released once all the active GC threads have finished their work
// stealing tasks.  The GCTaskManager does not wait for all the "idled"
// GC threads to resume execution.  When those GC threads do resume
// execution in the course of the thread scheduling, they call get_task()
// as all the other GC threads do.  Because all the "idled" threads are
// not required to execute in order to finish a job, it is possible for
// a GC thread to still be "idled" when the next job is started.  Such
// a thread stays "idled" for the next job.  This can result in a new
// job not having all the expected active workers.  For example if one
// job requests 4 active workers out of a total of 10 workers so the
// remaining 6 are "idled", if the next job requests 6 active workers
// but all 6 of the "idled" workers are still idle, then the next job
// will only get 4 active workers.
//  The implementation for the parallel old compaction phase has an
// added complication.  In the static case parold partitions the chunks
// ready to be filled into stacks, one for each GC thread.  A GC thread
// executing a draining task (drains the stack of ready chunks)
// claims a stack according to its id (the unique ordinal value assigned
// to each GC thread).  In the dynamic case not all GC threads will
// actively participate so stacks with ready to fill chunks can only be
// given to the active threads.  An initial implementation chose stacks
// number 1-n to get the ready chunks and required that GC threads
// 1-n be the active workers.  This was undesirable because it required
// certain threads to participate.  In the final implementation a
// list of stacks equal in number to the active workers are filled
// with ready chunks.  GC threads that participate get a stack from
// the task (DrainStacksCompactionTask), empty the stack, and then add it to a
// recycling list at the end of the task.  If the same GC thread gets
// a second task, it gets a second stack to drain and returns it.  The
// stacks are added to a recycling list so that later stealing tasks
// for these tasks can get a stack from the recycling list.  Stealing tasks
// use the stacks in its work in a way similar to the draining tasks.
// A thread is not guaranteed to get anything but a stealing task and
// a thread that only gets a stealing task has to get a stack.  A failed
// implementation tried to have the GC threads keep the stack they used
// during a draining task for later use in the stealing task but that didn't
// work because as noted a thread is not guaranteed to get a draining task.
//
// For PSScavenge and ParCompactionManager the GC threads are
// held in the GCTaskThread** _thread array in GCTaskManager.
357 358 359 class GCTaskManager : public CHeapObj<mtGC> { 360 friend class ParCompactionManager; 361 friend class PSParallelCompact; 362 friend class PSScavenge; 363 friend class PSRefProcTaskExecutor; 364 friend class RefProcTaskExecutor; 365 friend class GCTaskThread; 366 friend class IdleGCTask; 367 private: 368 // Instance state. 369 const uint _workers; // Number of workers. 370 Monitor* _monitor; // Notification of changes. 371 SynchronizedGCTaskQueue* _queue; // Queue of tasks. 372 GCTaskThread** _thread; // Array of worker threads. 373 uint _created_workers; // Number of workers created. 374 uint _active_workers; // Number of active workers. 375 uint _busy_workers; // Number of busy workers. 376 uint _blocking_worker; // The worker that's blocking. 377 bool* _resource_flag; // Array of flag per threads. 378 uint _delivered_tasks; // Count of delivered tasks. 379 uint _completed_tasks; // Count of completed tasks. 380 uint _barriers; // Count of barrier tasks. 381 uint _emptied_queue; // Times we emptied the queue. 382 NoopGCTask* _noop_task; // The NoopGCTask instance. 383 WaitHelper _wait_helper; // Used by inactive worker 384 volatile uint _idle_workers; // Number of idled workers 385 uint* _processor_assignment; // Worker to cpu mappings. May 386 // be used lazily 387 public: 388 // Factory create and destroy methods. 389 static GCTaskManager* create(uint workers) { 390 return new GCTaskManager(workers); 391 } 392 static void destroy(GCTaskManager* that) { 393 if (that != NULL) { 394 delete that; 395 } 396 } 397 // Accessors. 398 uint busy_workers() const { 399 return _busy_workers; 400 } 401 volatile uint idle_workers() const { 402 return _idle_workers; 403 } 404 // Pun between Monitor* and Mutex* 405 Monitor* monitor() const { 406 return _monitor; 407 } 408 Monitor * lock() const { 409 return _monitor; 410 } 411 WaitHelper* wait_helper() { 412 return &_wait_helper; 413 } 414 // Methods. 415 // Add the argument task to be run. 
416 void add_task(GCTask* task); 417 // Add a list of tasks. Removes task from the argument list. 418 void add_list(GCTaskQueue* list); 419 // Claim a task for argument worker. 420 GCTask* get_task(uint which); 421 // Note the completion of a task by the argument worker. 422 void note_completion(uint which); 423 // Is the queue blocked from handing out new tasks? 424 bool is_blocked() const { 425 return (blocking_worker() != sentinel_worker()); 426 } 427 // Request that all workers release their resources. 428 void release_all_resources(); 429 // Ask if a particular worker should release its resources. 430 bool should_release_resources(uint which); // Predicate. 431 // Note the release of resources by the argument worker. 432 void note_release(uint which); 433 // Create IdleGCTasks for inactive workers and start workers 434 void task_idle_workers(); 435 // Release the workers in IdleGCTasks 436 void release_idle_workers(); 437 // Constants. 438 // A sentinel worker identifier. 439 static uint sentinel_worker() { 440 return (uint) -1; // Why isn't there a max_uint? 441 } 442 443 // Execute the task queue and wait for the completion. 444 void execute_and_wait(GCTaskQueue* list); 445 446 void print_task_time_stamps(); 447 void print_threads_on(outputStream* st); 448 void threads_do(ThreadClosure* tc); 449 450 protected: 451 // Constructors. Clients use factory, but there might be subclasses. 452 // Create a GCTaskManager with the appropriate number of workers. 453 GCTaskManager(uint workers); 454 // Make virtual if necessary. 455 ~GCTaskManager(); 456 // Accessors. 
457 uint workers() const { 458 return _workers; 459 } 460 void set_active_workers(uint v) { 461 assert(v <= _workers, "Trying to set more workers active than there are"); 462 _active_workers = MIN2(v, _workers); 463 assert(v != 0, "Trying to set active workers to 0"); 464 _active_workers = MAX2(1U, _active_workers); 465 } 466 // Sets the number of threads that will be used in a collection 467 void set_active_gang(); 468 469 SynchronizedGCTaskQueue* queue() const { 470 return _queue; 471 } 472 NoopGCTask* noop_task() const { 473 return _noop_task; 474 } 475 // Bounds-checking per-thread data accessors. 476 GCTaskThread* thread(uint which); 477 void set_thread(uint which, GCTaskThread* value); 478 bool resource_flag(uint which); 479 void set_resource_flag(uint which, bool value); 480 // Modifier methods with some semantics. 481 // Is any worker blocking handing out new tasks? 482 uint blocking_worker() const { 483 return _blocking_worker; 484 } 485 void set_blocking_worker(uint value) { 486 _blocking_worker = value; 487 } 488 void set_unblocked() { 489 set_blocking_worker(sentinel_worker()); 490 } 491 // Count of busy workers. 492 void reset_busy_workers() { 493 _busy_workers = 0; 494 } 495 uint increment_busy_workers(); 496 uint decrement_busy_workers(); 497 // Count of tasks delivered to workers. 498 uint delivered_tasks() const { 499 return _delivered_tasks; 500 } 501 void increment_delivered_tasks() { 502 _delivered_tasks += 1; 503 } 504 void reset_delivered_tasks() { 505 _delivered_tasks = 0; 506 } 507 // Count of tasks completed by workers. 508 uint completed_tasks() const { 509 return _completed_tasks; 510 } 511 void increment_completed_tasks() { 512 _completed_tasks += 1; 513 } 514 void reset_completed_tasks() { 515 _completed_tasks = 0; 516 } 517 // Count of barrier tasks completed. 
518 uint barriers() const { 519 return _barriers; 520 } 521 void increment_barriers() { 522 _barriers += 1; 523 } 524 void reset_barriers() { 525 _barriers = 0; 526 } 527 // Count of how many times the queue has emptied. 528 uint emptied_queue() const { 529 return _emptied_queue; 530 } 531 void increment_emptied_queue() { 532 _emptied_queue += 1; 533 } 534 void reset_emptied_queue() { 535 _emptied_queue = 0; 536 } 537 void increment_idle_workers() { 538 _idle_workers++; 539 } 540 void decrement_idle_workers() { 541 _idle_workers--; 542 } 543 // Other methods. 544 void initialize(); 545 546 public: 547 // Return true if all workers are currently active. 548 bool all_workers_active() { return workers() == active_workers(); } 549 uint active_workers() const { 550 return _active_workers; 551 } 552 uint created_workers() const { 553 return _created_workers; 554 } 555 // Create a GC worker and install into GCTaskManager 556 GCTaskThread* install_worker(uint worker_id); 557 // Add GC workers as needed. 558 void add_workers(bool initializing); 559 }; 560 561 // 562 // Some exemplary GCTasks. 563 // 564 565 // A noop task that does nothing, 566 // except take us around the GCTaskThread loop. 567 class NoopGCTask : public GCTask { 568 public: 569 // Factory create and destroy methods. 570 static NoopGCTask* create_on_c_heap(); 571 static void destroy(NoopGCTask* that); 572 573 virtual char* name() { return (char *)"noop task"; } 574 // Methods from GCTask. 575 void do_it(GCTaskManager* manager, uint which) { 576 // Nothing to do. 577 } 578 protected: 579 // Constructor. 580 NoopGCTask(); 581 // Destructor-like method. 582 void destruct(); 583 }; 584 585 // A WaitForBarrierGCTask is a GCTask 586 // with a method you can call to wait until 587 // the BarrierGCTask is done. 588 class WaitForBarrierGCTask : public GCTask { 589 friend class GCTaskManager; 590 friend class IdleGCTask; 591 private: 592 // Instance state. 
593 WaitHelper _wait_helper; 594 WaitForBarrierGCTask(); 595 public: 596 virtual char* name() { return (char *) "waitfor-barrier-task"; } 597 598 // Factory create and destroy methods. 599 static WaitForBarrierGCTask* create(); 600 static void destroy(WaitForBarrierGCTask* that); 601 // Methods. 602 void do_it(GCTaskManager* manager, uint which); 603 protected: 604 // Destructor-like method. 605 void destruct(); 606 607 // Methods. 608 // Wait for this to be the only task running. 609 void do_it_internal(GCTaskManager* manager, uint which); 610 611 void wait_for(bool reset) { 612 _wait_helper.wait_for(reset); 613 } 614 }; 615 616 // Task that is used to idle a GC task when fewer than 617 // the maximum workers are wanted. 618 class IdleGCTask : public GCTask { 619 const bool _is_c_heap_obj; // Was allocated on the heap. 620 public: 621 bool is_c_heap_obj() { 622 return _is_c_heap_obj; 623 } 624 // Factory create and destroy methods. 625 static IdleGCTask* create(); 626 static IdleGCTask* create_on_c_heap(); 627 static void destroy(IdleGCTask* that); 628 629 virtual char* name() { return (char *)"idle task"; } 630 // Methods from GCTask. 631 virtual void do_it(GCTaskManager* manager, uint which); 632 protected: 633 // Constructor. 634 IdleGCTask(bool on_c_heap) : 635 GCTask(GCTask::Kind::idle_task), 636 _is_c_heap_obj(on_c_heap) { 637 // Nothing to do. 638 } 639 // Destructor-like method. 640 void destruct(); 641 }; 642 643 class MonitorSupply : public AllStatic { 644 private: 645 // State. 646 // Control multi-threaded access. 647 static Mutex* _lock; 648 // The list of available Monitor*'s. 649 static GrowableArray<Monitor*>* _freelist; 650 public: 651 // Reserve a Monitor*. 652 static Monitor* reserve(); 653 // Release a Monitor*. 654 static void release(Monitor* instance); 655 private: 656 // Accessors. 
657 static Mutex* lock() { 658 return _lock; 659 } 660 static GrowableArray<Monitor*>* freelist() { 661 return _freelist; 662 } 663 }; 664 665 #endif // SHARE_VM_GC_PARALLEL_GCTASKMANAGER_HPP