/*
 * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc/shared/oopStorage.hpp"
#include "gc/z/zAddress.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zHeapIterator.hpp"
#include "gc/z/zList.inline.hpp"
#include "gc/z/zLock.inline.hpp"
#include "gc/z/zMark.inline.hpp"
#include "gc/z/zNMethodTable.hpp"
#include "gc/z/zOopClosures.inline.hpp"
#include "gc/z/zPage.inline.hpp"
#include "gc/z/zPageTable.inline.hpp"
#include "gc/z/zRelocationSet.inline.hpp"
#include "gc/z/zRelocationSetSelector.hpp"
#include "gc/z/zResurrection.hpp"
#include "gc/z/zRootsIterator.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zTask.hpp"
#include "gc/z/zThread.hpp"
#include "gc/z/zTracer.inline.hpp"
#include "gc/z/zVirtualMemory.inline.hpp"
#include "gc/z/zWorkers.inline.hpp"
#include "logging/log.hpp"
#include "memory/metaspace.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handshake.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"

static const ZStatSampler ZSamplerHeapUsedBeforeMark("Memory", "Heap Used Before Mark", ZStatUnitBytes);
static const ZStatSampler ZSamplerHeapUsedAfterMark("Memory", "Heap Used After Mark", ZStatUnitBytes);
static const ZStatSampler ZSamplerHeapUsedBeforeRelocation("Memory", "Heap Used Before Relocation", ZStatUnitBytes);
static const ZStatSampler ZSamplerHeapUsedAfterRelocation("Memory", "Heap Used After Relocation", ZStatUnitBytes);
static const ZStatCounter ZCounterUndoPageAllocation("Memory", "Undo Page Allocation", ZStatUnitOpsPerSecond);
static const ZStatCounter ZCounterOutOfMemory("Memory", "Out Of Memory", ZStatUnitOpsPerSecond);

ZHeap* ZHeap::_heap = NULL;

ZHeap::ZHeap() :
    _workers(),
    _object_allocator(_workers.nworkers()),
    _page_allocator(heap_min_size(), heap_max_size(), heap_max_reserve_size()),
    _pagetable(),
    _mark(&_workers, &_pagetable),
    _reference_processor(&_workers),
    _weak_roots_processor(&_workers),
    _relocate(&_workers),
    _relocation_set(),
    _serviceability(heap_min_size(), heap_max_size()) {
  // Install global heap instance
  assert(_heap == NULL, "Already initialized");
  _heap = this;

  // Update statistics
  ZStatHeap::set_at_initialize(heap_max_size(), heap_max_reserve_size());
}

size_t ZHeap::heap_min_size() const {
  const size_t aligned_min_size = align_up(InitialHeapSize, ZPageSizeMin);
  return MIN2(aligned_min_size, heap_max_size());
}

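// The max heap size is capped at the largest heap offset the ZGC address
// layout can encode (ZAddressOffsetMax).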
size_t ZHeap::heap_max_size() const {
  const size_t aligned_max_size = align_up(MaxHeapSize, ZPageSizeMin);
  return MIN2(aligned_max_size, ZAddressOffsetMax);
}

size_t ZHeap::heap_max_reserve_size() const {
  // Reserve one small page per worker plus one shared medium page. This is still just
  // an estimate and doesn't guarantee that we can't run out of memory during relocation.
  const size_t max_reserve_size = (_workers.nworkers() * ZPageSizeSmall) + ZPageSizeMedium;
  return MIN2(max_reserve_size, heap_max_size());
}

bool ZHeap::is_initialized() const {
  return _page_allocator.is_initialized() && _mark.is_initialized();
}

size_t ZHeap::min_capacity() const {
  return heap_min_size();
}

size_t ZHeap::max_capacity() const {
  return _page_allocator.max_capacity();
}

size_t ZHeap::current_max_capacity() const {
  return _page_allocator.current_max_capacity();
}

size_t ZHeap::capacity() const {
  return _page_allocator.capacity();
}

size_t ZHeap::max_reserve() const {
  return _page_allocator.max_reserve();
}

size_t ZHeap::used_high() const {
  return _page_allocator.used_high();
}

size_t ZHeap::used_low() const {
  return _page_allocator.used_low();
}

size_t ZHeap::used() const {
  return _page_allocator.used();
}

size_t ZHeap::allocated() const {
  return _page_allocator.allocated();
}

size_t ZHeap::reclaimed() const {
  return _page_allocator.reclaimed();
}

size_t ZHeap::tlab_capacity() const {
  return capacity();
}

size_t ZHeap::tlab_used() const {
  return _object_allocator.used();
}

size_t ZHeap::max_tlab_size() const {
  return ZObjectSizeLimitSmall;
}

size_t ZHeap::unsafe_max_tlab_alloc() const {
  size_t size = _object_allocator.remaining();

  if (size < MinTLABSize) {
    // The remaining space in the allocator is not enough to
    // fit the smallest possible TLAB. This means that the next
    // TLAB allocation will force the allocator to get a new
    // backing page anyway, which in turn means that we can then
    // fit the largest possible TLAB.
    size = max_tlab_size();
  }

  return MIN2(size, max_tlab_size());
}

bool ZHeap::is_in(uintptr_t addr) const {
  if (addr < ZAddressReservedStart() || addr >= ZAddressReservedEnd()) {
    return false;
  }

  const ZPage* const page = _pagetable.get(addr);
  if (page != NULL) {
    return page->is_in(addr);
  }

  return false;
}

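// The block queries below are answered by the page containing the given address.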
uintptr_t ZHeap::block_start(uintptr_t addr) const {
  const ZPage* const page = _pagetable.get(addr);
  return page->block_start(addr);
}

size_t ZHeap::block_size(uintptr_t addr) const {
  const ZPage* const page = _pagetable.get(addr);
  return page->block_size(addr);
}

bool ZHeap::block_is_obj(uintptr_t addr) const {
  const ZPage* const page = _pagetable.get(addr);
  return page->block_is_obj(addr);
}

uint ZHeap::nconcurrent_worker_threads() const {
  return _workers.nconcurrent();
}

uint ZHeap::nconcurrent_no_boost_worker_threads() const {
  return _workers.nconcurrent_no_boost();
}

void ZHeap::set_boost_worker_threads(bool boost) {
  _workers.set_boost(boost);
}

void ZHeap::worker_threads_do(ThreadClosure* tc) const {
  _workers.threads_do(tc);
}

void ZHeap::print_worker_threads_on(outputStream* st) const {
  _workers.print_threads_on(st);
}

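// Record an allocation failure: bump the out-of-memory counter and log the
// name of the failing thread (the ResourceMark presumably covers the
// temporary name string).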
void ZHeap::out_of_memory() {
  ResourceMark rm;

  ZStatInc(ZCounterOutOfMemory);
  log_info(gc)("Out Of Memory (%s)", Thread::current()->name());
}

ZPage* ZHeap::alloc_page(uint8_t type, size_t size, ZAllocationFlags flags) {
  ZPage* const page = _page_allocator.alloc_page(type, size, flags);
  if (page != NULL) {
    // Update pagetable
    _pagetable.insert(page);
  }

  return page;
}

void ZHeap::undo_alloc_page(ZPage* page) {
  assert(page->is_allocating(), "Invalid page state");

  ZStatInc(ZCounterUndoPageAllocation);
  log_trace(gc)("Undo page allocation, thread: " PTR_FORMAT " (%s), page: " PTR_FORMAT ", size: " SIZE_FORMAT,
                ZThread::id(), ZThread::name(), p2i(page), page->size());

  release_page(page, false /* reclaimed */);
}

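// Pages are reference counted: retain_page() keeps a page alive while it is
// being accessed, and the page is returned to the page allocator when the
// last reference is released.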
bool ZHeap::retain_page(ZPage* page) {
  return page->inc_refcount();
}

void ZHeap::release_page(ZPage* page, bool reclaimed) {
  if (page->dec_refcount()) {
    _page_allocator.free_page(page, reclaimed);
  }
}

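// Debugging aid, guarded by ZUnmapBadViews: flip the heap views of all pages
// and of pre-mapped memory when the global address view changes, so that
// accesses through an inactive view can be caught.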
void ZHeap::flip_views() {
  // For debugging only
  if (ZUnmapBadViews) {
    // Flip pages
    ZPageTableIterator iter(&_pagetable);
    for (ZPage* page; iter.next(&page);) {
      if (!page->is_detached()) {
        _page_allocator.flip_page(page);
      }
    }

    // Flip pre-mapped memory
    _page_allocator.flip_pre_mapped();
  }
}

void ZHeap::mark_start() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  // Update statistics
  ZStatSample(ZSamplerHeapUsedBeforeMark, used());

  // Retire TLABs
  _object_allocator.retire_tlabs();

  // Flip address view
  ZAddressMasks::flip_to_marked();
  flip_views();

  // Reset allocated/reclaimed/used statistics
  _page_allocator.reset_statistics();

  // Reset encountered/dropped/enqueued statistics
  _reference_processor.reset_statistics();

  // Enter mark phase
  ZGlobalPhase = ZPhaseMark;

  // Reset marking information and mark roots
  _mark.start();

  // Update statistics
  ZStatHeap::set_at_mark_start(capacity(), used());
}

void ZHeap::mark() {
  _mark.mark();
}

void ZHeap::mark_flush_and_free(Thread* thread) {
  _mark.flush_and_free(thread);
}

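// Task used by fixup_partial_loads() to rescan all thread stacks with the
// mark closure; see the comment in ZHeap::mark_end() for why this is needed.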
class ZFixupPartialLoadsTask : public ZTask {
private:
  ZThreadRootsIterator _thread_roots;

public:
  ZFixupPartialLoadsTask() :
      ZTask("ZFixupPartialLoadsTask"),
      _thread_roots() {}

  virtual void work() {
    ZMarkRootOopClosure cl;
    _thread_roots.oops_do(&cl);
  }
};

void ZHeap::fixup_partial_loads() {
  ZFixupPartialLoadsTask task;
  _workers.run_parallel(&task);
}

bool ZHeap::mark_end() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  // C2 can generate code where a safepoint poll is inserted
  // between a load and the associated load barrier. To handle
  // this case we need to rescan the thread stack here to make
  // sure such oops are marked.
  fixup_partial_loads();

  // Try end marking
  if (!_mark.end()) {
    // Marking not completed, continue concurrent mark
    return false;
  }

  // Enter mark completed phase
  ZGlobalPhase = ZPhaseMarkCompleted;

  // Update statistics
  ZStatSample(ZSamplerHeapUsedAfterMark, used());
  ZStatHeap::set_at_mark_end(capacity(), allocated(), used());

  // Block resurrection of weak/phantom references
  ZResurrection::block();

  // Process weak roots
  _weak_roots_processor.process_weak_roots();

  if (ClassUnloading) {
    CodeCache::increment_unloading_cycle();
  }

  // Verification
  if (VerifyBeforeGC || VerifyDuringGC || VerifyAfterGC) {
    Universe::verify();
  }

  return true;
}

void ZHeap::set_soft_reference_policy(bool clear) {
  _reference_processor.set_soft_reference_policy(clear);
}

class ZNoOpHandshakeClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) { }
};

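// Rendezvous with all Java threads using a no-op handshake. Used below to make
// sure unlinked metadata is no longer observable by mutators before it is purged.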
void ZHeap::mutator_rendezvous() {
  ZNoOpHandshakeClosure cl;
  Handshake::execute(&cl);
}

void ZHeap::process_non_strong_references() {
  // Process Soft/Weak/Final/PhantomReferences
  _reference_processor.process_references();

  // Process concurrent weak roots
  _weak_roots_processor.process_concurrent_weak_roots();

  if (ClassUnloading) {
    {
      // 1. This is the unlinking phase; remove references to stale metadata and nmethods
      ZPhantomIsAliveObjectClosure is_alive;

      // Unlink the classes
      bool unloading_occurred = SystemDictionary::do_unloading(ZStatPhase::timer(), true /* do_cleaning */);

      // Unload the nmethods
      ZNMethodTable::do_unloading(&_workers, &is_alive, unloading_occurred);

      // Unlink dead klasses from subklass/sibling/implementor lists
      Klass::clean_weak_klass_links(unloading_occurred);
    }

    // Make sure the old links are no longer observable before purging
    mutator_rendezvous();

    {
      // 2. This is the purging phase; delete the stale metadata that was unlinked

      // Purge the metaspace
      ClassLoaderDataGraph::purge();
      // MetaspaceUtils::verify_metrics();
      // Resize metaspace
      MetaspaceGC::compute_new_size();
    }
  }

  // Unblock resurrection of weak/phantom references
  ZResurrection::unblock();

  // Enqueue Soft/Weak/Final/PhantomReferences. Note that this
  // must be done after unblocking resurrection. Otherwise the
  // Finalizer thread could call Reference.get() on the Finalizers
  // that were just enqueued, which would incorrectly return null
  // during the resurrection block window, since such referents
  // are only Finalizable marked.
  _reference_processor.enqueue_references();
}

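// Free pages that were detached during the cycle: remove each one from the
// page table and hand it back to the page allocator for destruction.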
void ZHeap::destroy_detached_pages() {
  ZList<ZPage> list;

  _page_allocator.flush_detached_pages(&list);

  for (ZPage* page = list.remove_first(); page != NULL; page = list.remove_first()) {
    // Remove pagetable entry
    _pagetable.remove(page);

    // Delete the page
    _page_allocator.destroy_page(page);
  }
}

void ZHeap::select_relocation_set() {
  // Register relocatable pages with selector
  ZRelocationSetSelector selector;
  ZPageTableIterator iter(&_pagetable);
  for (ZPage* page; iter.next(&page);) {
    if (!page->is_relocatable()) {
      // Not relocatable, don't register
      continue;
    }

    if (page->is_marked()) {
      // Register live page
      selector.register_live_page(page);
    } else {
      // Register garbage page
      selector.register_garbage_page(page);

      // Reclaim page immediately
      release_page(page, true /* reclaimed */);
    }
  }

  // Select pages to relocate
  selector.select(&_relocation_set);

  // Update statistics
  ZStatRelocation::set_at_select_relocation_set(selector.relocating());
  ZStatHeap::set_at_select_relocation_set(selector.live(),
                                          selector.garbage(),
                                          reclaimed());
}

void ZHeap::prepare_relocation_set() {
  ZRelocationSetIterator iter(&_relocation_set);
  for (ZPage* page; iter.next(&page);) {
    // Prepare for relocation
    page->set_forwarding();

    // Update pagetable
    _pagetable.set_relocating(page);
  }
}

void ZHeap::reset_relocation_set() {
  ZRelocationSetIterator iter(&_relocation_set);
  for (ZPage* page; iter.next(&page);) {
    // Reset relocation information
    page->reset_forwarding();

    // Update pagetable
    _pagetable.clear_relocating(page);
  }
}

void ZHeap::relocate_start() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  // Update statistics
  ZStatSample(ZSamplerHeapUsedBeforeRelocation, used());

  // Flip address view
  ZAddressMasks::flip_to_remapped();
  flip_views();

  // Remap TLABs
  _object_allocator.remap_tlabs();

  // Enter relocate phase
  ZGlobalPhase = ZPhaseRelocate;

  // Update statistics
  ZStatHeap::set_at_relocate_start(capacity(), allocated(), used());

  // Remap/Relocate roots
  _relocate.start();
}

uintptr_t ZHeap::relocate_object(uintptr_t addr) {
  assert(ZGlobalPhase == ZPhaseRelocate, "Relocate not allowed");
  ZPage* const page = _pagetable.get(addr);
  const bool retained = retain_page(page);
  const uintptr_t new_addr = page->relocate_object(addr);
  if (retained) {
    release_page(page, true /* reclaimed */);
  }

  return new_addr;
}

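// Return the forwarded address of an object that has already been relocated.
// Unlike relocate_object(), this is only used during the mark phases, when
// relocating the object itself is not allowed.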
uintptr_t ZHeap::forward_object(uintptr_t addr) {
  assert(ZGlobalPhase == ZPhaseMark ||
         ZGlobalPhase == ZPhaseMarkCompleted, "Forward not allowed");
  ZPage* const page = _pagetable.get(addr);
  return page->forward_object(addr);
}

void ZHeap::relocate() {
  // Relocate relocation set
  const bool success = _relocate.relocate(&_relocation_set);

  // Update statistics
  ZStatSample(ZSamplerHeapUsedAfterRelocation, used());
  ZStatRelocation::set_at_relocate_end(success);
  ZStatHeap::set_at_relocate_end(capacity(), allocated(), reclaimed(),
                                 used(), used_high(), used_low());
}

void ZHeap::object_iterate(ObjectClosure* cl, bool visit_referents) {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  ZHeapIterator iter(visit_referents);
  iter.objects_do(cl);
}

void ZHeap::serviceability_initialize() {
  _serviceability.initialize();
}

GCMemoryManager* ZHeap::serviceability_memory_manager() {
  return _serviceability.memory_manager();
}

MemoryPool* ZHeap::serviceability_memory_pool() {
  return _serviceability.memory_pool();
}

ZServiceabilityCounters* ZHeap::serviceability_counters() {
  return _serviceability.counters();
}

void ZHeap::print_on(outputStream* st) const {
  st->print_cr(" ZHeap           used " SIZE_FORMAT "M, capacity " SIZE_FORMAT "M, max capacity " SIZE_FORMAT "M",
               used() / M,
               capacity() / M,
               max_capacity() / M);
  MetaspaceUtils::print_on(st);
}

void ZHeap::print_extended_on(outputStream* st) const {
  print_on(st);
  st->cr();

  ZPageTableIterator iter(&_pagetable);
  for (ZPage* page; iter.next(&page);) {
    page->print_on(st);
  }

  st->cr();
}

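// Task that applies the verification closure to all strong and weak roots,
// run in parallel by the GC workers from ZHeap::verify().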
class ZVerifyRootsTask : public ZTask {
private:
  ZRootsIterator     _strong_roots;
  ZWeakRootsIterator _weak_roots;

public:
  ZVerifyRootsTask() :
      ZTask("ZVerifyRootsTask"),
      _strong_roots(false),
      _weak_roots() {}

  virtual void work() {
    ZVerifyRootOopClosure cl;
    _strong_roots.oops_do(&cl);
    _weak_roots.oops_do(&cl);
  }
};

void ZHeap::verify() {
  // Heap verification can only be done between mark end and
  // relocate start. This is the only window where all oops are
  // good and the whole heap is in a consistent state.
  guarantee(ZGlobalPhase == ZPhaseMarkCompleted, "Invalid phase");

  {
    ZVerifyRootsTask task;
    _workers.run_parallel(&task);
  }

  {
    ZVerifyObjectClosure cl;
    object_iterate(&cl, false /* visit_referents */);
  }
}