/*
 * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/shared/oopStorage.hpp"
#include "gc/z/zAddress.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zHeapIterator.hpp"
#include "gc/z/zList.inline.hpp"
#include "gc/z/zLock.inline.hpp"
#include "gc/z/zMark.inline.hpp"
#include "gc/z/zOopClosures.inline.hpp"
#include "gc/z/zPage.inline.hpp"
#include "gc/z/zPageTable.inline.hpp"
#include "gc/z/zRelocationSet.inline.hpp"
#include "gc/z/zResurrection.hpp"
#include "gc/z/zRootsIterator.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zTask.hpp"
#include "gc/z/zThread.hpp"
#include "gc/z/zTracer.inline.hpp"
#include "gc/z/zVirtualMemory.inline.hpp"
#include "gc/z/zWorkers.inline.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"

static const ZStatSampler ZSamplerHeapUsedBeforeMark("Memory", "Heap Used Before Mark", ZStatUnitBytes);
static const ZStatSampler ZSamplerHeapUsedAfterMark("Memory", "Heap Used After Mark", ZStatUnitBytes);
static const ZStatSampler ZSamplerHeapUsedBeforeRelocation("Memory", "Heap Used Before Relocation", ZStatUnitBytes);
static const ZStatSampler ZSamplerHeapUsedAfterRelocation("Memory", "Heap Used After Relocation", ZStatUnitBytes);
static const ZStatCounter ZCounterUndoPageAllocation("Memory", "Undo Page Allocation", ZStatUnitOpsPerSecond);
static const ZStatCounter ZCounterOutOfMemory("Memory", "Out Of Memory", ZStatUnitOpsPerSecond);

ZHeap* ZHeap::_heap = NULL;

ZHeap::ZHeap() :
    _workers(),
    _object_allocator(_workers.nworkers()),
    _page_allocator(heap_min_size(), heap_max_size(), heap_max_reserve_size()),
    _pagetable(),
    _mark(&_workers, &_pagetable),
    _reference_processor(&_workers),
    _weak_roots_processor(&_workers),
    _relocate(&_workers),
    _relocation_set(),
    _serviceability(heap_min_size(), heap_max_size()) {
  // Install global heap instance
  assert(_heap == NULL, "Already initialized");
  _heap = this;

  // Update statistics
  ZStatHeap::set_at_initialize(heap_max_size(), heap_max_reserve_size());
}

size_t ZHeap::heap_min_size() const {
  const size_t aligned_min_size = align_up(InitialHeapSize, ZPageSizeMin);
  return MIN2(aligned_min_size, heap_max_size());
}

size_t ZHeap::heap_max_size() const {
  const size_t aligned_max_size = align_up(MaxHeapSize, ZPageSizeMin);
  return MIN2(aligned_max_size, ZAddressOffsetMax);
}

size_t ZHeap::heap_max_reserve_size() const {
  // Reserve one small page per worker plus one shared medium page. This is still just
  // an estimate and doesn't guarantee that we can't run out of memory during relocation.
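  // As a rough illustration, assuming the default 2M small and 32M medium page
  // sizes, a configuration with 4 workers would reserve about 4 * 2M + 32M = 40M.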
  const size_t max_reserve_size = (_workers.nworkers() * ZPageSizeSmall) + ZPageSizeMedium;
  return MIN2(max_reserve_size, heap_max_size());
}

bool ZHeap::is_initialized() const {
  return _page_allocator.is_initialized() && _mark.is_initialized();
}

size_t ZHeap::min_capacity() const {
  return heap_min_size();
}

size_t ZHeap::max_capacity() const {
  return _page_allocator.max_capacity();
}

size_t ZHeap::current_max_capacity() const {
  return _page_allocator.current_max_capacity();
}

size_t ZHeap::capacity() const {
  return _page_allocator.capacity();
}

size_t ZHeap::max_reserve() const {
  return _page_allocator.max_reserve();
}

size_t ZHeap::used_high() const {
  return _page_allocator.used_high();
}

size_t ZHeap::used_low() const {
  return _page_allocator.used_low();
}

size_t ZHeap::used() const {
  return _page_allocator.used();
}

size_t ZHeap::allocated() const {
  return _page_allocator.allocated();
}

size_t ZHeap::reclaimed() const {
  return _page_allocator.reclaimed();
}

size_t ZHeap::tlab_capacity() const {
  return capacity();
}

size_t ZHeap::tlab_used() const {
  return _object_allocator.used();
}

size_t ZHeap::max_tlab_size() const {
  return ZObjectSizeLimitSmall;
}

size_t ZHeap::unsafe_max_tlab_alloc() const {
  size_t size = _object_allocator.remaining();

  if (size < MinTLABSize) {
    // The remaining space in the allocator is not enough to
    // fit the smallest possible TLAB. This means that the next
    // TLAB allocation will force the allocator to get a new
    // backing page anyway, which in turn means that we can then
    // fit the largest possible TLAB.
    size = max_tlab_size();
  }

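  // In either case, never report more than the largest TLAB size we can hand out.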
  return MIN2(size, max_tlab_size());
}

bool ZHeap::is_in(uintptr_t addr) const {
  if (addr < ZAddressReservedStart() || addr >= ZAddressReservedEnd()) {
    return false;
  }

  const ZPage* const page = _pagetable.get(addr);
  if (page != NULL) {
    return page->is_in(addr);
  }

  return false;
}

uintptr_t ZHeap::block_start(uintptr_t addr) const {
  const ZPage* const page = _pagetable.get(addr);
  return page->block_start(addr);
}

size_t ZHeap::block_size(uintptr_t addr) const {
  const ZPage* const page = _pagetable.get(addr);
  return page->block_size(addr);
}

bool ZHeap::block_is_obj(uintptr_t addr) const {
  const ZPage* const page = _pagetable.get(addr);
  return page->block_is_obj(addr);
}

uint ZHeap::nconcurrent_worker_threads() const {
  return _workers.nconcurrent();
}

uint ZHeap::nconcurrent_no_boost_worker_threads() const {
  return _workers.nconcurrent_no_boost();
}

void ZHeap::set_boost_worker_threads(bool boost) {
  _workers.set_boost(boost);
}

void ZHeap::worker_threads_do(ThreadClosure* tc) const {
  _workers.threads_do(tc);
}

void ZHeap::print_worker_threads_on(outputStream* st) const {
  _workers.print_threads_on(st);
}

void ZHeap::out_of_memory() {
  ResourceMark rm;

  ZStatInc(ZCounterOutOfMemory);
  log_info(gc)("Out Of Memory (%s)", Thread::current()->name());
}

ZPage* ZHeap::alloc_page(uint8_t type, size_t size, ZAllocationFlags flags) {
  ZPage* const page = _page_allocator.alloc_page(type, size, flags);
  if (page != NULL) {
    // Update pagetable
    _pagetable.insert(page);
  }

  return page;
}

void ZHeap::undo_alloc_page(ZPage* page) {
  assert(page->is_allocating(), "Invalid page state");

  ZStatInc(ZCounterUndoPageAllocation);
  log_trace(gc)("Undo page allocation, thread: " PTR_FORMAT " (%s), page: " PTR_FORMAT ", size: " SIZE_FORMAT,
                ZThread::id(), ZThread::name(), p2i(page), page->size());

  release_page(page, false /* reclaimed */);
}

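// Pages are reference counted while in use. retain_page() tries to take
// another reference on the page and reports whether it succeeded, while
// release_page() drops one reference and returns the page to the page
// allocator once the last reference is gone.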
bool ZHeap::retain_page(ZPage* page) {
  return page->inc_refcount();
}

void ZHeap::release_page(ZPage* page, bool reclaimed) {
  if (page->dec_refcount()) {
    _page_allocator.free_page(page, reclaimed);
  }
}

void ZHeap::flip_views() {
  // For debugging only
  if (ZUnmapBadViews) {
    // Flip pages
    ZPageTableIterator iter(&_pagetable);
    for (ZPage* page; iter.next(&page);) {
      if (!page->is_detached()) {
        _page_allocator.flip_page(page);
      }
    }

    // Flip pre-mapped memory
    _page_allocator.flip_pre_mapped();
  }
}

void ZHeap::mark_start() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  // Update statistics
  ZStatSample(ZSamplerHeapUsedBeforeMark, used());

  // Retire TLABs
  _object_allocator.retire_tlabs();

  // Flip address view
  ZAddressMasks::flip_to_marked();
  flip_views();

  // Reset allocated/reclaimed/used statistics
  _page_allocator.reset_statistics();

  // Reset encountered/dropped/enqueued statistics
  _reference_processor.reset_statistics();

  // Enter mark phase
  ZGlobalPhase = ZPhaseMark;

  // Reset marking information and mark roots
  _mark.start();

  // Update statistics
  ZStatHeap::set_at_mark_start(capacity(), used());
}

void ZHeap::mark() {
  _mark.mark();
}

void ZHeap::mark_flush_and_free(Thread* thread) {
  _mark.flush_and_free(thread);
}

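// Rescans all thread stacks with a marking closure. Run at mark end to make
// sure oops loaded just before a safepoint poll are also marked, see mark_end().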
class ZFixupPartialLoadsTask : public ZTask {
private:
  ZThreadRootsIterator _thread_roots;

public:
  ZFixupPartialLoadsTask() :
      ZTask("ZFixupPartialLoadsTask"),
      _thread_roots() {}

  virtual void work() {
    ZMarkRootOopClosure cl;
    _thread_roots.oops_do(&cl);
  }
};

void ZHeap::fixup_partial_loads() {
  ZFixupPartialLoadsTask task;
  _workers.run_parallel(&task);
}

bool ZHeap::mark_end() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  // C2 can generate code where a safepoint poll is inserted
  // between a load and the associated load barrier. To handle
  // this case we need to rescan the thread stack here to make
  // sure such oops are marked.
  fixup_partial_loads();

  // Try end marking
  if (!_mark.end()) {
    // Marking not completed, continue concurrent mark
    return false;
  }

  // Enter mark completed phase
  ZGlobalPhase = ZPhaseMarkCompleted;

  // Resize metaspace
  MetaspaceGC::compute_new_size();

  // Update statistics
  ZStatSample(ZSamplerHeapUsedAfterMark, used());
  ZStatHeap::set_at_mark_end(capacity(), allocated(), used());

  // Block resurrection of weak/phantom references
  ZResurrection::block();

  // Process weak roots
  _weak_roots_processor.process_weak_roots();

  // Verification
  if (VerifyBeforeGC || VerifyDuringGC || VerifyAfterGC) {
    Universe::verify();
  }

  return true;
}

void ZHeap::set_soft_reference_policy(bool clear) {
  _reference_processor.set_soft_reference_policy(clear);
}

void ZHeap::process_non_strong_references() {
  // Process Soft/Weak/Final/PhantomReferences
  _reference_processor.process_references();

  // Process concurrent weak roots
  _weak_roots_processor.process_concurrent_weak_roots();

  // Unblock resurrection of weak/phantom references
  ZResurrection::unblock();

  // Enqueue Soft/Weak/Final/PhantomReferences. Note that this
  // must be done after unblocking resurrection. Otherwise the
  // Finalizer thread could call Reference.get() on the Finalizers
  // that were just enqueued, which would incorrectly return null
  // during the resurrection block window, since such referents
  // are only Finalizable marked.
  _reference_processor.enqueue_references();
}

void ZHeap::destroy_detached_pages() {
  ZList<ZPage> list;

  _page_allocator.flush_detached_pages(&list);

  for (ZPage* page = list.remove_first(); page != NULL; page = list.remove_first()) {
    // Remove pagetable entry
    _pagetable.remove(page);

    // Delete the page
    _page_allocator.destroy_page(page);
  }
}

void ZHeap::select_relocation_set() {
  // Register relocatable pages with selector
  ZRelocationSetSelector selector;
  ZPageTableIterator iter(&_pagetable);
  for (ZPage* page; iter.next(&page);) {
    if (!page->is_relocatable()) {
      // Not relocatable, don't register
      continue;
    }

    if (page->is_marked()) {
      // Register live page
      selector.register_live_page(page);
    } else {
      // Register garbage page
      selector.register_garbage_page(page);

      // Reclaim page immediately
      release_page(page, true /* reclaimed */);
    }
  }

  // Select pages to relocate
  selector.select(&_relocation_set);

  // Update statistics
  ZStatRelocation::set_at_select_relocation_set(selector.relocating());
  ZStatHeap::set_at_select_relocation_set(selector.live(),
                                          selector.garbage(),
                                          reclaimed());
}

void ZHeap::prepare_relocation_set() {
  ZRelocationSetIterator iter(&_relocation_set);
  for (ZPage* page; iter.next(&page);) {
    // Prepare for relocation
    page->set_forwarding();

    // Update pagetable
    _pagetable.set_relocating(page);
  }
}

void ZHeap::reset_relocation_set() {
  ZRelocationSetIterator iter(&_relocation_set);
  for (ZPage* page; iter.next(&page);) {
    // Reset relocation information
    page->reset_forwarding();

    // Update pagetable
    _pagetable.clear_relocating(page);
  }
}

void ZHeap::relocate_start() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  // Update statistics
  ZStatSample(ZSamplerHeapUsedBeforeRelocation, used());

  // Flip address view
  ZAddressMasks::flip_to_remapped();
  flip_views();

  // Remap TLABs
  _object_allocator.remap_tlabs();

  // Enter relocate phase
  ZGlobalPhase = ZPhaseRelocate;

  // Update statistics
  ZStatHeap::set_at_relocate_start(capacity(), allocated(), used());

  // Remap/Relocate roots
  _relocate.start();
}

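// Relocates the object at the given address and returns its new address. The
// page is retained while the object is being relocated, and released again
// (as reclaimed) only if the retain succeeded.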
uintptr_t ZHeap::relocate_object(uintptr_t addr) {
  assert(ZGlobalPhase == ZPhaseRelocate, "Relocate not allowed");
  ZPage* const page = _pagetable.get(addr);
  const bool retained = retain_page(page);
  const uintptr_t new_addr = page->relocate_object(addr);
  if (retained) {
    release_page(page, true /* reclaimed */);
  }

  return new_addr;
}

uintptr_t ZHeap::forward_object(uintptr_t addr) {
  assert(ZGlobalPhase == ZPhaseMark ||
         ZGlobalPhase == ZPhaseMarkCompleted, "Forward not allowed");
  ZPage* const page = _pagetable.get(addr);
  return page->forward_object(addr);
}

void ZHeap::relocate() {
  // Relocate relocation set
  const bool success = _relocate.relocate(&_relocation_set);

  // Update statistics
  ZStatSample(ZSamplerHeapUsedAfterRelocation, used());
  ZStatRelocation::set_at_relocate_end(success);
  ZStatHeap::set_at_relocate_end(capacity(), allocated(), reclaimed(),
                                 used(), used_high(), used_low());
}

void ZHeap::object_iterate(ObjectClosure* cl, bool visit_referents) {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  ZHeapIterator iter(visit_referents);
  iter.objects_do(cl);
}

void ZHeap::serviceability_initialize() {
  _serviceability.initialize();
}

GCMemoryManager* ZHeap::serviceability_memory_manager() {
  return _serviceability.memory_manager();
}

MemoryPool* ZHeap::serviceability_memory_pool() {
  return _serviceability.memory_pool();
}

ZServiceabilityCounters* ZHeap::serviceability_counters() {
  return _serviceability.counters();
}

void ZHeap::print_on(outputStream* st) const {
  st->print_cr(" ZHeap           used " SIZE_FORMAT "M, capacity " SIZE_FORMAT "M, max capacity " SIZE_FORMAT "M",
               used() / M,
               capacity() / M,
               max_capacity() / M);
  MetaspaceUtils::print_on(st);
}

void ZHeap::print_extended_on(outputStream* st) const {
  print_on(st);
  st->cr();

  ZPageTableIterator iter(&_pagetable);
  for (ZPage* page; iter.next(&page);) {
    page->print_on(st);
  }

  st->cr();
}

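// Root verification task, run in parallel by the GC workers from ZHeap::verify().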
class ZVerifyRootsTask : public ZTask {
private:
  ZRootsIterator     _strong_roots;
  ZWeakRootsIterator _weak_roots;

public:
  ZVerifyRootsTask() :
      ZTask("ZVerifyRootsTask"),
      _strong_roots(),
      _weak_roots() {}

  virtual void work() {
    ZVerifyRootOopClosure cl;
    _strong_roots.oops_do(&cl);
    _weak_roots.oops_do(&cl);
  }
};

void ZHeap::verify() {
  // Heap verification can only be done between mark end and
  // relocate start. This is the only window where all oops are
  // good and the whole heap is in a consistent state.
  guarantee(ZGlobalPhase == ZPhaseMarkCompleted, "Invalid phase");

  {
    ZVerifyRootsTask task;
    _workers.run_parallel(&task);
  }

  {
    ZVerifyObjectClosure cl;
    object_iterate(&cl, false /* visit_referents */);
  }
}