/*
 * virtualSpaceList.cpp
 *
 *  Created on: May 6, 2018
 *      Author: thomas
 */


#include "precompiled.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspace/chunkManager.hpp"
#include "memory/metaspace/metachunk.hpp"
#include "memory/metaspace/metaspaceCommon.hpp"
#include "memory/metaspace/virtualSpaceList.hpp"
#include "memory/metaspace/virtualSpaceNode.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/atomic.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/safepoint.hpp"

namespace metaspace {

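// Delete all nodes in this list.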
VirtualSpaceList::~VirtualSpaceList() {
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* vsl = iter.get_next();
    delete vsl;
  }
}

void VirtualSpaceList::inc_reserved_words(size_t v) {
  assert_lock_strong(MetaspaceExpand_lock);
  _reserved_words = _reserved_words + v;
}
void VirtualSpaceList::dec_reserved_words(size_t v) {
  assert_lock_strong(MetaspaceExpand_lock);
  _reserved_words = _reserved_words - v;
}

#define assert_committed_below_limit()                          \
  assert(MetaspaceUtils::committed_bytes() <= MaxMetaspaceSize, \
         "Too much committed memory. Committed: " SIZE_FORMAT   \
         " limit (MaxMetaspaceSize): " SIZE_FORMAT,             \
          MetaspaceUtils::committed_bytes(), MaxMetaspaceSize);

void VirtualSpaceList::inc_committed_words(size_t v) {
  assert_lock_strong(MetaspaceExpand_lock);
  _committed_words = _committed_words + v;

  assert_committed_below_limit();
}
void VirtualSpaceList::dec_committed_words(size_t v) {
  assert_lock_strong(MetaspaceExpand_lock);
  _committed_words = _committed_words - v;

  assert_committed_below_limit();
}

void VirtualSpaceList::inc_virtual_space_count() {
  assert_lock_strong(MetaspaceExpand_lock);
  _virtual_space_count++;
}

void VirtualSpaceList::dec_virtual_space_count() {
  assert_lock_strong(MetaspaceExpand_lock);
  _virtual_space_count--;
}

// Walk the list of VirtualSpaceNodes and delete
// nodes with a 0 container_count.  Remove Metachunks in
// the node from their respective freelists.
void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be called at safepoint for contains to work");
  assert_lock_strong(MetaspaceExpand_lock);
  // Don't use a VirtualSpaceListIterator because this
  // list is being changed and a straightforward use of an iterator is not safe.
  VirtualSpaceNode* purged_vsl = NULL;
  VirtualSpaceNode* prev_vsl = virtual_space_list();
  VirtualSpaceNode* next_vsl = prev_vsl;
  while (next_vsl != NULL) {
    VirtualSpaceNode* vsl = next_vsl;
    DEBUG_ONLY(vsl->verify_container_count();)
    next_vsl = vsl->next();
    // Don't free the current virtual space since it will likely
    // be needed soon.
    if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
      log_trace(gc, metaspace, freelist)("Purging VirtualSpaceNode " PTR_FORMAT " (capacity: " SIZE_FORMAT
                                         ", used: " SIZE_FORMAT ").", p2i(vsl), vsl->capacity_words_in_vs(), vsl->used_words_in_vs());
      DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_vsnodes_purged));
      // Unlink it from the list
      if (prev_vsl == vsl) {
        // This is the case of the current node being the first node.
        assert(vsl == virtual_space_list(), "Expected to be the first node");
        set_virtual_space_list(vsl->next());
      } else {
        prev_vsl->set_next(vsl->next());
      }

      vsl->purge(chunk_manager);
      dec_reserved_words(vsl->reserved_words());
      dec_committed_words(vsl->committed_words());
      dec_virtual_space_count();
      purged_vsl = vsl;
      delete vsl;
    } else {
      prev_vsl = vsl;
    }
  }
#ifdef ASSERT
  if (purged_vsl != NULL) {
    // List should be stable enough to use an iterator here.
    VirtualSpaceListIterator iter(virtual_space_list());
    while (iter.repeat()) {
      VirtualSpaceNode* vsl = iter.get_next();
      assert(vsl != purged_vsl, "Purge of vsl failed");
    }
  }
#endif
}


// This function looks at the mmap regions in the metaspace without locking.
// The chunks are added with store ordering and not deleted except for at
// unloading time during a safepoint.
bool VirtualSpaceList::contains(const void* ptr) {
  // List should be stable enough to use an iterator here because removing virtual
  // space nodes is only allowed at a safepoint.
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* vsn = iter.get_next();
    if (vsn->contains(ptr)) {
      return true;
    }
  }
  return false;
}

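// Retire the current virtual space node: hand the remainder of its committed
// space to the appropriate chunk manager, since allocation will continue in a
// new node.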
void VirtualSpaceList::retire_current_virtual_space() {
  assert_lock_strong(MetaspaceExpand_lock);

  VirtualSpaceNode* vsn = current_virtual_space();

  ChunkManager* cm = is_class() ? Metaspace::chunk_manager_class() :
                                  Metaspace::chunk_manager_metadata();

  vsn->retire(cm);
}

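// Create a list for non-class metaspace, seeded with an initial node of
// word_size words.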
VirtualSpaceList::VirtualSpaceList(size_t word_size) :
                                   _virtual_space_list(NULL),
                                   _current_virtual_space(NULL),
                                   _is_class(false),
                                   _reserved_words(0),
                                   _committed_words(0),
                                   _virtual_space_count(0) {
  MutexLockerEx cl(MetaspaceExpand_lock,
                   Mutex::_no_safepoint_check_flag);
  create_new_virtual_space(word_size);
}

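// Create the list for the compressed class space: a single node backed by
// the pre-reserved space rs. No further nodes are ever added to this list
// (see the assert in create_new_virtual_space()).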
VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
                                   _virtual_space_list(NULL),
                                   _current_virtual_space(NULL),
                                   _is_class(true),
                                   _reserved_words(0),
                                   _committed_words(0),
                                   _virtual_space_count(0) {
  MutexLockerEx cl(MetaspaceExpand_lock,
                   Mutex::_no_safepoint_check_flag);
  VirtualSpaceNode* class_entry = new VirtualSpaceNode(is_class(), rs);
  bool succeeded = class_entry->initialize();
  if (succeeded) {
    link_vs(class_entry);
  }
}

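// Note: returns the free space in the current node only, since that is the
// only node allocations are served from.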
size_t VirtualSpaceList::free_bytes() {
  return current_virtual_space()->free_words_in_vs() * BytesPerWord;
}

// Allocate another meta virtual space and add it to the list.
bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
  assert_lock_strong(MetaspaceExpand_lock);

  if (is_class()) {
    assert(false, "We currently don't support more than one VirtualSpace for"
                  " the compressed class space. The initialization of the"
                  " CCS uses another code path and should not hit this path.");
    return false;
  }

  if (vs_word_size == 0) {
    assert(false, "vs_word_size should always be at least _reserve_alignment large.");
    return false;
  }

  // Reserve the space
  size_t vs_byte_size = vs_word_size * BytesPerWord;
  assert_is_aligned(vs_byte_size, Metaspace::reserve_alignment());

  // Allocate the meta virtual space and initialize it.
  VirtualSpaceNode* new_entry = new VirtualSpaceNode(is_class(), vs_byte_size);
  if (!new_entry->initialize()) {
    delete new_entry;
    return false;
  } else {
    assert(new_entry->reserved_words() == vs_word_size,
        "Reserved memory size differs from requested memory size");
    // ensure lock-free iteration sees fully initialized node
    OrderAccess::storestore();
    link_vs(new_entry);
    DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_vsnodes_created));
    return true;
  }
}

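// Append new_entry to the list, make it the current node, and update the
// reserved/committed word counts and the node count.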
void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
  if (virtual_space_list() == NULL) {
    set_virtual_space_list(new_entry);
  } else {
    current_virtual_space()->set_next(new_entry);
  }
  set_current_virtual_space(new_entry);
  inc_reserved_words(new_entry->reserved_words());
  inc_committed_words(new_entry->committed_words());
  inc_virtual_space_count();
#ifdef ASSERT
  new_entry->mangle();
#endif
  LogTarget(Trace, gc, metaspace) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    VirtualSpaceNode* vsl = current_virtual_space();
    ResourceMark rm;
    vsl->print_on(&ls);
  }
}

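// Expand the given node by committing at least min_words, preferably
// preferred_words, and account for the newly committed memory.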
bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
                                      size_t min_words,
                                      size_t preferred_words) {
  size_t before = node->committed_words();

  bool result = node->expand_by(min_words, preferred_words);

  size_t after = node->committed_words();

  // after and before can be the same if the memory was pre-committed.
  assert(after >= before, "Inconsistency");
  inc_committed_words(after - before);

  return result;
}

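// Commit more memory to the list: first try to expand the current node; if
// that fails, retire it and allocate a new node. Returns false if the
// expansion is not permitted by the GC policy (a GC may be needed first) or
// if no new node could be created.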
bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
  assert_is_aligned(min_words,       Metaspace::commit_alignment_words());
  assert_is_aligned(preferred_words, Metaspace::commit_alignment_words());
  assert(min_words <= preferred_words, "Invalid arguments");

  const char* const class_or_not = (is_class() ? "class" : "non-class");

  if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
    log_trace(gc, metaspace, freelist)("Cannot expand %s virtual space list.",
              class_or_not);
    return false;
  }

  size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
  if (allowed_expansion_words < min_words) {
    log_trace(gc, metaspace, freelist)("Cannot expand %s virtual space list (must try gc first).",
              class_or_not);
    return false;
  }

  size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);

  // Commit more memory from the current virtual space.
  bool vs_expanded = expand_node_by(current_virtual_space(),
                                    min_words,
                                    max_expansion_words);
  if (vs_expanded) {
    log_trace(gc, metaspace, freelist)("Expanded %s virtual space list.",
              class_or_not);
    return true;
  }
  log_trace(gc, metaspace, freelist)("%s virtual space list: retire current node.",
            class_or_not);
  retire_current_virtual_space();

  // Get another virtual space.
  size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
  grow_vs_words = align_up(grow_vs_words, Metaspace::reserve_alignment_words());

  if (create_new_virtual_space(grow_vs_words)) {
    if (current_virtual_space()->is_pre_committed()) {
      // The memory was pre-committed, so we are done here.
      assert(min_words <= current_virtual_space()->committed_words(),
          "The new VirtualSpace was pre-committed, so it"
          " should be large enough to fit the alloc request.");
      return true;
    }

    return expand_node_by(current_virtual_space(),
                          min_words,
                          max_expansion_words);
  }

  return false;
}

// Given a chunk, calculate the largest possible padding space which
// could be required when allocating it.
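// For example, a non-humongous chunk of N words is allocated at an N-word
// boundary inside the node, so up to N minus the smallest chunk size words
// of padding chunks may have to be inserted in front of it.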
static size_t largest_possible_padding_size_for_chunk(size_t chunk_word_size, bool is_class) {
  const ChunkIndex chunk_type = get_chunk_type_by_size(chunk_word_size, is_class);
  if (chunk_type != HumongousIndex) {
    // Normal, non-humongous chunks are allocated at chunk size
    // boundaries, so the largest padding space required would be that
    // minus the smallest chunk size.
    const size_t smallest_chunk_size = is_class ? ClassSpecializedChunk : SpecializedChunk;
    return chunk_word_size - smallest_chunk_size;
  } else {
    // Humongous chunks are allocated at smallest-chunksize
    // boundaries, so there is no padding required.
    return 0;
  }
}

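// Hand out a new chunk of chunk_word_size words. First tries the current
// node; if that fails, commits more memory (possibly in a new node) and
// retries. Returns NULL if the needed memory could not be committed.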
Metachunk* VirtualSpaceList::get_new_chunk(size_t chunk_word_size, size_t suggested_commit_granularity) {

  // Allocate a chunk out of the current virtual space.
  Metachunk* next = current_virtual_space()->get_chunk_vs(chunk_word_size);

  if (next != NULL) {
    return next;
  }

  // The expand amount is currently only determined by the requested sizes
  // and not how much committed memory is left in the current virtual space.

  // We must have enough space for the requested size and any
  // additional required padding chunks.
  const size_t size_for_padding = largest_possible_padding_size_for_chunk(chunk_word_size, this->is_class());

  size_t min_word_size       = align_up(chunk_word_size + size_for_padding, Metaspace::commit_alignment_words());
  size_t preferred_word_size = align_up(suggested_commit_granularity, Metaspace::commit_alignment_words());
  if (min_word_size >= preferred_word_size) {
    // Can happen when humongous chunks are allocated.
    preferred_word_size = min_word_size;
  }

  bool expanded = expand_by(min_word_size, preferred_word_size);
  if (expanded) {
    next = current_virtual_space()->get_chunk_vs(chunk_word_size);
    assert(next != NULL, "The allocation was expected to succeed after the expansion");
  }

  return next;
}

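// Print a one-line summary of the list followed by the details of each node.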
void VirtualSpaceList::print_on(outputStream* st, size_t scale) const {
  st->print_cr(SIZE_FORMAT " nodes, current node: " PTR_FORMAT,
      _virtual_space_count, p2i(_current_virtual_space));
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    st->cr();
    VirtualSpaceNode* node = iter.get_next();
    node->print_on(st, scale);
  }
}

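// Print the chunk map of each node in the list.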
void VirtualSpaceList::print_map(outputStream* st) const {
  VirtualSpaceNode* list = virtual_space_list();
  VirtualSpaceListIterator iter(list);
  unsigned i = 0;
  while (iter.repeat()) {
    st->print_cr("Node %u:", i);
    VirtualSpaceNode* node = iter.get_next();
    node->print_map(st, this->is_class());
    i++;
  }
}

} // namespace metaspace