}

// Walk the list of VirtualSpaceNodes and delete
// nodes with a 0 container_count. Remove Metachunks in
// the node from their respective freelists.
void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
  assert_lock_strong(MetaspaceExpand_lock);
  // Don't use a VirtualSpaceListIterator because this
  // list is being changed and a straightforward use of an iterator is not safe.
  VirtualSpaceNode* prev_vsl = virtual_space_list();
  VirtualSpaceNode* next_vsl = prev_vsl;
  int num_purged_nodes = 0;
  while (next_vsl != NULL) {
    VirtualSpaceNode* vsl = next_vsl;
    DEBUG_ONLY(vsl->verify(false);)
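    // Grab the successor now, since vsl may be unlinked and deleted below.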
    next_vsl = vsl->next();
    // Don't free the current virtual space since it will likely
    // be needed soon.
    if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
      log_trace(gc, metaspace, freelist)("Purging VirtualSpaceNode " PTR_FORMAT " (capacity: " SIZE_FORMAT
                                         ", used: " SIZE_FORMAT ").", p2i(vsl), vsl->capacity_words_in_vs(), vsl->used_words_in_vs());
      DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_vsnodes_purged));
      // Unlink it from the list
      if (prev_vsl == vsl) {
        // This is the case of the current node being the first node.
        assert(vsl == virtual_space_list(), "Expected to be the first node");
        set_virtual_space_list(vsl->next());
      } else {
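        // Interior or tail node: bypass vsl by pointing its predecessor at its successor.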
        prev_vsl->set_next(vsl->next());
      }

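      // Remove this node's Metachunks from the ChunkManager's freelists, then
      // adjust the list-level accounting before freeing the node itself.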
      vsl->purge(chunk_manager);
      dec_reserved_words(vsl->reserved_words());
      dec_committed_words(vsl->committed_words());
      dec_virtual_space_count();
      delete vsl;
      num_purged_nodes++;
    } else {
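      // Node stays in the list; it becomes the predecessor for the next iteration.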
      prev_vsl = vsl;
    }
  }