8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/classLoader.hpp"
27 #include "classfile/javaClasses.hpp"
28 #include "gc/shared/gcLocker.inline.hpp"
29 #include "gc/shared/genCollectedHeap.hpp"
30 #include "gc/shared/vmGCOperations.hpp"
31 #include "memory/oopFactory.hpp"
32 #include "oops/instanceKlass.hpp"
33 #include "oops/instanceRefKlass.hpp"
34 #include "runtime/handles.inline.hpp"
35 #include "runtime/init.hpp"
36 #include "runtime/interfaceSupport.hpp"
37 #include "utilities/dtrace.hpp"
38 #include "utilities/macros.hpp"
39 #include "utilities/preserveException.hpp"
40 #if INCLUDE_ALL_GCS
41 #include "gc/g1/g1CollectedHeap.inline.hpp"
42 #endif // INCLUDE_ALL_GCS
43
44 VM_GC_Operation::~VM_GC_Operation() {
45 CollectedHeap* ch = Universe::heap();
46 ch->collector_policy()->set_all_soft_refs_clear(false);
47 }
170 SvcGCMarker sgcm(SvcGCMarker::MINOR);
171
172 GenCollectedHeap* gch = GenCollectedHeap::heap();
173 GCCauseSetter gccs(gch, _gc_cause);
174 _result = gch->satisfy_failed_allocation(_word_size, _tlab);
175 assert(gch->is_in_reserved_or_null(_result), "result not in heap");
176
177 if (_result == NULL && GC_locker::is_active_and_needs_gc()) {
178 set_gc_locked();
179 }
180 }
181
182 void VM_GenCollectFull::doit() {
183 SvcGCMarker sgcm(SvcGCMarker::FULL);
184
185 GenCollectedHeap* gch = GenCollectedHeap::heap();
186 GCCauseSetter gccs(gch, _gc_cause);
187 gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_generation);
188 }
189
190 // Returns true iff a concurrent GC unloads metadata.
191 bool VM_CollectForMetadataAllocation::initiate_concurrent_GC() {
192 #if INCLUDE_ALL_GCS
193 if (UseConcMarkSweepGC && CMSClassUnloadingEnabled) {
194 MetaspaceGC::set_should_concurrent_collect(true);
195 return true;
196 }
197
198 if (UseG1GC && ClassUnloadingWithConcurrentMark) {
199 G1CollectedHeap* g1h = G1CollectedHeap::heap();
200 g1h->g1_policy()->collector_state()->set_initiate_conc_mark_if_possible(true);
201
202 GCCauseSetter x(g1h, _gc_cause);
203
204 // At this point we are supposed to start a concurrent cycle. We
205 // will do so if one is not already in progress.
206 bool should_start = g1h->g1_policy()->force_initial_mark_if_outside_cycle(_gc_cause);
207
208 if (should_start) {
209 double pause_target = g1h->g1_policy()->max_pause_time_ms();
272 }
273
274 // If expansion failed, do a last-ditch collection and try allocating
275 // again. A last-ditch collection will clear softrefs. This
276 // behavior is similar to the last-ditch collection done for perm
277 // gen when it was full and a collection for failed allocation
278 // did not free perm gen space.
279 heap->collect_as_vm_thread(GCCause::_last_ditch_collection);
280 _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
281 if (_result != NULL) {
282 return;
283 }
284
285 if (Verbose && PrintGCDetails) {
286 gclog_or_tty->print_cr("\nAfter Metaspace GC failed to allocate size "
287 SIZE_FORMAT, _size);
288 }
289
290 if (GC_locker::is_active_and_needs_gc()) {
291 set_gc_locked();
292 }
293 }
|
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/classLoader.hpp"
27 #include "classfile/javaClasses.hpp"
28 #include "gc/shared/allocTracer.hpp"
29 #include "gc/shared/gcLocker.inline.hpp"
30 #include "gc/shared/genCollectedHeap.hpp"
31 #include "gc/shared/vmGCOperations.hpp"
32 #include "memory/oopFactory.hpp"
33 #include "oops/instanceKlass.hpp"
34 #include "oops/instanceRefKlass.hpp"
35 #include "runtime/handles.inline.hpp"
36 #include "runtime/init.hpp"
37 #include "runtime/interfaceSupport.hpp"
38 #include "utilities/dtrace.hpp"
39 #include "utilities/macros.hpp"
40 #include "utilities/preserveException.hpp"
41 #if INCLUDE_ALL_GCS
42 #include "gc/g1/g1CollectedHeap.inline.hpp"
43 #endif // INCLUDE_ALL_GCS
44
45 VM_GC_Operation::~VM_GC_Operation() {
46 CollectedHeap* ch = Universe::heap();
47 ch->collector_policy()->set_all_soft_refs_clear(false);
48 }
171 SvcGCMarker sgcm(SvcGCMarker::MINOR);
172
173 GenCollectedHeap* gch = GenCollectedHeap::heap();
174 GCCauseSetter gccs(gch, _gc_cause);
175 _result = gch->satisfy_failed_allocation(_word_size, _tlab);
176 assert(gch->is_in_reserved_or_null(_result), "result not in heap");
177
178 if (_result == NULL && GC_locker::is_active_and_needs_gc()) {
179 set_gc_locked();
180 }
181 }
182
183 void VM_GenCollectFull::doit() {
184 SvcGCMarker sgcm(SvcGCMarker::FULL);
185
186 GenCollectedHeap* gch = GenCollectedHeap::heap();
187 GCCauseSetter gccs(gch, _gc_cause);
188 gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_generation);
189 }
190
191 VM_CollectForMetadataAllocation::VM_CollectForMetadataAllocation(ClassLoaderData* loader_data,
192 size_t size,
193 Metaspace::MetadataType mdtype,
194 uint gc_count_before,
195 uint full_gc_count_before,
196 GCCause::Cause gc_cause)
197 : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true),
198 _loader_data(loader_data), _size(size), _mdtype(mdtype), _result(NULL) {
199 assert(_size != 0, "An allocation should always be requested with this operation.");
200 AllocTracer::send_allocation_requiring_gc_event(_size * HeapWordSize);
201 }
202
203 // Returns true iff a concurrent GC unloads metadata.
204 bool VM_CollectForMetadataAllocation::initiate_concurrent_GC() {
205 #if INCLUDE_ALL_GCS
206 if (UseConcMarkSweepGC && CMSClassUnloadingEnabled) {
207 MetaspaceGC::set_should_concurrent_collect(true);
208 return true;
209 }
210
211 if (UseG1GC && ClassUnloadingWithConcurrentMark) {
212 G1CollectedHeap* g1h = G1CollectedHeap::heap();
213 g1h->g1_policy()->collector_state()->set_initiate_conc_mark_if_possible(true);
214
215 GCCauseSetter x(g1h, _gc_cause);
216
217 // At this point we are supposed to start a concurrent cycle. We
218 // will do so if one is not already in progress.
219 bool should_start = g1h->g1_policy()->force_initial_mark_if_outside_cycle(_gc_cause);
220
221 if (should_start) {
222 double pause_target = g1h->g1_policy()->max_pause_time_ms();
285 }
286
287 // If expansion failed, do a last-ditch collection and try allocating
288 // again. A last-ditch collection will clear softrefs. This
289 // behavior is similar to the last-ditch collection done for perm
290 // gen when it was full and a collection for failed allocation
291 // did not free perm gen space.
292 heap->collect_as_vm_thread(GCCause::_last_ditch_collection);
293 _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
294 if (_result != NULL) {
295 return;
296 }
297
298 if (Verbose && PrintGCDetails) {
299 gclog_or_tty->print_cr("\nAfter Metaspace GC failed to allocate size "
300 SIZE_FORMAT, _size);
301 }
302
303 if (GC_locker::is_active_and_needs_gc()) {
304 set_gc_locked();
305 }
306 }
307
308 VM_CollectForAllocation::VM_CollectForAllocation(size_t word_size, uint gc_count_before, GCCause::Cause cause)
309 : VM_GC_Operation(gc_count_before, cause), _result(NULL), _word_size(word_size) {
310 // Only report if operation was really caused by an allocation.
311 if (_word_size != 0) {
312 AllocTracer::send_allocation_requiring_gc_event(_word_size * HeapWordSize);
313 }
314 }
|