1 /*
2 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
158 // of the old generation.
159 HeapWord* failed_mem_allocate(size_t size);
160
161 // Support for System.gc()
162 void collect(GCCause::Cause cause);
163
164 // These also should be called by the vm thread at a safepoint (e.g., from a
165 // VM operation).
166 //
// Collects the young generation only, unless the scavenge fails; it will
// then attempt a full gc. (A former companion entry point collected the
// entire heap, compacting everything and clearing all soft references
// when maximum_compaction was true; it is no longer declared here --
// see do_full_collection() below.)
171 inline void invoke_scavenge();
172
173 // Perform a full collection
174 virtual void do_full_collection(bool clear_all_soft_refs);
175
176 bool supports_inline_contig_alloc() const { return !UseNUMA; }
177
178 HeapWord** top_addr() const { return !UseNUMA ? young_gen()->top_addr() : (HeapWord**)-1; }
179 HeapWord** end_addr() const { return !UseNUMA ? young_gen()->end_addr() : (HeapWord**)-1; }
180
181 void ensure_parsability(bool retire_tlabs);
182 void accumulate_statistics_all_tlabs();
183 void resize_all_tlabs();
184
185 bool supports_tlab_allocation() const { return true; }
186
187 size_t tlab_capacity(Thread* thr) const;
188 size_t tlab_used(Thread* thr) const;
189 size_t unsafe_max_tlab_alloc(Thread* thr) const;
190
191 // Can a compiler initialize a new object without store barriers?
192 // This permission only extends from the creation of a new object
193 // via a TLAB up to the first subsequent safepoint.
194 virtual bool can_elide_tlab_store_barriers() const {
195 return true;
196 }
197
198 virtual bool card_mark_must_follow_store() const {
|
1 /*
2 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
158 // of the old generation.
159 HeapWord* failed_mem_allocate(size_t size);
160
161 // Support for System.gc()
162 void collect(GCCause::Cause cause);
163
164 // These also should be called by the vm thread at a safepoint (e.g., from a
165 // VM operation).
166 //
// Collects the young generation only, unless the scavenge fails; it will
// then attempt a full gc. (A former companion entry point collected the
// entire heap, compacting everything and clearing all soft references
// when maximum_compaction was true; it is no longer declared here --
// see do_full_collection() below.)
171 inline void invoke_scavenge();
172
173 // Perform a full collection
174 virtual void do_full_collection(bool clear_all_soft_refs);
175
176 bool supports_inline_contig_alloc() const { return !UseNUMA; }
177
178 HeapWord* volatile* top_addr() const { return !UseNUMA ? young_gen()->top_addr() : (HeapWord* volatile*)-1; }
179 HeapWord** end_addr() const { return !UseNUMA ? young_gen()->end_addr() : (HeapWord**)-1; }
180
181 void ensure_parsability(bool retire_tlabs);
182 void accumulate_statistics_all_tlabs();
183 void resize_all_tlabs();
184
185 bool supports_tlab_allocation() const { return true; }
186
187 size_t tlab_capacity(Thread* thr) const;
188 size_t tlab_used(Thread* thr) const;
189 size_t unsafe_max_tlab_alloc(Thread* thr) const;
190
191 // Can a compiler initialize a new object without store barriers?
192 // This permission only extends from the creation of a new object
193 // via a TLAB up to the first subsequent safepoint.
194 virtual bool can_elide_tlab_store_barriers() const {
195 return true;
196 }
197
198 virtual bool card_mark_must_follow_store() const {
|