1 /*
2 * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "gc/g1/g1PageBasedVirtualSpace.hpp"
27 #include "gc/shared/workgroup.hpp"
28 #include "oops/markWord.hpp"
29 #include "oops/oop.inline.hpp"
30 #include "runtime/atomic.hpp"
31 #include "runtime/os.inline.hpp"
32 #include "services/memTracker.hpp"
33 #include "utilities/align.hpp"
34 #include "utilities/bitMap.inline.hpp"
35
// Create a page-based virtual space on top of an already reserved memory area.
// rs:        backing reservation; must already be reserved and aligned to page_size
//            (enforced by guarantees in initialize_with_page_size()).
// used_size: number of bytes of rs actually used by this space; must be OS-page
//            aligned and no larger than rs.size().
// page_size: commit/uncommit granularity for this space.
// Members are first set to safe defaults; the real setup happens in
// initialize_with_page_size(), which asserts _low_boundary is still NULL.
G1PageBasedVirtualSpace::G1PageBasedVirtualSpace(ReservedSpace rs, size_t used_size, size_t page_size) :
  _low_boundary(NULL), _high_boundary(NULL), _tail_size(0), _page_size(0),
  _committed(mtGC), _dirty(mtGC), _special(false), _executable(false) {
  initialize_with_page_size(rs, used_size, page_size);
}
41
42 void G1PageBasedVirtualSpace::initialize_with_page_size(ReservedSpace rs, size_t used_size, size_t page_size) {
43 guarantee(rs.is_reserved(), "Given reserved space must have been reserved already.");
44
45 vmassert(_low_boundary == NULL, "VirtualSpace already initialized");
46 vmassert(page_size > 0, "Page size must be non-zero.");
47
48 guarantee(is_aligned(rs.base(), page_size),
49 "Reserved space base " PTR_FORMAT " is not aligned to requested page size " SIZE_FORMAT, p2i(rs.base()), page_size);
50 guarantee(is_aligned(used_size, os::vm_page_size()),
51 "Given used reserved space size needs to be OS page size aligned (%d bytes) but is " SIZE_FORMAT, os::vm_page_size(), used_size);
52 guarantee(used_size <= rs.size(),
53 "Used size of reserved space " SIZE_FORMAT " bytes is smaller than reservation at " SIZE_FORMAT " bytes", used_size, rs.size());
54 guarantee(is_aligned(rs.size(), page_size),
55 "Expected that the virtual space is size aligned, but " SIZE_FORMAT " is not aligned to page size " SIZE_FORMAT, rs.size(), page_size);
56
57 _low_boundary = rs.base();
58 _high_boundary = _low_boundary + used_size;
59
60 _special = rs.special();
61 _executable = rs.executable();
62
191
192 bool G1PageBasedVirtualSpace::commit(size_t start_page, size_t size_in_pages) {
193 // We need to make sure to commit all pages covered by the given area.
194 guarantee(is_area_uncommitted(start_page, size_in_pages), "Specified area is not uncommitted");
195
196 bool zero_filled = true;
197 size_t end_page = start_page + size_in_pages;
198
199 if (_special) {
200 // Check for dirty pages and update zero_filled if any found.
201 if (_dirty.get_next_one_offset(start_page, end_page) < end_page) {
202 zero_filled = false;
203 _dirty.clear_range(start_page, end_page);
204 }
205 } else {
206 commit_internal(start_page, end_page);
207 }
208 _committed.set_range(start_page, end_page);
209
210 return zero_filled;
211 }
212
213 void G1PageBasedVirtualSpace::uncommit_internal(size_t start_page, size_t end_page) {
214 guarantee(start_page < end_page,
215 "Given start page " SIZE_FORMAT " is larger or equal to end page " SIZE_FORMAT, start_page, end_page);
216
217 char* start_addr = page_start(start_page);
218 os::uncommit_memory(start_addr, pointer_delta(bounded_end_addr(end_page), start_addr, sizeof(char)));
219 }
220
221 void G1PageBasedVirtualSpace::uncommit(size_t start_page, size_t size_in_pages) {
222 guarantee(is_area_committed(start_page, size_in_pages), "checking");
223
224 size_t end_page = start_page + size_in_pages;
225 if (_special) {
226 // Mark that memory is dirty. If committed again the memory might
227 // need to be cleared explicitly.
228 _dirty.set_range(start_page, end_page);
229 } else {
230 uncommit_internal(start_page, end_page);
|
1 /*
2 * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "gc/g1/g1MemoryNodeManager.hpp"
27 #include "gc/g1/g1PageBasedVirtualSpace.hpp"
28 #include "gc/shared/workgroup.hpp"
29 #include "oops/markWord.hpp"
30 #include "oops/oop.inline.hpp"
31 #include "runtime/atomic.hpp"
32 #include "runtime/os.inline.hpp"
33 #include "services/memTracker.hpp"
34 #include "utilities/align.hpp"
35 #include "utilities/bitMap.inline.hpp"
36
// Create a page-based virtual space on top of an already reserved memory area.
// rs:        backing reservation; must already be reserved and aligned to page_size
//            (enforced by guarantees in initialize_with_page_size()).
// used_size: number of bytes of rs actually used by this space; must be OS-page
//            aligned and no larger than rs.size().
// page_size: commit/uncommit granularity for this space.
// type:      memory type of this space; request_memory_on_node() only acts when
//            it is mtJavaHeap.
// Members are first set to safe defaults; the real setup happens in
// initialize_with_page_size(), which asserts _low_boundary is still NULL.
G1PageBasedVirtualSpace::G1PageBasedVirtualSpace(ReservedSpace rs, size_t used_size, size_t page_size, MemoryType type) :
  _low_boundary(NULL), _high_boundary(NULL), _tail_size(0), _page_size(0),
  _committed(mtGC), _dirty(mtGC), _special(false), _executable(false), _memory_type(type) {
  initialize_with_page_size(rs, used_size, page_size, type);
}
42
43 void G1PageBasedVirtualSpace::initialize_with_page_size(ReservedSpace rs, size_t used_size, size_t page_size, MemoryType type) {
44 guarantee(rs.is_reserved(), "Given reserved space must have been reserved already.");
45
46 vmassert(_low_boundary == NULL, "VirtualSpace already initialized");
47 vmassert(page_size > 0, "Page size must be non-zero.");
48
49 guarantee(is_aligned(rs.base(), page_size),
50 "Reserved space base " PTR_FORMAT " is not aligned to requested page size " SIZE_FORMAT, p2i(rs.base()), page_size);
51 guarantee(is_aligned(used_size, os::vm_page_size()),
52 "Given used reserved space size needs to be OS page size aligned (%d bytes) but is " SIZE_FORMAT, os::vm_page_size(), used_size);
53 guarantee(used_size <= rs.size(),
54 "Used size of reserved space " SIZE_FORMAT " bytes is smaller than reservation at " SIZE_FORMAT " bytes", used_size, rs.size());
55 guarantee(is_aligned(rs.size(), page_size),
56 "Expected that the virtual space is size aligned, but " SIZE_FORMAT " is not aligned to page size " SIZE_FORMAT, rs.size(), page_size);
57
58 _low_boundary = rs.base();
59 _high_boundary = _low_boundary + used_size;
60
61 _special = rs.special();
62 _executable = rs.executable();
63
192
193 bool G1PageBasedVirtualSpace::commit(size_t start_page, size_t size_in_pages) {
194 // We need to make sure to commit all pages covered by the given area.
195 guarantee(is_area_uncommitted(start_page, size_in_pages), "Specified area is not uncommitted");
196
197 bool zero_filled = true;
198 size_t end_page = start_page + size_in_pages;
199
200 if (_special) {
201 // Check for dirty pages and update zero_filled if any found.
202 if (_dirty.get_next_one_offset(start_page, end_page) < end_page) {
203 zero_filled = false;
204 _dirty.clear_range(start_page, end_page);
205 }
206 } else {
207 commit_internal(start_page, end_page);
208 }
209 _committed.set_range(start_page, end_page);
210
211 return zero_filled;
212 }
213
214 void G1PageBasedVirtualSpace::request_memory_on_node(size_t start_page, size_t size_in_pages, uint node_index) {
215 // Only request if this space is for java heap.
216 if (_memory_type == mtJavaHeap) {
217 // We need to make sure the given area is committed.
218 guarantee(is_area_committed(start_page, size_in_pages), "Specified area is not committed");
219
220 char* start_addr = page_start(start_page);
221 G1MemoryNodeManager::mgr()->request_memory_on_node(start_addr, size_in_pages * _page_size, node_index);
222 }
223 }
224
225 void G1PageBasedVirtualSpace::uncommit_internal(size_t start_page, size_t end_page) {
226 guarantee(start_page < end_page,
227 "Given start page " SIZE_FORMAT " is larger or equal to end page " SIZE_FORMAT, start_page, end_page);
228
229 char* start_addr = page_start(start_page);
230 os::uncommit_memory(start_addr, pointer_delta(bounded_end_addr(end_page), start_addr, sizeof(char)));
231 }
232
233 void G1PageBasedVirtualSpace::uncommit(size_t start_page, size_t size_in_pages) {
234 guarantee(is_area_committed(start_page, size_in_pages), "checking");
235
236 size_t end_page = start_page + size_in_pages;
237 if (_special) {
238 // Mark that memory is dirty. If committed again the memory might
239 // need to be cleared explicitly.
240 _dirty.set_range(start_page, end_page);
241 } else {
242 uncommit_internal(start_page, end_page);
|