/*
 * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/z/zCollectedHeap.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zHeuristics.hpp"
#include "gc/z/zObjectAllocator.hpp"
#include "gc/z/zPage.inline.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zThread.inline.hpp"
#include "gc/z/zUtils.inline.hpp"
#include "gc/z/zValue.inline.hpp"
#include "logging/log.hpp"
#include "runtime/atomic.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.hpp"
#include "runtime/threadSMR.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"

static const ZStatCounter ZCounterUndoObjectAllocationSucceeded("Memory", "Undo Object Allocation Succeeded", ZStatUnitOpsPerSecond);
static const ZStatCounter ZCounterUndoObjectAllocationFailed("Memory", "Undo Object Allocation Failed", ZStatUnitOpsPerSecond);

ZObjectAllocator::ZObjectAllocator() :
    _per_cpu_shared_small_page(ZHeuristics::per_cpu_shared_small_page()),
    _used(0),
    _undone(0),
    _shared_medium_page(NULL),
    _shared_small_page(NULL),
    _worker_small_page(NULL) {}

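// Returns the address of the shared small page pointer to use. With per-CPU
// shared small pages enabled, each CPU gets its own slot; otherwise all CPUs
// share the slot for CPU 0.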
ZPage** ZObjectAllocator::shared_small_page_addr() {
  return _per_cpu_shared_small_page ? _shared_small_page.addr() : _shared_small_page.addr(0);
}

ZPage* const* ZObjectAllocator::shared_small_page_addr() const {
  return _per_cpu_shared_small_page ? _shared_small_page.addr() : _shared_small_page.addr(0);
}

ZPage* ZObjectAllocator::alloc_page(uint8_t type, size_t size, ZAllocationFlags flags) {
  ZPage* const page = ZHeap::heap()->alloc_page(type, size, flags);
  if (page != NULL) {
    // Increment used bytes
    Atomic::add(size, _used.addr());
  }

  return page;
}

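// Returns an unused page to the heap, crediting its size as undone so that
// used() stays accurate.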
void ZObjectAllocator::undo_alloc_page(ZPage* page) {
  // Increment undone bytes
  Atomic::add(page->size(), _undone.addr());

  ZHeap::heap()->undo_alloc_page(page);
}

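// Allocates an object in the given shared page. If the current page is
// exhausted, a new page is allocated and installed with a lock-free
// compare-and-swap, racing against other threads doing the same.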
uintptr_t ZObjectAllocator::alloc_object_in_shared_page(ZPage** shared_page,
                                                        uint8_t page_type,
                                                        size_t page_size,
                                                        size_t size,
                                                        ZAllocationFlags flags) {
  uintptr_t addr = 0;
  ZPage* page = OrderAccess::load_acquire(shared_page);

  if (page != NULL) {
    addr = page->alloc_object_atomic(size);
  }

  if (addr == 0) {
    // Allocate new page
    ZPage* const new_page = alloc_page(page_type, page_size, flags);
    if (new_page != NULL) {
      // Allocate object before installing the new page
      addr = new_page->alloc_object(size);

    retry:
      // Install new page
      ZPage* const prev_page = Atomic::cmpxchg(new_page, shared_page, page);
      if (prev_page != page) {
        if (prev_page == NULL) {
          // Previous page was retired, retry installing the new page
          page = prev_page;
          goto retry;
        }

        // Another page already installed, try allocation there first
        const uintptr_t prev_addr = prev_page->alloc_object_atomic(size);
        if (prev_addr == 0) {
          // Allocation failed, retry installing the new page
          page = prev_page;
          goto retry;
        }

        // Allocation succeeded in already installed page
        addr = prev_addr;

        // Undo new page allocation
        undo_alloc_page(new_page);
      }
    }
  }

  return addr;
}

// Large pages hold a single object, so no shared-page installation is needed
uintptr_t ZObjectAllocator::alloc_large_object(size_t size, ZAllocationFlags flags) {
  assert(ZThread::is_java(), "Should be a Java thread");

  uintptr_t addr = 0;

  // Allocate new large page
  const size_t page_size = align_up(size, ZGranuleSize);
  ZPage* const page = alloc_page(ZPageTypeLarge, page_size, flags);
  if (page != NULL) {
    // Allocate the object
    addr = page->alloc_object(size);
  }

  return addr;
}

uintptr_t ZObjectAllocator::alloc_medium_object(size_t size, ZAllocationFlags flags) {
  return alloc_object_in_shared_page(_shared_medium_page.addr(), ZPageTypeMedium, ZPageSizeMedium, size, flags);
}

uintptr_t ZObjectAllocator::alloc_small_object_from_nonworker(size_t size, ZAllocationFlags flags) {
  assert(ZThread::is_java() || ZThread::is_vm() || ZThread::is_runtime_worker(),
         "Should be a Java, VM or Runtime worker thread");

  // Non-worker small page allocation can never use the reserve
  flags.set_no_reserve();

  return alloc_object_in_shared_page(shared_small_page_addr(), ZPageTypeSmall, ZPageSizeSmall, size, flags);
}

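// Worker threads allocate from their own small page, so objects can be
// allocated with the non-atomic alloc_object() instead of alloc_object_atomic().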
uintptr_t ZObjectAllocator::alloc_small_object_from_worker(size_t size, ZAllocationFlags flags) {
  assert(ZThread::is_worker(), "Should be a worker thread");

  ZPage* page = _worker_small_page.get();
  uintptr_t addr = 0;

  if (page != NULL) {
    addr = page->alloc_object(size);
  }

  if (addr == 0) {
    // Allocate new page
    page = alloc_page(ZPageTypeSmall, ZPageSizeSmall, flags);
    if (page != NULL) {
      addr = page->alloc_object(size);
    }
    _worker_small_page.set(page);
  }

  return addr;
}

size_t ZObjectAllocator::used() const {
  size_t total_used = 0;
  size_t total_undone = 0;

  ZPerCPUConstIterator<size_t> iter_used(&_used);
  for (const size_t* cpu_used; iter_used.next(&cpu_used);) {
    total_used += *cpu_used;
  }

  ZPerCPUConstIterator<size_t> iter_undone(&_undone);
  for (const size_t* cpu_undone; iter_undone.next(&cpu_undone);) {
    total_undone += *cpu_undone;
  }

  return total_used - total_undone;
}

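// Returns the number of bytes remaining in the currently installed shared
// small page, if any.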
size_t ZObjectAllocator::remaining() const {
  assert(ZThread::is_java(), "Should be a Java thread");

  const ZPage* const page = OrderAccess::load_acquire(shared_small_page_addr());
  if (page != NULL) {
    return page->remaining();
  }

  return 0;
}

void ZObjectAllocator::retire_pages() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  // Reset used and undone bytes
  _used.set_all(0);
  _undone.set_all(0);

  // Reset allocation pages
  _shared_medium_page.set(NULL);
  _shared_small_page.set_all(NULL);
  _worker_small_page.set_all(NULL);
}