/*
 * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_SHARED_SPACE_INLINE_HPP
#define SHARE_VM_GC_SHARED_SPACE_INLINE_HPP

#include "gc/shared/generation.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/spaceDecorator.hpp"
// Because of the requirement of keeping "_offsets" up to date with the
// allocations, we sequentialize these with a lock.  Therefore, best if
// this is used for larger LAB allocations only.
inline HeapWord* OffsetTableContigSpace::par_allocate(size_t size) {
  MutexLocker x(&_par_alloc_lock);
  // This ought to be just "allocate", because of the lock above, but
  // ContiguousSpace::allocate asserts that either the allocating thread
  // holds the heap lock or it is the VM thread and we're at a safepoint.
  // The best I (dld) could figure was to put a field in ContiguousSpace
  // meaning "locking at safepoint taken care of", and set/reset that
  // here.  But this will do for now, especially in light of the comment
  // above.  Perhaps in the future some lock-free manner of keeping the
  // coordination can be found.
  HeapWord* res = ContiguousSpace::par_allocate(size);
  if (res != NULL) {
    _offsets.alloc_block(res, size);
  }
  return res;
}
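
// A minimal caller sketch (illustrative only; the fallback policy and the
// "request_words" variable are assumptions, not part of this file).  Unlike
// allocate(), which asserts that the caller holds the heap lock or is the
// VM thread at a safepoint, par_allocate() may be called by multiple
// threads, serializing on _par_alloc_lock so _offsets stays consistent:
//
//   HeapWord* mem = space->par_allocate(request_words);
//   if (mem == NULL) {
//     // Space is full: a real caller would take its slow path here,
//     // e.g. expand the space or request a collection.
//   }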

inline HeapWord*
OffsetTableContigSpace::block_start_const(const void* p) const {
  return _offsets.block_start(p);
}
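
// Example (illustrative; "interior" is an assumed pointer into the space):
// the block offset table resolves any address within the space to the start
// of the block containing it, so an interior pointer can be mapped back to
// its enclosing object's base:
//
//   HeapWord* start = space->block_start_const(interior);
//   assert(start <= (HeapWord*)interior, "block starts at or before p");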

inline size_t CompactibleSpace::obj_size(const HeapWord* addr) const {
  return oop(addr)->size();
}

template <class SpaceType>
inline void CompactibleSpace::scan_and_forward(SpaceType* space, CompactPoint* cp) {
  // Compute the new addresses for the live objects and store them in the
  // mark word.  Used by universe::mark_sweep_phase2().
  HeapWord* compact_top; // This is where we are currently compacting to.

  // We're sure to be here before any objects are compacted into this
  // space, so this is a good time to initialize this:
  space->set_compaction_top(space->bottom());

  if (cp->space == NULL) {
    assert(cp->gen != NULL, "need a generation");
    assert(cp->threshold == NULL, "just checking");
    assert(cp->gen->first_compaction_space() == space, "just checking");
    cp->space = cp->gen->first_compaction_space();
    compact_top = cp->space->bottom();
    cp->space->set_compaction_top(compact_top);
    cp->threshold = cp->space->initialize_threshold();
  } else {
    compact_top = cp->space->compaction_top();
  }

  // The main forwarding loop is elided from this extract.  It walks the
  // space block by block, forwarding each live object to compact_top; only
  // its closing statements survive here:
  //     debug_only(prev_q = q);
  //     q += size;
  //   }
  // }

  // Let's remember if we were empty before we did the compaction.
  bool was_empty = space->used_region().is_empty();
  // Reset space after compaction is complete.
  space->reset_after_compaction();
  // We do this clear, below, since it has overloaded meanings for some
  // space subtypes.  For example, OffsetTableContigSpaces that were
  // compacted into will have had their offset table thresholds updated
  // continuously, but those that weren't need to have their thresholds
  // re-initialized.  Also mangles unused area for debugging.
  if (space->used_region().is_empty()) {
    if (!was_empty) space->clear(SpaceDecorator::Mangle);
  } else {
    if (ZapUnusedHeapArea) space->mangle_unused_area();
  }
}
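
// For orientation, a simplified sketch of the forwarding pass that the
// elided loop performs (an illustration under simplifying assumptions, not
// the actual implementation, which also compacts qualifying dead space,
// prefetches ahead of the scan, and tracks end_of_live/first_dead):
//
//   HeapWord* q = space->bottom();
//   while (q < space->top()) {
//     size_t size = space->scanned_block_size(q);
//     if (oop(q)->is_gc_marked()) {
//       // forward() stores the destination address in the mark word and
//       // advances compact_top, switching compaction spaces when one fills.
//       compact_top = cp->space->forward(oop(q), size, cp, compact_top);
//     }
//     q += size;
//   }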

inline size_t ContiguousSpace::scanned_block_size(const HeapWord* addr) const {
  return oop(addr)->size();
}

#endif // SHARE_VM_GC_SHARED_SPACE_INLINE_HPP