1 /*
2 * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
33 class MutableSpace;
34 class PSOldGen;
35 class ParCompactionManager;
36 class ObjectStartArray;
37 class ParallelCompactData;
38 class ParMarkBitMap;
39
40 class ParCompactionManager : public CHeapObj<mtGC> {
41 friend class ParallelTaskTerminator;
42 friend class ParMarkBitMap;
43 friend class PSParallelCompact;
44 friend class CompactionWithStealingTask;
45 friend class UpdateAndFillClosure;
46 friend class RefProcTaskExecutor;
47 friend class PCRefProcTask;
48 friend class MarkFromRootsTask;
49 friend class UpdateDensePrefixAndCompactionTask;
50
51 public:
52
53 // ------------------------ Don't putback if not needed
54 // Actions that the compaction manager should take.
55 enum Action {
56 Update,
57 Copy,
58 UpdateAndCopy,
59 CopyAndUpdate,
60 NotValid
61 };
62 // ------------------------ End don't putback if not needed
63
64 private:
65 // 32-bit: 4K * 8 = 32KiB; 64-bit: 8K * 16 = 128KiB
66 #define QUEUE_SIZE (1 << NOT_LP64(12) LP64_ONLY(13))
67 typedef OverflowTaskQueue<ObjArrayTask, mtGC, QUEUE_SIZE> ObjArrayTaskQueue;
68 typedef GenericTaskQueueSet<ObjArrayTaskQueue, mtGC> ObjArrayTaskQueueSet;
69 #undef QUEUE_SIZE
70
71 static ParCompactionManager** _manager_array;
72 static OopTaskQueueSet* _stack_array;
73 static ObjArrayTaskQueueSet* _objarray_queues;
74 static ObjectStartArray* _start_array;
75 static RegionTaskQueueSet* _region_array;
76 static PSOldGen* _old_gen;
77
78 private:
79 OverflowTaskQueue<oop, mtGC> _marking_stack;
80 ObjArrayTaskQueue _objarray_stack;
81 size_t _next_shadow_region;
82
  // Is there a way to reuse the _marking_stack for
  // saving empty regions? For now just create a different
  // type of TaskQueue.
86 RegionTaskQueue _region_stack;
87
88 static ParMarkBitMap* _mark_bitmap;
89
90 // Contains currently free shadow regions. We use it in
91 // a LIFO fashion for better data locality and utilization.
92 static GrowableArray<size_t>* _shadow_region_array;
93
  // Provides mutually exclusive access to _shadow_region_array.
95 // See pop/push_shadow_region_mt_safe() below
96 static Monitor* _shadow_region_monitor;
97
98 Action _action;
99
100 HeapWord* _last_query_beg;
101 oop _last_query_obj;
102 size_t _last_query_ret;
103
  // Cached pointers to heap structures shared by all compaction managers.
  static PSOldGen* old_gen() { return _old_gen; }
  static ObjectStartArray* start_array() { return _start_array; }
  static OopTaskQueueSet* stack_array() { return _stack_array; }
107
108 static void initialize(ParMarkBitMap* mbm);
109
110 protected:
  // Array of task queue sets. Needed by the ParallelTaskTerminator
  // (a friend of this class) for work stealing / termination.
  static RegionTaskQueueSet* region_array() { return _region_array; }
  // This manager's private marking stack (overflows to a backing overflow stack).
  OverflowTaskQueue<oop, mtGC>* marking_stack() { return &_marking_stack; }
114
115 // Pushes onto the marking stack. If the marking stack is full,
116 // pushes onto the overflow stack.
117 void stack_push(oop obj);
118 // Do not implement an equivalent stack_pop. Deal with the
119 // marking stack and overflow stack directly.
121 public:
122 static const size_t InvalidShadow = ~0;
123 static size_t pop_shadow_region_mt_safe(PSParallelCompact::RegionData* region_ptr);
124 static void push_shadow_region_mt_safe(size_t shadow_region);
125 static void push_shadow_region(size_t shadow_region);
126 static void remove_all_shadow_regions();
127
  // Per-manager cursor into the shadow region sequence.
  inline size_t next_shadow_region() { return _next_shadow_region; }
  inline void set_next_shadow_region(size_t record) { _next_shadow_region = record; }
  // Advance the cursor by 'workers' and return the new value.
  // NOTE(review): presumably 'workers' is the parallel worker count, so each
  // worker visits an interleaved, disjoint sequence — confirm against callers.
  inline size_t move_next_shadow_region_by(size_t workers) {
    _next_shadow_region += workers;
    return next_shadow_region();
  }
134
  // Invalidate the cached result of the last bitmap query
  // (see last_query_begin/last_query_object/last_query_return below).
  void reset_bitmap_query_cache() {
    _last_query_beg = NULL;
    _last_query_obj = NULL;
    _last_query_ret = 0;
  }
140
  // The action this compaction manager should take (see enum Action above).
  Action action() { return _action; }
  void set_action(Action v) { _action = v; }
143
  // Bitmap query support: cache the last query (start address and object)
  // and its result so a repeated query can be answered without re-scanning.
  HeapWord* last_query_begin() { return _last_query_beg; }
  oop last_query_object() { return _last_query_obj; }
  size_t last_query_return() { return _last_query_ret; }

  void set_last_query_begin(HeapWord *new_beg) { _last_query_beg = new_beg; }
  void set_last_query_object(oop new_obj) { _last_query_obj = new_obj; }
  void set_last_query_return(size_t new_ret) { _last_query_ret = new_ret; }
152
153 static void reset_all_bitmap_query_caches();
154
155 RegionTaskQueue* region_stack() { return &_region_stack; }
156
157 inline static ParCompactionManager* manager_array(uint index);
158
159 ParCompactionManager();
160
161 // Pushes onto the region stack at the given index. If the
162 // region stack is full,
163 // pushes onto the region overflow stack.
|
1 /*
2 * Copyright (c) 2005, 2020, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
33 class MutableSpace;
34 class PSOldGen;
35 class ParCompactionManager;
36 class ObjectStartArray;
37 class ParallelCompactData;
38 class ParMarkBitMap;
39
40 class ParCompactionManager : public CHeapObj<mtGC> {
41 friend class ParallelTaskTerminator;
42 friend class ParMarkBitMap;
43 friend class PSParallelCompact;
44 friend class CompactionWithStealingTask;
45 friend class UpdateAndFillClosure;
46 friend class RefProcTaskExecutor;
47 friend class PCRefProcTask;
48 friend class MarkFromRootsTask;
49 friend class UpdateDensePrefixAndCompactionTask;
50
51 public:
52
53
54 private:
55 // 32-bit: 4K * 8 = 32KiB; 64-bit: 8K * 16 = 128KiB
56 #define QUEUE_SIZE (1 << NOT_LP64(12) LP64_ONLY(13))
57 typedef OverflowTaskQueue<ObjArrayTask, mtGC, QUEUE_SIZE> ObjArrayTaskQueue;
58 typedef GenericTaskQueueSet<ObjArrayTaskQueue, mtGC> ObjArrayTaskQueueSet;
59 #undef QUEUE_SIZE
60
61 static ParCompactionManager** _manager_array;
62 static OopTaskQueueSet* _stack_array;
63 static ObjArrayTaskQueueSet* _objarray_queues;
64 static ObjectStartArray* _start_array;
65 static RegionTaskQueueSet* _region_array;
66 static PSOldGen* _old_gen;
67
68 private:
69 OverflowTaskQueue<oop, mtGC> _marking_stack;
70 ObjArrayTaskQueue _objarray_stack;
71 size_t _next_shadow_region;
72
  // Is there a way to reuse the _marking_stack for
  // saving empty regions? For now just create a different
  // type of TaskQueue.
76 RegionTaskQueue _region_stack;
77
78 static ParMarkBitMap* _mark_bitmap;
79
80 // Contains currently free shadow regions. We use it in
81 // a LIFO fashion for better data locality and utilization.
82 static GrowableArray<size_t>* _shadow_region_array;
83
  // Provides mutually exclusive access to _shadow_region_array.
85 // See pop/push_shadow_region_mt_safe() below
86 static Monitor* _shadow_region_monitor;
87
88 HeapWord* _last_query_beg;
89 oop _last_query_obj;
90 size_t _last_query_ret;
91
  // Cached pointers to heap structures shared by all compaction managers.
  static PSOldGen* old_gen() { return _old_gen; }
  static ObjectStartArray* start_array() { return _start_array; }
  static OopTaskQueueSet* stack_array() { return _stack_array; }
95
96 static void initialize(ParMarkBitMap* mbm);
97
98 protected:
  // Array of task queue sets. Needed by the ParallelTaskTerminator
  // (a friend of this class) for work stealing / termination.
  static RegionTaskQueueSet* region_array() { return _region_array; }
  // This manager's private marking stack (overflows to a backing overflow stack).
  OverflowTaskQueue<oop, mtGC>* marking_stack() { return &_marking_stack; }
102
103 // Pushes onto the marking stack. If the marking stack is full,
104 // pushes onto the overflow stack.
105 void stack_push(oop obj);
106 // Do not implement an equivalent stack_pop. Deal with the
107 // marking stack and overflow stack directly.
109 public:
110 static const size_t InvalidShadow = ~0;
111 static size_t pop_shadow_region_mt_safe(PSParallelCompact::RegionData* region_ptr);
112 static void push_shadow_region_mt_safe(size_t shadow_region);
113 static void push_shadow_region(size_t shadow_region);
114 static void remove_all_shadow_regions();
115
  // Per-manager cursor into the shadow region sequence.
  inline size_t next_shadow_region() { return _next_shadow_region; }
  inline void set_next_shadow_region(size_t record) { _next_shadow_region = record; }
118 inline size_t move_next_shadow_region_by(size_t workers) {
119 _next_shadow_region += workers;
120 return next_shadow_region();
121 }
122
  // Invalidate the cached result of the last bitmap query
  // (see last_query_begin/last_query_object/last_query_return below).
  void reset_bitmap_query_cache() {
    _last_query_beg = NULL;
    _last_query_obj = NULL;
    _last_query_ret = 0;
  }
128
  // Bitmap query support: cache the last query (start address and object)
  // and its result so a repeated query can be answered without re-scanning.
  HeapWord* last_query_begin() { return _last_query_beg; }
  oop last_query_object() { return _last_query_obj; }
  size_t last_query_return() { return _last_query_ret; }

  void set_last_query_begin(HeapWord *new_beg) { _last_query_beg = new_beg; }
  void set_last_query_object(oop new_obj) { _last_query_obj = new_obj; }
  void set_last_query_return(size_t new_ret) { _last_query_ret = new_ret; }
137
138 static void reset_all_bitmap_query_caches();
139
140 RegionTaskQueue* region_stack() { return &_region_stack; }
141
142 inline static ParCompactionManager* manager_array(uint index);
143
144 ParCompactionManager();
145
146 // Pushes onto the region stack at the given index. If the
147 // region stack is full,
148 // pushes onto the region overflow stack.
|