0 /*
1 * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
2 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
3 *
4 * This code is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License version 2 only, as
6 * published by the Free Software Foundation.
7 *
8 * This code is distributed in the hope that it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
11 * version 2 for more details (a copy is included in the LICENSE file that
12 * accompanied this code).
13 *
14 * You should have received a copy of the GNU General Public License version
15 * 2 along with this work; if not, write to the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
19 * or visit www.oracle.com if you need additional information or have any
20 * questions.
|
0 /*
1 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
2 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
3 *
4 * This code is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License version 2 only, as
6 * published by the Free Software Foundation.
7 *
8 * This code is distributed in the hope that it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
11 * version 2 for more details (a copy is included in the LICENSE file that
12 * accompanied this code).
13 *
14 * You should have received a copy of the GNU General Public License version
15 * 2 along with this work; if not, write to the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
19 * or visit www.oracle.com if you need additional information or have any
20 * questions.
|
3843 Node* obj,
3844 Node* adr,
3845 uint adr_idx,
3846 Node* val,
3847 bool use_precise) {
3848 // No store check needed if we're storing a NULL or an old object
3849 // (latter case is probably a string constant). The concurrent
3850 * mark sweep garbage collector, however, needs to have all non-NULL
3851 // oop updates flagged via card-marks.
3852 if (val != NULL && val->is_Con()) {
3853 // must be either an oop or NULL
3854 const Type* t = val->bottom_type();
3855 if (t == TypePtr::NULL_PTR || t == Type::TOP)
3856 // stores of null never (?) need barriers
3857 return;
3858 }
3859
3860 if (use_ReduceInitialCardMarks()
3861 && obj == just_allocated_object(control())) {
3862 // We can skip marks on a freshly-allocated object in Eden.
3863 // Keep this code in sync with new_store_pre_barrier() in runtime.cpp.
3864 // That routine informs GC to take appropriate compensating steps,
3865 // upon a slow-path allocation, so as to make this card-mark
3866 // elision safe.
3867 return;
3868 }
3869
3870 if (!use_precise) {
3871 // All card marks for a (non-array) instance are in one place:
3872 adr = obj;
3873 }
3874 // (Else it's an array (or unknown), and we want more precise card marks.)
3875 assert(adr != NULL, "");
3876
3877 IdealKit ideal(this, true);
3878
3879 // Convert the pointer to an int prior to doing math on it
3880 Node* cast = __ CastPX(__ ctrl(), adr);
3881
3882 // Divide by card size
|
3843 Node* obj,
3844 Node* adr,
3845 uint adr_idx,
3846 Node* val,
3847 bool use_precise) {
3848 // No store check needed if we're storing a NULL or an old object
3849 // (latter case is probably a string constant). The concurrent
3850 * mark sweep garbage collector, however, needs to have all non-NULL
3851 // oop updates flagged via card-marks.
3852 if (val != NULL && val->is_Con()) {
3853 // must be either an oop or NULL
3854 const Type* t = val->bottom_type();
3855 if (t == TypePtr::NULL_PTR || t == Type::TOP)
3856 // stores of null never (?) need barriers
3857 return;
3858 }
3859
3860 if (use_ReduceInitialCardMarks()
3861 && obj == just_allocated_object(control())) {
3862 // We can skip marks on a freshly-allocated object in Eden.
3863 // Keep this code in sync with new_deferred_store_barrier() in runtime.cpp.
3864 // That routine informs GC to take appropriate compensating steps,
3865 // upon a slow-path allocation, so as to make this card-mark
3866 // elision safe.
3867 return;
3868 }
3869
3870 if (!use_precise) {
3871 // All card marks for a (non-array) instance are in one place:
3872 adr = obj;
3873 }
3874 // (Else it's an array (or unknown), and we want more precise card marks.)
3875 assert(adr != NULL, "");
3876
3877 IdealKit ideal(this, true);
3878
3879 // Convert the pointer to an int prior to doing math on it
3880 Node* cast = __ CastPX(__ ctrl(), adr);
3881
3882 // Divide by card size
|
4141 * objects are found. G1 also needs to keep track of object references
4142 * between different regions to enable evacuation of old regions, which is done
4143 * as part of mixed collections. References are tracked in remembered sets,
4144 * which are continuously updated as references are written, with the help of
4145 * the post-barrier.
4146 *
4147 * To reduce the number of updates to the remembered set the post-barrier
4148 * filters updates to fields in objects located in the Young Generation,
4149 * the same region as the reference, when NULL is being written or
4150 * if the card is already marked as dirty by an earlier write.
4151 *
4152 * Under certain circumstances it is possible to avoid generating the
4153 * post-barrier completely if it is possible during compile time to prove
4154 * the object is newly allocated and that no safepoint exists between the
4155 * allocation and the store.
4156 *
4157 * In the case of slow allocation the allocation code must handle the barrier
4158 * as part of the allocation in the case the allocated object is not located
4159 * in the nursery, this would happen for humongous objects. This is similar to
4160 * how CMS is required to handle this case, see the comments for the method
4161 * CollectedHeap::new_store_pre_barrier and OptoRuntime::new_store_pre_barrier.
4162 * A deferred card mark is required for these objects and handled in the above
4163 * mentioned methods.
4164 *
4165 * Returns true if the post barrier can be removed
4166 */
4167 bool GraphKit::g1_can_remove_post_barrier(PhaseTransform* phase, Node* store,
4168 Node* adr) {
4169 intptr_t offset = 0;
4170 Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
4171 AllocateNode* alloc = AllocateNode::Ideal_allocation(base, phase);
4172
4173 if (offset == Type::OffsetBot) {
4174 return false; // cannot unalias unless there are precise offsets
4175 }
4176
4177 if (alloc == NULL) {
4178 return false; // No allocation found
4179 }
4180
|
4141 * objects are found. G1 also needs to keep track of object references
4142 * between different regions to enable evacuation of old regions, which is done
4143 * as part of mixed collections. References are tracked in remembered sets,
4144 * which are continuously updated as references are written, with the help of
4145 * the post-barrier.
4146 *
4147 * To reduce the number of updates to the remembered set the post-barrier
4148 * filters updates to fields in objects located in the Young Generation,
4149 * the same region as the reference, when NULL is being written or
4150 * if the card is already marked as dirty by an earlier write.
4151 *
4152 * Under certain circumstances it is possible to avoid generating the
4153 * post-barrier completely if it is possible during compile time to prove
4154 * the object is newly allocated and that no safepoint exists between the
4155 * allocation and the store.
4156 *
4157 * In the case of slow allocation the allocation code must handle the barrier
4158 * as part of the allocation in the case the allocated object is not located
4159 * in the nursery, this would happen for humongous objects. This is similar to
4160 * how CMS is required to handle this case, see the comments for the method
4161 * CardTableModRefBS::on_allocation_slowpath_exit and OptoRuntime::new_deferred_store_barrier.
4162 * A deferred card mark is required for these objects and handled in the above
4163 * mentioned methods.
4164 *
4165 * Returns true if the post barrier can be removed
4166 */
4167 bool GraphKit::g1_can_remove_post_barrier(PhaseTransform* phase, Node* store,
4168 Node* adr) {
4169 intptr_t offset = 0;
4170 Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
4171 AllocateNode* alloc = AllocateNode::Ideal_allocation(base, phase);
4172
4173 if (offset == Type::OffsetBot) {
4174 return false; // cannot unalias unless there are precise offsets
4175 }
4176
4177 if (alloc == NULL) {
4178 return false; // No allocation found
4179 }
4180
|
4231
4232 void GraphKit::g1_write_barrier_post(Node* oop_store,
4233 Node* obj,
4234 Node* adr,
4235 uint alias_idx,
4236 Node* val,
4237 BasicType bt,
4238 bool use_precise) {
4239 // If we are writing a NULL then we need no post barrier
4240
4241 if (val != NULL && val->is_Con() && val->bottom_type() == TypePtr::NULL_PTR) {
4242 // Must be NULL
4243 const Type* t = val->bottom_type();
4244 assert(t == Type::TOP || t == TypePtr::NULL_PTR, "must be NULL");
4245   // No post barrier if writing NULL
4246 return;
4247 }
4248
4249 if (use_ReduceInitialCardMarks() && obj == just_allocated_object(control())) {
4250 // We can skip marks on a freshly-allocated object in Eden.
4251 // Keep this code in sync with new_store_pre_barrier() in runtime.cpp.
4252 // That routine informs GC to take appropriate compensating steps,
4253 // upon a slow-path allocation, so as to make this card-mark
4254 // elision safe.
4255 return;
4256 }
4257
4258 if (use_ReduceInitialCardMarks()
4259 && g1_can_remove_post_barrier(&_gvn, oop_store, adr)) {
4260 return;
4261 }
4262
4263 if (!use_precise) {
4264 // All card marks for a (non-array) instance are in one place:
4265 adr = obj;
4266 }
4267 // (Else it's an array (or unknown), and we want more precise card marks.)
4268 assert(adr != NULL, "");
4269
4270 IdealKit ideal(this, true);
|
4231
4232 void GraphKit::g1_write_barrier_post(Node* oop_store,
4233 Node* obj,
4234 Node* adr,
4235 uint alias_idx,
4236 Node* val,
4237 BasicType bt,
4238 bool use_precise) {
4239 // If we are writing a NULL then we need no post barrier
4240
4241 if (val != NULL && val->is_Con() && val->bottom_type() == TypePtr::NULL_PTR) {
4242 // Must be NULL
4243 const Type* t = val->bottom_type();
4244 assert(t == Type::TOP || t == TypePtr::NULL_PTR, "must be NULL");
4245   // No post barrier if writing NULL
4246 return;
4247 }
4248
4249 if (use_ReduceInitialCardMarks() && obj == just_allocated_object(control())) {
4250 // We can skip marks on a freshly-allocated object in Eden.
4251 // Keep this code in sync with new_deferred_store_barrier() in runtime.cpp.
4252 // That routine informs GC to take appropriate compensating steps,
4253 // upon a slow-path allocation, so as to make this card-mark
4254 // elision safe.
4255 return;
4256 }
4257
4258 if (use_ReduceInitialCardMarks()
4259 && g1_can_remove_post_barrier(&_gvn, oop_store, adr)) {
4260 return;
4261 }
4262
4263 if (!use_precise) {
4264 // All card marks for a (non-array) instance are in one place:
4265 adr = obj;
4266 }
4267 // (Else it's an array (or unknown), and we want more precise card marks.)
4268 assert(adr != NULL, "");
4269
4270 IdealKit ideal(this, true);
|