/*
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_SHARED_MODREFBARRIERSET_INLINE_HPP
#define SHARE_VM_GC_SHARED_MODREFBARRIERSET_INLINE_HPP

#include "gc/shared/barrierSet.hpp"
#include "gc/shared/modRefBarrierSet.hpp"
#include "oops/klass.inline.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.hpp"

// count is number of array elements being written
inline void ModRefBarrierSet::write_ref_array(HeapWord* start, size_t count) {
  HeapWord* end = (HeapWord*)((char*)start + (count*heapOopSize));
  // With compressed oops, start and end may be misaligned, so we
  // conservatively align start downward (not strictly necessary for
  // current uses, but good hygiene) and end upward (essential for
  // current uses) to a HeapWord boundary, marking all cards that
  // overlap this write. If this ever evolves into calling a logging
  // barrier of narrow oop granularity, like the G1 pre-barrier (mentioned
  // here merely by way of example), this interface will need to change so
  // that it is exactly precise and does not include narrow oop slots
  // outside the original write interval.
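  // A worked example (illustrative numbers, assuming 8-byte HeapWords and
  // 4-byte narrow oops): a write of 3 narrow oops starting at address 0x104
  // covers [0x104, 0x110); align_down/align_up widen this to [0x100, 0x110),
  // so every card overlapping the written slots is guaranteed to be marked.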
  HeapWord* aligned_start = align_down(start, HeapWordSize);
  HeapWord* aligned_end   = align_up(end, HeapWordSize);
  // Unless compressed oops are in use, start and end are already aligned.
  assert(UseCompressedOops || (aligned_start == start && aligned_end == end),
         "Expected heap word alignment of start and end");
  write_ref_array_work(MemRegion(aligned_start, aligned_end));
}
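
// Store a reference field in the heap. The raw store is bracketed by the
// barrier set's pre-write barrier (which, depending on BarrierSetT, may log
// the old value, as G1's SATB barrier does) and post-write barrier (which,
// for card-table collectors, dirties the card covering addr).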
template <DecoratorSet decorators, typename BarrierSetT>
template <typename T>
inline void ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT>::
oop_store_in_heap(T* addr, oop value) {
  BarrierSetT *bs = barrier_set_cast<BarrierSetT>(barrier_set());
  bs->template write_ref_field_pre<decorators>(addr);
  Raw::oop_store(addr, value);
  bs->template write_ref_field_post<decorators>(addr, value);
}
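
// A minimal usage sketch (illustrative, not part of this file): callers do
// not invoke these barriers directly but go through the Access API, which
// dispatches to the AccessBarrier of the active collector's barrier set,
// e.g. (assuming a reference field at byte offset "offset" inside "obj"):
//
//   HeapAccess<>::oop_store_at(obj, offset, value);   // -> oop_store_in_heap
//
// With a card-table collector, BarrierSetT resolves to its ModRef barrier
// set and write_ref_field_post dirties the card covering the updated slot.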
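
// Atomic compare-and-exchange of a reference field. The post-write barrier
// is applied only when the exchange succeeds, because only then has a new
// reference actually been written into the field.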
template <DecoratorSet decorators, typename BarrierSetT>
template <typename T>
inline oop ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT>::
oop_atomic_cmpxchg_in_heap(oop new_value, T* addr, oop compare_value) {
  BarrierSetT *bs = barrier_set_cast<BarrierSetT>(barrier_set());
  bs->template write_ref_field_pre<decorators>(addr);
  oop result = Raw::oop_atomic_cmpxchg(new_value, addr, compare_value);
  if (result == compare_value) {
    bs->template write_ref_field_post<decorators>(addr, new_value);
  }
  return result;
}
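
// Atomic exchange of a reference field. Unlike cmpxchg, the store always
// takes place, so the post-write barrier is applied unconditionally.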
template <DecoratorSet decorators, typename BarrierSetT>
template <typename T>
inline oop ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT>::
oop_atomic_xchg_in_heap(oop new_value, T* addr) {
  BarrierSetT *bs = barrier_set_cast<BarrierSetT>(barrier_set());
  bs->template write_ref_field_pre<decorators>(addr);
  oop result = Raw::oop_atomic_xchg(new_value, addr);
  bs->template write_ref_field_post<decorators>(addr, new_value);
  return result;
}
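
// Array copy of reference elements. In the covariant (non-checkcast) case a
// single pre/post barrier pair covers the whole destination range. In the
// checkcast case elements are copied one at a time so that, if a type check
// fails mid-copy, the already-copied prefix is still covered by a barrier.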
template <DecoratorSet decorators, typename BarrierSetT>
template <typename T>
inline bool ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT>::
oop_arraycopy_in_heap(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
  BarrierSetT *bs = barrier_set_cast<BarrierSetT>(barrier_set());

  if (!HasDecorator<decorators, ARRAYCOPY_CHECKCAST>::value) {
    // Optimized covariant case
    bs->write_ref_array_pre(dst, length,
                            HasDecorator<decorators, AS_DEST_NOT_INITIALIZED>::value);
    Raw::oop_arraycopy(src_obj, dst_obj, src, dst, length);
    bs->write_ref_array((HeapWord*)dst, length);
  } else {
    Klass* bound = objArrayOop(dst_obj)->element_klass();
    T* from = src;
    T* end = from + length;
    for (T* p = dst; from < end; from++, p++) {
      T element = *from;
      if (bound->is_instanceof_or_null(element)) {
        bs->template write_ref_field_pre<decorators>(p);
        *p = element;
      } else {
        // We must do a barrier to cover the partial copy.
        const size_t pd = pointer_delta(p, dst, (size_t)heapOopSize);
        // The pointer delta is scaled to a number of elements (the length
        // field in objArrayOop), which we assume fits in 32 bits.
        assert(pd == (size_t)(int)pd, "length field overflow");
        bs->write_ref_array((HeapWord*)dst, pd);
        return false;
      }
    }
    bs->write_ref_array((HeapWord*)dst, length);
  }
  return true;
}

#endif // SHARE_VM_GC_SHARED_MODREFBARRIERSET_INLINE_HPP