1 /*
2 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "code/codeCache.hpp"
27 #include "code/nativeInst.hpp"
28 #include "gc/shared/barrierSetNMethod.hpp"
29 #include "logging/log.hpp"
30 #include "memory/resourceArea.hpp"
31 #include "runtime/sharedRuntime.hpp"
32 #include "runtime/thread.hpp"
33 #include "utilities/align.hpp"
34 #include "utilities/debug.hpp"
35
// Native view of the nmethod entry barrier instruction: a
// "cmp dword ptr [r15 + disp8], imm32" emitted near the verified entry.
// The GC arms/disarms an nmethod by patching the 32-bit immediate field.
class NativeNMethodCmpBarrier: public NativeInstruction {
public:
  enum Intel_specific_constants {
    instruction_code = 0x81,       // opcode: cmp r/m32, imm32
    instruction_size = 8,          // REX(1) + opcode(1) + modrm(1) + disp8(1) + imm32(4)
    imm_offset = 4,                // byte offset of the imm32 operand
    instruction_rex_prefix = Assembler::REX | Assembler::REX_B,  // REX.B extends rm to r15
    instruction_modrm = 0x7f // [r15 + offset]
  };

  address instruction_address() const { return addr_at(0); }
  address immediate_address() const { return addr_at(imm_offset); }

  // Read the imm32 operand.
  // NOTE(review): "get_immedate" is a historical misspelling of
  // "get_immediate"; renaming would touch callers, so it is only noted here.
  jint get_immedate() const { return int_at(imm_offset); }
  // Patch the imm32 operand; this is how the barrier is armed/disarmed.
  void set_immediate(jint imm) { set_int_at(imm_offset, imm); }
  void verify() const;
};
53
54 void NativeNMethodCmpBarrier::verify() const {
55 if (((uintptr_t) instruction_address()) & 0x7) {
56 fatal("Not properly aligned");
57 }
58
59 int prefix = ubyte_at(0);
60 if (prefix != instruction_rex_prefix) {
61 tty->print_cr("Addr: " INTPTR_FORMAT " Prefix: 0x%x", p2i(instruction_address()),
62 prefix);
63 fatal("not a cmp barrier");
64 }
65
66 int inst = ubyte_at(1);
67 if (inst != instruction_code) {
68 tty->print_cr("Addr: " INTPTR_FORMAT " Code: 0x%x", p2i(instruction_address()),
69 inst);
70 fatal("not a cmp barrier");
71 }
72
73 int modrm = ubyte_at(2);
74 if (modrm != instruction_modrm) {
75 tty->print_cr("Addr: " INTPTR_FORMAT " mod/rm: 0x%x", p2i(instruction_address()),
76 modrm);
77 fatal("not a cmp barrier");
78 }
79 }
80
81 void BarrierSetNMethod::deoptimize(nmethod* nm, address* return_address_ptr) {
82 /*
83 * [ callers frame ]
84 * [ callers return address ] <- callers rsp
85 * [ callers rbp ] <- callers rbp
86 * [ callers frame slots ]
87 * [ return_address ] <- return_address_ptr
88 * [ cookie ] <- used to write the new rsp (callers rsp)
89 * [ stub rbp ]
90 * [ stub stuff ]
91 */
92
93 address* stub_rbp = return_address_ptr - 2;
94 address* callers_rsp = return_address_ptr + nm->frame_size(); /* points to callers return_address now */
95 address* callers_rbp = callers_rsp - 1; // 1 to move to the callers return address, 1 more to move to the rbp
96 address* cookie = return_address_ptr - 1;
97
98 LogTarget(Trace, nmethod, barrier) out;
99 if (out.is_enabled()) {
110 assert(*cookie == (address) -1, "invariant");
111
112 // Preserve caller rbp.
113 *stub_rbp = *callers_rbp;
114
115 // At the cookie address put the callers rsp.
116 *cookie = (address) callers_rsp; // should point to the return address
117
118 // In the slot that used to be the callers rbp we put the address that our stub needs to jump to at the end.
119 // Overwriting the caller rbp should be okay since our stub rbp has the same value.
120 address* jmp_addr_ptr = callers_rbp;
121 *jmp_addr_ptr = SharedRuntime::get_handle_wrong_method_stub();
122 }
123
124 // This is the offset of the entry barrier from where the frame is completed.
125 // If any code changes between the end of the verified entry where the entry
126 // barrier resides, and the completion of the frame, then
127 // NativeNMethodCmpBarrier::verify() will immediately complain when it does
128 // not find the expected native instruction at this offset, which needs updating.
129 // Note that this offset is invariant of PreserveFramePointer.
130 static const int entry_barrier_offset = -19;
131
132 static NativeNMethodCmpBarrier* native_nmethod_barrier(nmethod* nm) {
133 address barrier_address = nm->code_begin() + nm->frame_complete_offset() + entry_barrier_offset;
134 NativeNMethodCmpBarrier* barrier = reinterpret_cast<NativeNMethodCmpBarrier*>(barrier_address);
135 debug_only(barrier->verify());
136 return barrier;
137 }
138
139 void BarrierSetNMethod::disarm(nmethod* nm) {
140 if (!supports_entry_barrier(nm)) {
141 return;
142 }
143
144 NativeNMethodCmpBarrier* cmp = native_nmethod_barrier(nm);
145 cmp->set_immediate(disarmed_value());
146 }
147
148 bool BarrierSetNMethod::is_armed(nmethod* nm) {
149 if (!supports_entry_barrier(nm)) {
150 return false;
|
1 /*
2 * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "code/codeCache.hpp"
27 #include "code/nativeInst.hpp"
28 #include "gc/shared/barrierSetNMethod.hpp"
29 #include "logging/log.hpp"
30 #include "memory/resourceArea.hpp"
31 #include "runtime/sharedRuntime.hpp"
32 #include "runtime/thread.hpp"
33 #include "utilities/align.hpp"
34 #include "utilities/debug.hpp"
35
36 class NativeNMethodCmpBarrier: public NativeInstruction {
37 public:
38 #ifdef _LP64
39 enum Intel_specific_constants {
40 instruction_code = 0x81,
41 instruction_size = 8,
42 imm_offset = 4,
43 instruction_rex_prefix = Assembler::REX | Assembler::REX_B,
44 instruction_modrm = 0x7f // [r15 + offset]
45 };
46 #else
47 enum Intel_specific_constants {
48 instruction_code = 0x81,
49 instruction_size = 7,
50 imm_offset = 2,
51 instruction_modrm = 0x3f // [rdi]
52 };
53 #endif
54
55 address instruction_address() const { return addr_at(0); }
56 address immediate_address() const { return addr_at(imm_offset); }
57
58 jint get_immedate() const { return int_at(imm_offset); }
59 void set_immediate(jint imm) { set_int_at(imm_offset, imm); }
60 void verify() const;
61 };
62
#ifdef _LP64
// Debug-time sanity check (64-bit): confirm the bytes at this address encode
// the expected REX-prefixed "cmp dword ptr [r15 + offset], imm32" barrier.
void NativeNMethodCmpBarrier::verify() const {
  // 8-byte alignment of the instruction puts the imm32 (at offset 4) on a
  // naturally aligned 4-byte boundary.
  if (((uintptr_t) instruction_address()) & 0x7) {
    fatal("Not properly aligned");
  }

  int prefix = ubyte_at(0);
  if (prefix != instruction_rex_prefix) {
    tty->print_cr("Addr: " INTPTR_FORMAT " Prefix: 0x%x", p2i(instruction_address()),
        prefix);
    fatal("not a cmp barrier");
  }

  int inst = ubyte_at(1);
  if (inst != instruction_code) {
    tty->print_cr("Addr: " INTPTR_FORMAT " Code: 0x%x", p2i(instruction_address()),
        inst);
    fatal("not a cmp barrier");
  }

  int modrm = ubyte_at(2);
  if (modrm != instruction_modrm) {
    tty->print_cr("Addr: " INTPTR_FORMAT " mod/rm: 0x%x", p2i(instruction_address()),
        modrm);
    fatal("not a cmp barrier");
  }
}
#else
// Debug-time sanity check (32-bit): no REX prefix exists, so the opcode is
// byte 0 and the mod/rm byte follows it; alignment requirement drops to 4.
void NativeNMethodCmpBarrier::verify() const {
  if (((uintptr_t) instruction_address()) & 0x3) {
    fatal("Not properly aligned");
  }

  int inst = ubyte_at(0);
  if (inst != instruction_code) {
    tty->print_cr("Addr: " INTPTR_FORMAT " Code: 0x%x", p2i(instruction_address()),
        inst);
    fatal("not a cmp barrier");
  }

  int modrm = ubyte_at(1);
  if (modrm != instruction_modrm) {
    tty->print_cr("Addr: " INTPTR_FORMAT " mod/rm: 0x%x", p2i(instruction_address()),
        modrm);
    fatal("not a cmp barrier");
  }
}
#endif // _LP64
111
112 void BarrierSetNMethod::deoptimize(nmethod* nm, address* return_address_ptr) {
113 /*
114 * [ callers frame ]
115 * [ callers return address ] <- callers rsp
116 * [ callers rbp ] <- callers rbp
117 * [ callers frame slots ]
118 * [ return_address ] <- return_address_ptr
119 * [ cookie ] <- used to write the new rsp (callers rsp)
120 * [ stub rbp ]
121 * [ stub stuff ]
122 */
123
124 address* stub_rbp = return_address_ptr - 2;
125 address* callers_rsp = return_address_ptr + nm->frame_size(); /* points to callers return_address now */
126 address* callers_rbp = callers_rsp - 1; // 1 to move to the callers return address, 1 more to move to the rbp
127 address* cookie = return_address_ptr - 1;
128
129 LogTarget(Trace, nmethod, barrier) out;
130 if (out.is_enabled()) {
141 assert(*cookie == (address) -1, "invariant");
142
143 // Preserve caller rbp.
144 *stub_rbp = *callers_rbp;
145
146 // At the cookie address put the callers rsp.
147 *cookie = (address) callers_rsp; // should point to the return address
148
149 // In the slot that used to be the callers rbp we put the address that our stub needs to jump to at the end.
150 // Overwriting the caller rbp should be okay since our stub rbp has the same value.
151 address* jmp_addr_ptr = callers_rbp;
152 *jmp_addr_ptr = SharedRuntime::get_handle_wrong_method_stub();
153 }
154
155 // This is the offset of the entry barrier from where the frame is completed.
156 // If any code changes between the end of the verified entry where the entry
157 // barrier resides, and the completion of the frame, then
158 // NativeNMethodCmpBarrier::verify() will immediately complain when it does
159 // not find the expected native instruction at this offset, which needs updating.
160 // Note that this offset is invariant of PreserveFramePointer.
161 static const int entry_barrier_offset = LP64_ONLY(-19) NOT_LP64(-18);
162
163 static NativeNMethodCmpBarrier* native_nmethod_barrier(nmethod* nm) {
164 address barrier_address = nm->code_begin() + nm->frame_complete_offset() + entry_barrier_offset;
165 NativeNMethodCmpBarrier* barrier = reinterpret_cast<NativeNMethodCmpBarrier*>(barrier_address);
166 debug_only(barrier->verify());
167 return barrier;
168 }
169
170 void BarrierSetNMethod::disarm(nmethod* nm) {
171 if (!supports_entry_barrier(nm)) {
172 return;
173 }
174
175 NativeNMethodCmpBarrier* cmp = native_nmethod_barrier(nm);
176 cmp->set_immediate(disarmed_value());
177 }
178
179 bool BarrierSetNMethod::is_armed(nmethod* nm) {
180 if (!supports_entry_barrier(nm)) {
181 return false;
|