--- old/src/cpu/sparc/vm/nativeInst_sparc.cpp
+++ new/src/cpu/sparc/vm/nativeInst_sparc.cpp
1 1 /*
2 - * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
2 + * Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 21 * have any questions.
22 22 *
23 23 */
24 24
25 25 # include "incls/_precompiled.incl"
26 26 # include "incls/_nativeInst_sparc.cpp.incl"
27 27
28 28
29 29 bool NativeInstruction::is_dtrace_trap() {
30 30 return !is_nop();
31 31 }
32 32
33 33 void NativeInstruction::set_data64_sethi(address instaddr, intptr_t x) {
34 34 ResourceMark rm;
35 35 CodeBuffer buf(instaddr, 10 * BytesPerInstWord );
36 36 MacroAssembler* _masm = new MacroAssembler(&buf);
37 37 Register destreg;
38 38
39 39 destreg = inv_rd(*(unsigned int *)instaddr);
40 40 // Generate the new sequence
41 41 _masm->patchable_sethi(x, destreg);
42 42 ICache::invalidate_range(instaddr, 7 * BytesPerInstWord);
43 43 }
44 44
45 45 void NativeInstruction::verify() {
46 46 // make sure code pattern is actually an instruction address
47 47 address addr = addr_at(0);
48 48 if (addr == 0 || ((intptr_t)addr & 3) != 0) {
49 49 fatal("not an instruction address");
50 50 }
51 51 }
52 52
53 53 void NativeInstruction::print() {
54 54 tty->print_cr(INTPTR_FORMAT ": 0x%x", addr_at(0), long_at(0));
55 55 }
56 56
57 57 void NativeInstruction::set_long_at(int offset, int i) {
58 58 address addr = addr_at(offset);
59 59 *(int*)addr = i;
60 60 ICache::invalidate_word(addr);
61 61 }
62 62
63 63 void NativeInstruction::set_jlong_at(int offset, jlong i) {
64 64 address addr = addr_at(offset);
65 65 *(jlong*)addr = i;
66 66 // Don't need to invalidate 2 words here, because
67 67 // the flush instruction operates on doublewords.
68 68 ICache::invalidate_word(addr);
69 69 }
70 70
71 71 void NativeInstruction::set_addr_at(int offset, address x) {
72 72 address addr = addr_at(offset);
73 73 assert( ((intptr_t)addr & (wordSize-1)) == 0, "set_addr_at bad address alignment");
74 74 *(uintptr_t*)addr = (uintptr_t)x;
75 75 // Don't need to invalidate 2 words here in the 64-bit case,
76 76 // because the flush instruction operates on doublewords.
77 77 ICache::invalidate_word(addr);
78 78 // The Intel code has this assertion for NativeCall::set_destination,
79 79 // NativeMovConstReg::set_data, NativeMovRegMem::set_offset,
80 80 // NativeJump::set_jump_destination, and NativePushImm32::set_data
81 81 //assert (Patching_lock->owned_by_self(), "must hold lock to patch instruction")
82 82 }
83 83
84 84 bool NativeInstruction::is_zero_test(Register &reg) {
85 85 int x = long_at(0);
86 86 Assembler::op3s temp = (Assembler::op3s) (Assembler::sub_op3 | Assembler::cc_bit_op3);
87 87 if (is_op3(x, temp, Assembler::arith_op) &&
88 88 inv_immed(x) && inv_rd(x) == G0) {
89 89 if (inv_rs1(x) == G0) {
90 90 reg = inv_rs2(x);
91 91 return true;
92 92 } else if (inv_rs2(x) == G0) {
93 93 reg = inv_rs1(x);
94 94 return true;
95 95 }
96 96 }
97 97 return false;
98 98 }
99 99
100 100 bool NativeInstruction::is_load_store_with_small_offset(Register reg) {
101 101 int x = long_at(0);
102 102 if (is_op(x, Assembler::ldst_op) &&
103 103 inv_rs1(x) == reg && inv_immed(x)) {
104 104 return true;
105 105 }
106 106 return false;
107 107 }
108 108
109 109 void NativeCall::verify() {
110 110 NativeInstruction::verify();
111 111 // make sure code pattern is actually a call instruction
112 112 if (!is_op(long_at(0), Assembler::call_op)) {
113 113 fatal("not a call");
114 114 }
115 115 }
116 116
117 117 void NativeCall::print() {
118 118 tty->print_cr(INTPTR_FORMAT ": call " INTPTR_FORMAT, instruction_address(), destination());
119 119 }
120 120
121 121
122 122 // MT-safe patching of a call instruction (and following word).
123 123 // First patches the second word, and then atomically replaces
124 124 // the first word with the first new instruction word.
125 125 // Other processors might briefly see the old first word
126 126 // followed by the new second word. This is OK if the old
127 127 // second word is harmless, and the new second word may be
128 128 // harmlessly executed in the delay slot of the call.
129 129 void NativeCall::replace_mt_safe(address instr_addr, address code_buffer) {
130 130 assert(Patching_lock->is_locked() ||
131 131 SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
132 132 assert (instr_addr != NULL, "illegal address for code patching");
133 133 NativeCall* n_call = nativeCall_at (instr_addr); // checking that it is a call
134 134 assert(NativeCall::instruction_size == 8, "wrong instruction size; must be 8");
135 135 int i0 = ((int*)code_buffer)[0];
136 136 int i1 = ((int*)code_buffer)[1];
137 137 int* contention_addr = (int*) n_call->addr_at(1*BytesPerInstWord);
138 138 assert(inv_op(*contention_addr) == Assembler::arith_op ||
139 139 *contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
140 140 "must not interfere with original call");
141 141 // The set_long_at calls do the ICacheInvalidate so we just need to do them in reverse order
142 142 n_call->set_long_at(1*BytesPerInstWord, i1);
143 143 n_call->set_long_at(0*BytesPerInstWord, i0);
144 144 // NOTE: It is possible that another thread T will execute
145 145 // only the second patched word.
146 146 // In other words, since the original instruction is this
147 147 // call patching_stub; nop (NativeCall)
148 148 // and the new sequence from the buffer is this:
149 149 // sethi %hi(K), %r; add %r, %lo(K), %r (NativeMovConstReg)
150 150 // what T will execute is this:
151 151 // call patching_stub; add %r, %lo(K), %r
152 152 // thereby putting garbage into %r before calling the patching stub.
153 153 // This is OK, because the patching stub ignores the value of %r.
154 154
155 155 // Make sure the first-patched instruction, which may co-exist
156 156 // briefly with the call, will do something harmless.
157 157 assert(inv_op(*contention_addr) == Assembler::arith_op ||
158 158 *contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
159 159 "must not interfere with original call");
160 160 }
161 161
162 162 // Similar to replace_mt_safe, but just changes the destination. The
163 163 // important thing is that free-running threads are able to execute this
164 164 // call instruction at all times. Thus, the displacement field must be
165 165 // instruction-word-aligned. This is always true on SPARC.
166 166 //
167 167 // Used in the runtime linkage of calls; see class CompiledIC.
168 168 void NativeCall::set_destination_mt_safe(address dest) {
169 169 assert(Patching_lock->is_locked() ||
170 170 SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
171 171 // set_destination uses set_long_at which does the ICache::invalidate
172 172 set_destination(dest);
173 173 }
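
A note on that displacement: a SPARC call is a single format-1 word whose top two bits are the opcode and whose low 30 bits are a word (not byte) displacement from the call site, which is why the displacement is always instruction-word-aligned. As a hedged illustration only (this helper is hypothetical and not part of the file), re-encoding the destination amounts to:

    // sketch: rebuild a SPARC call word for a new destination
    static int call_word_sketch(address insn_pc, address dest) {
      intptr_t disp = (dest - insn_pc) >> 2;           // displacement counted in instruction words
      return (1 << 30) | (int)(disp & 0x3fffffff);     // op = 01 in bits 31..30, disp30 below
    }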
174 174
175 175 // Code for unit testing implementation of NativeCall class
176 176 void NativeCall::test() {
177 177 #ifdef ASSERT
178 178 ResourceMark rm;
179 179 CodeBuffer cb("test", 100, 100);
180 180 MacroAssembler* a = new MacroAssembler(&cb);
181 181 NativeCall *nc;
182 182 uint idx;
183 183 int offsets[] = {
184 184 0x0,
185 185 0xfffffff0,
186 186 0x7ffffff0,
187 187 0x80000000,
188 188 0x20,
189 189 0x4000,
190 190 };
191 191
192 192 VM_Version::allow_all();
193 193
194 194 a->call( a->pc(), relocInfo::none );
195 195 a->delayed()->nop();
196 196 nc = nativeCall_at( cb.code_begin() );
197 197 nc->print();
198 198
199 199 nc = nativeCall_overwriting_at( nc->next_instruction_address() );
200 200 for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
201 201 nc->set_destination( cb.code_begin() + offsets[idx] );
202 202 assert(nc->destination() == (cb.code_begin() + offsets[idx]), "check unit test");
203 203 nc->print();
204 204 }
205 205
206 206 nc = nativeCall_before( cb.code_begin() + 8 );
207 207 nc->print();
208 208
209 209 VM_Version::revert();
210 210 #endif
211 211 }
212 212 // End code for unit testing implementation of NativeCall class
213 213
214 214 //-------------------------------------------------------------------
215 215
216 216 #ifdef _LP64
217 217
218 218 void NativeFarCall::set_destination(address dest) {
219 219 // Address materialized in the instruction stream, so nothing to do.
220 220 return;
221 221 #if 0 // What we'd do if we really did want to change the destination
222 222 if (destination() == dest) {
223 223 return;
224 224 }
225 225 ResourceMark rm;
226 226 CodeBuffer buf(addr_at(0), instruction_size + 1);
227 227 MacroAssembler* _masm = new MacroAssembler(&buf);
228 228 // Generate the new sequence
229 229 AddressLiteral(dest);
230 230 _masm->jumpl_to(dest, O7, O7);
231 231 ICache::invalidate_range(addr_at(0), instruction_size );
232 232 #endif
233 233 }
234 234
235 235 void NativeFarCall::verify() {
236 236 // make sure code pattern is actually a jumpl_to instruction
237 237 assert((int)instruction_size == (int)NativeJump::instruction_size, "same as jump_to");
238 238 assert((int)jmpl_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");
239 239 nativeJump_at(addr_at(0))->verify();
240 240 }
241 241
242 242 bool NativeFarCall::is_call_at(address instr) {
243 243 return nativeInstruction_at(instr)->is_sethi();
244 244 }
245 245
246 246 void NativeFarCall::print() {
247 247 tty->print_cr(INTPTR_FORMAT ": call " INTPTR_FORMAT, instruction_address(), destination());
248 248 }
249 249
250 250 bool NativeFarCall::destination_is_compiled_verified_entry_point() {
251 251 nmethod* callee = CodeCache::find_nmethod(destination());
252 252 if (callee == NULL) {
253 253 return false;
254 254 } else {
255 255 return destination() == callee->verified_entry_point();
256 256 }
257 257 }
258 258
259 259 // MT-safe patching of a far call.
260 260 void NativeFarCall::replace_mt_safe(address instr_addr, address code_buffer) {
261 261 Unimplemented();
262 262 }
263 263
264 264 // Code for unit testing implementation of NativeFarCall class
265 265 void NativeFarCall::test() {
266 266 Unimplemented();
267 267 }
268 268 // End code for unit testing implementation of NativeFarCall class
269 269
270 270 #endif // _LP64
271 271
272 272 //-------------------------------------------------------------------
273 273
274 274
275 275 void NativeMovConstReg::verify() {
276 276 NativeInstruction::verify();
277 277 // make sure code pattern is actually a "set_oop" synthetic instruction
278 278 // see MacroAssembler::set_oop()
279 279 int i0 = long_at(sethi_offset);
280 280 int i1 = long_at(add_offset);
281 281
282 282 // verify the pattern "sethi %hi22(imm), reg ; add reg, %lo10(imm), reg"
283 283 Register rd = inv_rd(i0);
284 284 #ifndef _LP64
285 285 if (!(is_op2(i0, Assembler::sethi_op2) && rd != G0 &&
286 286 is_op3(i1, Assembler::add_op3, Assembler::arith_op) &&
287 287 inv_immed(i1) && (unsigned)get_simm13(i1) < (1 << 10) &&
288 288 rd == inv_rs1(i1) && rd == inv_rd(i1))) {
289 289 fatal("not a set_oop");
290 290 }
291 291 #else
292 292 if (!is_op2(i0, Assembler::sethi_op2) && rd != G0 ) {
293 293 fatal("not a set_oop");
294 294 }
295 295 #endif
296 296 }
297 297
298 298
299 299 void NativeMovConstReg::print() {
300 300 tty->print_cr(INTPTR_FORMAT ": mov reg, " INTPTR_FORMAT, instruction_address(), data());
301 301 }
302 302
303 303
304 304 #ifdef _LP64
305 305 intptr_t NativeMovConstReg::data() const {
306 306 return data64(addr_at(sethi_offset), long_at(add_offset));
307 307 }
308 308 #else
309 309 intptr_t NativeMovConstReg::data() const {
310 310 return data32(long_at(sethi_offset), long_at(add_offset));
311 311 }
312 312 #endif
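
In the 32-bit case above, data32() just reassembles the constant from the two instruction words: sethi deposits its 22-bit immediate into bits 31..10 of the destination register, and the add supplies the low 10 bits via %lo(). A minimal sketch in plain bit arithmetic (the helper name is hypothetical; the real data32() may differ in detail):

    // sketch: recombine the constant from a sethi/add pair
    static intptr_t data32_sketch(int sethi_insn, int add_insn) {
      intptr_t hi = (intptr_t)(sethi_insn & 0x3fffff) << 10;   // imm22 becomes bits 31..10
      intptr_t lo = add_insn & 0x3ff;                          // %lo() is the low 10 bits
      return hi | lo;
    }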
313 313
314 314
315 315 void NativeMovConstReg::set_data(intptr_t x) {
316 316 #ifdef _LP64
317 317 set_data64_sethi(addr_at(sethi_offset), x);
318 318 #else
319 319 set_long_at(sethi_offset, set_data32_sethi( long_at(sethi_offset), x));
320 320 #endif
321 321 set_long_at(add_offset, set_data32_simm13( long_at(add_offset), x));
322 322
323 323 // also store the value into an oop_Relocation cell, if any
324 - CodeBlob* nm = CodeCache::find_blob(instruction_address());
325 - if (nm != NULL) {
324 + CodeBlob* cb = CodeCache::find_blob(instruction_address());
325 + if (cb != NULL) {
326 + nmethod* nm = cb->as_nmethod_or_null();
327 + assert(nm, "must be");
326 328 RelocIterator iter(nm, instruction_address(), next_instruction_address());
327 329 oop* oop_addr = NULL;
328 330 while (iter.next()) {
329 331 if (iter.type() == relocInfo::oop_type) {
330 332 oop_Relocation *r = iter.oop_reloc();
331 333 if (oop_addr == NULL) {
332 334 oop_addr = r->oop_addr();
333 335 *oop_addr = (oop)x;
334 336 } else {
335 337 assert(oop_addr == r->oop_addr(), "must be only one set-oop here");
336 338 }
337 339 }
338 340 }
339 341 }
340 342 }
341 343
342 344
343 345 // Code for unit testing implementation of NativeMovConstReg class
344 346 void NativeMovConstReg::test() {
345 347 #ifdef ASSERT
346 348 ResourceMark rm;
347 349 CodeBuffer cb("test", 100, 100);
348 350 MacroAssembler* a = new MacroAssembler(&cb);
349 351 NativeMovConstReg* nm;
350 352 uint idx;
351 353 int offsets[] = {
352 354 0x0,
353 355 0x7fffffff,
354 356 0x80000000,
355 357 0xffffffff,
356 358 0x20,
357 359 4096,
358 360 4097,
359 361 };
360 362
361 363 VM_Version::allow_all();
362 364
363 365 AddressLiteral al1(0xaaaabbbb, relocInfo::external_word_type);
364 366 a->sethi(al1, I3);
365 367 a->add(I3, al1.low10(), I3);
366 368 AddressLiteral al2(0xccccdddd, relocInfo::external_word_type);
367 369 a->sethi(al2, O2);
368 370 a->add(O2, al2.low10(), O2);
369 371
370 372 nm = nativeMovConstReg_at( cb.code_begin() );
371 373 nm->print();
372 374
373 375 nm = nativeMovConstReg_at( nm->next_instruction_address() );
374 376 for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
375 377 nm->set_data( offsets[idx] );
376 378 assert(nm->data() == offsets[idx], "check unit test");
377 379 }
378 380 nm->print();
379 381
380 382 VM_Version::revert();
381 383 #endif
382 384 }
383 385 // End code for unit testing implementation of NativeMovConstReg class
384 386
385 387 //-------------------------------------------------------------------
386 388
387 389 void NativeMovConstRegPatching::verify() {
388 390 NativeInstruction::verify();
389 391 // Make sure code pattern is sethi/nop/add.
390 392 int i0 = long_at(sethi_offset);
391 393 int i1 = long_at(nop_offset);
392 394 int i2 = long_at(add_offset);
393 395 assert((int)nop_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");
394 396
395 397 // Verify the pattern "sethi %hi22(imm), reg; nop; add reg, %lo10(imm), reg"
396 398 // The casual reader should note that on SPARC a nop is a special case of sethi
397 399 // in which the destination register is %g0.
398 400 Register rd0 = inv_rd(i0);
399 401 Register rd1 = inv_rd(i1);
400 402 if (!(is_op2(i0, Assembler::sethi_op2) && rd0 != G0 &&
401 403 is_op2(i1, Assembler::sethi_op2) && rd1 == G0 && // nop is a special case of sethi
402 404 is_op3(i2, Assembler::add_op3, Assembler::arith_op) &&
403 405 inv_immed(i2) && (unsigned)get_simm13(i2) < (1 << 10) &&
404 406 rd0 == inv_rs1(i2) && rd0 == inv_rd(i2))) {
405 407 fatal("not a set_oop");
406 408 }
407 409 }
408 410
409 411
410 412 void NativeMovConstRegPatching::print() {
411 413 tty->print_cr(INTPTR_FORMAT ": mov reg, " INTPTR_FORMAT, instruction_address(), data());
412 414 }
413 415
414 416
415 417 int NativeMovConstRegPatching::data() const {
416 418 #ifdef _LP64
417 419 return data64(addr_at(sethi_offset), long_at(add_offset));
418 420 #else
419 421 return data32(long_at(sethi_offset), long_at(add_offset));
420 422 #endif
421 423 }
422 424
423 425
424 426 void NativeMovConstRegPatching::set_data(int x) {
425 427 #ifdef _LP64
426 428 set_data64_sethi(addr_at(sethi_offset), x);
427 429 #else
428 430 set_long_at(sethi_offset, set_data32_sethi(long_at(sethi_offset), x));
429 431 #endif
430 432 set_long_at(add_offset, set_data32_simm13(long_at(add_offset), x));
431 433
432 434 // also store the value into an oop_Relocation cell, if any
433 - CodeBlob* nm = CodeCache::find_blob(instruction_address());
434 - if (nm != NULL) {
435 + CodeBlob* cb = CodeCache::find_blob(instruction_address());
436 + if (cb != NULL) {
437 + nmethod* nm = cb->as_nmethod_or_null();
438 + assert(nm, "must be");
435 439 RelocIterator iter(nm, instruction_address(), next_instruction_address());
436 440 oop* oop_addr = NULL;
437 441 while (iter.next()) {
438 442 if (iter.type() == relocInfo::oop_type) {
439 443 oop_Relocation *r = iter.oop_reloc();
440 444 if (oop_addr == NULL) {
441 445 oop_addr = r->oop_addr();
442 446 *oop_addr = (oop)x;
443 447 } else {
444 448 assert(oop_addr == r->oop_addr(), "must be only one set-oop here");
445 449 }
446 450 }
447 451 }
448 452 }
449 453 }
450 454
451 455
452 456 // Code for unit testing implementation of NativeMovConstRegPatching class
453 457 void NativeMovConstRegPatching::test() {
454 458 #ifdef ASSERT
455 459 ResourceMark rm;
456 460 CodeBuffer cb("test", 100, 100);
457 461 MacroAssembler* a = new MacroAssembler(&cb);
458 462 NativeMovConstRegPatching* nm;
459 463 uint idx;
460 464 int offsets[] = {
461 465 0x0,
462 466 0x7fffffff,
463 467 0x80000000,
464 468 0xffffffff,
465 469 0x20,
466 470 4096,
467 471 4097,
468 472 };
469 473
470 474 VM_Version::allow_all();
471 475
472 476 AddressLiteral al1(0xaaaabbbb, relocInfo::external_word_type);
473 477 a->sethi(al1, I3);
474 478 a->nop();
475 479 a->add(I3, al1.low10(), I3);
476 480 AddressLiteral al2(0xccccdddd, relocInfo::external_word_type);
477 481 a->sethi(al2, O2);
478 482 a->nop();
479 483 a->add(O2, al2.low10(), O2);
480 484
481 485 nm = nativeMovConstRegPatching_at( cb.code_begin() );
482 486 nm->print();
483 487
484 488 nm = nativeMovConstRegPatching_at( nm->next_instruction_address() );
485 489 for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
486 490 nm->set_data( offsets[idx] );
487 491 assert(nm->data() == offsets[idx], "check unit test");
488 492 }
489 493 nm->print();
490 494
491 495 VM_Version::revert();
492 496 #endif // ASSERT
493 497 }
494 498 // End code for unit testing implementation of NativeMovConstRegPatching class
495 499
496 500
497 501 //-------------------------------------------------------------------
498 502
499 503
500 504 void NativeMovRegMem::copy_instruction_to(address new_instruction_address) {
501 505 Untested("copy_instruction_to");
502 506 int instruction_size = next_instruction_address() - instruction_address();
503 507 for (int i = 0; i < instruction_size; i += BytesPerInstWord) {
504 508 *(int*)(new_instruction_address + i) = *(int*)(address(this) + i);
505 509 }
506 510 }
507 511
508 512
509 513 void NativeMovRegMem::verify() {
510 514 NativeInstruction::verify();
511 515 // make sure code pattern is actually a "ld" or "st" of some sort.
512 516 int i0 = long_at(0);
513 517 int op3 = inv_op3(i0);
514 518
515 519 assert((int)add_offset == NativeMovConstReg::add_offset, "sethi size ok");
516 520
517 521 if (!(is_op(i0, Assembler::ldst_op) &&
518 522 inv_immed(i0) &&
519 523 0 != (op3 < op3_ldst_int_limit
520 524 ? (1 << op3 ) & (op3_mask_ld | op3_mask_st)
521 525 : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf))))
522 526 {
523 527 int i1 = long_at(ldst_offset);
524 528 Register rd = inv_rd(i0);
525 529
526 530 op3 = inv_op3(i1);
527 531 if (!is_op(i1, Assembler::ldst_op) && rd == inv_rs2(i1) &&
528 532 0 != (op3 < op3_ldst_int_limit
529 533 ? (1 << op3 ) & (op3_mask_ld | op3_mask_st)
530 534 : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf))) {
531 535 fatal("not a ld* or st* op");
532 536 }
533 537 }
534 538 }
535 539
536 540
537 541 void NativeMovRegMem::print() {
538 542 if (is_immediate()) {
539 543 tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + %x]", instruction_address(), offset());
540 544 } else {
541 545 tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + reg]", instruction_address());
542 546 }
543 547 }
544 548
545 549
546 550 // Code for unit testing implementation of NativeMovRegMem class
547 551 void NativeMovRegMem::test() {
548 552 #ifdef ASSERT
549 553 ResourceMark rm;
550 554 CodeBuffer cb("test", 1000, 1000);
551 555 MacroAssembler* a = new MacroAssembler(&cb);
552 556 NativeMovRegMem* nm;
553 557 uint idx = 0;
554 558 uint idx1;
555 559 int offsets[] = {
556 560 0x0,
557 561 0xffffffff,
558 562 0x7fffffff,
559 563 0x80000000,
560 564 4096,
561 565 4097,
562 566 0x20,
563 567 0x4000,
564 568 };
565 569
566 570 VM_Version::allow_all();
567 571
568 572 AddressLiteral al1(0xffffffff, relocInfo::external_word_type);
569 573 AddressLiteral al2(0xaaaabbbb, relocInfo::external_word_type);
570 574 a->ldsw( G5, al1.low10(), G4 ); idx++;
571 575 a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
572 576 a->ldsw( G5, I3, G4 ); idx++;
573 577 a->ldsb( G5, al1.low10(), G4 ); idx++;
574 578 a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
575 579 a->ldsb( G5, I3, G4 ); idx++;
576 580 a->ldsh( G5, al1.low10(), G4 ); idx++;
577 581 a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
578 582 a->ldsh( G5, I3, G4 ); idx++;
579 583 a->lduw( G5, al1.low10(), G4 ); idx++;
580 584 a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
581 585 a->lduw( G5, I3, G4 ); idx++;
582 586 a->ldub( G5, al1.low10(), G4 ); idx++;
583 587 a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
584 588 a->ldub( G5, I3, G4 ); idx++;
585 589 a->lduh( G5, al1.low10(), G4 ); idx++;
586 590 a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
587 591 a->lduh( G5, I3, G4 ); idx++;
588 592 a->ldx( G5, al1.low10(), G4 ); idx++;
589 593 a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
590 594 a->ldx( G5, I3, G4 ); idx++;
591 595 a->ldd( G5, al1.low10(), G4 ); idx++;
592 596 a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
593 597 a->ldd( G5, I3, G4 ); idx++;
594 598 a->ldf( FloatRegisterImpl::D, O2, -1, F14 ); idx++;
595 599 a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
596 600 a->ldf( FloatRegisterImpl::S, O0, I3, F15 ); idx++;
597 601
598 602 a->stw( G5, G4, al1.low10() ); idx++;
599 603 a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
600 604 a->stw( G5, G4, I3 ); idx++;
601 605 a->stb( G5, G4, al1.low10() ); idx++;
602 606 a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
603 607 a->stb( G5, G4, I3 ); idx++;
604 608 a->sth( G5, G4, al1.low10() ); idx++;
605 609 a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
606 610 a->sth( G5, G4, I3 ); idx++;
607 611 a->stx( G5, G4, al1.low10() ); idx++;
608 612 a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
609 613 a->stx( G5, G4, I3 ); idx++;
610 614 a->std( G5, G4, al1.low10() ); idx++;
611 615 a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
612 616 a->std( G5, G4, I3 ); idx++;
613 617 a->stf( FloatRegisterImpl::S, F18, O2, -1 ); idx++;
614 618 a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
615 619 a->stf( FloatRegisterImpl::S, F15, O0, I3 ); idx++;
616 620
617 621 nm = nativeMovRegMem_at( cb.code_begin() );
618 622 nm->print();
619 623 nm->set_offset( low10(0) );
620 624 nm->print();
621 625 nm->add_offset_in_bytes( low10(0xbb) * wordSize );
622 626 nm->print();
623 627
624 628 while (--idx) {
625 629 nm = nativeMovRegMem_at( nm->next_instruction_address() );
626 630 nm->print();
627 631 for (idx1 = 0; idx1 < ARRAY_SIZE(offsets); idx1++) {
628 632 nm->set_offset( nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1] );
629 633 assert(nm->offset() == (nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1]),
630 634 "check unit test");
631 635 nm->print();
632 636 }
633 637 nm->add_offset_in_bytes( low10(0xbb) * wordSize );
634 638 nm->print();
635 639 }
636 640
637 641 VM_Version::revert();
638 642 #endif // ASSERT
639 643 }
640 644
641 645 // End code for unit testing implementation of NativeMovRegMem class
642 646
643 647 //--------------------------------------------------------------------------------
644 648
645 649
646 650 void NativeMovRegMemPatching::copy_instruction_to(address new_instruction_address) {
647 651 Untested("copy_instruction_to");
648 652 int instruction_size = next_instruction_address() - instruction_address();
649 653 for (int i = 0; i < instruction_size; i += wordSize) {
650 654 *(long*)(new_instruction_address + i) = *(long*)(address(this) + i);
651 655 }
652 656 }
653 657
654 658
655 659 void NativeMovRegMemPatching::verify() {
656 660 NativeInstruction::verify();
657 661 // make sure code pattern is actually a "ld" or "st" of some sort.
658 662 int i0 = long_at(0);
659 663 int op3 = inv_op3(i0);
660 664
661 665 assert((int)nop_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");
662 666
663 667 if (!(is_op(i0, Assembler::ldst_op) &&
664 668 inv_immed(i0) &&
665 669 0 != (op3 < op3_ldst_int_limit
666 670 ? (1 << op3 ) & (op3_mask_ld | op3_mask_st)
667 671 : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf)))) {
668 672 int i1 = long_at(ldst_offset);
669 673 Register rd = inv_rd(i0);
670 674
671 675 op3 = inv_op3(i1);
672 676 if (!is_op(i1, Assembler::ldst_op) && rd == inv_rs2(i1) &&
673 677 0 != (op3 < op3_ldst_int_limit
674 678 ? (1 << op3 ) & (op3_mask_ld | op3_mask_st)
675 679 : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf))) {
676 680 fatal("not a ld* or st* op");
677 681 }
678 682 }
679 683 }
680 684
681 685
682 686 void NativeMovRegMemPatching::print() {
683 687 if (is_immediate()) {
684 688 tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + %x]", instruction_address(), offset());
685 689 } else {
686 690 tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + reg]", instruction_address());
687 691 }
688 692 }
689 693
690 694
691 695 // Code for unit testing implementation of NativeMovRegMemPatching class
692 696 void NativeMovRegMemPatching::test() {
693 697 #ifdef ASSERT
694 698 ResourceMark rm;
695 699 CodeBuffer cb("test", 1000, 1000);
696 700 MacroAssembler* a = new MacroAssembler(&cb);
697 701 NativeMovRegMemPatching* nm;
698 702 uint idx = 0;
699 703 uint idx1;
700 704 int offsets[] = {
701 705 0x0,
702 706 0xffffffff,
703 707 0x7fffffff,
704 708 0x80000000,
705 709 4096,
706 710 4097,
707 711 0x20,
708 712 0x4000,
709 713 };
710 714
711 715 VM_Version::allow_all();
712 716
713 717 AddressLiteral al(0xffffffff, relocInfo::external_word_type);
714 718 a->ldsw( G5, al.low10(), G4); idx++;
715 719 a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
716 720 a->ldsw( G5, I3, G4 ); idx++;
717 721 a->ldsb( G5, al.low10(), G4); idx++;
718 722 a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
719 723 a->ldsb( G5, I3, G4 ); idx++;
720 724 a->ldsh( G5, al.low10(), G4); idx++;
721 725 a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
722 726 a->ldsh( G5, I3, G4 ); idx++;
723 727 a->lduw( G5, al.low10(), G4); idx++;
724 728 a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
725 729 a->lduw( G5, I3, G4 ); idx++;
726 730 a->ldub( G5, al.low10(), G4); idx++;
727 731 a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
728 732 a->ldub( G5, I3, G4 ); idx++;
729 733 a->lduh( G5, al.low10(), G4); idx++;
730 734 a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
731 735 a->lduh( G5, I3, G4 ); idx++;
732 736 a->ldx( G5, al.low10(), G4); idx++;
733 737 a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
734 738 a->ldx( G5, I3, G4 ); idx++;
735 739 a->ldd( G5, al.low10(), G4); idx++;
736 740 a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
737 741 a->ldd( G5, I3, G4 ); idx++;
738 742 a->ldf( FloatRegisterImpl::D, O2, -1, F14 ); idx++;
739 743 a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
740 744 a->ldf( FloatRegisterImpl::S, O0, I3, F15 ); idx++;
741 745
742 746 a->stw( G5, G4, al.low10()); idx++;
743 747 a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
744 748 a->stw( G5, G4, I3 ); idx++;
745 749 a->stb( G5, G4, al.low10()); idx++;
746 750 a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
747 751 a->stb( G5, G4, I3 ); idx++;
748 752 a->sth( G5, G4, al.low10()); idx++;
749 753 a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
750 754 a->sth( G5, G4, I3 ); idx++;
751 755 a->stx( G5, G4, al.low10()); idx++;
752 756 a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
753 757 a->stx( G5, G4, I3 ); idx++;
754 758 a->std( G5, G4, al.low10()); idx++;
755 759 a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
756 760 a->std( G5, G4, I3 ); idx++;
757 761 a->stf( FloatRegisterImpl::S, F18, O2, -1 ); idx++;
758 762 a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
759 763 a->stf( FloatRegisterImpl::S, F15, O0, I3 ); idx++;
760 764
761 765 nm = nativeMovRegMemPatching_at( cb.code_begin() );
762 766 nm->print();
763 767 nm->set_offset( low10(0) );
764 768 nm->print();
765 769 nm->add_offset_in_bytes( low10(0xbb) * wordSize );
766 770 nm->print();
767 771
768 772 while (--idx) {
769 773 nm = nativeMovRegMemPatching_at( nm->next_instruction_address() );
770 774 nm->print();
771 775 for (idx1 = 0; idx1 < ARRAY_SIZE(offsets); idx1++) {
772 776 nm->set_offset( nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1] );
773 777 assert(nm->offset() == (nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1]),
774 778 "check unit test");
775 779 nm->print();
776 780 }
777 781 nm->add_offset_in_bytes( low10(0xbb) * wordSize );
778 782 nm->print();
779 783 }
780 784
781 785 VM_Version::revert();
782 786 #endif // ASSERT
783 787 }
784 788 // End code for unit testing implementation of NativeMovRegMemPatching class
785 789
786 790
787 791 //--------------------------------------------------------------------------------
788 792
789 793
790 794 void NativeJump::verify() {
791 795 NativeInstruction::verify();
792 796 int i0 = long_at(sethi_offset);
793 797 int i1 = long_at(jmpl_offset);
794 798 assert((int)jmpl_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");
795 799 // verify the pattern "sethi %hi22(imm), treg ; jmpl treg, %lo10(imm), lreg"
796 800 Register rd = inv_rd(i0);
797 801 #ifndef _LP64
798 802 if (!(is_op2(i0, Assembler::sethi_op2) && rd != G0 &&
799 803 (is_op3(i1, Assembler::jmpl_op3, Assembler::arith_op) ||
800 804 (TraceJumps && is_op3(i1, Assembler::add_op3, Assembler::arith_op))) &&
801 805 inv_immed(i1) && (unsigned)get_simm13(i1) < (1 << 10) &&
802 806 rd == inv_rs1(i1))) {
803 807 fatal("not a jump_to instruction");
804 808 }
805 809 #else
806 810 // In LP64, the location of the jump instruction varies for non-relocatable
807 811 // jumps; for example, it could be sethi, xor, jmp instead of the
808 812 // 7-instruction sethi sequence. So let's check the sethi only.
809 813 if (!is_op2(i0, Assembler::sethi_op2) && rd != G0 ) {
810 814 fatal("not a jump_to instruction");
811 815 }
812 816 #endif
813 817 }
814 818
815 819
816 820 void NativeJump::print() {
817 821 tty->print_cr(INTPTR_FORMAT ": jmpl reg, " INTPTR_FORMAT, instruction_address(), jump_destination());
818 822 }
819 823
820 824
821 825 // Code for unit testing implementation of NativeJump class
822 826 void NativeJump::test() {
823 827 #ifdef ASSERT
824 828 ResourceMark rm;
825 829 CodeBuffer cb("test", 100, 100);
826 830 MacroAssembler* a = new MacroAssembler(&cb);
827 831 NativeJump* nj;
828 832 uint idx;
829 833 int offsets[] = {
830 834 0x0,
831 835 0xffffffff,
832 836 0x7fffffff,
833 837 0x80000000,
834 838 4096,
835 839 4097,
836 840 0x20,
837 841 0x4000,
838 842 };
839 843
840 844 VM_Version::allow_all();
841 845
842 846 AddressLiteral al(0x7fffbbbb, relocInfo::external_word_type);
843 847 a->sethi(al, I3);
844 848 a->jmpl(I3, al.low10(), G0, RelocationHolder::none);
845 849 a->delayed()->nop();
846 850 a->sethi(al, I3);
847 851 a->jmpl(I3, al.low10(), L3, RelocationHolder::none);
848 852 a->delayed()->nop();
849 853
850 854 nj = nativeJump_at( cb.code_begin() );
851 855 nj->print();
852 856
853 857 nj = nativeJump_at( nj->next_instruction_address() );
854 858 for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
855 859 nj->set_jump_destination( nj->instruction_address() + offsets[idx] );
856 860 assert(nj->jump_destination() == (nj->instruction_address() + offsets[idx]), "check unit test");
857 861 nj->print();
858 862 }
859 863
860 864 VM_Version::revert();
861 865 #endif // ASSERT
862 866 }
863 867 // End code for unit testing implementation of NativeJump class
864 868
865 869
866 870 void NativeJump::insert(address code_pos, address entry) {
867 871 Unimplemented();
868 872 }
869 873
870 874 // MT safe inserting of a jump over an unknown instruction sequence (used by nmethod::makeZombie)
871 875 // The problem: jump_to <dest> is a 3-word instruction (including its delay slot).
872 876 // Atomic write can be only with 1 word.
873 877 void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {
874 878 // Here's one way to do it: Pre-allocate a three-word jump sequence somewhere
875 879 // in the header of the nmethod, within a short branch's span of the patch point.
876 880 // Set up the jump sequence using NativeJump::insert, and then use an annulled
877 881 // unconditional branch at the target site (an atomic 1-word update).
878 882 // Limitations: You can only patch nmethods, with any given nmethod patched at
879 883 // most once, and the patch must be in the nmethod's header.
880 884 // It's messy, but you can ask the CodeCache for the nmethod containing the
881 885 // target address.
882 886
883 887 // %%%%% For now, do something MT-stupid:
884 888 ResourceMark rm;
885 889 int code_size = 1 * BytesPerInstWord;
886 890 CodeBuffer cb(verified_entry, code_size + 1);
887 891 MacroAssembler* a = new MacroAssembler(&cb);
888 892 if (VM_Version::v9_instructions_work()) {
889 893 a->ldsw(G0, 0, O7); // "ld" must agree with code in the signal handler
890 894 } else {
891 895 a->lduw(G0, 0, O7); // "ld" must agree with code in the signal handler
892 896 }
893 897 ICache::invalidate_range(verified_entry, code_size);
894 898 }
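
The pre-allocated jump scheme sketched in the comments at the top of patch_verified_entry would boil down to planting a single annulled branch-always word at the verified entry, targeting a jump sequence set up elsewhere in the nmethod. A hedged sketch, reusing the same Assembler helpers that NativeGeneralJump::insert_unconditional uses further down in this file (jump_seq is a hypothetical pre-allocated address; this is not what the code above actually does):

    // sketch: one-word, hence atomic, patch of the verified entry
    int br = Assembler::op2(Assembler::br_op2) | Assembler::annul(true) |
             Assembler::cond(Assembler::always) |
             Assembler::wdisp((intptr_t)jump_seq, (intptr_t)verified_entry, 22);
    *(int*)verified_entry = br;
    ICache::invalidate_word(verified_entry);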
895 899
896 900
897 901 void NativeIllegalInstruction::insert(address code_pos) {
898 902 NativeIllegalInstruction* nii = (NativeIllegalInstruction*) nativeInstruction_at(code_pos);
899 903 nii->set_long_at(0, illegal_instruction());
900 904 }
901 905
902 906 static int illegal_instruction_bits = 0;
903 907
904 908 int NativeInstruction::illegal_instruction() {
905 909 if (illegal_instruction_bits == 0) {
906 910 ResourceMark rm;
907 911 char buf[40];
908 912 CodeBuffer cbuf((address)&buf[0], 20);
909 913 MacroAssembler* a = new MacroAssembler(&cbuf);
910 914 address ia = a->pc();
911 915 a->trap(ST_RESERVED_FOR_USER_0 + 1);
912 916 int bits = *(int*)ia;
913 917 assert(is_op3(bits, Assembler::trap_op3, Assembler::arith_op), "bad instruction");
914 918 illegal_instruction_bits = bits;
915 919 assert(illegal_instruction_bits != 0, "oops");
916 920 }
917 921 return illegal_instruction_bits;
918 922 }
919 923
920 924 static int ic_miss_trap_bits = 0;
921 925
922 926 bool NativeInstruction::is_ic_miss_trap() {
923 927 if (ic_miss_trap_bits == 0) {
924 928 ResourceMark rm;
925 929 char buf[40];
926 930 CodeBuffer cbuf((address)&buf[0], 20);
927 931 MacroAssembler* a = new MacroAssembler(&cbuf);
928 932 address ia = a->pc();
929 933 a->trap(Assembler::notEqual, Assembler::ptr_cc, G0, ST_RESERVED_FOR_USER_0 + 2);
930 934 int bits = *(int*)ia;
931 935 assert(is_op3(bits, Assembler::trap_op3, Assembler::arith_op), "bad instruction");
932 936 ic_miss_trap_bits = bits;
933 937 assert(ic_miss_trap_bits != 0, "oops");
934 938 }
935 939 return long_at(0) == ic_miss_trap_bits;
936 940 }
937 941
938 942
939 943 bool NativeInstruction::is_illegal() {
940 944 if (illegal_instruction_bits == 0) {
941 945 return false;
942 946 }
943 947 return long_at(0) == illegal_instruction_bits;
944 948 }
945 949
946 950
947 951 void NativeGeneralJump::verify() {
948 952 assert(((NativeInstruction *)this)->is_jump() ||
949 953 ((NativeInstruction *)this)->is_cond_jump(), "not a general jump instruction");
950 954 }
951 955
952 956
953 957 void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
954 958 Assembler::Condition condition = Assembler::always;
955 959 int x = Assembler::op2(Assembler::br_op2) | Assembler::annul(false) |
956 960 Assembler::cond(condition) | Assembler::wdisp((intptr_t)entry, (intptr_t)code_pos, 22);
957 961 NativeGeneralJump* ni = (NativeGeneralJump*) nativeInstruction_at(code_pos);
958 962 ni->set_long_at(0, x);
959 963 }
960 964
961 965
962 966 // MT-safe patching of a jmp instruction (and following word).
963 967 // First patches the second word, and then atomically replaces
964 968 // the first word with the first new instruction word.
965 969 // Other processors might briefly see the old first word
966 970 // followed by the new second word. This is OK if the old
967 971 // second word is harmless, and the new second word may be
968 972 // harmlessly executed in the delay slot of the call.
969 973 void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
970 974 assert(Patching_lock->is_locked() ||
971 975 SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
972 976 assert (instr_addr != NULL, "illegal address for code patching");
973 977 NativeGeneralJump* h_jump = nativeGeneralJump_at (instr_addr); // checking that it is a jump
974 978 assert(NativeGeneralJump::instruction_size == 8, "wrong instruction size; must be 8");
975 979 int i0 = ((int*)code_buffer)[0];
976 980 int i1 = ((int*)code_buffer)[1];
977 981 int* contention_addr = (int*) h_jump->addr_at(1*BytesPerInstWord);
978 982 assert(inv_op(*contention_addr) == Assembler::arith_op ||
979 983 *contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
980 984 "must not interfere with original call");
981 985 // The set_long_at calls do the ICacheInvalidate so we just need to do them in reverse order
982 986 h_jump->set_long_at(1*BytesPerInstWord, i1);
983 987 h_jump->set_long_at(0*BytesPerInstWord, i0);
984 988 // NOTE: It is possible that another thread T will execute
985 989 // only the second patched word.
986 990 // In other words, since the original instruction is this
987 991 // jmp patching_stub; nop (NativeGeneralJump)
988 992 // and the new sequence from the buffer is this:
989 993 // sethi %hi(K), %r; add %r, %lo(K), %r (NativeMovConstReg)
990 994 // what T will execute is this:
991 995 // jmp patching_stub; add %r, %lo(K), %r
992 996 // thereby putting garbage into %r before calling the patching stub.
993 997 // This is OK, because the patching stub ignores the value of %r.
994 998
995 999 // Make sure the first-patched instruction, which may co-exist
996 1000 // briefly with the call, will do something harmless.
997 1001 assert(inv_op(*contention_addr) == Assembler::arith_op ||
998 1002 *contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
999 1003 "must not interfere with original call");
1000 1004 }