--- old/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp
+++ new/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp
1 1 /*
2 2 * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 #include "precompiled.hpp"
26 26 #include "c1/c1_CodeStubs.hpp"
27 27 #include "c1/c1_FrameMap.hpp"
28 28 #include "c1/c1_LIRAssembler.hpp"
29 29 #include "c1/c1_MacroAssembler.hpp"
30 30 #include "c1/c1_Runtime1.hpp"
31 31 #include "nativeInst_sparc.hpp"
32 32 #include "runtime/sharedRuntime.hpp"
33 33 #include "vmreg_sparc.inline.hpp"
34 34 #ifndef SERIALGC
35 35 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
36 36 #endif
37 37
38 38 #define __ ce->masm()->
39 39
40 40 RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index,
41 41 bool throw_index_out_of_bounds_exception)
42 42 : _throw_index_out_of_bounds_exception(throw_index_out_of_bounds_exception)
43 43 , _index(index)
44 44 {
45 45 assert(info != NULL, "must have info");
46 46 _info = new CodeEmitInfo(info);
47 47 }
48 48
49 49
50 50 void RangeCheckStub::emit_code(LIR_Assembler* ce) {
51 51 __ bind(_entry);
52 52
53 53 if (_index->is_register()) {
54 54 __ mov(_index->as_register(), G4);
55 55 } else {
56 56 __ set(_index->as_jint(), G4);
57 57 }
58 58 if (_throw_index_out_of_bounds_exception) {
59 59 __ call(Runtime1::entry_for(Runtime1::throw_index_exception_id), relocInfo::runtime_call_type);
60 60 } else {
61 61 __ call(Runtime1::entry_for(Runtime1::throw_range_check_failed_id), relocInfo::runtime_call_type);
62 62 }
63 63 __ delayed()->nop();
64 64 ce->add_call_info_here(_info);
65 65 ce->verify_oop_map(_info);
66 66 #ifdef ASSERT
67 67 __ should_not_reach_here();
68 68 #endif
69 69 }
70 70
71 71
72 72 void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
73 73 __ bind(_entry);
74 74 __ set(_bci, G4);
75 75 __ call(Runtime1::entry_for(Runtime1::counter_overflow_id), relocInfo::runtime_call_type);
76 76 __ delayed()->mov_or_nop(_method->as_register(), G5);
77 77 ce->add_call_info_here(_info);
78 78 ce->verify_oop_map(_info);
79 79
80 80 __ br(Assembler::always, true, Assembler::pt, _continuation);
81 81 __ delayed()->nop();
82 82 }
83 83
84 84
85 85 void DivByZeroStub::emit_code(LIR_Assembler* ce) {
86 86 if (_offset != -1) {
87 87 ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
88 88 }
89 89 __ bind(_entry);
90 90 __ call(Runtime1::entry_for(Runtime1::throw_div0_exception_id), relocInfo::runtime_call_type);
91 91 __ delayed()->nop();
92 92 ce->add_call_info_here(_info);
93 93 ce->verify_oop_map(_info);
94 94 #ifdef ASSERT
95 95 __ should_not_reach_here();
96 96 #endif
97 97 }
98 98
99 99
100 100 void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
101 101 ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
102 102 __ bind(_entry);
103 103 __ call(Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id),
104 104 relocInfo::runtime_call_type);
105 105 __ delayed()->nop();
106 106 ce->add_call_info_here(_info);
107 107 ce->verify_oop_map(_info);
108 108 #ifdef ASSERT
109 109 __ should_not_reach_here();
110 110 #endif
111 111 }
112 112
113 113
114 114 // Implementation of SimpleExceptionStub
115 115 // Note: %g1 and %g3 are already in use
116 116 void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
117 117 __ bind(_entry);
118 118 __ call(Runtime1::entry_for(_stub), relocInfo::runtime_call_type);
119 119
120 120 if (_obj->is_valid()) {
121 121 __ delayed()->mov(_obj->as_register(), G4); // _obj contains the optional argument to the stub
122 122 } else {
123 123 __ delayed()->mov(G0, G4);
124 124 }
125 125 ce->add_call_info_here(_info);
126 126 #ifdef ASSERT
127 127 __ should_not_reach_here();
128 128 #endif
129 129 }
130 130
131 131
132 132 // Implementation of NewInstanceStub
133 133
134 134 NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
135 135 _result = result;
136 136 _klass = klass;
137 137 _klass_reg = klass_reg;
138 138 _info = new CodeEmitInfo(info);
139 139 assert(stub_id == Runtime1::new_instance_id ||
140 140 stub_id == Runtime1::fast_new_instance_id ||
141 141 stub_id == Runtime1::fast_new_instance_init_check_id,
142 142 "need new_instance id");
143 143 _stub_id = stub_id;
144 144 }
145 145
146 146
147 147 void NewInstanceStub::emit_code(LIR_Assembler* ce) {
148 148 __ bind(_entry);
149 149 __ call(Runtime1::entry_for(_stub_id), relocInfo::runtime_call_type);
150 150 __ delayed()->mov_or_nop(_klass_reg->as_register(), G5);
151 151 ce->add_call_info_here(_info);
152 152 ce->verify_oop_map(_info);
153 153 __ br(Assembler::always, false, Assembler::pt, _continuation);
154 154 __ delayed()->mov_or_nop(O0, _result->as_register());
155 155 }
156 156
157 157
158 158 // Implementation of NewTypeArrayStub
159 159 NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
160 160 _klass_reg = klass_reg;
161 161 _length = length;
162 162 _result = result;
163 163 _info = new CodeEmitInfo(info);
164 164 }
165 165
166 166
167 167 void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
168 168 __ bind(_entry);
169 169
170 170 __ mov(_length->as_register(), G4);
171 171 __ call(Runtime1::entry_for(Runtime1::new_type_array_id), relocInfo::runtime_call_type);
172 172 __ delayed()->mov_or_nop(_klass_reg->as_register(), G5);
173 173 ce->add_call_info_here(_info);
174 174 ce->verify_oop_map(_info);
175 175 __ br(Assembler::always, false, Assembler::pt, _continuation);
176 176 __ delayed()->mov_or_nop(O0, _result->as_register());
177 177 }
178 178
179 179
180 180 // Implementation of NewObjectArrayStub
181 181
182 182 NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
183 183 _klass_reg = klass_reg;
184 184 _length = length;
185 185 _result = result;
186 186 _info = new CodeEmitInfo(info);
187 187 }
188 188
189 189
190 190 void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
191 191 __ bind(_entry);
192 192
193 193 __ mov(_length->as_register(), G4);
194 194 __ call(Runtime1::entry_for(Runtime1::new_object_array_id), relocInfo::runtime_call_type);
195 195 __ delayed()->mov_or_nop(_klass_reg->as_register(), G5);
196 196 ce->add_call_info_here(_info);
197 197 ce->verify_oop_map(_info);
198 198 __ br(Assembler::always, false, Assembler::pt, _continuation);
199 199 __ delayed()->mov_or_nop(O0, _result->as_register());
200 200 }
201 201
202 202
203 203 // Implementation of MonitorAccessStubs
204 204 MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info)
205 205 : MonitorAccessStub(obj_reg, lock_reg) {
206 206 _info = new CodeEmitInfo(info);
207 207 }
208 208
209 209
210 210 void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
211 211 __ bind(_entry);
212 212 __ mov(_obj_reg->as_register(), G4);
213 213 if (ce->compilation()->has_fpu_code()) {
214 214 __ call(Runtime1::entry_for(Runtime1::monitorenter_id), relocInfo::runtime_call_type);
215 215 } else {
216 216 __ call(Runtime1::entry_for(Runtime1::monitorenter_nofpu_id), relocInfo::runtime_call_type);
217 217 }
218 218 __ delayed()->mov_or_nop(_lock_reg->as_register(), G5);
219 219 ce->add_call_info_here(_info);
220 220 ce->verify_oop_map(_info);
221 221 __ br(Assembler::always, true, Assembler::pt, _continuation);
222 222 __ delayed()->nop();
223 223 }
224 224
225 225
226 226 void MonitorExitStub::emit_code(LIR_Assembler* ce) {
227 227 __ bind(_entry);
228 228 if (_compute_lock) {
229 229 ce->monitor_address(_monitor_ix, _lock_reg);
230 230 }
231 231 if (ce->compilation()->has_fpu_code()) {
232 232 __ call(Runtime1::entry_for(Runtime1::monitorexit_id), relocInfo::runtime_call_type);
233 233 } else {
234 234 __ call(Runtime1::entry_for(Runtime1::monitorexit_nofpu_id), relocInfo::runtime_call_type);
235 235 }
236 236
237 237 __ delayed()->mov_or_nop(_lock_reg->as_register(), G4);
238 238 __ br(Assembler::always, true, Assembler::pt, _continuation);
239 239 __ delayed()->nop();
240 240 }
241 241
242 242 // Implementation of patching:
243 243 // - Copy the code at the given offset to an inlined buffer (first the bytes, then the number of bytes)
244 244 // - Replace original code with a call to the stub
245 245 // At Runtime:
246 246 // - call to stub, jump to runtime
247 247 // - in runtime: preserve all registers (especially objects, i.e., source and destination object)
248 248 // - in runtime: after initializing class, restore original code, reexecute instruction
249 249
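
The four a_byte() calls near the end of PatchingStub::emit_code() below lay down the one-word patch record that tells the runtime how to find the pieces of the patch. As a minimal sketch (the struct and field names are illustrative only, not HotSpot types), the record looks like:

    struct PatchRecord {                      // one instruction-aligned word, emitted byte by byte
      unsigned char pad;                      // always 0: only 3 bytes are needed; the 4th
                                              //   keeps the record instruction-aligned
      unsigned char being_initialized_entry;  // offset back from the end of the record to the
                                              //   stub's being_initialized entry point
      unsigned char bytes_to_skip;            // stub bytes the runtime must step over
      unsigned char bytes_to_copy;            // original code bytes copied into the stub
    };
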
250 250 int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;
251 251
252 252 void PatchingStub::align_patch_site(MacroAssembler* ) {
253 253 // patch sites on sparc are always properly aligned.
254 254 }
255 255
256 256 void PatchingStub::emit_code(LIR_Assembler* ce) {
257 257 // copy original code here
258 258 assert(NativeCall::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF,
259 259 "not enough room for call");
260 260 assert((_bytes_to_copy & 0x3) == 0, "must copy a multiple of four bytes");
261 261
262 262 Label call_patch;
263 263
264 264 int being_initialized_entry = __ offset();
265 265
266 266 if (_id == load_klass_id) {
267 267 // produce a copy of the load klass instruction for use by the being initialized case
268 268 #ifdef ASSERT
269 269 address start = __ pc();
270 270 #endif
271 271 AddressLiteral addrlit(NULL, oop_Relocation::spec(_oop_index));
272 272 __ patchable_set(addrlit, _obj);
273 273
274 274 #ifdef ASSERT
275 275 for (int i = 0; i < _bytes_to_copy; i++) {
276 276 address ptr = (address)(_pc_start + i);
277 277 int a_byte = (*ptr) & 0xFF;
278 278 assert(a_byte == *start++, "should be the same code");
279 279 }
280 280 #endif
281 281 } else {
282 282 // make a copy of the code which is going to be patched.
283 283 for (int i = 0; i < _bytes_to_copy; i++) {
284 284 address ptr = (address)(_pc_start + i);
285 285 int a_byte = (*ptr) & 0xFF;
286 286 __ a_byte(a_byte);
287 287 }
288 288 }
289 289
290 290 address end_of_patch = __ pc();
291 291 int bytes_to_skip = 0;
292 292 if (_id == load_klass_id) {
293 293 int offset = __ offset();
294 294 if (CommentedAssembly) {
295 295 __ block_comment(" being_initialized check");
296 296 }
297 297
298 298 // static field accesses have special semantics while the class
299 299 // initializer is being run, so we emit a test which can be used to
300 300 // check that this code is being executed by the initializing
301 301 // thread.
302 302 assert(_obj != noreg, "must be a valid register");
303 303 assert(_oop_index >= 0, "must have oop index");
304 304 __ load_heap_oop(_obj, java_lang_Class::klass_offset_in_bytes(), G3);
305 305 __ ld_ptr(G3, instanceKlass::init_thread_offset_in_bytes() + sizeof(klassOopDesc), G3);
306 306 __ cmp_and_brx_short(G2_thread, G3, Assembler::notEqual, Assembler::pn, call_patch);
307 307
308 308 // load_klass patches may execute the patched code before it's
309 309 // copied back into place so we need to jump back into the main
310 310 // code of the nmethod to continue execution.
311 311 __ br(Assembler::always, false, Assembler::pt, _patch_site_continuation);
312 312 __ delayed()->nop();
313 313
314 314 // make sure this extra code gets skipped
315 315 bytes_to_skip += __ offset() - offset;
316 316 }
317 317
318 318 // Now emit the patch record telling the runtime how to find the
319 319 // pieces of the patch. We only need 3 bytes but it has to be
320 320 // aligned as an instruction so emit 4 bytes.
321 321 int sizeof_patch_record = 4;
322 322 bytes_to_skip += sizeof_patch_record;
323 323
324 324 // emit the offsets needed to find the code to patch
325 325 int being_initialized_entry_offset = __ offset() - being_initialized_entry + sizeof_patch_record;
326 326
327 327 // Emit the patch record. We need to emit a full word, so emit an extra empty byte
328 328 __ a_byte(0);
329 329 __ a_byte(being_initialized_entry_offset);
330 330 __ a_byte(bytes_to_skip);
331 331 __ a_byte(_bytes_to_copy);
332 332 address patch_info_pc = __ pc();
333 333 assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");
334 334
335 335 address entry = __ pc();
336 336 NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
337 337 address target = NULL;
338 338 switch (_id) {
339 339 case access_field_id: target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
340 340 case load_klass_id: target = Runtime1::entry_for(Runtime1::load_klass_patching_id); break;
341 341 default: ShouldNotReachHere();
342 342 }
343 343 __ bind(call_patch);
344 344
345 345 if (CommentedAssembly) {
346 346 __ block_comment("patch entry point");
347 347 }
348 348 __ call(target, relocInfo::runtime_call_type);
349 349 __ delayed()->nop();
350 350 assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
351 351 ce->add_call_info_here(_info);
352 352 __ br(Assembler::always, false, Assembler::pt, _patch_site_entry);
353 353 __ delayed()->nop();
354 354 if (_id == load_klass_id) {
355 355 CodeSection* cs = __ code_section();
356 356 address pc = (address)_pc_start;
357 357 RelocIterator iter(cs, pc, pc + 1);
358 358 relocInfo::change_reloc_info_for_address(&iter, (address) pc, relocInfo::oop_type, relocInfo::none);
359 359
360 360 pc = (address)(_pc_start + NativeMovConstReg::add_offset);
361 361 RelocIterator iter2(cs, pc, pc+1);
362 362 relocInfo::change_reloc_info_for_address(&iter2, (address) pc, relocInfo::oop_type, relocInfo::none);
363 363 }
364 364
365 365 }
366 366
367 367
368 368 void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
369 369 __ bind(_entry);
370 - __ call(SharedRuntime::deopt_blob()->unpack_with_reexecution());
370 + __ call(Runtime1::entry_for(Runtime1::deoptimize_id), relocInfo::runtime_call_type);
371 371 __ delayed()->nop();
372 372 ce->add_call_info_here(_info);
373 - debug_only(__ should_not_reach_here());
373 + DEBUG_ONLY(__ should_not_reach_here());
374 374 }
375 375
376 376
377 377 void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
378 378 //---------------slow case: call to native-----------------
379 379 __ bind(_entry);
380 380 __ mov(src()->as_register(), O0);
381 381 __ mov(src_pos()->as_register(), O1);
382 382 __ mov(dst()->as_register(), O2);
383 383 __ mov(dst_pos()->as_register(), O3);
384 384 __ mov(length()->as_register(), O4);
385 385
386 386 ce->emit_static_call_stub();
387 387
388 388 __ call(SharedRuntime::get_resolve_static_call_stub(), relocInfo::static_call_type);
389 389 __ delayed()->nop();
390 390 ce->add_call_info_here(info());
391 391 ce->verify_oop_map(info());
392 392
393 393 #ifndef PRODUCT
394 394 __ set((intptr_t)&Runtime1::_arraycopy_slowcase_cnt, O0);
395 395 __ ld(O0, 0, O1);
396 396 __ inc(O1);
397 397 __ st(O1, 0, O0);
398 398 #endif
399 399
400 400 __ br(Assembler::always, false, Assembler::pt, _continuation);
401 401 __ delayed()->nop();
402 402 }
403 403
404 404
405 405 ///////////////////////////////////////////////////////////////////////////////////
406 406 #ifndef SERIALGC
407 407
408 408 void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
409 409 // At this point we know that marking is in progress.
410 410 // If do_load() is true then we have to emit the
411 411 // load of the previous value; otherwise it has already
412 412 // been loaded into _pre_val.
413 413
414 414 __ bind(_entry);
415 415
416 416 assert(pre_val()->is_register(), "Precondition.");
417 417 Register pre_val_reg = pre_val()->as_register();
418 418
419 419 if (do_load()) {
420 420 ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
421 421 }
422 422
423 423 if (__ is_in_wdisp16_range(_continuation)) {
424 424 __ br_null(pre_val_reg, /*annul*/false, Assembler::pt, _continuation);
425 425 } else {
426 426 __ cmp(pre_val_reg, G0);
427 427 __ brx(Assembler::equal, false, Assembler::pn, _continuation);
428 428 }
429 429 __ delayed()->nop();
430 430
431 431 __ call(Runtime1::entry_for(Runtime1::g1_pre_barrier_slow_id));
432 432 __ delayed()->mov(pre_val_reg, G4);
433 433 __ br(Assembler::always, false, Assembler::pt, _continuation);
434 434 __ delayed()->nop();
435 435
436 436 }
437 437
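Taken together, the code this stub emits behaves like the following slow path, shown as a minimal C-style sketch (the function names are illustrative; the stub passes pre_val to the runtime call in G4):

    // Pre-barrier slow path: marking is already known to be in progress here.
    void g1_pre_barrier_slow_path(oop* addr, oop pre_val, bool do_load) {
      if (do_load) {
        pre_val = *addr;              // previous value was not loaded by the fast path
      }
      if (pre_val != NULL) {
        g1_pre_barrier_slow(pre_val); // records pre_val for SATB marking
      }
      // fall through: branch back to _continuation in the compiled code
    }
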
438 438 void G1UnsafeGetObjSATBBarrierStub::emit_code(LIR_Assembler* ce) {
439 439 // At this point we know that offset == referent_offset.
440 440 //
441 441 // So we might have to emit:
442 442 // if (src == null) goto continuation.
443 443 //
444 444 // and we definitely have to emit:
445 445 // if (klass(src).reference_type == REF_NONE) goto continuation
446 446 // if (!marking_active) goto continuation
447 447 // if (pre_val == null) goto continuation
448 448 // call pre_barrier(pre_val)
449 449 // goto continuation
450 450 //
451 451 __ bind(_entry);
452 452
453 453 assert(src()->is_register(), "sanity");
454 454 Register src_reg = src()->as_register();
455 455
456 456 if (gen_src_check()) {
457 457 // The original src operand was not a constant.
458 458 // Generate src == null?
459 459 if (__ is_in_wdisp16_range(_continuation)) {
460 460 __ br_null(src_reg, /*annul*/false, Assembler::pt, _continuation);
461 461 } else {
462 462 __ cmp(src_reg, G0);
463 463 __ brx(Assembler::equal, false, Assembler::pt, _continuation);
464 464 }
465 465 __ delayed()->nop();
466 466 }
467 467
468 468 // Generate: src->_klass->_reference_type() == REF_NONE?
469 469 assert(tmp()->is_register(), "sanity");
470 470 Register tmp_reg = tmp()->as_register();
471 471
472 472 __ load_klass(src_reg, tmp_reg);
473 473
474 474 Address ref_type_adr(tmp_reg, instanceKlass::reference_type_offset_in_bytes() + sizeof(oopDesc));
475 475 __ ld(ref_type_adr, tmp_reg);
476 476
477 477 // _reference_type field is of type ReferenceType (enum)
478 478 assert(REF_NONE == 0, "check this code");
479 479 __ cmp_zero_and_br(Assembler::equal, tmp_reg, _continuation, /*annul*/false, Assembler::pt);
480 480 __ delayed()->nop();
481 481
482 482 // Is marking active?
483 483 assert(thread()->is_register(), "precondition");
484 484 Register thread_reg = thread()->as_pointer_register();
485 485
486 486 Address in_progress(thread_reg, in_bytes(JavaThread::satb_mark_queue_offset() +
487 487 PtrQueue::byte_offset_of_active()));
488 488
489 489 if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
490 490 __ ld(in_progress, tmp_reg);
491 491 } else {
492 492 assert(in_bytes(PtrQueue::byte_width_of_active()) == 1, "Assumption");
493 493 __ ldsb(in_progress, tmp_reg);
494 494 }
495 495
496 496 __ cmp_zero_and_br(Assembler::equal, tmp_reg, _continuation, /*annul*/false, Assembler::pt);
497 497 __ delayed()->nop();
498 498
499 499 // val == null?
500 500 assert(val()->is_register(), "Precondition.");
501 501 Register val_reg = val()->as_register();
502 502
503 503 if (__ is_in_wdisp16_range(_continuation)) {
504 504 __ br_null(val_reg, /*annul*/false, Assembler::pt, _continuation);
505 505 } else {
506 506 __ cmp(val_reg, G0);
507 507 __ brx(Assembler::equal, false, Assembler::pt, _continuation);
508 508 }
509 509 __ delayed()->nop();
510 510
511 511 __ call(Runtime1::entry_for(Runtime1::g1_pre_barrier_slow_id));
512 512 __ delayed()->mov(val_reg, G4);
513 513 __ br(Assembler::always, false, Assembler::pt, _continuation);
514 514 __ delayed()->nop();
515 515 }
516 516
517 517 jbyte* G1PostBarrierStub::_byte_map_base = NULL;
518 518
519 519 jbyte* G1PostBarrierStub::byte_map_base_slow() {
520 520 BarrierSet* bs = Universe::heap()->barrier_set();
521 521 assert(bs->is_a(BarrierSet::G1SATBCTLogging),
522 522 "Must be if we're using this.");
523 523 return ((G1SATBCardTableModRefBS*)bs)->byte_map_base;
524 524 }
525 525
526 526 void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
527 527 __ bind(_entry);
528 528
529 529 assert(addr()->is_register(), "Precondition.");
530 530 assert(new_val()->is_register(), "Precondition.");
531 531 Register addr_reg = addr()->as_pointer_register();
532 532 Register new_val_reg = new_val()->as_register();
533 533
534 534 if (__ is_in_wdisp16_range(_continuation)) {
535 535 __ br_null(new_val_reg, /*annul*/false, Assembler::pt, _continuation);
536 536 } else {
537 537 __ cmp(new_val_reg, G0);
538 538 __ brx(Assembler::equal, false, Assembler::pn, _continuation);
539 539 }
540 540 __ delayed()->nop();
541 541
542 542 __ call(Runtime1::entry_for(Runtime1::g1_post_barrier_slow_id));
543 543 __ delayed()->mov(addr_reg, G4);
544 544 __ br(Assembler::always, false, Assembler::pt, _continuation);
545 545 __ delayed()->nop();
546 546 }
547 547
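For reference, the address handed to g1_post_barrier_slow (in G4) maps to a card via the byte_map_base returned by byte_map_base_slow() above: one card byte per 2^card_shift bytes of heap. A minimal sketch, assuming the usual 512-byte HotSpot card size (the helper name is illustrative):

    // Card lookup as the post-barrier slow path would perform it (sketch).
    jbyte* card_for(void* field_addr, jbyte* byte_map_base) {
      return byte_map_base + ((uintptr_t)field_addr >> 9);  // 2^9 = 512-byte cards
    }

The slow call dirties that card and enqueues it on the thread's dirty card queue for concurrent refinement.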
548 548 #endif // SERIALGC
549 549 ///////////////////////////////////////////////////////////////////////////////////
550 550
551 551 #undef __