--- old/src/cpu/x86/vm/c1_CodeStubs_x86.cpp
+++ new/src/cpu/x86/vm/c1_CodeStubs_x86.cpp
1 1 /*
2 2 * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 #include "precompiled.hpp"
26 26 #include "c1/c1_CodeStubs.hpp"
27 27 #include "c1/c1_FrameMap.hpp"
28 28 #include "c1/c1_LIRAssembler.hpp"
29 29 #include "c1/c1_MacroAssembler.hpp"
30 30 #include "c1/c1_Runtime1.hpp"
31 31 #include "nativeInst_x86.hpp"
32 32 #include "runtime/sharedRuntime.hpp"
33 33 #include "vmreg_x86.inline.hpp"
34 34 #ifndef SERIALGC
35 35 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
36 36 #endif
37 37
38 38
39 39 #define __ ce->masm()->
40 40
41 41 float ConversionStub::float_zero = 0.0;
42 42 double ConversionStub::double_zero = 0.0;
43 43
44 44 void ConversionStub::emit_code(LIR_Assembler* ce) {
45 45 __ bind(_entry);
46 46 assert(bytecode() == Bytecodes::_f2i || bytecode() == Bytecodes::_d2i, "other conversions do not require stub");
47 47
48 48
49 49 if (input()->is_single_xmm()) {
50 50 __ comiss(input()->as_xmm_float_reg(),
51 51 ExternalAddress((address)&float_zero));
52 52 } else if (input()->is_double_xmm()) {
53 53 __ comisd(input()->as_xmm_double_reg(),
54 54 ExternalAddress((address)&double_zero));
55 55 } else {
56 56 LP64_ONLY(ShouldNotReachHere());
57 57 __ push(rax);
58 58 __ ftst();
59 59 __ fnstsw_ax();
60 60 __ sahf();
61 61 __ pop(rax);
62 62 }
63 63
64 64 Label NaN, do_return;
65 65 __ jccb(Assembler::parity, NaN);
66 66 __ jccb(Assembler::below, do_return);
67 67
68 68 // input is > 0 -> return maxInt
69 69 // result register already contains 0x80000000, so subtracting 1 gives 0x7fffffff
70 70 __ decrement(result()->as_register());
71 71 __ jmpb(do_return);
72 72
73 73 // input is NaN -> return 0
74 74 __ bind(NaN);
75 75 __ xorptr(result()->as_register(), result()->as_register());
76 76
77 77 __ bind(do_return);
78 78 __ jmp(_continuation);
79 79 }
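
For orientation, a minimal standalone sketch (not HotSpot code) of the fixup this stub performs; it is only entered after cvttss2si/cvttsd2si has left 0x80000000 ("integer indefinite") in the result register, and the d2i case is analogous via comisd:

    #include <cmath>
    #include <cstdint>
    // Mirrors the branches above: parity -> NaN, below -> negative overflow.
    static int32_t f2i_fixup(float input) {
      if (std::isnan(input)) return 0;          // NaN converts to 0
      if (input < 0.0f)      return INT32_MIN;  // keep 0x80000000 (minInt)
      return INT32_MAX;                         // decrement wraps 0x80000000 -> 0x7fffffff
    }
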
80 80
81 81 void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
82 82 __ bind(_entry);
83 83 ce->store_parameter(_method->as_register(), 1);
84 84 ce->store_parameter(_bci, 0);
85 85 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::counter_overflow_id)));
86 86 ce->add_call_info_here(_info);
87 87 ce->verify_oop_map(_info);
88 88 __ jmp(_continuation);
89 89 }
90 90
91 91 RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index,
92 92 bool throw_index_out_of_bounds_exception)
93 93 : _throw_index_out_of_bounds_exception(throw_index_out_of_bounds_exception)
94 94 , _index(index)
95 95 {
96 96 assert(info != NULL, "must have info");
97 97 _info = new CodeEmitInfo(info);
98 98 }
99 99
100 100
101 101 void RangeCheckStub::emit_code(LIR_Assembler* ce) {
102 102 __ bind(_entry);
103 103 // pass the array index on stack because all registers must be preserved
104 104 if (_index->is_cpu_register()) {
105 105 ce->store_parameter(_index->as_register(), 0);
106 106 } else {
107 107 ce->store_parameter(_index->as_jint(), 0);
108 108 }
109 109 Runtime1::StubID stub_id;
110 110 if (_throw_index_out_of_bounds_exception) {
111 111 stub_id = Runtime1::throw_index_exception_id;
112 112 } else {
113 113 stub_id = Runtime1::throw_range_check_failed_id;
114 114 }
115 115 __ call(RuntimeAddress(Runtime1::entry_for(stub_id)));
116 116 ce->add_call_info_here(_info);
117 117 debug_only(__ should_not_reach_here());
118 118 }
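
In outline, the slow path emitted here is (a sketch of the control flow, not new code):

    // pass the index on the stack because registers must stay intact for
    // the exception path, then tail into the runtime, which throws:
    //   stack[0] = index (register contents or jint constant)
    //   call throw_index_exception_id or throw_range_check_failed_id
    //   <control never returns; debug builds trap via should_not_reach_here()>
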
119 119
120 120
121 121 void DivByZeroStub::emit_code(LIR_Assembler* ce) {
122 122 if (_offset != -1) {
123 123 ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
124 124 }
125 125 __ bind(_entry);
126 126 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::throw_div0_exception_id)));
127 127 ce->add_call_info_here(_info);
128 128 debug_only(__ should_not_reach_here());
129 129 }
130 130
131 131
132 132 // Implementation of NewInstanceStub
133 133
134 134 NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
135 135 _result = result;
136 136 _klass = klass;
137 137 _klass_reg = klass_reg;
138 138 _info = new CodeEmitInfo(info);
139 139 assert(stub_id == Runtime1::new_instance_id ||
140 140 stub_id == Runtime1::fast_new_instance_id ||
141 141 stub_id == Runtime1::fast_new_instance_init_check_id,
142 142 "need new_instance id");
143 143 _stub_id = stub_id;
144 144 }
145 145
146 146
147 147 void NewInstanceStub::emit_code(LIR_Assembler* ce) {
148 148 assert(__ rsp_offset() == 0, "frame size should be fixed");
149 149 __ bind(_entry);
150 150 __ movptr(rdx, _klass_reg->as_register());
151 151 __ call(RuntimeAddress(Runtime1::entry_for(_stub_id)));
152 152 ce->add_call_info_here(_info);
153 153 ce->verify_oop_map(_info);
154 154 assert(_result->as_register() == rax, "result must in rax,");
155 155 __ jmp(_continuation);
156 156 }
157 157
158 158
159 159 // Implementation of NewTypeArrayStub
160 160
161 161 NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
162 162 _klass_reg = klass_reg;
163 163 _length = length;
164 164 _result = result;
165 165 _info = new CodeEmitInfo(info);
166 166 }
167 167
168 168
169 169 void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
170 170 assert(__ rsp_offset() == 0, "frame size should be fixed");
171 171 __ bind(_entry);
172 172 assert(_length->as_register() == rbx, "length must in rbx,");
173 173 assert(_klass_reg->as_register() == rdx, "klass_reg must in rdx");
174 174 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_type_array_id)));
175 175 ce->add_call_info_here(_info);
176 176 ce->verify_oop_map(_info);
177 177 assert(_result->as_register() == rax, "result must in rax,");
178 178 __ jmp(_continuation);
179 179 }
180 180
181 181
182 182 // Implementation of NewObjectArrayStub
183 183
184 184 NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
185 185 _klass_reg = klass_reg;
186 186 _result = result;
187 187 _length = length;
188 188 _info = new CodeEmitInfo(info);
189 189 }
190 190
191 191
192 192 void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
193 193 assert(__ rsp_offset() == 0, "frame size should be fixed");
194 194 __ bind(_entry);
195 195 assert(_length->as_register() == rbx, "length must in rbx,");
196 196 assert(_klass_reg->as_register() == rdx, "klass_reg must in rdx");
197 197 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_object_array_id)));
198 198 ce->add_call_info_here(_info);
199 199 ce->verify_oop_map(_info);
200 200 assert(_result->as_register() == rax, "result must in rax,");
201 201 __ jmp(_continuation);
202 202 }
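
Taken together, the asserts in the three allocation stubs above pin down a single calling convention (a summary only):

    // Runtime1 allocation entries used by New*Stub::emit_code:
    //   new_instance / fast_new_instance[_init_check]: klass in rdx -> result in rax
    //   new_type_array_id / new_object_array_id:       klass in rdx,
    //                                                  length in rbx -> result in rax
    // Each stub loads the fixed registers, calls, and jumps to _continuation
    // with the freshly allocated object in rax.
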
203 203
204 204
205 205 // Implementation of MonitorAccessStubs
206 206
207 207 MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info)
208 208 : MonitorAccessStub(obj_reg, lock_reg)
209 209 {
210 210 _info = new CodeEmitInfo(info);
211 211 }
212 212
213 213
214 214 void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
215 215 assert(__ rsp_offset() == 0, "frame size should be fixed");
216 216 __ bind(_entry);
217 217 ce->store_parameter(_obj_reg->as_register(), 1);
218 218 ce->store_parameter(_lock_reg->as_register(), 0);
219 219 Runtime1::StubID enter_id;
220 220 if (ce->compilation()->has_fpu_code()) {
221 221 enter_id = Runtime1::monitorenter_id;
222 222 } else {
223 223 enter_id = Runtime1::monitorenter_nofpu_id;
224 224 }
225 225 __ call(RuntimeAddress(Runtime1::entry_for(enter_id)));
226 226 ce->add_call_info_here(_info);
227 227 ce->verify_oop_map(_info);
228 228 __ jmp(_continuation);
229 229 }
230 230
231 231
232 232 void MonitorExitStub::emit_code(LIR_Assembler* ce) {
233 233 __ bind(_entry);
234 234 if (_compute_lock) {
235 235 // lock_reg was destroyed by fast unlocking attempt => recompute it
236 236 ce->monitor_address(_monitor_ix, _lock_reg);
237 237 }
238 238 ce->store_parameter(_lock_reg->as_register(), 0);
239 239 // note: non-blocking leaf routine => no call info needed
240 240 Runtime1::StubID exit_id;
241 241 if (ce->compilation()->has_fpu_code()) {
242 242 exit_id = Runtime1::monitorexit_id;
243 243 } else {
244 244 exit_id = Runtime1::monitorexit_nofpu_id;
245 245 }
246 246 __ call(RuntimeAddress(Runtime1::entry_for(exit_id)));
247 247 __ jmp(_continuation);
248 248 }
249 249
250 250
251 251 // Implementation of patching:
252 252 // - Copy the code at given offset to an inlined buffer (first the bytes, then the number of bytes)
253 253 // - Replace original code with a call to the stub
254 254 // At Runtime:
255 255 // - call to stub, jump to runtime
256 256 // - in runtime: preserve all registers (especially objects, i.e., source and destination object)
257 257 // - in runtime: after initializing class, restore original code, reexecute instruction
258 258
259 259 int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;
260 260
261 261 void PatchingStub::align_patch_site(MacroAssembler* masm) {
262 262 // We're patching a 5-7 byte instruction on intel and we need to
263 263 // make sure that we don't see a piece of the instruction. It
264 264 // appears mostly impossible on Intel to simply invalidate other
265 265 // processors caches and since they may do aggressive prefetch it's
266 266 // very hard to make a guess about what code might be in the icache.
267 267 // Force the instruction to be double word aligned so that it
268 268 // doesn't span a cache line.
269 269 masm->align(round_to(NativeGeneralJump::instruction_size, wordSize));
270 270 }
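
As a worked check of the comment above (NativeGeneralJump::instruction_size is 5 on x86):

    // round_to(5, 4) == 8 and round_to(5, 8) == 8, so the patch site is
    // aligned to an 8-byte boundary; a 5-byte jump starting there ends inside
    // that 8-byte unit and can never straddle a cache line, since cache line
    // sizes are multiples of 8.
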
271 271
272 272 void PatchingStub::emit_code(LIR_Assembler* ce) {
273 273 assert(NativeCall::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF, "not enough room for call");
274 274
275 275 Label call_patch;
276 276
277 277 // static field accesses have special semantics while the class
278 278 // initializer is being run so we emit a test which can be used to
279 279 // check that this code is being executed by the initializing
280 280 // thread.
281 281 address being_initialized_entry = __ pc();
282 282 if (CommentedAssembly) {
283 283 __ block_comment(" patch template");
284 284 }
285 285 if (_id == load_klass_id) {
286 286 // produce a copy of the load klass instruction for use by the being initialized case
287 287 address start = __ pc();
288 288 jobject o = NULL;
289 289 __ movoop(_obj, o);
290 290 #ifdef ASSERT
291 291 for (int i = 0; i < _bytes_to_copy; i++) {
292 292 address ptr = (address)(_pc_start + i);
293 293 int a_byte = (*ptr) & 0xFF;
294 294 assert(a_byte == *start++, "should be the same code");
295 295 }
296 296 #endif
297 297 } else {
298 298 // make a copy of the code which is going to be patched.
299 299 for ( int i = 0; i < _bytes_to_copy; i++) {
300 300 address ptr = (address)(_pc_start + i);
301 301 int a_byte = (*ptr) & 0xFF;
302 302 __ a_byte (a_byte);
303 303 *ptr = 0x90; // make the site look like a nop
304 304 }
305 305 }
306 306
307 307 address end_of_patch = __ pc();
308 308 int bytes_to_skip = 0;
309 309 if (_id == load_klass_id) {
310 310 int offset = __ offset();
311 311 if (CommentedAssembly) {
312 312 __ block_comment(" being_initialized check");
313 313 }
314 314 assert(_obj != noreg, "must be a valid register");
315 315 Register tmp = rax;
316 316 Register tmp2 = rbx;
317 317 __ push(tmp);
318 318 __ push(tmp2);
319 319 // Load without verification to keep code size small. We need it because
320 320 // being_initialized_entry_offset has to fit in a byte. Also, we know it's not null.
321 321 __ load_heap_oop_not_null(tmp2, Address(_obj, java_lang_Class::klass_offset_in_bytes()));
322 322 __ get_thread(tmp);
323 323 __ cmpptr(tmp, Address(tmp2, instanceKlass::init_thread_offset_in_bytes() + sizeof(klassOopDesc)));
324 324 __ pop(tmp2);
325 325 __ pop(tmp);
326 326 __ jcc(Assembler::notEqual, call_patch);
327 327
328 328 // access_field patches may execute the patched code before it's
329 329 // copied back into place so we need to jump back into the main
330 330 // code of the nmethod to continue execution.
331 331 __ jmp(_patch_site_continuation);
332 332
333 333 // make sure this extra code gets skipped
334 334 bytes_to_skip += __ offset() - offset;
335 335 }
336 336 if (CommentedAssembly) {
337 337 __ block_comment("patch data encoded as movl");
338 338 }
339 339 // Now emit the patch record telling the runtime how to find the
340 340 // pieces of the patch. We only need 3 bytes but for readability of
341 341 // the disassembly we make the data look like a movl reg, imm32,
342 342 // which requires 5 bytes
343 343 int sizeof_patch_record = 5;
344 344 bytes_to_skip += sizeof_patch_record;
345 345
346 346 // emit the offsets needed to find the code to patch
347 347 int being_initialized_entry_offset = __ pc() - being_initialized_entry + sizeof_patch_record;
348 348
349 349 __ a_byte(0xB8);
350 350 __ a_byte(0);
351 351 __ a_byte(being_initialized_entry_offset);
352 352 __ a_byte(bytes_to_skip);
353 353 __ a_byte(_bytes_to_copy);
354 354 address patch_info_pc = __ pc();
355 355 assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");
356 356
357 357 address entry = __ pc();
358 358 NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
359 359 address target = NULL;
360 360 switch (_id) {
361 361 case access_field_id: target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
362 362 case load_klass_id: target = Runtime1::entry_for(Runtime1::load_klass_patching_id); break;
363 363 default: ShouldNotReachHere();
364 364 }
365 365 __ bind(call_patch);
366 366
367 367 if (CommentedAssembly) {
368 368 __ block_comment("patch entry point");
369 369 }
370 370 __ call(RuntimeAddress(target));
371 371 assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
372 372 ce->add_call_info_here(_info);
373 373 int jmp_off = __ offset();
374 374 __ jmp(_patch_site_entry);
375 375 // Add enough nops so deoptimization can overwrite the jmp above with a call
376 376 // and not destroy the world.
377 377 for (int j = __ offset() ; j < jmp_off + 5 ; j++ ) {
378 378 __ nop();
379 379 }
380 380 if (_id == load_klass_id) {
381 381 CodeSection* cs = __ code_section();
382 382 RelocIterator iter(cs, (address)_pc_start, (address)(_pc_start + 1));
383 383 relocInfo::change_reloc_info_for_address(&iter, (address) _pc_start, relocInfo::oop_type, relocInfo::none);
384 384 }
385 385 }
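
A hedged sketch of the five-byte patch record emitted by the a_byte() calls above (the struct and its field names are illustrative, not HotSpot's); by construction it disassembles as movl eax, imm32:

    struct PatchRecordSketch {                       // B8 imm32
      unsigned char opcode;                          // 0xB8
      unsigned char zero;                            // always 0
      unsigned char being_initialized_entry_offset;  // back-offset to the entry
      unsigned char bytes_to_skip;                   // includes these 5 bytes
      unsigned char bytes_to_copy;                   // size of the patched code
    };
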
386 386
387 387
388 388 void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
389 389 __ bind(_entry);
390 - __ call(RuntimeAddress(SharedRuntime::deopt_blob()->unpack_with_reexecution()));
390 + __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::deoptimize_id)));
391 391 ce->add_call_info_here(_info);
392 - debug_only(__ should_not_reach_here());
392 + DEBUG_ONLY(__ should_not_reach_here());
393 393 }
394 394
395 395
396 396 void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
397 397 ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
398 398 __ bind(_entry);
399 399 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id)));
400 400 ce->add_call_info_here(_info);
401 401 debug_only(__ should_not_reach_here());
402 402 }
403 403
404 404
405 405 void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
406 406 assert(__ rsp_offset() == 0, "frame size should be fixed");
407 407
408 408 __ bind(_entry);
409 409 // pass the object on stack because all registers must be preserved
410 410 if (_obj->is_cpu_register()) {
411 411 ce->store_parameter(_obj->as_register(), 0);
412 412 }
413 413 __ call(RuntimeAddress(Runtime1::entry_for(_stub)));
414 414 ce->add_call_info_here(_info);
415 415 debug_only(__ should_not_reach_here());
416 416 }
417 417
418 418
419 419 void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
420 420 //---------------slow case: call to native-----------------
421 421 __ bind(_entry);
422 422 // Figure out where the args should go
423 423 // This should really convert the IntrinsicID to the methodOop and signature
424 424 // but I don't know how to do that.
425 425 //
426 426 VMRegPair args[5];
427 427 BasicType signature[5] = { T_OBJECT, T_INT, T_OBJECT, T_INT, T_INT};
428 428 SharedRuntime::java_calling_convention(signature, args, 5, true);
429 429
430 430 // push parameters
431 431 // (src, src_pos, dest, destPos, length)
432 432 Register r[5];
433 433 r[0] = src()->as_register();
434 434 r[1] = src_pos()->as_register();
435 435 r[2] = dst()->as_register();
436 436 r[3] = dst_pos()->as_register();
437 437 r[4] = length()->as_register();
438 438
439 439 // next registers will get stored on the stack
440 440 for (int i = 0; i < 5 ; i++ ) {
441 441 VMReg r_1 = args[i].first();
442 442 if (r_1->is_stack()) {
443 443 int st_off = r_1->reg2stack() * wordSize;
444 444 __ movptr (Address(rsp, st_off), r[i]);
445 445 } else {
446 446 assert(r[i] == args[i].first()->as_Register(), "Wrong register for arg ");
447 447 }
448 448 }
449 449
450 450 ce->align_call(lir_static_call);
451 451
452 452 ce->emit_static_call_stub();
453 453 AddressLiteral resolve(SharedRuntime::get_resolve_static_call_stub(),
454 454 relocInfo::static_call_type);
455 455 __ call(resolve);
456 456 ce->add_call_info_here(info());
457 457
458 458 #ifndef PRODUCT
459 459 __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_slowcase_cnt));
460 460 #endif
461 461
462 462 __ jmp(_continuation);
463 463 }
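
The hard-coded signature corresponds to java.lang.System.arraycopy:

    // BasicType signature[5] = { T_OBJECT, T_INT, T_OBJECT, T_INT, T_INT }
    // matches System.arraycopy(Object src, int srcPos,
    //                          Object dest, int destPos, int length);
    // java_calling_convention() reports, per argument, whether it lives in a
    // register or an rsp-relative stack slot, which the loop above honors.
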
464 464
465 465 /////////////////////////////////////////////////////////////////////////////
466 466 #ifndef SERIALGC
467 467
468 468 void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
469 469 // At this point we know that marking is in progress.
470 470 // If do_load() is true then we have to emit the
471 471 // load of the previous value; otherwise it has already
472 472 // been loaded into _pre_val.
473 473
474 474 __ bind(_entry);
475 475 assert(pre_val()->is_register(), "Precondition.");
476 476
477 477 Register pre_val_reg = pre_val()->as_register();
478 478
479 479 if (do_load()) {
480 480 ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
481 481 }
482 482
483 483 __ cmpptr(pre_val_reg, (int32_t) NULL_WORD);
484 484 __ jcc(Assembler::equal, _continuation);
485 485 ce->store_parameter(pre_val()->as_register(), 0);
486 486 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::g1_pre_barrier_slow_id)));
487 487 __ jmp(_continuation);
488 488
489 489 }
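
A standalone sketch of the SATB pre-barrier slow path just emitted; g1_pre_barrier_slow() is a hypothetical stand-in for the Runtime1::g1_pre_barrier_slow_id stub, and oop is reduced to a plain pointer:

    typedef void* oop;
    void g1_pre_barrier_slow(oop pre_val);   // assumed runtime entry (stand-in)
    void pre_barrier_slow_path(oop* addr, oop pre_val, bool do_load) {
      if (do_load) pre_val = *addr;          // the ce->mem2reg(...) above
      if (pre_val == nullptr) return;        // cmpptr/jcc equal -> _continuation
      g1_pre_barrier_slow(pre_val);          // log the previous value for SATB
    }
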
490 490
491 491 void G1UnsafeGetObjSATBBarrierStub::emit_code(LIR_Assembler* ce) {
492 492 // At this point we know that offset == referent_offset.
493 493 //
494 494 // So we might have to emit:
495 495 // if (src == null) goto continuation.
496 496 //
497 497 // and we definitely have to emit:
498 498 // if (klass(src).reference_type == REF_NONE) goto continuation
499 499 // if (!marking_active) goto continuation
500 500 // if (pre_val == null) goto continuation
501 501 // call pre_barrier(pre_val)
502 502 // goto continuation
503 503 //
504 504 __ bind(_entry);
505 505
506 506 assert(src()->is_register(), "sanity");
507 507 Register src_reg = src()->as_register();
508 508
509 509 if (gen_src_check()) {
510 510 // The original src operand was not a constant.
511 511 // Generate src == null?
512 512 __ cmpptr(src_reg, (int32_t) NULL_WORD);
513 513 __ jcc(Assembler::equal, _continuation);
514 514 }
515 515
516 516 // Generate src->_klass->_reference_type == REF_NONE)?
517 517 assert(tmp()->is_register(), "sanity");
518 518 Register tmp_reg = tmp()->as_register();
519 519
520 520 __ load_klass(tmp_reg, src_reg);
521 521
522 522 Address ref_type_adr(tmp_reg, instanceKlass::reference_type_offset_in_bytes() + sizeof(oopDesc));
523 523 __ cmpl(ref_type_adr, REF_NONE);
524 524 __ jcc(Assembler::equal, _continuation);
525 525
526 526 // Is marking active?
527 527 assert(thread()->is_register(), "precondition");
528 528 Register thread_reg = thread()->as_pointer_register();
529 529
530 530 Address in_progress(thread_reg, in_bytes(JavaThread::satb_mark_queue_offset() +
531 531 PtrQueue::byte_offset_of_active()));
532 532
533 533 if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
534 534 __ cmpl(in_progress, 0);
535 535 } else {
536 536 assert(in_bytes(PtrQueue::byte_width_of_active()) == 1, "Assumption");
537 537 __ cmpb(in_progress, 0);
538 538 }
539 539 __ jcc(Assembler::equal, _continuation);
540 540
541 541 // val == null?
542 542 assert(val()->is_register(), "Precondition.");
543 543 Register val_reg = val()->as_register();
544 544
545 545 __ cmpptr(val_reg, (int32_t) NULL_WORD);
546 546 __ jcc(Assembler::equal, _continuation);
547 547
548 548 ce->store_parameter(val()->as_register(), 0);
549 549 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::g1_pre_barrier_slow_id)));
550 550 __ jmp(_continuation);
551 551 }
552 552
553 553 jbyte* G1PostBarrierStub::_byte_map_base = NULL;
554 554
555 555 jbyte* G1PostBarrierStub::byte_map_base_slow() {
556 556 BarrierSet* bs = Universe::heap()->barrier_set();
557 557 assert(bs->is_a(BarrierSet::G1SATBCTLogging),
558 558 "Must be if we're using this.");
559 559 return ((G1SATBCardTableModRefBS*)bs)->byte_map_base;
560 560 }
561 561
562 562 void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
563 563 __ bind(_entry);
564 564 assert(addr()->is_register(), "Precondition.");
565 565 assert(new_val()->is_register(), "Precondition.");
566 566 Register new_val_reg = new_val()->as_register();
567 567 __ cmpptr(new_val_reg, (int32_t) NULL_WORD);
568 568 __ jcc(Assembler::equal, _continuation);
569 569 ce->store_parameter(addr()->as_pointer_register(), 0);
570 570 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::g1_post_barrier_slow_id)));
571 571 __ jmp(_continuation);
572 572 }
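
And the matching sketch for the post-barrier (again, g1_post_barrier_slow() stands in for the Runtime1::g1_post_barrier_slow_id stub, which performs the card lookup itself):

    typedef void* oop;
    void g1_post_barrier_slow(oop* addr);    // assumed runtime entry (stand-in)
    void post_barrier_slow_path(oop* addr, oop new_val) {
      if (new_val == nullptr) return;        // null stores need no card mark
      g1_post_barrier_slow(addr);            // dirty/enqueue the card for addr
    }
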
573 573
574 574 #endif // SERIALGC
575 575 /////////////////////////////////////////////////////////////////////////////
576 576
577 577 #undef __