--- old/src/cpu/x86/vm/c1_CodeStubs_x86.cpp
+++ new/src/cpu/x86/vm/c1_CodeStubs_x86.cpp
1 1 /*
2 - * Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved.
2 + * Copyright 1999-2010 Sun Microsystems, Inc. All Rights Reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 21 * have any questions.
22 22 *
23 23 */
24 24
25 25 #include "incls/_precompiled.incl"
26 26 #include "incls/_c1_CodeStubs_x86.cpp.incl"
27 27
28 28
29 29 #define __ ce->masm()->
30 30
31 31 float ConversionStub::float_zero = 0.0;
32 32 double ConversionStub::double_zero = 0.0;
33 33
34 34 void ConversionStub::emit_code(LIR_Assembler* ce) {
35 35 __ bind(_entry);
36 36 assert(bytecode() == Bytecodes::_f2i || bytecode() == Bytecodes::_d2i, "other conversions do not require stub");
37 37
38 38
39 39 if (input()->is_single_xmm()) {
40 40 __ comiss(input()->as_xmm_float_reg(),
41 41 ExternalAddress((address)&float_zero));
42 42 } else if (input()->is_double_xmm()) {
43 43 __ comisd(input()->as_xmm_double_reg(),
44 44 ExternalAddress((address)&double_zero));
45 45 } else {
46 46 LP64_ONLY(ShouldNotReachHere());
47 47 __ push(rax);
48 48 __ ftst();
49 49 __ fnstsw_ax();
50 50 __ sahf();
51 51 __ pop(rax);
52 52 }
53 53
54 54 Label NaN, do_return;
55 55 __ jccb(Assembler::parity, NaN);
56 56 __ jccb(Assembler::below, do_return);
57 57
58 58 // input is > 0 -> return maxInt
59 59 // result register already contains 0x80000000, so subtracting 1 gives 0x7fffffff
60 60 __ decrement(result()->as_register());
61 61 __ jmpb(do_return);
62 62
63 63 // input is NaN -> return 0
64 64 __ bind(NaN);
65 65 __ xorptr(result()->as_register(), result()->as_register());
66 66
67 67 __ bind(do_return);
68 68 __ jmp(_continuation);
69 69 }
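
Reviewer note: the branch structure above relies on cvttss2si/cvttsd2si having already left 0x80000000 (min_jint) in the result register for NaN and out-of-range inputs. A minimal C model of the stub's fixup, under that assumption (f2i_fixup is an illustrative name, not a HotSpot function):

    #include <stdint.h>

    int32_t f2i_fixup(float input) {
      int32_t raw = (int32_t)0x80000000; // what the conversion left in the result register
      if (input != input) return 0;      // NaN: the parity branch zeroes the register
      if (input > 0.0f) return raw - 1;  // positive overflow: decrement yields 0x7fffffff (max_jint)
      return raw;                        // negative overflow: min_jint is already correct
    }

(The stub is only entered when the conversion overflowed, so input == 0 never reaches this path.)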
70 70
71 71 #ifdef TIERED
72 72 void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
73 73 __ bind(_entry);
74 74 ce->store_parameter(_bci, 0);
75 75 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::counter_overflow_id)));
76 76 ce->add_call_info_here(_info);
77 77 ce->verify_oop_map(_info);
78 78
79 79 __ jmp(_continuation);
80 80 }
81 81 #endif // TIERED
82 82
83 83
84 84
85 85 RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index,
86 86 bool throw_index_out_of_bounds_exception)
87 87 : _throw_index_out_of_bounds_exception(throw_index_out_of_bounds_exception)
88 88 , _index(index)
89 89 {
90 90 _info = info == NULL ? NULL : new CodeEmitInfo(info);
91 91 }
92 92
93 93
94 94 void RangeCheckStub::emit_code(LIR_Assembler* ce) {
95 95 __ bind(_entry);
96 96 // pass the array index on the stack because all registers must be preserved
97 97 if (_index->is_cpu_register()) {
98 98 ce->store_parameter(_index->as_register(), 0);
99 99 } else {
100 100 ce->store_parameter(_index->as_jint(), 0);
101 101 }
102 102 Runtime1::StubID stub_id;
103 103 if (_throw_index_out_of_bounds_exception) {
104 104 stub_id = Runtime1::throw_index_exception_id;
105 105 } else {
106 106 stub_id = Runtime1::throw_range_check_failed_id;
107 107 }
108 108 __ call(RuntimeAddress(Runtime1::entry_for(stub_id)));
109 109 ce->add_call_info_here(_info);
110 110 debug_only(__ should_not_reach_here());
111 111 }
112 112
113 113
114 114 void DivByZeroStub::emit_code(LIR_Assembler* ce) {
115 115 if (_offset != -1) {
116 116 ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
117 117 }
118 118 __ bind(_entry);
119 119 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::throw_div0_exception_id)));
120 120 ce->add_call_info_here(_info);
121 121 debug_only(__ should_not_reach_here());
122 122 }
123 123
124 124
125 125 // Implementation of NewInstanceStub
126 126
127 127 NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
128 128 _result = result;
129 129 _klass = klass;
130 130 _klass_reg = klass_reg;
131 131 _info = new CodeEmitInfo(info);
132 132 assert(stub_id == Runtime1::new_instance_id ||
133 133 stub_id == Runtime1::fast_new_instance_id ||
134 134 stub_id == Runtime1::fast_new_instance_init_check_id,
135 135 "need new_instance id");
136 136 _stub_id = stub_id;
137 137 }
138 138
139 139
140 140 void NewInstanceStub::emit_code(LIR_Assembler* ce) {
141 141 assert(__ rsp_offset() == 0, "frame size should be fixed");
142 142 __ bind(_entry);
143 143 __ movptr(rdx, _klass_reg->as_register());
144 144 __ call(RuntimeAddress(Runtime1::entry_for(_stub_id)));
145 145 ce->add_call_info_here(_info);
146 146 ce->verify_oop_map(_info);
147 147 assert(_result->as_register() == rax, "result must be in rax");
148 148 __ jmp(_continuation);
149 149 }
150 150
151 151
152 152 // Implementation of NewTypeArrayStub
153 153
154 154 NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
155 155 _klass_reg = klass_reg;
156 156 _length = length;
157 157 _result = result;
158 158 _info = new CodeEmitInfo(info);
159 159 }
160 160
161 161
162 162 void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
163 163 assert(__ rsp_offset() == 0, "frame size should be fixed");
164 164 __ bind(_entry);
165 165 assert(_length->as_register() == rbx, "length must be in rbx");
166 166 assert(_klass_reg->as_register() == rdx, "klass_reg must be in rdx");
167 167 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_type_array_id)));
168 168 ce->add_call_info_here(_info);
169 169 ce->verify_oop_map(_info);
170 170 assert(_result->as_register() == rax, "result must be in rax");
171 171 __ jmp(_continuation);
172 172 }
173 173
174 174
175 175 // Implementation of NewObjectArrayStub
176 176
177 177 NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
178 178 _klass_reg = klass_reg;
179 179 _result = result;
180 180 _length = length;
181 181 _info = new CodeEmitInfo(info);
182 182 }
183 183
184 184
185 185 void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
186 186 assert(__ rsp_offset() == 0, "frame size should be fixed");
187 187 __ bind(_entry);
188 188 assert(_length->as_register() == rbx, "length must be in rbx");
189 189 assert(_klass_reg->as_register() == rdx, "klass_reg must be in rdx");
190 190 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_object_array_id)));
191 191 ce->add_call_info_here(_info);
192 192 ce->verify_oop_map(_info);
193 193 assert(_result->as_register() == rax, "result must be in rax");
194 194 __ jmp(_continuation);
195 195 }
196 196
197 197
198 198 // Implementation of MonitorAccessStubs
199 199
200 200 MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info)
201 201 : MonitorAccessStub(obj_reg, lock_reg)
202 202 {
203 203 _info = new CodeEmitInfo(info);
204 204 }
205 205
206 206
207 207 void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
208 208 assert(__ rsp_offset() == 0, "frame size should be fixed");
209 209 __ bind(_entry);
210 210 ce->store_parameter(_obj_reg->as_register(), 1);
211 211 ce->store_parameter(_lock_reg->as_register(), 0);
212 212 Runtime1::StubID enter_id;
213 213 if (ce->compilation()->has_fpu_code()) {
214 214 enter_id = Runtime1::monitorenter_id;
215 215 } else {
216 216 enter_id = Runtime1::monitorenter_nofpu_id;
217 217 }
218 218 __ call(RuntimeAddress(Runtime1::entry_for(enter_id)));
219 219 ce->add_call_info_here(_info);
220 220 ce->verify_oop_map(_info);
221 221 __ jmp(_continuation);
222 222 }
223 223
224 224
225 225 void MonitorExitStub::emit_code(LIR_Assembler* ce) {
226 226 __ bind(_entry);
227 227 if (_compute_lock) {
228 228 // lock_reg was destroyed by fast unlocking attempt => recompute it
229 229 ce->monitor_address(_monitor_ix, _lock_reg);
230 230 }
231 231 ce->store_parameter(_lock_reg->as_register(), 0);
232 232 // note: non-blocking leaf routine => no call info needed
233 233 Runtime1::StubID exit_id;
234 234 if (ce->compilation()->has_fpu_code()) {
235 235 exit_id = Runtime1::monitorexit_id;
236 236 } else {
237 237 exit_id = Runtime1::monitorexit_nofpu_id;
238 238 }
239 239 __ call(RuntimeAddress(Runtime1::entry_for(exit_id)));
240 240 __ jmp(_continuation);
241 241 }
242 242
243 243
244 244 // Implementation of patching:
245 245 // - Copy the code at the given offset to an inlined buffer (first the bytes, then the number of bytes)
246 246 // - Replace original code with a call to the stub
247 247 // At Runtime:
248 248 // - call to stub, jump to runtime
249 249 // - in runtime: preserve all registers (especially objects, i.e., source and destination object)
250 250 // - in runtime: after initializing class, restore original code, reexecute instruction
251 251
252 252 int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;
253 253
254 254 void PatchingStub::align_patch_site(MacroAssembler* masm) {
255 255 // We're patching a 5-7 byte instruction on Intel and we need to
256 256 // make sure that we don't see a piece of the instruction. It
257 257 // appears mostly impossible on Intel to simply invalidate other
258 258 // processors' caches, and since they may do aggressive prefetch
259 259 // it's very hard to guess what code might be in the icache.
260 260 // Force the instruction to be double-word aligned so that it
261 261 // doesn't span a cache line.
262 262 masm->align(round_to(NativeGeneralJump::instruction_size, wordSize));
263 263 }
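
Reviewer note: a self-contained model of the alignment arithmetic (round_to here is a local sketch of the HotSpot utility, under the usual power-of-two assumption). On 32-bit x86 a NativeGeneralJump is the 5-byte jmp rel32 and wordSize is 4, so the patch site gets padded out to an 8-byte boundary:

    #include <assert.h>

    // Round x up to a multiple of a power-of-two alignment.
    static int round_to(int x, int alignment) {
      return (x + alignment - 1) & ~(alignment - 1);
    }

    int main(void) {
      assert(round_to(5, 4) == 8); // 5-byte jump, 4-byte words -> 8-byte alignment,
                                   // so the patched instruction can't straddle a cache line
      return 0;
    }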
264 264
265 265 void PatchingStub::emit_code(LIR_Assembler* ce) {
266 266 assert(NativeCall::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF, "not enough room for call");
267 267
268 268 Label call_patch;
269 269
270 270 // static field accesses have special semantics while the class
271 271 // initializer is being run so we emit a test which can be used to
272 272 // check that this code is being executed by the initializing
273 273 // thread.
274 274 address being_initialized_entry = __ pc();
275 275 if (CommentedAssembly) {
276 276 __ block_comment(" patch template");
277 277 }
278 278 if (_id == load_klass_id) {
279 279 // produce a copy of the load klass instruction for use by the being initialized case
280 280 address start = __ pc();
281 281 jobject o = NULL;
282 282 __ movoop(_obj, o);
283 283 #ifdef ASSERT
284 284 for (int i = 0; i < _bytes_to_copy; i++) {
285 285 address ptr = (address)(_pc_start + i);
286 286 int a_byte = (*ptr) & 0xFF;
287 287 assert(a_byte == *start++, "should be the same code");
288 288 }
289 289 #endif
290 290 } else {
291 291 // make a copy of the code that is going to be patched.
292 292 for (int i = 0; i < _bytes_to_copy; i++) {
293 293 address ptr = (address)(_pc_start + i);
294 294 int a_byte = (*ptr) & 0xFF;
295 295 __ a_byte (a_byte);
296 296 *ptr = 0x90; // make the site look like a nop
297 297 }
298 298 }
299 299
300 300 address end_of_patch = __ pc();
301 301 int bytes_to_skip = 0;
302 302 if (_id == load_klass_id) {
303 303 int offset = __ offset();
304 304 if (CommentedAssembly) {
305 305 __ block_comment(" being_initialized check");
306 306 }
307 307 assert(_obj != noreg, "must be a valid register");
308 308 Register tmp = rax;
309 309 if (_obj == tmp) tmp = rbx;
310 310 __ push(tmp);
311 311 __ get_thread(tmp);
312 312 __ cmpptr(tmp, Address(_obj, instanceKlass::init_thread_offset_in_bytes() + sizeof(klassOopDesc)));
313 313 __ pop(tmp);
314 314 __ jcc(Assembler::notEqual, call_patch);
315 315
316 316 // access_field patches may execute the patched code before it's
317 317 // copied back into place so we need to jump back into the main
318 318 // code of the nmethod to continue execution.
319 319 __ jmp(_patch_site_continuation);
320 320
321 321 // make sure this extra code gets skipped
322 322 bytes_to_skip += __ offset() - offset;
323 323 }
324 324 if (CommentedAssembly) {
325 325 __ block_comment("patch data encoded as movl");
326 326 }
327 327 // Now emit the patch record telling the runtime how to find the
328 328 // pieces of the patch. We only need 3 bytes but for readability of
329 329 // the disassembly we make the data look like a movl reg, imm32,
330 330 // which requires 5 bytes
331 331 int sizeof_patch_record = 5;
332 332 bytes_to_skip += sizeof_patch_record;
333 333
334 334 // emit the offsets needed to find the code to patch
335 335 int being_initialized_entry_offset = __ pc() - being_initialized_entry + sizeof_patch_record;
336 336
337 337 __ a_byte(0xB8);
338 338 __ a_byte(0);
339 339 __ a_byte(being_initialized_entry_offset);
340 340 __ a_byte(bytes_to_skip);
341 341 __ a_byte(_bytes_to_copy);
342 342 address patch_info_pc = __ pc();
343 343 assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");
344 344
345 345 address entry = __ pc();
346 346 NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
347 347 address target = NULL;
348 348 switch (_id) {
349 349 case access_field_id: target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
350 350 case load_klass_id: target = Runtime1::entry_for(Runtime1::load_klass_patching_id); break;
351 351 default: ShouldNotReachHere();
352 352 }
353 353 __ bind(call_patch);
354 354
355 355 if (CommentedAssembly) {
356 356 __ block_comment("patch entry point");
357 357 }
358 358 __ call(RuntimeAddress(target));
359 359 assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
360 360 ce->add_call_info_here(_info);
361 361 int jmp_off = __ offset();
362 362 __ jmp(_patch_site_entry);
363 363 // Add enough nops so deoptimization can overwrite the jmp above with a call
364 364 // and not destroy the world.
365 365 for (int j = __ offset(); j < jmp_off + 5; j++) {
366 366 __ nop();
367 367 }
368 368 if (_id == load_klass_id) {
369 369 CodeSection* cs = __ code_section();
370 370 RelocIterator iter(cs, (address)_pc_start, (address)(_pc_start + 1));
371 371 relocInfo::change_reloc_info_for_address(&iter, (address) _pc_start, relocInfo::oop_type, relocInfo::none);
372 372 }
373 373 }
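
Reviewer note: the five a_byte() calls in the body above lay down a record that disassembles as movl eax, imm32 purely for readability; the runtime patching code reads it byte by byte. A hypothetical struct view of that layout (PatchRecord is not a HotSpot type):

    #include <stdint.h>

    struct PatchRecord {
      uint8_t movl_opcode;                    // 0xB8, so the record disassembles as movl eax, imm32
      uint8_t unused;                         // always 0
      uint8_t being_initialized_entry_offset; // distance back to the being_initialized entry
      uint8_t bytes_to_skip;                  // bytes between end_of_patch and patch_info_pc
      uint8_t bytes_to_copy;                  // length of the original code saved in the template
    };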
374 374
375 375
376 +void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
377 + __ bind(_entry);
378 + __ call(RuntimeAddress(SharedRuntime::deopt_blob()->unpack_with_reexecution()));
379 + ce->add_call_info_here(_info);
380 + debug_only(__ should_not_reach_here());
381 +}
382 +
383 +
376 384 void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
377 385 ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
378 386 __ bind(_entry);
379 387 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id)));
380 388 ce->add_call_info_here(_info);
381 389 debug_only(__ should_not_reach_here());
382 390 }
383 391
384 392
385 393 void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
386 394 assert(__ rsp_offset() == 0, "frame size should be fixed");
387 395
388 396 __ bind(_entry);
389 397 // pass the object on the stack because all registers must be preserved
390 398 if (_obj->is_cpu_register()) {
391 399 ce->store_parameter(_obj->as_register(), 0);
392 400 }
393 401 __ call(RuntimeAddress(Runtime1::entry_for(_stub)));
394 402 ce->add_call_info_here(_info);
395 403 debug_only(__ should_not_reach_here());
396 404 }
397 405
398 406
399 407 ArrayStoreExceptionStub::ArrayStoreExceptionStub(CodeEmitInfo* info):
400 408 _info(info) {
401 409 }
402 410
403 411
404 412 void ArrayStoreExceptionStub::emit_code(LIR_Assembler* ce) {
405 413 assert(__ rsp_offset() == 0, "frame size should be fixed");
406 414 __ bind(_entry);
407 415 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::throw_array_store_exception_id)));
408 416 ce->add_call_info_here(_info);
409 417 debug_only(__ should_not_reach_here());
410 418 }
411 419
412 420
413 421 void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
414 422 //---------------slow case: call to native-----------------
415 423 __ bind(_entry);
416 424 // Figure out where the args should go
417 425 // This should really convert the IntrinsicID to the methodOop and signature
418 426 // but I don't know how to do that.
419 427 //
420 428 VMRegPair args[5];
421 429 BasicType signature[5] = { T_OBJECT, T_INT, T_OBJECT, T_INT, T_INT};
422 430 SharedRuntime::java_calling_convention(signature, args, 5, true);
423 431
424 432 // push parameters
425 433 // (src, src_pos, dst, dst_pos, length)
426 434 Register r[5];
427 435 r[0] = src()->as_register();
428 436 r[1] = src_pos()->as_register();
429 437 r[2] = dst()->as_register();
430 438 r[3] = dst_pos()->as_register();
431 439 r[4] = length()->as_register();
432 440
433 441 // next registers will get stored on the stack
434 442 for (int i = 0; i < 5; i++) {
435 443 VMReg r_1 = args[i].first();
436 444 if (r_1->is_stack()) {
437 445 int st_off = r_1->reg2stack() * wordSize;
438 446 __ movptr (Address(rsp, st_off), r[i]);
439 447 } else {
440 448 assert(r[i] == args[i].first()->as_Register(), "Wrong register for arg ");
441 449 }
442 450 }
443 451
444 452 ce->align_call(lir_static_call);
445 453
446 454 ce->emit_static_call_stub();
447 455 AddressLiteral resolve(SharedRuntime::get_resolve_static_call_stub(),
448 456 relocInfo::static_call_type);
449 457 __ call(resolve);
450 458 ce->add_call_info_here(info());
451 459
452 460 #ifndef PRODUCT
453 461 __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_slowcase_cnt));
454 462 #endif
455 463
456 464 __ jmp(_continuation);
457 465 }
458 466
459 467 /////////////////////////////////////////////////////////////////////////////
460 468 #ifndef SERIALGC
461 469
462 470 void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
463 471
464 472 // At this point we know that marking is in progress
465 473
466 474 __ bind(_entry);
467 475 assert(pre_val()->is_register(), "Precondition.");
468 476
469 477 Register pre_val_reg = pre_val()->as_register();
470 478
471 479 ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false);
472 480
473 481 __ cmpptr(pre_val_reg, (int32_t) NULL_WORD);
474 482 __ jcc(Assembler::equal, _continuation);
475 483 ce->store_parameter(pre_val()->as_register(), 0);
476 484 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::g1_pre_barrier_slow_id)));
477 485 __ jmp(_continuation);
478 486
479 487 }
480 488
481 489 jbyte* G1PostBarrierStub::_byte_map_base = NULL;
482 490
483 491 jbyte* G1PostBarrierStub::byte_map_base_slow() {
484 492 BarrierSet* bs = Universe::heap()->barrier_set();
485 493 assert(bs->is_a(BarrierSet::G1SATBCTLogging),
486 494 "Must be if we're using this.");
487 495 return ((G1SATBCardTableModRefBS*)bs)->byte_map_base;
488 496 }
489 497
490 498 void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
491 499 __ bind(_entry);
492 500 assert(addr()->is_register(), "Precondition.");
493 501 assert(new_val()->is_register(), "Precondition.");
494 502 Register new_val_reg = new_val()->as_register();
495 503 __ cmpptr(new_val_reg, (int32_t) NULL_WORD);
496 504 __ jcc(Assembler::equal, _continuation);
497 505 ce->store_parameter(addr()->as_register(), 0);
498 506 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::g1_post_barrier_slow_id)));
499 507 __ jmp(_continuation);
500 508 }
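
Reviewer note: both G1 stubs share the same shape, a null filter in compiled code followed by a call into Runtime1 for the real enqueue work. A compilable sketch of that shape, with hypothetical satb_enqueue/dirty_card_enqueue standing in for the g1_pre_barrier_slow_id and g1_post_barrier_slow_id entries:

    #include <stddef.h>

    extern void satb_enqueue(void* pre_val);          // hypothetical: record old value for SATB marking
    extern void dirty_card_enqueue(void* field_addr); // hypothetical: queue card for refinement

    static void g1_pre_barrier_slow(void* pre_val) {
      if (pre_val == NULL) return; // the cmpptr/jcc(equal, _continuation) filter above
      satb_enqueue(pre_val);
    }

    static void g1_post_barrier_slow(void* field_addr, void* new_val) {
      if (new_val == NULL) return; // a null store creates no cross-region pointer
      dirty_card_enqueue(field_addr);
    }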
501 509
502 510 #endif // SERIALGC
503 511 /////////////////////////////////////////////////////////////////////////////
504 512
505 513 #undef __