Print this page
Split |
Close |
Expand all |
Collapse all |
--- old/src/share/vm/c1/c1_LIRAssembler.cpp
+++ new/src/share/vm/c1/c1_LIRAssembler.cpp
1 1 /*
2 2 * Copyright 2000-2010 Sun Microsystems, Inc. All Rights Reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 21 * have any questions.
22 22 *
23 23 */
24 24
25 25 # include "incls/_precompiled.incl"
26 26 # include "incls/_c1_LIRAssembler.cpp.incl"
27 27
28 28
29 29 void LIR_Assembler::patching_epilog(PatchingStub* patch, LIR_PatchCode patch_code, Register obj, CodeEmitInfo* info) {
30 30 // we must have enough patching space so that the call can be inserted
31 31 while ((intx) _masm->pc() - (intx) patch->pc_start() < NativeCall::instruction_size) {
32 32 _masm->nop();
33 33 }
34 34 patch->install(_masm, patch_code, obj, info);
35 35 append_patching_stub(patch);
36 36
37 37 #ifdef ASSERT
38 38 Bytecodes::Code code = info->scope()->method()->java_code_at_bci(info->bci());
39 39 if (patch->id() == PatchingStub::access_field_id) {
40 40 switch (code) {
41 41 case Bytecodes::_putstatic:
42 42 case Bytecodes::_getstatic:
43 43 case Bytecodes::_putfield:
44 44 case Bytecodes::_getfield:
45 45 break;
46 46 default:
47 47 ShouldNotReachHere();
48 48 }
49 49 } else if (patch->id() == PatchingStub::load_klass_id) {
50 50 switch (code) {
51 51 case Bytecodes::_putstatic:
52 52 case Bytecodes::_getstatic:
53 53 case Bytecodes::_new:
54 54 case Bytecodes::_anewarray:
55 55 case Bytecodes::_multianewarray:
56 56 case Bytecodes::_instanceof:
57 57 case Bytecodes::_checkcast:
58 58 case Bytecodes::_ldc:
59 59 case Bytecodes::_ldc_w:
60 60 break;
61 61 default:
62 62 ShouldNotReachHere();
63 63 }
64 64 } else {
65 65 ShouldNotReachHere();
66 66 }
67 67 #endif
68 68 }
69 69
70 70
71 71 //---------------------------------------------------------------
72 72
73 73
74 74 LIR_Assembler::LIR_Assembler(Compilation* c):
75 75 _compilation(c)
76 76 , _masm(c->masm())
77 77 , _bs(Universe::heap()->barrier_set())
78 78 , _frame_map(c->frame_map())
79 79 , _current_block(NULL)
80 80 , _pending_non_safepoint(NULL)
81 81 , _pending_non_safepoint_offset(0)
82 82 {
83 83 _slow_case_stubs = new CodeStubList();
84 84 }
85 85
86 86
87 87 LIR_Assembler::~LIR_Assembler() {
88 88 }
89 89
90 90
91 91 void LIR_Assembler::append_patching_stub(PatchingStub* stub) {
92 92 _slow_case_stubs->append(stub);
93 93 }
94 94
95 95
96 96 void LIR_Assembler::check_codespace() {
97 97 CodeSection* cs = _masm->code_section();
98 98 if (cs->remaining() < (int)(1*K)) {
99 99 BAILOUT("CodeBuffer overflow");
100 100 }
101 101 }
102 102
103 103
104 104 void LIR_Assembler::emit_code_stub(CodeStub* stub) {
105 105 _slow_case_stubs->append(stub);
106 106 }
107 107
108 108 void LIR_Assembler::emit_stubs(CodeStubList* stub_list) {
109 109 for (int m = 0; m < stub_list->length(); m++) {
110 110 CodeStub* s = (*stub_list)[m];
111 111
112 112 check_codespace();
113 113 CHECK_BAILOUT();
114 114
115 115 #ifndef PRODUCT
116 116 if (CommentedAssembly) {
117 117 stringStream st;
118 118 s->print_name(&st);
119 119 st.print(" slow case");
120 120 _masm->block_comment(st.as_string());
121 121 }
122 122 #endif
123 123 s->emit_code(this);
124 124 #ifdef ASSERT
125 125 s->assert_no_unbound_labels();
126 126 #endif
127 127 }
128 128 }
129 129
130 130
131 131 void LIR_Assembler::emit_slow_case_stubs() {
132 132 emit_stubs(_slow_case_stubs);
133 133 }
134 134
135 135
136 136 bool LIR_Assembler::needs_icache(ciMethod* method) const {
137 137 return !method->is_static();
138 138 }
139 139
140 140
141 141 int LIR_Assembler::code_offset() const {
142 142 return _masm->offset();
143 143 }
144 144
145 145
146 146 address LIR_Assembler::pc() const {
147 147 return _masm->pc();
148 148 }
149 149
150 150
151 151 void LIR_Assembler::emit_exception_entries(ExceptionInfoList* info_list) {
152 152 for (int i = 0; i < info_list->length(); i++) {
153 153 XHandlers* handlers = info_list->at(i)->exception_handlers();
154 154
155 155 for (int j = 0; j < handlers->length(); j++) {
156 156 XHandler* handler = handlers->handler_at(j);
157 157 assert(handler->lir_op_id() != -1, "handler not processed by LinearScan");
158 158 assert(handler->entry_code() == NULL ||
159 159 handler->entry_code()->instructions_list()->last()->code() == lir_branch ||
160 160 handler->entry_code()->instructions_list()->last()->code() == lir_delay_slot, "last operation must be branch");
161 161
162 162 if (handler->entry_pco() == -1) {
163 163 // entry code not emitted yet
164 164 if (handler->entry_code() != NULL && handler->entry_code()->instructions_list()->length() > 1) {
165 165 handler->set_entry_pco(code_offset());
166 166 if (CommentedAssembly) {
167 167 _masm->block_comment("Exception adapter block");
168 168 }
169 169 emit_lir_list(handler->entry_code());
170 170 } else {
171 171 handler->set_entry_pco(handler->entry_block()->exception_handler_pco());
172 172 }
173 173
174 174 assert(handler->entry_pco() != -1, "must be set now");
175 175 }
176 176 }
177 177 }
178 178 }
179 179
180 180
181 181 void LIR_Assembler::emit_code(BlockList* hir) {
182 182 if (PrintLIR) {
183 183 print_LIR(hir);
184 184 }
185 185
186 186 int n = hir->length();
187 187 for (int i = 0; i < n; i++) {
188 188 emit_block(hir->at(i));
189 189 CHECK_BAILOUT();
190 190 }
191 191
192 192 flush_debug_info(code_offset());
193 193
194 194 DEBUG_ONLY(check_no_unbound_labels());
195 195 }
196 196
197 197
198 198 void LIR_Assembler::emit_block(BlockBegin* block) {
199 199 if (block->is_set(BlockBegin::backward_branch_target_flag)) {
200 200 align_backward_branch_target();
201 201 }
202 202
203 203 // if this block is the start of an exception handler, record the
204 204 // PC offset of the first instruction for later construction of
205 205 // the ExceptionHandlerTable
206 206 if (block->is_set(BlockBegin::exception_entry_flag)) {
207 207 block->set_exception_handler_pco(code_offset());
208 208 }
209 209
210 210 #ifndef PRODUCT
211 211 if (PrintLIRWithAssembly) {
212 212 // don't print Phi's
213 213 InstructionPrinter ip(false);
214 214 block->print(ip);
215 215 }
216 216 #endif /* PRODUCT */
217 217
218 218 assert(block->lir() != NULL, "must have LIR");
219 219 X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));
220 220
221 221 #ifndef PRODUCT
222 222 if (CommentedAssembly) {
223 223 stringStream st;
224 224 st.print_cr(" block B%d [%d, %d]", block->block_id(), block->bci(), block->end()->bci());
225 225 _masm->block_comment(st.as_string());
226 226 }
227 227 #endif
228 228
229 229 emit_lir_list(block->lir());
230 230
231 231 X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));
232 232 }
233 233
234 234
235 235 void LIR_Assembler::emit_lir_list(LIR_List* list) {
236 236 peephole(list);
237 237
238 238 int n = list->length();
239 239 for (int i = 0; i < n; i++) {
240 240 LIR_Op* op = list->at(i);
241 241
242 242 check_codespace();
243 243 CHECK_BAILOUT();
244 244
245 245 #ifndef PRODUCT
246 246 if (CommentedAssembly) {
247 247 // Don't print out every op since that's too verbose. Print
248 248 // branches since they include block and stub names. Also print
249 249 // patching moves since they generate funny looking code.
250 250 if (op->code() == lir_branch ||
251 251 (op->code() == lir_move && op->as_Op1()->patch_code() != lir_patch_none)) {
252 252 stringStream st;
253 253 op->print_on(&st);
254 254 _masm->block_comment(st.as_string());
255 255 }
256 256 }
257 257 if (PrintLIRWithAssembly) {
258 258 // print out the LIR operation followed by the resulting assembly
259 259 list->at(i)->print(); tty->cr();
260 260 }
261 261 #endif /* PRODUCT */
262 262
263 263 op->emit_code(this);
264 264
265 265 if (compilation()->debug_info_recorder()->recording_non_safepoints()) {
266 266 process_debug_info(op);
267 267 }
268 268
269 269 #ifndef PRODUCT
270 270 if (PrintLIRWithAssembly) {
271 271 _masm->code()->decode();
272 272 }
273 273 #endif /* PRODUCT */
274 274 }
275 275 }
276 276
277 277 #ifdef ASSERT
278 278 void LIR_Assembler::check_no_unbound_labels() {
279 279 CHECK_BAILOUT();
280 280
281 281 for (int i = 0; i < _branch_target_blocks.length() - 1; i++) {
282 282 if (!_branch_target_blocks.at(i)->label()->is_bound()) {
283 283 tty->print_cr("label of block B%d is not bound", _branch_target_blocks.at(i)->block_id());
284 284 assert(false, "unbound label");
285 285 }
286 286 }
287 287 }
288 288 #endif
289 289
290 290 //----------------------------------debug info--------------------------------
291 291
292 292
293 293 void LIR_Assembler::add_debug_info_for_branch(CodeEmitInfo* info) {
↓ open down ↓ |
293 lines elided |
↑ open up ↑ |
294 294 _masm->code_section()->relocate(pc(), relocInfo::poll_type);
295 295 int pc_offset = code_offset();
296 296 flush_debug_info(pc_offset);
297 297 info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
298 298 if (info->exception_handlers() != NULL) {
299 299 compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
300 300 }
301 301 }
302 302
303 303
304 -void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo, bool is_method_handle_invoke) {
304 +void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo) {
305 305 flush_debug_info(pc_offset);
306 - cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset, is_method_handle_invoke);
306 + cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
307 307 if (cinfo->exception_handlers() != NULL) {
308 308 compilation()->add_exception_handlers_for_pco(pc_offset, cinfo->exception_handlers());
309 309 }
310 310 }
311 311
312 312 static ValueStack* debug_info(Instruction* ins) {
313 313 StateSplit* ss = ins->as_StateSplit();
314 314 if (ss != NULL) return ss->state();
315 315 return ins->lock_stack();
316 316 }
317 317
318 318 void LIR_Assembler::process_debug_info(LIR_Op* op) {
319 319 Instruction* src = op->source();
320 320 if (src == NULL) return;
321 321 int pc_offset = code_offset();
322 322 if (_pending_non_safepoint == src) {
323 323 _pending_non_safepoint_offset = pc_offset;
324 324 return;
325 325 }
326 326 ValueStack* vstack = debug_info(src);
327 327 if (vstack == NULL) return;
328 328 if (_pending_non_safepoint != NULL) {
329 329 // Got some old debug info. Get rid of it.
330 330 if (_pending_non_safepoint->bci() == src->bci() &&
331 331 debug_info(_pending_non_safepoint) == vstack) {
332 332 _pending_non_safepoint_offset = pc_offset;
333 333 return;
334 334 }
335 335 if (_pending_non_safepoint_offset < pc_offset) {
336 336 record_non_safepoint_debug_info();
337 337 }
338 338 _pending_non_safepoint = NULL;
339 339 }
340 340 // Remember the debug info.
341 341 if (pc_offset > compilation()->debug_info_recorder()->last_pc_offset()) {
342 342 _pending_non_safepoint = src;
343 343 _pending_non_safepoint_offset = pc_offset;
344 344 }
345 345 }
346 346
347 347 // Index caller states in s, where 0 is the oldest, 1 its callee, etc.
348 348 // Return NULL if n is too large.
349 349 // Also returns the caller_bci for the next-younger state.
350 350 static ValueStack* nth_oldest(ValueStack* s, int n, int& bci_result) {
351 351 ValueStack* t = s;
352 352 for (int i = 0; i < n; i++) {
353 353 if (t == NULL) break;
354 354 t = t->caller_state();
355 355 }
356 356 if (t == NULL) return NULL;
357 357 for (;;) {
358 358 ValueStack* tc = t->caller_state();
359 359 if (tc == NULL) return s;
360 360 t = tc;
361 361 bci_result = s->scope()->caller_bci();
362 362 s = s->caller_state();
363 363 }
364 364 }
365 365
366 366 void LIR_Assembler::record_non_safepoint_debug_info() {
367 367 int pc_offset = _pending_non_safepoint_offset;
368 368 ValueStack* vstack = debug_info(_pending_non_safepoint);
369 369 int bci = _pending_non_safepoint->bci();
370 370
371 371 DebugInformationRecorder* debug_info = compilation()->debug_info_recorder();
372 372 assert(debug_info->recording_non_safepoints(), "sanity");
373 373
374 374 debug_info->add_non_safepoint(pc_offset);
375 375
376 376 // Visit scopes from oldest to youngest.
377 377 for (int n = 0; ; n++) {
378 378 int s_bci = bci;
379 379 ValueStack* s = nth_oldest(vstack, n, s_bci);
380 380 if (s == NULL) break;
381 381 IRScope* scope = s->scope();
382 382 // Always pass false for reexecute since these ScopeDescs are never used for deopt
383 383 debug_info->describe_scope(pc_offset, scope->method(), s_bci, false/*reexecute*/);
384 384 }
385 385
386 386 debug_info->end_non_safepoint(pc_offset);
387 387 }
388 388
389 389
390 390 void LIR_Assembler::add_debug_info_for_null_check_here(CodeEmitInfo* cinfo) {
391 391 add_debug_info_for_null_check(code_offset(), cinfo);
392 392 }
393 393
394 394 void LIR_Assembler::add_debug_info_for_null_check(int pc_offset, CodeEmitInfo* cinfo) {
395 395 ImplicitNullCheckStub* stub = new ImplicitNullCheckStub(pc_offset, cinfo);
396 396 emit_code_stub(stub);
397 397 }
398 398
399 399 void LIR_Assembler::add_debug_info_for_div0_here(CodeEmitInfo* info) {
400 400 add_debug_info_for_div0(code_offset(), info);
401 401 }
402 402
403 403 void LIR_Assembler::add_debug_info_for_div0(int pc_offset, CodeEmitInfo* cinfo) {
404 404 DivByZeroStub* stub = new DivByZeroStub(pc_offset, cinfo);
405 405 emit_code_stub(stub);
↓ open down ↓ |
89 lines elided |
↑ open up ↑ |
406 406 }
407 407
408 408 void LIR_Assembler::emit_rtcall(LIR_OpRTCall* op) {
409 409 rt_call(op->result_opr(), op->addr(), op->arguments(), op->tmp(), op->info());
410 410 }
411 411
412 412
413 413 void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
414 414 verify_oop_map(op->info());
415 415
416 - // JSR 292
417 - // Preserve the SP over MethodHandle call sites.
418 - if (op->is_method_handle_invoke()) {
419 - preserve_SP(op);
420 - }
421 -
422 416 if (os::is_MP()) {
423 417 // must align calls sites, otherwise they can't be updated atomically on MP hardware
424 418 align_call(op->code());
425 419 }
426 420
427 421 // emit the static call stub stuff out of line
428 422 emit_static_call_stub();
429 423
430 424 switch (op->code()) {
431 425 case lir_static_call:
432 426 call(op, relocInfo::static_call_type);
433 427 break;
434 428 case lir_optvirtual_call:
435 429 case lir_dynamic_call:
436 430 call(op, relocInfo::opt_virtual_call_type);
↓ open down ↓ |
5 lines elided |
↑ open up ↑ |
437 431 break;
438 432 case lir_icvirtual_call:
439 433 ic_call(op);
440 434 break;
441 435 case lir_virtual_call:
442 436 vtable_call(op);
443 437 break;
444 438 default: ShouldNotReachHere();
445 439 }
446 440
447 - if (op->is_method_handle_invoke()) {
448 - restore_SP(op);
449 - }
450 -
451 441 #if defined(X86) && defined(TIERED)
452 442 // C2 leaves the FPU stack dirty; clean it
453 443 if (UseSSE < 2) {
454 444 int i;
455 445 for ( i = 1; i <= 7 ; i++ ) {
456 446 ffree(i);
457 447 }
458 448 if (!op->result_opr()->is_float_kind()) {
459 449 ffree(0);
460 450 }
461 451 }
462 452 #endif // X86 && TIERED
463 453 }
464 454
465 455
466 456 void LIR_Assembler::emit_opLabel(LIR_OpLabel* op) {
467 457 _masm->bind (*(op->label()));
468 458 }
469 459
470 460
471 461 void LIR_Assembler::emit_op1(LIR_Op1* op) {
472 462 switch (op->code()) {
473 463 case lir_move:
474 464 if (op->move_kind() == lir_move_volatile) {
475 465 assert(op->patch_code() == lir_patch_none, "can't patch volatiles");
476 466 volatile_move_op(op->in_opr(), op->result_opr(), op->type(), op->info());
477 467 } else {
478 468 move_op(op->in_opr(), op->result_opr(), op->type(),
479 469 op->patch_code(), op->info(), op->pop_fpu_stack(), op->move_kind() == lir_move_unaligned);
480 470 }
481 471 break;
482 472
483 473 case lir_prefetchr:
484 474 prefetchr(op->in_opr());
485 475 break;
486 476
487 477 case lir_prefetchw:
488 478 prefetchw(op->in_opr());
489 479 break;
490 480
491 481 case lir_roundfp: {
492 482 LIR_OpRoundFP* round_op = op->as_OpRoundFP();
493 483 roundfp_op(round_op->in_opr(), round_op->tmp(), round_op->result_opr(), round_op->pop_fpu_stack());
494 484 break;
495 485 }
496 486
497 487 case lir_return:
498 488 return_op(op->in_opr());
499 489 break;
500 490
501 491 case lir_safepoint:
502 492 if (compilation()->debug_info_recorder()->last_pc_offset() == code_offset()) {
503 493 _masm->nop();
504 494 }
505 495 safepoint_poll(op->in_opr(), op->info());
506 496 break;
507 497
508 498 case lir_fxch:
509 499 fxch(op->in_opr()->as_jint());
510 500 break;
511 501
512 502 case lir_fld:
513 503 fld(op->in_opr()->as_jint());
514 504 break;
515 505
516 506 case lir_ffree:
517 507 ffree(op->in_opr()->as_jint());
518 508 break;
519 509
520 510 case lir_branch:
521 511 break;
522 512
523 513 case lir_push:
524 514 push(op->in_opr());
525 515 break;
526 516
527 517 case lir_pop:
528 518 pop(op->in_opr());
529 519 break;
530 520
531 521 case lir_neg:
532 522 negate(op->in_opr(), op->result_opr());
533 523 break;
534 524
535 525 case lir_leal:
536 526 leal(op->in_opr(), op->result_opr());
537 527 break;
538 528
539 529 case lir_null_check:
540 530 if (GenerateCompilerNullChecks) {
541 531 add_debug_info_for_null_check_here(op->info());
542 532
543 533 if (op->in_opr()->is_single_cpu()) {
544 534 _masm->null_check(op->in_opr()->as_register());
545 535 } else {
546 536 Unimplemented();
547 537 }
548 538 }
549 539 break;
550 540
551 541 case lir_monaddr:
552 542 monitor_address(op->in_opr()->as_constant_ptr()->as_jint(), op->result_opr());
553 543 break;
554 544
555 545 case lir_unwind:
556 546 unwind_op(op->in_opr());
557 547 break;
558 548
559 549 default:
560 550 Unimplemented();
561 551 break;
562 552 }
563 553 }
564 554
565 555
566 556 void LIR_Assembler::emit_op0(LIR_Op0* op) {
567 557 switch (op->code()) {
568 558 case lir_word_align: {
569 559 while (code_offset() % BytesPerWord != 0) {
570 560 _masm->nop();
571 561 }
572 562 break;
573 563 }
574 564
575 565 case lir_nop:
576 566 assert(op->info() == NULL, "not supported");
577 567 _masm->nop();
578 568 break;
579 569
580 570 case lir_label:
581 571 Unimplemented();
582 572 break;
583 573
584 574 case lir_build_frame:
585 575 build_frame();
586 576 break;
587 577
588 578 case lir_std_entry:
589 579 // init offsets
590 580 offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
591 581 _masm->align(CodeEntryAlignment);
592 582 if (needs_icache(compilation()->method())) {
593 583 check_icache();
594 584 }
595 585 offsets()->set_value(CodeOffsets::Verified_Entry, _masm->offset());
596 586 _masm->verified_entry();
597 587 build_frame();
598 588 offsets()->set_value(CodeOffsets::Frame_Complete, _masm->offset());
599 589 break;
600 590
601 591 case lir_osr_entry:
602 592 offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
603 593 osr_entry();
604 594 break;
605 595
606 596 case lir_24bit_FPU:
607 597 set_24bit_FPU();
608 598 break;
609 599
610 600 case lir_reset_FPU:
611 601 reset_FPU();
612 602 break;
613 603
614 604 case lir_breakpoint:
615 605 breakpoint();
616 606 break;
617 607
618 608 case lir_fpop_raw:
619 609 fpop();
620 610 break;
621 611
622 612 case lir_membar:
623 613 membar();
624 614 break;
625 615
626 616 case lir_membar_acquire:
627 617 membar_acquire();
628 618 break;
629 619
630 620 case lir_membar_release:
631 621 membar_release();
632 622 break;
633 623
634 624 case lir_get_thread:
635 625 get_thread(op->result_opr());
636 626 break;
637 627
638 628 default:
639 629 ShouldNotReachHere();
640 630 break;
641 631 }
642 632 }
643 633
644 634
645 635 void LIR_Assembler::emit_op2(LIR_Op2* op) {
646 636 switch (op->code()) {
647 637 case lir_cmp:
648 638 if (op->info() != NULL) {
649 639 assert(op->in_opr1()->is_address() || op->in_opr2()->is_address(),
650 640 "shouldn't be codeemitinfo for non-address operands");
651 641 add_debug_info_for_null_check_here(op->info()); // exception possible
652 642 }
653 643 comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
654 644 break;
655 645
656 646 case lir_cmp_l2i:
657 647 case lir_cmp_fd2i:
658 648 case lir_ucmp_fd2i:
659 649 comp_fl2i(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
660 650 break;
661 651
662 652 case lir_cmove:
663 653 cmove(op->condition(), op->in_opr1(), op->in_opr2(), op->result_opr());
664 654 break;
665 655
666 656 case lir_shl:
667 657 case lir_shr:
668 658 case lir_ushr:
669 659 if (op->in_opr2()->is_constant()) {
670 660 shift_op(op->code(), op->in_opr1(), op->in_opr2()->as_constant_ptr()->as_jint(), op->result_opr());
671 661 } else {
672 662 shift_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp_opr());
673 663 }
674 664 break;
675 665
676 666 case lir_add:
677 667 case lir_sub:
678 668 case lir_mul:
679 669 case lir_mul_strictfp:
680 670 case lir_div:
681 671 case lir_div_strictfp:
682 672 case lir_rem:
683 673 assert(op->fpu_pop_count() < 2, "");
684 674 arith_op(
685 675 op->code(),
686 676 op->in_opr1(),
687 677 op->in_opr2(),
688 678 op->result_opr(),
689 679 op->info(),
690 680 op->fpu_pop_count() == 1);
691 681 break;
692 682
693 683 case lir_abs:
694 684 case lir_sqrt:
695 685 case lir_sin:
696 686 case lir_tan:
697 687 case lir_cos:
698 688 case lir_log:
699 689 case lir_log10:
700 690 intrinsic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
701 691 break;
702 692
703 693 case lir_logic_and:
704 694 case lir_logic_or:
705 695 case lir_logic_xor:
706 696 logic_op(
707 697 op->code(),
708 698 op->in_opr1(),
709 699 op->in_opr2(),
710 700 op->result_opr());
711 701 break;
712 702
713 703 case lir_throw:
714 704 throw_op(op->in_opr1(), op->in_opr2(), op->info());
715 705 break;
716 706
717 707 default:
718 708 Unimplemented();
719 709 break;
720 710 }
721 711 }
722 712
723 713
724 714 void LIR_Assembler::build_frame() {
725 715 _masm->build_frame(initial_frame_size_in_bytes());
726 716 }
727 717
728 718
729 719 void LIR_Assembler::roundfp_op(LIR_Opr src, LIR_Opr tmp, LIR_Opr dest, bool pop_fpu_stack) {
730 720 assert((src->is_single_fpu() && dest->is_single_stack()) ||
731 721 (src->is_double_fpu() && dest->is_double_stack()),
732 722 "round_fp: rounds register -> stack location");
733 723
734 724 reg2stack (src, dest, src->type(), pop_fpu_stack);
735 725 }
736 726
737 727
738 728 void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool unaligned) {
739 729 if (src->is_register()) {
740 730 if (dest->is_register()) {
741 731 assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
742 732 reg2reg(src, dest);
743 733 } else if (dest->is_stack()) {
744 734 assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
745 735 reg2stack(src, dest, type, pop_fpu_stack);
746 736 } else if (dest->is_address()) {
747 737 reg2mem(src, dest, type, patch_code, info, pop_fpu_stack, unaligned);
748 738 } else {
749 739 ShouldNotReachHere();
750 740 }
751 741
752 742 } else if (src->is_stack()) {
753 743 assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
754 744 if (dest->is_register()) {
755 745 stack2reg(src, dest, type);
756 746 } else if (dest->is_stack()) {
757 747 stack2stack(src, dest, type);
758 748 } else {
759 749 ShouldNotReachHere();
760 750 }
761 751
762 752 } else if (src->is_constant()) {
763 753 if (dest->is_register()) {
764 754 const2reg(src, dest, patch_code, info); // patching is possible
765 755 } else if (dest->is_stack()) {
766 756 assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
767 757 const2stack(src, dest);
768 758 } else if (dest->is_address()) {
769 759 assert(patch_code == lir_patch_none, "no patching allowed here");
770 760 const2mem(src, dest, type, info);
771 761 } else {
772 762 ShouldNotReachHere();
773 763 }
774 764
775 765 } else if (src->is_address()) {
776 766 mem2reg(src, dest, type, patch_code, info, unaligned);
777 767
778 768 } else {
779 769 ShouldNotReachHere();
780 770 }
781 771 }
782 772
783 773
784 774 void LIR_Assembler::verify_oop_map(CodeEmitInfo* info) {
785 775 #ifndef PRODUCT
786 776 if (VerifyOopMaps || VerifyOops) {
787 777 bool v = VerifyOops;
788 778 VerifyOops = true;
789 779 OopMapStream s(info->oop_map());
790 780 while (!s.is_done()) {
791 781 OopMapValue v = s.current();
792 782 if (v.is_oop()) {
793 783 VMReg r = v.reg();
794 784 if (!r->is_stack()) {
795 785 stringStream st;
796 786 st.print("bad oop %s at %d", r->as_Register()->name(), _masm->offset());
797 787 #ifdef SPARC
798 788 _masm->_verify_oop(r->as_Register(), strdup(st.as_string()), __FILE__, __LINE__);
799 789 #else
800 790 _masm->verify_oop(r->as_Register());
801 791 #endif
802 792 } else {
803 793 _masm->verify_stack_oop(r->reg2stack() * VMRegImpl::stack_slot_size);
804 794 }
805 795 }
806 796 s.next();
807 797 }
808 798 VerifyOops = v;
809 799 }
810 800 #endif
811 801 }
↓ open down ↓ |
351 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX