#ifdef USE_PRAGMA_IDENT_SRC
#pragma ident "@(#)c1_LIRGenerator.cpp	1.24 08/11/07 15:47:10 JVM"
#endif
/*
 * Copyright 2005-2008 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_c1_LIRGenerator.cpp.incl"

#ifdef ASSERT
#define __ gen()->lir(__FILE__, __LINE__)->
#else
#define __ gen()->lir()->
#endif


void PhiResolverState::reset(int max_vregs) {
  // Initialize array sizes
  _virtual_operands.at_put_grow(max_vregs - 1, NULL, NULL);
  _virtual_operands.trunc_to(0);
  _other_operands.at_put_grow(max_vregs - 1, NULL, NULL);
  _other_operands.trunc_to(0);
  _vreg_table.at_put_grow(max_vregs - 1, NULL, NULL);
  _vreg_table.trunc_to(0);
}



//--------------------------------------------------------------
// PhiResolver

// Resolves cycles:
//
//  r1 := r2  becomes  temp := r1
//  r2 := r1           r1 := r2
//                     r2 := temp
// and orders moves:
//
//  r2 := r3  becomes  r1 := r2
//  r1 := r2           r2 := r3
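// Illustrative sketch (not part of the original sources and not compiled;
// the LIR_Opr values r1 and r2 and the max_vregs bound are hypothetical):
// breaking the two-element cycle above by routing one value through a fresh
// temporary, as the resolver does internally via move_to_temp()/move_temp_to().
#if 0
void example_resolve_swap(LIRGenerator* gen, int max_vregs, LIR_Opr r1, LIR_Opr r2) {
  PhiResolver resolver(gen, max_vregs);  // max_vregs: upper bound on vreg numbers
  resolver.move(r1, r2);                 // request r2 := r1
  resolver.move(r2, r1);                 // request r1 := r2 -- forms a cycle
  // The destructor emits:  temp := r1;  r1 := r2;  r2 := temp
}
#endif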
PhiResolver::PhiResolver(LIRGenerator* gen, int max_vregs)
 : _gen(gen)
 , _state(gen->resolver_state())
 , _temp(LIR_OprFact::illegalOpr)
{
  // reinitialize the shared state arrays
  _state.reset(max_vregs);
}


void PhiResolver::emit_move(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_valid(), "");
  assert(dest->is_valid(), "");
  __ move(src, dest);
}


void PhiResolver::move_temp_to(LIR_Opr dest) {
  assert(_temp->is_valid(), "");
  emit_move(_temp, dest);
  NOT_PRODUCT(_temp = LIR_OprFact::illegalOpr);
}


void PhiResolver::move_to_temp(LIR_Opr src) {
  assert(_temp->is_illegal(), "");
  _temp = _gen->new_register(src->type());
  emit_move(src, _temp);
}


// Traverse the assignment graph in depth-first order and generate moves in post order,
// i.e. for the two assignments  b := c, a := b  we start with node c:
//   Call graph:  move(NULL, c) -> move(c, b) -> move(b, a)
//   Generates moves in this order: move b to a and move c to b
// For the cycle  a := b, b := a  we start with node a:
//   Call graph:  move(NULL, a) -> move(a, b) -> move(b, a)
//   Generates moves in this order: move b to temp, move a to b, move temp to a
void PhiResolver::move(ResolveNode* src, ResolveNode* dest) {
  if (!dest->visited()) {
    dest->set_visited();
    for (int i = dest->no_of_destinations()-1; i >= 0; i --) {
      move(dest, dest->destination_at(i));
    }
  } else if (!dest->start_node()) {
    // cycle in graph detected
    assert(_loop == NULL, "only one loop valid!");
    _loop = dest;
    move_to_temp(src->operand());
    return;
  } // else dest is a start node

  if (!dest->assigned()) {
    if (_loop == dest) {
      move_temp_to(dest->operand());
      dest->set_assigned();
    } else if (src != NULL) {
      emit_move(src->operand(), dest->operand());
      dest->set_assigned();
    }
  }
}
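// Illustrative, self-contained model of the post-order traversal above (not
// part of the VM sources; cycle handling via the temp register is omitted).
// Every destination is filled only after its own destinations were saved.
#if 0
#include <cstdio>
#include <vector>

struct ExampleNode {
  int id;                                 // stands in for a LIR_Opr
  std::vector<ExampleNode*> dests;        // nodes assigned from this one
  bool visited;
};

static void example_move(ExampleNode* src, ExampleNode* dest) {
  if (!dest->visited) {
    dest->visited = true;
    for (size_t i = 0; i < dest->dests.size(); i++) {
      example_move(dest, dest->dests[i]); // recurse first (post order)
    }
  }
  if (src != NULL) {
    printf("r%d := r%d\n", dest->id, src->id);
  }
}
#endif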
PhiResolver::~PhiResolver() {
  int i;
  // resolve any cycles in moves from and to virtual registers
  for (i = virtual_operands().length() - 1; i >= 0; i --) {
    ResolveNode* node = virtual_operands()[i];
    if (!node->visited()) {
      _loop = NULL;
      move(NULL, node);
      node->set_start_node();
      assert(_temp->is_illegal(), "move_temp_to() call missing");
    }
  }

  // generate moves from non-virtual registers to arbitrary destinations
  for (i = other_operands().length() - 1; i >= 0; i --) {
    ResolveNode* node = other_operands()[i];
    for (int j = node->no_of_destinations() - 1; j >= 0; j --) {
      emit_move(node->operand(), node->destination_at(j)->operand());
    }
  }
}


ResolveNode* PhiResolver::create_node(LIR_Opr opr, bool source) {
  ResolveNode* node;
  if (opr->is_virtual()) {
    int vreg_num = opr->vreg_number();
    node = vreg_table().at_grow(vreg_num, NULL);
    assert(node == NULL || node->operand() == opr, "");
    if (node == NULL) {
      node = new ResolveNode(opr);
      vreg_table()[vreg_num] = node;
    }
    // Make sure that all virtual operands show up in the list when
    // they are used as the source of a move.
    if (source && !virtual_operands().contains(node)) {
      virtual_operands().append(node);
    }
  } else {
    assert(source, "");
    node = new ResolveNode(opr);
    other_operands().append(node);
  }
  return node;
}


void PhiResolver::move(LIR_Opr src, LIR_Opr dest) {
  assert(dest->is_virtual(), "");
  // tty->print("move "); src->print(); tty->print(" to "); dest->print(); tty->cr();
  assert(src->is_valid(), "");
  assert(dest->is_valid(), "");
  ResolveNode* source = source_node(src);
  source->append(destination_node(dest));
}


//--------------------------------------------------------------
// LIRItem

void LIRItem::set_result(LIR_Opr opr) {
  assert(value()->operand()->is_illegal() || value()->operand()->is_constant(), "operand should never change");
  value()->set_operand(opr);

  if (opr->is_virtual()) {
    _gen->_instruction_for_operand.at_put_grow(opr->vreg_number(), value(), NULL);
  }

  _result = opr;
}

void LIRItem::load_item() {
  if (result()->is_illegal()) {
    // update the item's result
    _result = value()->operand();
  }
  if (!result()->is_register()) {
    LIR_Opr reg = _gen->new_register(value()->type());
    __ move(result(), reg);
    if (result()->is_constant()) {
      _result = reg;
    } else {
      set_result(reg);
    }
  }
}


void LIRItem::load_for_store(BasicType type) {
  if (_gen->can_store_as_constant(value(), type)) {
    _result = value()->operand();
    if (!_result->is_constant()) {
      _result = LIR_OprFact::value_type(value()->type());
    }
  } else if (type == T_BYTE || type == T_BOOLEAN) {
    load_byte_item();
  } else {
    load_item();
  }
}

void LIRItem::load_item_force(LIR_Opr reg) {
  LIR_Opr r = result();
  if (r != reg) {
    if (r->type() != reg->type()) {
      // moves between different types need an intervening spill slot
      LIR_Opr tmp = _gen->force_to_spill(r, reg->type());
      __ move(tmp, reg);
    } else {
      __ move(r, reg);
    }
    _result = reg;
  }
}

ciObject* LIRItem::get_jobject_constant() const {
  ObjectType* oc = type()->as_ObjectType();
  if (oc) {
    return oc->constant_value();
  }
  return NULL;
}


jint LIRItem::get_jint_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_IntConstant() != NULL, "type check");
  return type()->as_IntConstant()->value();
}


jint LIRItem::get_address_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_AddressConstant() != NULL, "type check");
  return type()->as_AddressConstant()->value();
}


jfloat LIRItem::get_jfloat_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_FloatConstant() != NULL, "type check");
  return type()->as_FloatConstant()->value();
}


jdouble LIRItem::get_jdouble_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_DoubleConstant() != NULL, "type check");
  return type()->as_DoubleConstant()->value();
}


jlong LIRItem::get_jlong_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_LongConstant() != NULL, "type check");
  return type()->as_LongConstant()->value();
}



//--------------------------------------------------------------


void LIRGenerator::init() {
  _bs = Universe::heap()->barrier_set();
}
void LIRGenerator::block_do_prolog(BlockBegin* block) {
#ifndef PRODUCT
  if (PrintIRWithLIR) {
    block->print();
  }
#endif

  // set up the list of LIR instructions
  assert(block->lir() == NULL, "LIR list already computed for this block");
  _lir = new LIR_List(compilation(), block);
  block->set_lir(_lir);

  __ branch_destination(block->label());

  if (LIRTraceExecution &&
      Compilation::current_compilation()->hir()->start()->block_id() != block->block_id() &&
      !block->is_set(BlockBegin::exception_entry_flag)) {
    assert(block->lir()->instructions_list()->length() == 1, "should come right after br_dst");
    trace_block_entry(block);
  }
}


void LIRGenerator::block_do_epilog(BlockBegin* block) {
#ifndef PRODUCT
  if (PrintIRWithLIR) {
    tty->cr();
  }
#endif

  // LIR_Oprs for unpinned constants shouldn't be referenced by other
  // blocks, so clear them out after processing the block.
  for (int i = 0; i < _unpinned_constants.length(); i++) {
    _unpinned_constants.at(i)->clear_operand();
  }
  _unpinned_constants.trunc_to(0);

  // clear out any registers for other local constants
  _constants.trunc_to(0);
  _reg_for_constants.trunc_to(0);
}


void LIRGenerator::block_do(BlockBegin* block) {
  CHECK_BAILOUT();

  block_do_prolog(block);
  set_block(block);

  for (Instruction* instr = block; instr != NULL; instr = instr->next()) {
    if (instr->is_pinned()) do_root(instr);
  }

  set_block(NULL);
  block_do_epilog(block);
}


//-------------------------LIRGenerator-----------------------------

// This is where the tree-walk starts; instr must be a root
void LIRGenerator::do_root(Value instr) {
  CHECK_BAILOUT();

  InstructionMark im(compilation(), instr);

  assert(instr->is_pinned(), "use only with roots");
  assert(instr->subst() == instr, "shouldn't have missed substitution");

  instr->visit(this);

  assert(!instr->has_uses() || instr->operand()->is_valid() ||
         instr->as_Constant() != NULL || bailed_out(), "invalid item set");
}


// This is called for each node in the tree; the walk stops if a root is reached
void LIRGenerator::walk(Value instr) {
  InstructionMark im(compilation(), instr);
  // stop the walk when we encounter a root
  if (instr->is_pinned() && instr->as_Phi() == NULL || instr->operand()->is_valid()) {
    assert(instr->operand() != LIR_OprFact::illegalOpr || instr->as_Constant() != NULL, "this root has not yet been visited");
  } else {
    assert(instr->subst() == instr, "shouldn't have missed substitution");
    instr->visit(this);
    // assert(instr->use_count() > 0 || instr->as_Phi() != NULL, "leaf instruction must have a use");
  }
}
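// Illustrative sketch (not compiled): how code generation is driven per
// block by block_do()/do_root()/walk() above. Only pinned instructions are
// visited as roots; their unpinned operand subtrees are evaluated lazily
// through walk() when a visitor asks for them (e.g. via LIRItem::load_item()).
#if 0
for (Instruction* instr = block; instr != NULL; instr = instr->next()) {
  if (instr->is_pinned()) {
    do_root(instr);   // the visitor may call walk() on unpinned inputs
  }                   // unpinned, unused instructions emit no code
}
#endif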
CodeEmitInfo* LIRGenerator::state_for(Instruction* x, ValueStack* state, bool ignore_xhandler) {
  int index;
  Value value;
  for_each_stack_value(state, index, value) {
    assert(value->subst() == value, "missed substitution");
    if (!value->is_pinned() && value->as_Constant() == NULL && value->as_Local() == NULL) {
      walk(value);
      assert(value->operand()->is_valid(), "must be evaluated now");
    }
  }
  ValueStack* s = state;
  int bci = x->bci();
  for_each_state(s) {
    IRScope* scope = s->scope();
    ciMethod* method = scope->method();

    MethodLivenessResult liveness = method->liveness_at_bci(bci);
    if (bci == SynchronizationEntryBCI) {
      if (x->as_ExceptionObject() || x->as_Throw()) {
        // all locals are dead on exit from the synthetic unlocker
        liveness.clear();
      } else {
        assert(x->as_MonitorEnter(), "only other case is MonitorEnter");
      }
    }
    if (!liveness.is_valid()) {
      // Degenerate or breakpointed method.
      bailout("Degenerate or breakpointed method");
    } else {
      assert((int)liveness.size() == s->locals_size(), "error in use of liveness");
      for_each_local_value(s, index, value) {
        assert(value->subst() == value, "missed substitution");
        if (liveness.at(index) && !value->type()->is_illegal()) {
          if (!value->is_pinned() && value->as_Constant() == NULL && value->as_Local() == NULL) {
            walk(value);
            assert(value->operand()->is_valid(), "must be evaluated now");
          }
        } else {
          // NULL out this local so that linear scan can assume that all non-NULL values are live.
          s->invalidate_local(index);
        }
      }
    }
    bci = scope->caller_bci();
  }

  return new CodeEmitInfo(x->bci(), state, ignore_xhandler ? NULL : x->exception_handlers());
}


CodeEmitInfo* LIRGenerator::state_for(Instruction* x) {
  return state_for(x, x->lock_stack());
}


void LIRGenerator::jobject2reg_with_patching(LIR_Opr r, ciObject* obj, CodeEmitInfo* info) {
  if (!obj->is_loaded() || PatchALot) {
    assert(info != NULL, "info must be set if class is not loaded");
    __ oop2reg_patch(NULL, r, info);
  } else {
    // no patching needed
    __ oop2reg(obj->encoding(), r);
  }
}


void LIRGenerator::array_range_check(LIR_Opr array, LIR_Opr index,
                                     CodeEmitInfo* null_check_info, CodeEmitInfo* range_check_info) {
  CodeStub* stub = new RangeCheckStub(range_check_info, index);
  if (index->is_constant()) {
    cmp_mem_int(lir_cond_belowEqual, array, arrayOopDesc::length_offset_in_bytes(),
                index->as_jint(), null_check_info);
    __ branch(lir_cond_belowEqual, T_INT, stub); // forward branch
  } else {
    cmp_reg_mem(lir_cond_aboveEqual, index, array,
                arrayOopDesc::length_offset_in_bytes(), T_INT, null_check_info);
    __ branch(lir_cond_aboveEqual, T_INT, stub); // forward branch
  }
}


void LIRGenerator::nio_range_check(LIR_Opr buffer, LIR_Opr index, LIR_Opr result, CodeEmitInfo* info) {
  CodeStub* stub = new RangeCheckStub(info, index, true);
  if (index->is_constant()) {
    cmp_mem_int(lir_cond_belowEqual, buffer, java_nio_Buffer::limit_offset(), index->as_jint(), info);
    __ branch(lir_cond_belowEqual, T_INT, stub); // forward branch
  } else {
    cmp_reg_mem(lir_cond_aboveEqual, index, buffer,
                java_nio_Buffer::limit_offset(), T_INT, info);
    __ branch(lir_cond_aboveEqual, T_INT, stub); // forward branch
  }
  __ move(index, result);
}


// increment a counter, returning the incremented value
LIR_Opr LIRGenerator::increment_and_return_counter(LIR_Opr base, int offset, int increment) {
  LIR_Address* counter = new LIR_Address(base, offset, T_INT);
  LIR_Opr result = new_register(T_INT);
  __ load(counter, result);
  __ add(result, LIR_OprFact::intConst(increment), result);
  __ store(result, counter);
  return result;
}
void LIRGenerator::arithmetic_op(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp_op, CodeEmitInfo* info) {
  LIR_Opr result_op = result;
  LIR_Opr left_op   = left;
  LIR_Opr right_op  = right;

  if (TwoOperandLIRForm && left_op != result_op) {
    assert(right_op != result_op, "malformed");
    __ move(left_op, result_op);
    left_op = result_op;
  }

  switch(code) {
    case Bytecodes::_dadd:
    case Bytecodes::_fadd:
    case Bytecodes::_ladd:
    case Bytecodes::_iadd:  __ add(left_op, right_op, result_op); break;
    case Bytecodes::_fmul:
    case Bytecodes::_lmul:  __ mul(left_op, right_op, result_op); break;

    case Bytecodes::_dmul:
      {
        if (is_strictfp) {
          __ mul_strictfp(left_op, right_op, result_op, tmp_op); break;
        } else {
          __ mul(left_op, right_op, result_op); break;
        }
      }
      break;

    case Bytecodes::_imul:
      {
        bool did_strength_reduce = false;

        if (right->is_constant()) {
          int c = right->as_jint();
          if (is_power_of_2(c)) {
            // do not need tmp here
            __ shift_left(left_op, exact_log2(c), result_op);
            did_strength_reduce = true;
          } else {
            did_strength_reduce = strength_reduce_multiply(left_op, c, result_op, tmp_op);
          }
        }
        // we couldn't strength reduce so just emit the multiply
        if (!did_strength_reduce) {
          __ mul(left_op, right_op, result_op);
        }
      }
      break;

    case Bytecodes::_dsub:
    case Bytecodes::_fsub:
    case Bytecodes::_lsub:
    case Bytecodes::_isub: __ sub(left_op, right_op, result_op); break;

    case Bytecodes::_fdiv: __ div (left_op, right_op, result_op); break;
    // ldiv and lrem are implemented with a direct runtime call

    case Bytecodes::_ddiv:
      {
        if (is_strictfp) {
          __ div_strictfp (left_op, right_op, result_op, tmp_op); break;
        } else {
          __ div (left_op, right_op, result_op); break;
        }
      }
      break;

    case Bytecodes::_drem:
    case Bytecodes::_frem: __ rem (left_op, right_op, result_op); break;

    default: ShouldNotReachHere();
  }
}
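// Illustrative sketch (not compiled): the strength reduction performed for
// Bytecodes::_imul above. A multiply by a power-of-two constant becomes a
// single left shift; exact_log2() recovers the shift amount.
#if 0
//   x * 8  ==>  x << 3     (is_power_of_2(8) is true, exact_log2(8) == 3)
if (right->is_constant() && is_power_of_2(right->as_jint())) {
  __ shift_left(left_op, exact_log2(right->as_jint()), result_op);
}
#endif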
void LIRGenerator::arithmetic_op_int(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, LIR_Opr tmp) {
  arithmetic_op(code, result, left, right, false, tmp);
}


void LIRGenerator::arithmetic_op_long(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info) {
  arithmetic_op(code, result, left, right, false, LIR_OprFact::illegalOpr, info);
}


void LIRGenerator::arithmetic_op_fpu(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp) {
  arithmetic_op(code, result, left, right, is_strictfp, tmp);
}


void LIRGenerator::shift_op(Bytecodes::Code code, LIR_Opr result_op, LIR_Opr value, LIR_Opr count, LIR_Opr tmp) {
  if (TwoOperandLIRForm && value != result_op) {
    assert(count != result_op, "malformed");
    __ move(value, result_op);
    value = result_op;
  }

  assert(count->is_constant() || count->is_register(), "must be");
  switch(code) {
  case Bytecodes::_ishl:
  case Bytecodes::_lshl:  __ shift_left(value, count, result_op, tmp); break;
  case Bytecodes::_ishr:
  case Bytecodes::_lshr:  __ shift_right(value, count, result_op, tmp); break;
  case Bytecodes::_iushr:
  case Bytecodes::_lushr: __ unsigned_shift_right(value, count, result_op, tmp); break;
  default: ShouldNotReachHere();
  }
}


void LIRGenerator::logic_op (Bytecodes::Code code, LIR_Opr result_op, LIR_Opr left_op, LIR_Opr right_op) {
  if (TwoOperandLIRForm && left_op != result_op) {
    assert(right_op != result_op, "malformed");
    __ move(left_op, result_op);
    left_op = result_op;
  }

  switch(code) {
  case Bytecodes::_iand:
  case Bytecodes::_land: __ logical_and(left_op, right_op, result_op); break;

  case Bytecodes::_ior:
  case Bytecodes::_lor:  __ logical_or(left_op, right_op, result_op);  break;

  case Bytecodes::_ixor:
  case Bytecodes::_lxor: __ logical_xor(left_op, right_op, result_op); break;

  default: ShouldNotReachHere();
  }
}


void LIRGenerator::monitor_enter(LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no, CodeEmitInfo* info_for_exception, CodeEmitInfo* info) {
  if (!GenerateSynchronizationCode) return;
  // for the slow path, use debug info for the state after successful locking
  CodeStub* slow_path = new MonitorEnterStub(object, lock, info);
  __ load_stack_address_monitor(monitor_no, lock);
  // for handling NullPointerException, use debug info representing just the lock stack before this monitorenter
  __ lock_object(hdr, object, lock, scratch, slow_path, info_for_exception);
}


void LIRGenerator::monitor_exit(LIR_Opr object, LIR_Opr lock, LIR_Opr new_hdr, int monitor_no) {
  if (!GenerateSynchronizationCode) return;
  // set up registers
  LIR_Opr hdr = lock;
  lock = new_hdr;
  CodeStub* slow_path = new MonitorExitStub(lock, UseFastLocking, monitor_no);
  __ load_stack_address_monitor(monitor_no, lock);
  __ unlock_object(hdr, object, lock, slow_path);
}
void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3, LIR_Opr scratch4, LIR_Opr klass_reg, CodeEmitInfo* info) {
  jobject2reg_with_patching(klass_reg, klass, info);
  // If klass is not loaded we do not know if the klass has finalizers:
  if (UseFastNewInstance && klass->is_loaded()
      && !Klass::layout_helper_needs_slow_path(klass->layout_helper())) {

    Runtime1::StubID stub_id = klass->is_initialized() ? Runtime1::fast_new_instance_id : Runtime1::fast_new_instance_init_check_id;

    CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, stub_id);

    assert(klass->is_loaded(), "must be loaded");
    // allocate space for instance
    assert(klass->size_helper() >= 0, "illegal instance size");
    const int instance_size = align_object_size(klass->size_helper());
    __ allocate_object(dst, scratch1, scratch2, scratch3, scratch4,
                       oopDesc::header_size(), instance_size, klass_reg, !klass->is_initialized(), slow_path);
  } else {
    CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, Runtime1::new_instance_id);
    __ branch(lir_cond_always, T_ILLEGAL, slow_path);
    __ branch_destination(slow_path->continuation());
  }
}
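// Illustrative sketch (not compiled): the decision made in new_instance()
// above. Inline allocation is only emitted when the klass is fully known at
// compile time; otherwise everything is routed through the runtime stub.
#if 0
bool can_inline_allocation =
    UseFastNewInstance
    && klass->is_loaded()                       // size and finalizer info known
    && !Klass::layout_helper_needs_slow_path(klass->layout_helper());
//  can_inline_allocation  -> __ allocate_object(...) with a slow-path stub
// !can_inline_allocation  -> unconditional branch to Runtime1::new_instance_id
#endif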
static bool is_constant_zero(Instruction* inst) {
  IntConstant* c = inst->type()->as_IntConstant();
  if (c) {
    return (c->value() == 0);
  }
  return false;
}


static bool positive_constant(Instruction* inst) {
  IntConstant* c = inst->type()->as_IntConstant();
  if (c) {
    return (c->value() >= 0);
  }
  return false;
}


static ciArrayKlass* as_array_klass(ciType* type) {
  if (type != NULL && type->is_array_klass() && type->is_loaded()) {
    return (ciArrayKlass*)type;
  } else {
    return NULL;
  }
}

void LIRGenerator::arraycopy_helper(Intrinsic* x, int* flagsp, ciArrayKlass** expected_typep) {
  Instruction* src     = x->argument_at(0);
  Instruction* src_pos = x->argument_at(1);
  Instruction* dst     = x->argument_at(2);
  Instruction* dst_pos = x->argument_at(3);
  Instruction* length  = x->argument_at(4);

  // first try to identify the likely type of the arrays involved
  ciArrayKlass* expected_type = NULL;
  bool is_exact = false;
  {
    ciArrayKlass* src_exact_type    = as_array_klass(src->exact_type());
    ciArrayKlass* src_declared_type = as_array_klass(src->declared_type());
    ciArrayKlass* dst_exact_type    = as_array_klass(dst->exact_type());
    ciArrayKlass* dst_declared_type = as_array_klass(dst->declared_type());
    if (src_exact_type != NULL && src_exact_type == dst_exact_type) {
      // the types exactly match so the type is fully known
      is_exact = true;
      expected_type = src_exact_type;
    } else if (dst_exact_type != NULL && dst_exact_type->is_obj_array_klass()) {
      ciArrayKlass* dst_type = (ciArrayKlass*) dst_exact_type;
      ciArrayKlass* src_type = NULL;
      if (src_exact_type != NULL && src_exact_type->is_obj_array_klass()) {
        src_type = (ciArrayKlass*) src_exact_type;
      } else if (src_declared_type != NULL && src_declared_type->is_obj_array_klass()) {
        src_type = (ciArrayKlass*) src_declared_type;
      }
      if (src_type != NULL) {
        if (src_type->element_type()->is_subtype_of(dst_type->element_type())) {
          is_exact = true;
          expected_type = dst_type;
        }
      }
    }
    // at least pass along a good guess
    if (expected_type == NULL) expected_type = dst_exact_type;
    if (expected_type == NULL) expected_type = src_declared_type;
    if (expected_type == NULL) expected_type = dst_declared_type;
  }

  // if a probable array type has been identified, figure out if any
  // of the required checks for a fast case can be elided.
  int flags = LIR_OpArrayCopy::all_flags;
  if (expected_type != NULL) {
    // try to skip null checks
    if (src->as_NewArray() != NULL)
      flags &= ~LIR_OpArrayCopy::src_null_check;
    if (dst->as_NewArray() != NULL)
      flags &= ~LIR_OpArrayCopy::dst_null_check;

    // check from incoming constant values
    if (positive_constant(src_pos))
      flags &= ~LIR_OpArrayCopy::src_pos_positive_check;
    if (positive_constant(dst_pos))
      flags &= ~LIR_OpArrayCopy::dst_pos_positive_check;
    if (positive_constant(length))
      flags &= ~LIR_OpArrayCopy::length_positive_check;

    // see if the range check can be elided, which might also imply
    // that src or dst is non-null.
    ArrayLength* al = length->as_ArrayLength();
    if (al != NULL) {
      if (al->array() == src) {
        // it's the length of the source array
        flags &= ~LIR_OpArrayCopy::length_positive_check;
        flags &= ~LIR_OpArrayCopy::src_null_check;
        if (is_constant_zero(src_pos))
          flags &= ~LIR_OpArrayCopy::src_range_check;
      }
      if (al->array() == dst) {
        // it's the length of the destination array
        flags &= ~LIR_OpArrayCopy::length_positive_check;
        flags &= ~LIR_OpArrayCopy::dst_null_check;
        if (is_constant_zero(dst_pos))
          flags &= ~LIR_OpArrayCopy::dst_range_check;
      }
    }
    if (is_exact) {
      flags &= ~LIR_OpArrayCopy::type_check;
    }
  }

  if (src == dst) {
    // moving within a single array so no type checks are needed
    if (flags & LIR_OpArrayCopy::type_check) {
      flags &= ~LIR_OpArrayCopy::type_check;
    }
  }
  *flagsp = flags;
  *expected_typep = (ciArrayKlass*)expected_type;
}
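// Illustrative sketch (not compiled): a concrete case of the flag elision
// performed by arraycopy_helper(). For the common Java idiom
//   System.arraycopy(a, 0, b, 0, a.length)
// where b was just allocated, several fast-path checks fall away.
#if 0
// length is a.length (ArrayLength of src) -> length_positive_check and
//                                            src_null_check elided
// src_pos is the constant 0               -> src_range_check elided
// dst is a NewArray                       -> dst_null_check elided
// exact matching array types              -> type_check elided
#endif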
LIR_Opr LIRGenerator::round_item(LIR_Opr opr) {
  assert(opr->is_register(), "why spill if item is not register?");

  if (RoundFPResults && UseSSE < 1 && opr->is_single_fpu()) {
    LIR_Opr result = new_register(T_FLOAT);
    set_vreg_flag(result, must_start_in_memory);
    assert(opr->is_register(), "only a register can be spilled");
    assert(opr->value_type()->is_float(), "rounding only for floats available");
    __ roundfp(opr, LIR_OprFact::illegalOpr, result);
    return result;
  }
  return opr;
}


LIR_Opr LIRGenerator::force_to_spill(LIR_Opr value, BasicType t) {
  assert(type2size[t] == type2size[value->type()], "size mismatch");
  if (!value->is_register()) {
    // force into a register
    LIR_Opr r = new_register(value->type());
    __ move(value, r);
    value = r;
  }

  // create a spill location
  LIR_Opr tmp = new_register(t);
  set_vreg_flag(tmp, LIRGenerator::must_start_in_memory);

  // move from register to spill
  __ move(value, tmp);
  return tmp;
}


void LIRGenerator::profile_branch(If* if_instr, If::Condition cond) {
  if (if_instr->should_profile()) {
    ciMethod* method = if_instr->profiled_method();
    assert(method != NULL, "method should be set if branch is profiled");
    ciMethodData* md = method->method_data();
    if (md == NULL) {
      bailout("out of memory building methodDataOop");
      return;
    }
    ciProfileData* data = md->bci_to_data(if_instr->profiled_bci());
    assert(data != NULL, "must have profiling data");
    assert(data->is_BranchData(), "need BranchData for two-way branches");
    int taken_count_offset     = md->byte_offset_of_slot(data, BranchData::taken_offset());
    int not_taken_count_offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
    LIR_Opr md_reg = new_register(T_OBJECT);
    __ move(LIR_OprFact::oopConst(md->encoding()), md_reg);
    LIR_Opr data_offset_reg = new_register(T_INT);
    __ cmove(lir_cond(cond),
             LIR_OprFact::intConst(taken_count_offset),
             LIR_OprFact::intConst(not_taken_count_offset),
             data_offset_reg);
    LIR_Opr data_reg = new_register(T_INT);
    LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, T_INT);
    __ move(LIR_OprFact::address(data_addr), data_reg);
    LIR_Address* fake_incr_value = new LIR_Address(data_reg, DataLayout::counter_increment, T_INT);
    // Use leal instead of add to avoid destroying condition codes on x86
    __ leal(LIR_OprFact::address(fake_incr_value), data_reg);
    __ move(data_reg, LIR_OprFact::address(data_addr));
  }
}


// Phi technique:
// This is about passing live values from one basic block to the other.
// In code generated with Java it is rather rare that more than one
// value is on the stack from one basic block to the other.
// We optimize our technique for efficient passing of one value
// (of type long, int, double..) but it can be extended.
// When entering or leaving a basic block, all registers and all spill
// slots are released and empty. We use the released registers
// and spill slots to pass the live values from one block
// to the other. The topmost value, i.e., the value on TOS of the expression
// stack, is passed in registers. All other values are stored in the spilling
// area. Every Phi has an index which designates its spill slot.
// At the exit of a basic block, we fill the register(s) and spill slots.
// At the entry of a basic block, the block_prolog sets up the content of phi
// nodes and locks the necessary registers and spill slots.


// move the current value to the referenced phi function
void LIRGenerator::move_to_phi(PhiResolver* resolver, Value cur_val, Value sux_val) {
  Phi* phi = sux_val->as_Phi();
  // cur_val can be null without phi being null in conjunction with inlining
  if (phi != NULL && cur_val != NULL && cur_val != phi && !phi->is_illegal()) {
    LIR_Opr operand = cur_val->operand();
    if (cur_val->operand()->is_illegal()) {
      assert(cur_val->as_Constant() != NULL || cur_val->as_Local() != NULL,
             "these can be produced lazily");
      operand = operand_for_instruction(cur_val);
    }
    resolver->move(operand, operand_for_instruction(phi));
  }
}


// Moves all stack values into their PHI position
void LIRGenerator::move_to_phi(ValueStack* cur_state) {
  BlockBegin* bb = block();
  if (bb->number_of_sux() == 1) {
    BlockBegin* sux = bb->sux_at(0);
    assert(sux->number_of_preds() > 0, "invalid CFG");

    // a block with only one predecessor never has phi functions
    if (sux->number_of_preds() > 1) {
      int max_phis = cur_state->stack_size() + cur_state->locals_size();
      PhiResolver resolver(this, _virtual_register_number + max_phis * 2);

      ValueStack* sux_state = sux->state();
      Value sux_value;
      int index;

      for_each_stack_value(sux_state, index, sux_value) {
        move_to_phi(&resolver, cur_state->stack_at(index), sux_value);
      }

      // Inlining may cause the local state not to match up, so walk up
      // the caller state until we get to the same scope as the
      // successor and then start processing from there.
      while (cur_state->scope() != sux_state->scope()) {
        cur_state = cur_state->caller_state();
        assert(cur_state != NULL, "scopes don't match up");
      }

      for_each_local_value(sux_state, index, sux_value) {
        move_to_phi(&resolver, cur_state->local_at(index), sux_value);
      }

      assert(cur_state->caller_state() == sux_state->caller_state(), "caller states must be equal");
    }
  }
}
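// Illustrative sketch (not compiled; the operand names are hypothetical):
// what move_to_phi() above sets up at a block exit with two values flowing
// into a successor's phis. The PhiResolver only records move requests; the
// (possibly reordered, cycle-free) moves are emitted when it is destroyed
// at the end of the scope.
#if 0
{
  PhiResolver resolver(this, max_vregs);            // max_vregs: safe upper bound
  resolver.move(operand_of_tos,  operand_of_phi0);  // requests, not yet emitted
  resolver.move(operand_of_tos1, operand_of_phi1);
}                                                   // ~PhiResolver() orders and emits
#endif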
LIR_Opr LIRGenerator::new_register(BasicType type) {
  int vreg = _virtual_register_number;
  // add a little fudge factor for the bailout, since the bailout is
  // only checked periodically.  This gives a few extra registers to
  // hand out before we really run out, which helps us keep from
  // tripping over assertions.
  if (vreg + 20 >= LIR_OprDesc::vreg_max) {
    bailout("out of virtual registers");
    if (vreg + 2 >= LIR_OprDesc::vreg_max) {
      // wrap it around
      _virtual_register_number = LIR_OprDesc::vreg_base;
    }
  }
  _virtual_register_number += 1;
  if (type == T_ADDRESS) type = T_INT;
  return LIR_OprFact::virtual_register(vreg, type);
}
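// Illustrative sketch (not compiled): how visitors typically obtain operands.
// new_register() hands out fresh vregs; rlock_result() (below) additionally
// binds the fresh register to the instruction's result.
#if 0
LIR_Opr tmp = new_register(T_INT);   // scratch value, not tied to an Instruction
LIR_Opr res = rlock_result(x);       // x->operand() is now set to a fresh vreg
#endif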
// Try to lock using register in hint
LIR_Opr LIRGenerator::rlock(Value instr) {
  return new_register(instr->type());
}


// does an rlock and sets the result
LIR_Opr LIRGenerator::rlock_result(Value x) {
  LIR_Opr reg = rlock(x);
  set_result(x, reg);
  return reg;
}


// does an rlock and sets the result
LIR_Opr LIRGenerator::rlock_result(Value x, BasicType type) {
  LIR_Opr reg;
  switch (type) {
  case T_BYTE:
  case T_BOOLEAN:
    reg = rlock_byte(type);
    break;
  default:
    reg = rlock(x);
    break;
  }

  set_result(x, reg);
  return reg;
}


//---------------------------------------------------------------------
ciObject* LIRGenerator::get_jobject_constant(Value value) {
  ObjectType* oc = value->type()->as_ObjectType();
  if (oc) {
    return oc->constant_value();
  }
  return NULL;
}


void LIRGenerator::do_ExceptionObject(ExceptionObject* x) {
  assert(block()->is_set(BlockBegin::exception_entry_flag), "ExceptionObject only allowed in exception handler block");
  assert(block()->next() == x, "ExceptionObject must be first instruction of block");

  // no moves are created for phi functions at the beginning of exception
  // handlers, so assign operands manually here
  for_each_phi_fun(block(), phi,
                   operand_for_instruction(phi));

  LIR_Opr thread_reg = getThreadPointer();
  __ move(new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT),
          exceptionOopOpr());
  __ move(LIR_OprFact::oopConst(NULL),
          new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT));
  __ move(LIR_OprFact::oopConst(NULL),
          new LIR_Address(thread_reg, in_bytes(JavaThread::exception_pc_offset()), T_OBJECT));

  LIR_Opr result = new_register(T_OBJECT);
  __ move(exceptionOopOpr(), result);
  set_result(x, result);
}


//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//                        visitor functions
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------

void LIRGenerator::do_Phi(Phi* x) {
  // phi functions are never visited directly
  ShouldNotReachHere();
}


// Code for a constant is generated lazily unless the constant is frequently used and can't be inlined.
void LIRGenerator::do_Constant(Constant* x) {
  if (x->state() != NULL) {
    // Any constant with a ValueStack requires patching so emit the patch here
    LIR_Opr reg = rlock_result(x);
    CodeEmitInfo* info = state_for(x, x->state());
    __ oop2reg_patch(NULL, reg, info);
  } else if (x->use_count() > 1 && !can_inline_as_constant(x)) {
    if (!x->is_pinned()) {
      // unpinned constants are handled specially so that they can be
      // put into registers when they are used multiple times within a
      // block.  After the block completes their operand will be
      // cleared so that other blocks can't refer to that register.
      set_result(x, load_constant(x));
    } else {
      LIR_Opr res = x->operand();
      if (!res->is_valid()) {
        res = LIR_OprFact::value_type(x->type());
      }
      if (res->is_constant()) {
        LIR_Opr reg = rlock_result(x);
        __ move(res, reg);
      } else {
        set_result(x, res);
      }
    }
  } else {
    set_result(x, LIR_OprFact::value_type(x->type()));
  }
}


void LIRGenerator::do_Local(Local* x) {
  // operand_for_instruction has the side effect of setting the result,
  // so there's no need to do it here.
  operand_for_instruction(x);
}


void LIRGenerator::do_IfInstanceOf(IfInstanceOf* x) {
  Unimplemented();
}


void LIRGenerator::do_Return(Return* x) {
  if (DTraceMethodProbes) {
    BasicTypeList signature;
    signature.append(T_INT);    // thread
    signature.append(T_OBJECT); // methodOop
    LIR_OprList* args = new LIR_OprList();
    args->append(getThreadPointer());
    LIR_Opr meth = new_register(T_OBJECT);
    __ oop2reg(method()->encoding(), meth);
    args->append(meth);
    call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), voidType, NULL);
  }

  if (x->type()->is_void()) {
    __ return_op(LIR_OprFact::illegalOpr);
  } else {
    LIR_Opr reg = result_register_for(x->type(), /*callee=*/true);
    LIRItem result(x->result(), this);

    result.load_item_force(reg);
    __ return_op(result.result());
  }
  set_no_result(x);
}


// Example: object.getClass()
void LIRGenerator::do_getClass(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");

  LIRItem rcvr(x->argument_at(0), this);
  rcvr.load_item();
  LIR_Opr result = rlock_result(x);

  // need to perform the null check on the rcvr
  CodeEmitInfo* info = NULL;
  if (x->needs_null_check()) {
    info = state_for(x, x->state()->copy_locks());
  }
  __ move(new LIR_Address(rcvr.result(), oopDesc::klass_offset_in_bytes(), T_OBJECT), result, info);
  __ move(new LIR_Address(result, Klass::java_mirror_offset_in_bytes() +
                          klassOopDesc::klass_part_offset_in_bytes(), T_OBJECT), result);
}


// Example: Thread.currentThread()
void LIRGenerator::do_currentThread(Intrinsic* x) {
  assert(x->number_of_arguments() == 0, "wrong type");
  LIR_Opr reg = rlock_result(x);
  __ load(new LIR_Address(getThreadPointer(), in_bytes(JavaThread::threadObj_offset()), T_OBJECT), reg);
}


void LIRGenerator::do_RegisterFinalizer(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");
  LIRItem receiver(x->argument_at(0), this);

  receiver.load_item();
  BasicTypeList signature;
  signature.append(T_OBJECT); // receiver
  LIR_OprList* args = new LIR_OprList();
  args->append(receiver.result());
  CodeEmitInfo* info = state_for(x, x->state());
  call_runtime(&signature, args,
               CAST_FROM_FN_PTR(address, Runtime1::entry_for(Runtime1::register_finalizer_id)),
               voidType, info);

  set_no_result(x);
}
//------------------------local access--------------------------------------

LIR_Opr LIRGenerator::operand_for_instruction(Instruction* x) {
  if (x->operand()->is_illegal()) {
    Constant* c = x->as_Constant();
    if (c != NULL) {
      x->set_operand(LIR_OprFact::value_type(c->type()));
    } else {
      assert(x->as_Phi() || x->as_Local() != NULL, "only for Phi and Local");
      // allocate a virtual register for this local or phi
      x->set_operand(rlock(x));
      _instruction_for_operand.at_put_grow(x->operand()->vreg_number(), x, NULL);
    }
  }
  return x->operand();
}


Instruction* LIRGenerator::instruction_for_opr(LIR_Opr opr) {
  if (opr->is_virtual()) {
    return instruction_for_vreg(opr->vreg_number());
  }
  return NULL;
}


Instruction* LIRGenerator::instruction_for_vreg(int reg_num) {
  if (reg_num < _instruction_for_operand.length()) {
    return _instruction_for_operand.at(reg_num);
  }
  return NULL;
}


void LIRGenerator::set_vreg_flag(int vreg_num, VregFlag f) {
  if (_vreg_flags.size_in_bits() == 0) {
    BitMap2D temp(100, num_vreg_flags);
    temp.clear();
    _vreg_flags = temp;
  }
  _vreg_flags.at_put_grow(vreg_num, f, true);
}

bool LIRGenerator::is_vreg_flag_set(int vreg_num, VregFlag f) {
  if (!_vreg_flags.is_valid_index(vreg_num, f)) {
    return false;
  }
  return _vreg_flags.at(vreg_num, f);
}


// Block-local constant handling.  This code is useful for keeping
// unpinned constants and constants which aren't exposed in the IR in
// registers.  Unpinned Constant instructions have their operands
// cleared when the block is finished so that other blocks can't end
// up referring to their registers.
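// Illustrative sketch (not compiled): the per-block constant pooling that
// load_constant() below implements. Within one block, equal constants of the
// same type share a single register; block_do_epilog() truncates both lists
// so the sharing never leaks across blocks.
#if 0
LIR_Opr a = load_constant(LIR_OprFact::intConst(42)->as_constant_ptr());
LIR_Opr b = load_constant(LIR_OprFact::intConst(42)->as_constant_ptr());
// a == b: the second lookup hits the _constants/_reg_for_constants cache
#endif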
LIR_Opr LIRGenerator::load_constant(Constant* x) {
  assert(!x->is_pinned(), "only for unpinned constants");
  _unpinned_constants.append(x);
  return load_constant(LIR_OprFact::value_type(x->type())->as_constant_ptr());
}


LIR_Opr LIRGenerator::load_constant(LIR_Const* c) {
  BasicType t = c->type();
  for (int i = 0; i < _constants.length(); i++) {
    LIR_Const* other = _constants.at(i);
    if (t == other->type()) {
      switch (t) {
      case T_INT:
      case T_FLOAT:
        if (c->as_jint_bits() != other->as_jint_bits()) continue;
        break;
      case T_LONG:
      case T_DOUBLE:
        if (c->as_jint_hi_bits() != other->as_jint_hi_bits()) continue;
        if (c->as_jint_lo_bits() != other->as_jint_lo_bits()) continue;
        break;
      case T_OBJECT:
        if (c->as_jobject() != other->as_jobject()) continue;
        break;
      }
      return _reg_for_constants.at(i);
    }
  }

  LIR_Opr result = new_register(t);
  __ move((LIR_Opr)c, result);
  _constants.append(c);
  _reg_for_constants.append(result);
  return result;
}

// Various barriers

void LIRGenerator::pre_barrier(LIR_Opr addr_opr, bool patch, CodeEmitInfo* info) {
  // Do the pre-write barrier, if any.
  switch (_bs->kind()) {
#ifndef SERIALGC
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      G1SATBCardTableModRef_pre_barrier(addr_opr, patch, info);
      break;
#endif // SERIALGC
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      // No pre barriers
      break;
    case BarrierSet::ModRef:
    case BarrierSet::Other:
      // No pre barriers
      break;
    default:
      ShouldNotReachHere();
  }
}

void LIRGenerator::post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
  switch (_bs->kind()) {
#ifndef SERIALGC
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      G1SATBCardTableModRef_post_barrier(addr, new_val);
      break;
#endif // SERIALGC
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      CardTableModRef_post_barrier(addr, new_val);
      break;
    case BarrierSet::ModRef:
    case BarrierSet::Other:
      // No post barriers
      break;
    default:
      ShouldNotReachHere();
  }
}

////////////////////////////////////////////////////////////////////////
#ifndef SERIALGC

void LIRGenerator::G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, bool patch, CodeEmitInfo* info) {
  if (G1DisablePreBarrier) return;

  // First we test whether marking is in progress.
  BasicType flag_type;
  if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
    flag_type = T_INT;
  } else {
    guarantee(in_bytes(PtrQueue::byte_width_of_active()) == 1,
              "Assumption");
    flag_type = T_BYTE;
  }
  LIR_Opr thrd = getThreadPointer();
  LIR_Address* mark_active_flag_addr =
    new LIR_Address(thrd,
                    in_bytes(JavaThread::satb_mark_queue_offset() +
                             PtrQueue::byte_offset_of_active()),
                    flag_type);
  // Read the marking-in-progress flag.
  LIR_Opr flag_val = new_register(T_INT);
  __ load(mark_active_flag_addr, flag_val);

  LabelObj* start_store = new LabelObj();

  LIR_PatchCode pre_val_patch_code =
    patch ? lir_patch_normal : lir_patch_none;

  LIR_Opr pre_val = new_register(T_OBJECT);

  __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));
  if (!addr_opr->is_address()) {
    assert(addr_opr->is_register(), "must be");
    addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, 0, T_OBJECT));
  }
  CodeStub* slow = new G1PreBarrierStub(addr_opr, pre_val, pre_val_patch_code,
                                        info);
  __ branch(lir_cond_notEqual, T_INT, slow);
  __ branch_destination(slow->continuation());
}
void LIRGenerator::G1SATBCardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
  if (G1DisablePostBarrier) return;

  // If the "new_val" is a constant NULL, no barrier is necessary.
  if (new_val->is_constant() &&
      new_val->as_constant_ptr()->as_jobject() == NULL) return;

  if (!new_val->is_register()) {
    LIR_Opr new_val_reg = new_pointer_register();
    if (new_val->is_constant()) {
      __ move(new_val, new_val_reg);
    } else {
      __ leal(new_val, new_val_reg);
    }
    new_val = new_val_reg;
  }
  assert(new_val->is_register(), "must be a register at this point");

  if (addr->is_address()) {
    LIR_Address* address = addr->as_address_ptr();
    LIR_Opr ptr = new_pointer_register();
    if (!address->index()->is_valid() && address->disp() == 0) {
      __ move(address->base(), ptr);
    } else {
      assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
      __ leal(addr, ptr);
    }
    addr = ptr;
  }
  assert(addr->is_register(), "must be a register at this point");

  LIR_Opr xor_res = new_pointer_register();
  LIR_Opr xor_shift_res = new_pointer_register();

  if (TwoOperandLIRForm) {
    __ move(addr, xor_res);
    __ logical_xor(xor_res, new_val, xor_res);
    __ move(xor_res, xor_shift_res);
    __ unsigned_shift_right(xor_shift_res,
                            LIR_OprFact::intConst(HeapRegion::LogOfHRGrainBytes),
                            xor_shift_res,
                            LIR_OprDesc::illegalOpr());
  } else {
    __ logical_xor(addr, new_val, xor_res);
    __ unsigned_shift_right(xor_res,
                            LIR_OprFact::intConst(HeapRegion::LogOfHRGrainBytes),
                            xor_shift_res,
                            LIR_OprDesc::illegalOpr());
  }

  if (!new_val->is_register()) {
    LIR_Opr new_val_reg = new_pointer_register();
    __ leal(new_val, new_val_reg);
    new_val = new_val_reg;
  }
  assert(new_val->is_register(), "must be a register at this point");

  __ cmp(lir_cond_notEqual, xor_shift_res, LIR_OprFact::intptrConst(NULL_WORD));

  CodeStub* slow = new G1PostBarrierStub(addr, new_val);
  __ branch(lir_cond_notEqual, T_INT, slow);
  __ branch_destination(slow->continuation());
}

#endif // SERIALGC
////////////////////////////////////////////////////////////////////////
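// Illustrative sketch (not compiled): the cross-region filter emitted by the
// G1 post barrier above. Two addresses land in the same G1 heap region iff
// their region indices match, so the slow path is taken only when the XOR of
// the field address and the stored value is non-zero after shifting out the
// within-region bits.
#if 0
//   same_region = ((((uintptr_t)addr) ^ ((uintptr_t)new_val))
//                  >> HeapRegion::LogOfHRGrainBytes) == 0;
//   if (!same_region) goto G1PostBarrierStub;   // record the card
#endif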
void LIRGenerator::CardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {

  assert(sizeof(*((CardTableModRefBS*)_bs)->byte_map_base) == sizeof(jbyte), "adjust this code");
  LIR_Const* card_table_base = new LIR_Const(((CardTableModRefBS*)_bs)->byte_map_base);
  if (addr->is_address()) {
    LIR_Address* address = addr->as_address_ptr();
    LIR_Opr ptr = new_register(T_OBJECT);
    if (!address->index()->is_valid() && address->disp() == 0) {
      __ move(address->base(), ptr);
    } else {
      assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
      __ leal(addr, ptr);
    }
    addr = ptr;
  }
  assert(addr->is_register(), "must be a register at this point");

  LIR_Opr tmp = new_pointer_register();
  if (TwoOperandLIRForm) {
    __ move(addr, tmp);
    __ unsigned_shift_right(tmp, CardTableModRefBS::card_shift, tmp);
  } else {
    __ unsigned_shift_right(addr, CardTableModRefBS::card_shift, tmp);
  }
  if (can_inline_as_constant(card_table_base)) {
    __ move(LIR_OprFact::intConst(0),
            new LIR_Address(tmp, card_table_base->as_jint(), T_BYTE));
  } else {
    __ move(LIR_OprFact::intConst(0),
            new LIR_Address(tmp, load_constant(card_table_base),
                            T_BYTE));
  }
}


//------------------------field access--------------------------------------

// Comment copied from templateTable_i486.cpp
// ----------------------------------------------------------------------------
// Volatile variables demand their effects be made known to all CPUs in
// order.  Store buffers on most chips allow reads & writes to reorder; the
// JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
// memory barrier (i.e., it's not sufficient that the interpreter does not
// reorder volatile references, the hardware also must not reorder them).
//
// According to the new Java Memory Model (JMM):
// (1) All volatiles are serialized wrt to each other.
//     ALSO reads & writes act as acquire & release, so:
// (2) A read cannot let unrelated NON-volatile memory refs that happen after
//     the read float up to before the read.  It's OK for non-volatile memory
//     refs that happen before the volatile read to float down below it.
// (3) Similarly, a volatile write cannot let unrelated NON-volatile memory
//     refs that happen BEFORE the write float down to after the write.  It's
//     OK for non-volatile memory refs that happen after the volatile write
//     to float up before it.
//
// We only put in barriers around volatile refs (they are expensive), not
// _between_ memory refs (that would require us to track the flavor of the
// previous memory refs).  Requirements (2) and (3) require some barriers
// before volatile stores and after volatile loads.  These nearly cover
// requirement (1) but miss the volatile-store-volatile-load case.  This final
// case is placed after volatile-stores although it could just as well go
// before volatile-loads.
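// Illustrative sketch (not compiled): the barrier placement that
// do_StoreField()/do_LoadField() below derive from the rules above on an
// MP system.
#if 0
// volatile store:  __ membar_release();  <store>;  __ membar();
//                  (the trailing full membar covers the store->load case)
// volatile load:   <load>;  __ membar_acquire();
#endif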
void LIRGenerator::do_StoreField(StoreField* x) {
  bool needs_patching = x->needs_patching();
  bool is_volatile = x->field()->is_volatile();
  BasicType field_type = x->field_type();
  bool is_oop = (field_type == T_ARRAY || field_type == T_OBJECT);

  CodeEmitInfo* info = NULL;
  if (needs_patching) {
    assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
    info = state_for(x, x->state_before());
  } else if (x->needs_null_check()) {
    NullCheck* nc = x->explicit_null_check();
    if (nc == NULL) {
      info = state_for(x, x->lock_stack());
    } else {
      info = state_for(nc);
    }
  }


  LIRItem object(x->obj(), this);
  LIRItem value(x->value(), this);

  object.load_item();

  if (is_volatile || needs_patching) {
    // load item if field is volatile (fewer special cases for volatiles)
    // load item if field not initialized
    // load item if field not constant
    // because of code patching we cannot inline constants
    if (field_type == T_BYTE || field_type == T_BOOLEAN) {
      value.load_byte_item();
    } else {
      value.load_item();
    }
  } else {
    value.load_for_store(field_type);
  }

  set_no_result(x);

  if (PrintNotLoaded && needs_patching) {
    tty->print_cr("   ###class not loaded at store_%s bci %d",
                  x->is_static() ? "static" : "field", x->bci());
  }

  if (x->needs_null_check() &&
      (needs_patching ||
       MacroAssembler::needs_explicit_null_check(x->offset()))) {
    // emit an explicit null check because the offset is too large
    __ null_check(object.result(), new CodeEmitInfo(info));
  }

  LIR_Address* address;
  if (needs_patching) {
    // we need to patch the offset in the instruction so don't allow
    // generate_address to try to be smart about emitting the -1.
    // Otherwise the patching code won't know how to find the
    // instruction to patch.
    address = new LIR_Address(object.result(), max_jint, field_type);
  } else {
    address = generate_address(object.result(), x->offset(), field_type);
  }

  if (is_volatile && os::is_MP()) {
    __ membar_release();
  }

  if (is_oop) {
    // Do the pre-write barrier, if any.
    pre_barrier(LIR_OprFact::address(address),
                needs_patching,
                (info ? new CodeEmitInfo(info) : NULL));
  }

  if (is_volatile) {
    assert(!needs_patching && x->is_loaded(),
           "how do we know it's volatile if it's not loaded");
    volatile_field_store(value.result(), address, info);
  } else {
    LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
    __ store(value.result(), address, info, patch_code);
  }

  if (is_oop) {
#ifdef PRECISE_CARDMARK
    // Precise cardmarks don't work
    post_barrier(LIR_OprFact::address(address), value.result());
#else
    post_barrier(object.result(), value.result());
#endif // PRECISE_CARDMARK
  }

  if (is_volatile && os::is_MP()) {
    __ membar();
  }
}
void LIRGenerator::do_LoadField(LoadField* x) {
  bool needs_patching = x->needs_patching();
  bool is_volatile = x->field()->is_volatile();
  BasicType field_type = x->field_type();

  CodeEmitInfo* info = NULL;
  if (needs_patching) {
    assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
    info = state_for(x, x->state_before());
  } else if (x->needs_null_check()) {
    NullCheck* nc = x->explicit_null_check();
    if (nc == NULL) {
      info = state_for(x, x->lock_stack());
    } else {
      info = state_for(nc);
    }
  }

  LIRItem object(x->obj(), this);

  object.load_item();

  if (PrintNotLoaded && needs_patching) {
    tty->print_cr("   ###class not loaded at load_%s bci %d",
                  x->is_static() ? "static" : "field", x->bci());
  }

  if (x->needs_null_check() &&
      (needs_patching ||
       MacroAssembler::needs_explicit_null_check(x->offset()))) {
    // emit an explicit null check because the offset is too large
    __ null_check(object.result(), new CodeEmitInfo(info));
  }

  LIR_Opr reg = rlock_result(x, field_type);
  LIR_Address* address;
  if (needs_patching) {
    // we need to patch the offset in the instruction so don't allow
    // generate_address to try to be smart about emitting the -1.
    // Otherwise the patching code won't know how to find the
    // instruction to patch.
    address = new LIR_Address(object.result(), max_jint, field_type);
  } else {
    address = generate_address(object.result(), x->offset(), field_type);
  }

  if (is_volatile) {
    assert(!needs_patching && x->is_loaded(),
           "how do we know it's volatile if it's not loaded");
    volatile_field_load(address, reg, info);
  } else {
    LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
    __ load(address, reg, info, patch_code);
  }

  if (is_volatile && os::is_MP()) {
    __ membar_acquire();
  }
}
//------------------------java.nio.Buffer.checkIndex------------------------

// int java.nio.Buffer.checkIndex(int)
void LIRGenerator::do_NIOCheckIndex(Intrinsic* x) {
  // NOTE: by the time we are in checkIndex() we are guaranteed that
  // the buffer is non-null (because checkIndex is package-private and
  // only called from within other methods in the buffer).
  assert(x->number_of_arguments() == 2, "wrong type");
  LIRItem buf  (x->argument_at(0), this);
  LIRItem index(x->argument_at(1), this);
  buf.load_item();
  index.load_item();

  LIR_Opr result = rlock_result(x);
  if (GenerateRangeChecks) {
    CodeEmitInfo* info = state_for(x);
    CodeStub* stub = new RangeCheckStub(info, index.result(), true);
    if (index.result()->is_constant()) {
      cmp_mem_int(lir_cond_belowEqual, buf.result(), java_nio_Buffer::limit_offset(), index.result()->as_jint(), info);
      __ branch(lir_cond_belowEqual, T_INT, stub);
    } else {
      cmp_reg_mem(lir_cond_aboveEqual, index.result(), buf.result(),
                  java_nio_Buffer::limit_offset(), T_INT, info);
      __ branch(lir_cond_aboveEqual, T_INT, stub);
    }
    __ move(index.result(), result);
  } else {
    // Just load the index into the result register
    __ move(index.result(), result);
  }
}


//------------------------array access--------------------------------------


void LIRGenerator::do_ArrayLength(ArrayLength* x) {
  LIRItem array(x->array(), this);
  array.load_item();
  LIR_Opr reg = rlock_result(x);

  CodeEmitInfo* info = NULL;
  if (x->needs_null_check()) {
    NullCheck* nc = x->explicit_null_check();
    if (nc == NULL) {
      info = state_for(x);
    } else {
      info = state_for(nc);
    }
  }
  __ load(new LIR_Address(array.result(), arrayOopDesc::length_offset_in_bytes(), T_INT), reg, info, lir_patch_none);
}


void LIRGenerator::do_LoadIndexed(LoadIndexed* x) {
  bool use_length = x->length() != NULL;
  LIRItem array(x->array(), this);
  LIRItem index(x->index(), this);
  LIRItem length(this);
  bool needs_range_check = true;

  if (use_length) {
    needs_range_check = x->compute_needs_range_check();
    if (needs_range_check) {
      length.set_instruction(x->length());
      length.load_item();
    }
  }

  array.load_item();
  if (index.is_constant() && can_inline_as_constant(x->index())) {
    // let it be a constant
    index.dont_load_item();
  } else {
    index.load_item();
  }

  CodeEmitInfo* range_check_info = state_for(x);
  CodeEmitInfo* null_check_info = NULL;
  if (x->needs_null_check()) {
    NullCheck* nc = x->explicit_null_check();
    if (nc != NULL) {
      null_check_info = state_for(nc);
    } else {
      null_check_info = range_check_info;
    }
  }

  // emit array address setup early so it schedules better
  LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), false);

  if (GenerateRangeChecks && needs_range_check) {
    if (use_length) {
      // TODO: use a (modified) version of array_range_check that does not require a
      //       constant length to be loaded to a register
      __ cmp(lir_cond_belowEqual, length.result(), index.result());
      __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
    } else {
      array_range_check(array.result(), index.result(), null_check_info, range_check_info);
      // The range check performs the null check, so clear it out for the load
      null_check_info = NULL;
    }
  }

  __ move(array_addr, rlock_result(x, x->elt_type()), null_check_info);
}


void LIRGenerator::do_NullCheck(NullCheck* x) {
  if (x->can_trap()) {
    LIRItem value(x->obj(), this);
    value.load_item();
    CodeEmitInfo* info = state_for(x);
    __ null_check(value.result(), info);
  }
}


void LIRGenerator::do_NullCheck(NullCheck* x) {
  if (x->can_trap()) {
    LIRItem value(x->obj(), this);
    value.load_item();
    CodeEmitInfo* info = state_for(x);
    __ null_check(value.result(), info);
  }
}


void LIRGenerator::do_Throw(Throw* x) {
  LIRItem exception(x->exception(), this);
  exception.load_item();
  set_no_result(x);
  LIR_Opr exception_opr = exception.result();
  CodeEmitInfo* info = state_for(x, x->state());

#ifndef PRODUCT
  if (PrintC1Statistics) {
    increment_counter(Runtime1::throw_count_address());
  }
#endif

  // check if the instruction has an xhandler in any of the nested scopes
  bool unwind = false;
  if (info->exception_handlers()->length() == 0) {
    // this throw is not inside an xhandler
    unwind = true;
  } else {
    // get some idea of the throw type
    bool type_is_exact = true;
    ciType* throw_type = x->exception()->exact_type();
    if (throw_type == NULL) {
      type_is_exact = false;
      throw_type = x->exception()->declared_type();
    }
    if (throw_type != NULL && throw_type->is_instance_klass()) {
      ciInstanceKlass* throw_klass = (ciInstanceKlass*)throw_type;
      unwind = !x->exception_handlers()->could_catch(throw_klass, type_is_exact);
    }
  }

  // do null check before moving exception oop into fixed register
  // to avoid a fixed interval with an oop during the null check.
  // Use a copy of the CodeEmitInfo because debug information is
  // different for null_check and throw.
  if (GenerateCompilerNullChecks &&
      (x->exception()->as_NewInstance() == NULL && x->exception()->as_ExceptionObject() == NULL)) {
    // if the exception object wasn't created using new then it might be null.
    __ null_check(exception_opr, new CodeEmitInfo(info, true));
  }

  if (JvmtiExport::can_post_exceptions() &&
      !block()->is_set(BlockBegin::default_exception_handler_flag)) {
    // we need to go through the exception lookup path to get JVMTI
    // notification done
    unwind = false;
  }

  assert(!block()->is_set(BlockBegin::default_exception_handler_flag) || unwind,
         "should be no more handlers to dispatch to");

  if (DTraceMethodProbes &&
      block()->is_set(BlockBegin::default_exception_handler_flag)) {
    // notify that this frame is unwinding
    BasicTypeList signature;
    signature.append(T_INT);    // thread
    signature.append(T_OBJECT); // methodOop
    LIR_OprList* args = new LIR_OprList();
    args->append(getThreadPointer());
    LIR_Opr meth = new_register(T_OBJECT);
    __ oop2reg(method()->encoding(), meth);
    args->append(meth);
    call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), voidType, NULL);
  }

  // move exception oop into fixed register
  __ move(exception_opr, exceptionOopOpr());

  if (unwind) {
    __ unwind_exception(LIR_OprFact::illegalOpr, exceptionOopOpr(), info);
  } else {
    __ throw_exception(exceptionPcOpr(), exceptionOopOpr(), info);
  }
}
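
// Editorial note (sketch of the decision above): do_Throw() ends in one of
//
//   __ unwind_exception(...)   // nothing here can catch: unwind to caller
//   __ throw_exception(...)    // dispatch through the handler lookup path
//
// and conservatively keeps the lookup path whenever JVMTI may need to see
// the exception or the throw type is not known precisely enough.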


void LIRGenerator::do_RoundFP(RoundFP* x) {
  LIRItem input(x->input(), this);
  input.load_item();
  LIR_Opr input_opr = input.result();
  assert(input_opr->is_register(), "why round if value is not in a register?");
  assert(input_opr->is_single_fpu() || input_opr->is_double_fpu(), "input should be floating-point value");
  if (input_opr->is_single_fpu()) {
    set_result(x, round_item(input_opr)); // This code path not currently taken
  } else {
    LIR_Opr result = new_register(T_DOUBLE);
    set_vreg_flag(result, must_start_in_memory);
    __ roundfp(input_opr, LIR_OprFact::illegalOpr, result);
    set_result(x, result);
  }
}


void LIRGenerator::do_UnsafeGetRaw(UnsafeGetRaw* x) {
  LIRItem base(x->base(), this);
  LIRItem idx(this);

  base.load_item();
  if (x->has_index()) {
    idx.set_instruction(x->index());
    idx.load_nonconstant();
  }

  LIR_Opr reg = rlock_result(x, x->basic_type());

  int log2_scale = 0;
  if (x->has_index()) {
    assert(x->index()->type()->tag() == intTag, "should not find non-int index");
    log2_scale = x->log2_scale();
  }

  assert(!x->has_index() || idx.value() == x->index(), "should match");

  LIR_Opr base_op = base.result();
#ifndef _LP64
  if (x->base()->type()->tag() == longTag) {
    base_op = new_register(T_INT);
    __ convert(Bytecodes::_l2i, base.result(), base_op);
  } else {
    assert(x->base()->type()->tag() == intTag, "must be");
  }
#endif

  BasicType dst_type = x->basic_type();
  LIR_Opr index_op = idx.result();

  LIR_Address* addr;
  if (index_op->is_constant()) {
    assert(log2_scale == 0, "must not have a scale");
    addr = new LIR_Address(base_op, index_op->as_jint(), dst_type);
  } else {
#ifdef X86
    addr = new LIR_Address(base_op, index_op, LIR_Address::Scale(log2_scale), 0, dst_type);
#else
    if (index_op->is_illegal() || log2_scale == 0) {
      addr = new LIR_Address(base_op, index_op, dst_type);
    } else {
      LIR_Opr tmp = new_register(T_INT);
      __ shift_left(index_op, log2_scale, tmp);
      addr = new LIR_Address(base_op, tmp, dst_type);
    }
#endif
  }

  if (x->may_be_unaligned() && (dst_type == T_LONG || dst_type == T_DOUBLE)) {
    __ unaligned_move(addr, reg);
  } else {
    __ move(addr, reg);
  }
}


void LIRGenerator::do_UnsafePutRaw(UnsafePutRaw* x) {
  int log2_scale = 0;
  BasicType type = x->basic_type();

  if (x->has_index()) {
    assert(x->index()->type()->tag() == intTag, "should not find non-int index");
    log2_scale = x->log2_scale();
  }

  LIRItem base(x->base(), this);
  LIRItem value(x->value(), this);
  LIRItem idx(this);

  base.load_item();
  if (x->has_index()) {
    idx.set_instruction(x->index());
    idx.load_item();
  }

  if (type == T_BYTE || type == T_BOOLEAN) {
    value.load_byte_item();
  } else {
    value.load_item();
  }

  set_no_result(x);

  LIR_Opr base_op = base.result();
#ifndef _LP64
  if (x->base()->type()->tag() == longTag) {
    base_op = new_register(T_INT);
    __ convert(Bytecodes::_l2i, base.result(), base_op);
  } else {
    assert(x->base()->type()->tag() == intTag, "must be");
  }
#endif

  LIR_Opr index_op = idx.result();
  if (log2_scale != 0) {
    // temporary fix (platform dependent code without shift on Intel would be better)
    index_op = new_register(T_INT);
    __ move(idx.result(), index_op);
    __ shift_left(index_op, log2_scale, index_op);
  }

  LIR_Address* addr = new LIR_Address(base_op, index_op, x->basic_type());
  __ move(value.result(), addr);
}
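
// Editorial sketch: both raw-memory accessors above compute, in effect,
//
//   addr = base + (index << log2_scale)
//
// e.g. index 3 with log2_scale == 2 addresses byte offset 12. On X86 the
// scale folds directly into the LIR_Address; on other platforms an explicit
// shift_left into a temporary register is emitted first.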


void LIRGenerator::do_UnsafeGetObject(UnsafeGetObject* x) {
  BasicType type = x->basic_type();
  LIRItem src(x->object(), this);
  LIRItem off(x->offset(), this);

  off.load_item();
  src.load_item();

  LIR_Opr reg = rlock_result(x, x->basic_type());

  if (x->is_volatile() && os::is_MP()) __ membar_acquire();
  get_Object_unsafe(reg, src.result(), off.result(), type, x->is_volatile());
  if (x->is_volatile() && os::is_MP()) __ membar();
}


void LIRGenerator::do_UnsafePutObject(UnsafePutObject* x) {
  BasicType type = x->basic_type();
  LIRItem src(x->object(), this);
  LIRItem off(x->offset(), this);
  LIRItem data(x->value(), this);

  src.load_item();
  if (type == T_BOOLEAN || type == T_BYTE) {
    data.load_byte_item();
  } else {
    data.load_item();
  }
  off.load_item();

  set_no_result(x);

  if (x->is_volatile() && os::is_MP()) __ membar_release();
  put_Object_unsafe(src.result(), off.result(), data.result(), type, x->is_volatile());
}


void LIRGenerator::do_UnsafePrefetch(UnsafePrefetch* x, bool is_store) {
  LIRItem src(x->object(), this);
  LIRItem off(x->offset(), this);

  src.load_item();
  if (off.is_constant() && can_inline_as_constant(x->offset())) {
    // let it be a constant
    off.dont_load_item();
  } else {
    off.load_item();
  }

  set_no_result(x);

  LIR_Address* addr = generate_address(src.result(), off.result(), 0, 0, T_BYTE);
  __ prefetch(addr, is_store);
}


void LIRGenerator::do_UnsafePrefetchRead(UnsafePrefetchRead* x) {
  do_UnsafePrefetch(x, false);
}


void LIRGenerator::do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) {
  do_UnsafePrefetch(x, true);
}


void LIRGenerator::do_SwitchRanges(SwitchRangeArray* x, LIR_Opr value, BlockBegin* default_sux) {
  int lng = x->length();

  for (int i = 0; i < lng; i++) {
    SwitchRange* one_range = x->at(i);
    int low_key = one_range->low_key();
    int high_key = one_range->high_key();
    BlockBegin* dest = one_range->sux();
    if (low_key == high_key) {
      __ cmp(lir_cond_equal, value, low_key);
      __ branch(lir_cond_equal, T_INT, dest);
    } else if (high_key - low_key == 1) {
      __ cmp(lir_cond_equal, value, low_key);
      __ branch(lir_cond_equal, T_INT, dest);
      __ cmp(lir_cond_equal, value, high_key);
      __ branch(lir_cond_equal, T_INT, dest);
    } else {
      LabelObj* L = new LabelObj();
      __ cmp(lir_cond_less, value, low_key);
      __ branch(lir_cond_less, L->label());
      __ cmp(lir_cond_lessEqual, value, high_key);
      __ branch(lir_cond_lessEqual, T_INT, dest);
      __ branch_destination(L->label());
    }
  }
  __ jump(default_sux);
}


SwitchRangeArray* LIRGenerator::create_lookup_ranges(TableSwitch* x) {
  SwitchRangeList* res = new SwitchRangeList();
  int len = x->length();
  if (len > 0) {
    BlockBegin* sux = x->sux_at(0);
    int key = x->lo_key();
    BlockBegin* default_sux = x->default_sux();
    SwitchRange* range = new SwitchRange(key, sux);
    for (int i = 0; i < len; i++, key++) {
      BlockBegin* new_sux = x->sux_at(i);
      if (sux == new_sux) {
        // still in same range
        range->set_high_key(key);
      } else {
        // skip tests which explicitly dispatch to the default
        if (sux != default_sux) {
          res->append(range);
        }
        range = new SwitchRange(key, new_sux);
      }
      sux = new_sux;
    }
    if (res->length() == 0 || res->last() != range) res->append(range);
  }
  return res;
}
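
// Editorial worked example (hypothetical switch, not from the original code):
// a tableswitch with lo_key == 0 and per-key successors  B1 B1 B2 D B3
// (D = default) collapses into the ranges
//
//   [0,1] -> B1    [2,2] -> B2    [4,4] -> B3
//
// the [3,3] -> D range is dropped because dispatching to the default
// explicitly would only duplicate the final unconditional jump.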


// we expect the keys to be sorted by increasing value
SwitchRangeArray* LIRGenerator::create_lookup_ranges(LookupSwitch* x) {
  SwitchRangeList* res = new SwitchRangeList();
  int len = x->length();
  if (len > 0) {
    BlockBegin* default_sux = x->default_sux();
    int key = x->key_at(0);
    BlockBegin* sux = x->sux_at(0);
    SwitchRange* range = new SwitchRange(key, sux);
    for (int i = 1; i < len; i++) {
      int new_key = x->key_at(i);
      BlockBegin* new_sux = x->sux_at(i);
      if (key+1 == new_key && sux == new_sux) {
        // still in same range
        range->set_high_key(new_key);
      } else {
        // skip tests which explicitly dispatch to the default
        if (range->sux() != default_sux) {
          res->append(range);
        }
        range = new SwitchRange(new_key, new_sux);
      }
      key = new_key;
      sux = new_sux;
    }
    if (res->length() == 0 || res->last() != range) res->append(range);
  }
  return res;
}


void LIRGenerator::do_TableSwitch(TableSwitch* x) {
  LIRItem tag(x->tag(), this);
  tag.load_item();
  set_no_result(x);

  if (x->is_safepoint()) {
    __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
  }

  // move values into phi locations
  move_to_phi(x->state());

  int lo_key = x->lo_key();
  int hi_key = x->hi_key();
  int len = x->length();
  CodeEmitInfo* info = state_for(x, x->state());
  LIR_Opr value = tag.result();
  if (UseTableRanges) {
    do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux());
  } else {
    for (int i = 0; i < len; i++) {
      __ cmp(lir_cond_equal, value, i + lo_key);
      __ branch(lir_cond_equal, T_INT, x->sux_at(i));
    }
    __ jump(x->default_sux());
  }
}


void LIRGenerator::do_LookupSwitch(LookupSwitch* x) {
  LIRItem tag(x->tag(), this);
  tag.load_item();
  set_no_result(x);

  if (x->is_safepoint()) {
    __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
  }

  // move values into phi locations
  move_to_phi(x->state());

  LIR_Opr value = tag.result();
  if (UseTableRanges) {
    do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux());
  } else {
    int len = x->length();
    for (int i = 0; i < len; i++) {
      __ cmp(lir_cond_equal, value, x->key_at(i));
      __ branch(lir_cond_equal, T_INT, x->sux_at(i));
    }
    __ jump(x->default_sux());
  }
}
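
// Editorial note: with UseTableRanges both switch flavours funnel into
// do_SwitchRanges(), costing at most two compares per range; without it
// every key gets its own cmp/branch pair, so a dense n-way switch costs
// n compares instead of a handful of range tests.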


void LIRGenerator::do_Goto(Goto* x) {
  set_no_result(x);

  if (block()->next()->as_OsrEntry()) {
    // need to free up storage used for OSR entry point
    LIR_Opr osrBuffer = block()->next()->operand();
    BasicTypeList signature;
    signature.append(T_INT);
    CallingConvention* cc = frame_map()->c_calling_convention(&signature);
    __ move(osrBuffer, cc->args()->at(0));
    __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end),
                         getThreadTemp(), LIR_OprFact::illegalOpr, cc->args());
  }

  if (x->is_safepoint()) {
    ValueStack* state = x->state_before() ? x->state_before() : x->state();

    // increment backedge counter if needed
    increment_backedge_counter(state_for(x, state));

    CodeEmitInfo* safepoint_info = state_for(x, state);
    __ safepoint(safepoint_poll_register(), safepoint_info);
  }

  // emit phi-instruction moves after the safepoint since this simplifies
  // describing the state at the safepoint.
  move_to_phi(x->state());

  __ jump(x->default_sux());
}


void LIRGenerator::do_Base(Base* x) {
  __ std_entry(LIR_OprFact::illegalOpr);
  // Emit moves from physical registers / stack slots to virtual registers
  CallingConvention* args = compilation()->frame_map()->incoming_arguments();
  IRScope* irScope = compilation()->hir()->top_scope();
  int java_index = 0;
  for (int i = 0; i < args->length(); i++) {
    LIR_Opr src = args->at(i);
    assert(!src->is_illegal(), "check");
    BasicType t = src->type();

    // Types which are smaller than int are passed as int, so
    // correct the type that is passed.
    switch (t) {
    case T_BYTE:
    case T_BOOLEAN:
    case T_SHORT:
    case T_CHAR:
      t = T_INT;
      break;
    }

    LIR_Opr dest = new_register(t);
    __ move(src, dest);

    // Assign new location to Local instruction for this local
    Local* local = x->state()->local_at(java_index)->as_Local();
    assert(local != NULL, "Locals for incoming arguments must have been created");
    assert(as_ValueType(t)->tag() == local->type()->tag(), "check");
    local->set_operand(dest);
    _instruction_for_operand.at_put_grow(dest->vreg_number(), local, NULL);
    java_index += type2size[t];
  }

  if (DTraceMethodProbes) {
    BasicTypeList signature;
    signature.append(T_INT);    // thread
    signature.append(T_OBJECT); // methodOop
    LIR_OprList* args = new LIR_OprList();
    args->append(getThreadPointer());
    LIR_Opr meth = new_register(T_OBJECT);
    __ oop2reg(method()->encoding(), meth);
    args->append(meth);
    call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), voidType, NULL);
  }

  if (method()->is_synchronized()) {
    LIR_Opr obj;
    if (method()->is_static()) {
      obj = new_register(T_OBJECT);
      __ oop2reg(method()->holder()->java_mirror()->encoding(), obj);
    } else {
      Local* receiver = x->state()->local_at(0)->as_Local();
      assert(receiver != NULL, "must already exist");
      obj = receiver->operand();
    }
    assert(obj->is_valid(), "must be valid");

    if (method()->is_synchronized() && GenerateSynchronizationCode) {
      LIR_Opr lock = new_register(T_INT);
      __ load_stack_address_monitor(0, lock);

      CodeEmitInfo* info = new CodeEmitInfo(SynchronizationEntryBCI, scope()->start()->state(), NULL);
      CodeStub* slow_path = new MonitorEnterStub(obj, lock, info);

      // receiver is guaranteed non-NULL so don't need CodeEmitInfo
      __ lock_object(syncTempOpr(), obj, lock, new_register(T_OBJECT), slow_path, NULL);
    }
  }

  // increment invocation counters if needed
  increment_invocation_counter(new CodeEmitInfo(0, scope()->start()->state(), NULL));

  // all blocks with a successor must end with an unconditional jump
  // to the successor even if they are consecutive
  __ jump(x->default_sux());
}
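
// Editorial illustration (hypothetical signature, not from the original
// code): for
//
//   static int f(byte b, char c) { ... }
//
// the loop above rewrites both T_BYTE and T_CHAR to T_INT before allocating
// virtual registers, matching the calling convention in which sub-int
// arguments always arrive widened to full int slots.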


void LIRGenerator::do_OsrEntry(OsrEntry* x) {
  // construct our frame and model the production of incoming pointer
  // to the OSR buffer.
  __ osr_entry(LIR_Assembler::osrBufferPointer());
  LIR_Opr result = rlock_result(x);
  __ move(LIR_Assembler::osrBufferPointer(), result);
}


void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) {
  int i = x->has_receiver() ? 1 : 0;
  for (; i < args->length(); i++) {
    LIRItem* param = args->at(i);
    LIR_Opr loc = arg_list->at(i);
    if (loc->is_register()) {
      param->load_item_force(loc);
    } else {
      LIR_Address* addr = loc->as_address_ptr();
      param->load_for_store(addr->type());
      if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
        __ unaligned_move(param->result(), addr);
      } else {
        __ move(param->result(), addr);
      }
    }
  }

  if (x->has_receiver()) {
    LIRItem* receiver = args->at(0);
    LIR_Opr loc = arg_list->at(0);
    if (loc->is_register()) {
      receiver->load_item_force(loc);
    } else {
      assert(loc->is_address(), "just checking");
      receiver->load_for_store(T_OBJECT);
      __ move(receiver->result(), loc);
    }
  }
}


// Visits all arguments, returns appropriate items without loading them
LIRItemList* LIRGenerator::invoke_visit_arguments(Invoke* x) {
  LIRItemList* argument_items = new LIRItemList();
  if (x->has_receiver()) {
    LIRItem* receiver = new LIRItem(x->receiver(), this);
    argument_items->append(receiver);
  }
  int idx = x->has_receiver() ? 1 : 0;
  for (int i = 0; i < x->number_of_arguments(); i++) {
    LIRItem* param = new LIRItem(x->argument_at(i), this);
    argument_items->append(param);
    idx += (param->type()->is_double_word() ? 2 : 1);
  }
  return argument_items;
}
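
// Editorial sketch: for a virtual call  o.m(1L, 2)  the item list built by
// invoke_visit_arguments() is
//
//   [0] receiver o    [1] long argument (two slots)    [2] int argument
//
// i.e. the receiver, when present, always sits at index 0, and double-word
// arguments advance the slot counter idx by two.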


// The invoke with receiver has the following phases:
//   a) traverse and load/lock receiver;
//   b) traverse all arguments -> item-array (invoke_visit_arguments)
//   c) push receiver on stack
//   d) load each of the items and push on stack
//   e) unlock receiver
//   f) move receiver into receiver-register %o0
//   g) lock result registers and emit call operation
//
// Before issuing a call, we must spill-save all values on stack
// that are in caller-save registers. "spill-save" moves those registers
// either into a free callee-save register or spills them if no free
// callee-save register is available.
//
// The problem is where to invoke spill-save.
// - if invoked between e) and f), we may lock a callee-save
//   register in "spill-save" that destroys the receiver register
//   before f) is executed
// - if we rearrange f) to be earlier, by loading %o0, it
//   may destroy a value on the stack that is currently in %o0
//   and is waiting to be spilled
// - if we keep the receiver locked while doing spill-save,
//   we cannot spill it as it is spill-locked
//
void LIRGenerator::do_Invoke(Invoke* x) {
  CallingConvention* cc = frame_map()->java_calling_convention(x->signature(), true);

  LIR_OprList* arg_list = cc->args();
  LIRItemList* args = invoke_visit_arguments(x);
  LIR_Opr receiver = LIR_OprFact::illegalOpr;

  // setup result register
  LIR_Opr result_register = LIR_OprFact::illegalOpr;
  if (x->type() != voidType) {
    result_register = result_register_for(x->type());
  }

  CodeEmitInfo* info = state_for(x, x->state());

  invoke_load_arguments(x, args, arg_list);

  if (x->has_receiver()) {
    args->at(0)->load_item_force(LIR_Assembler::receiverOpr());
    receiver = args->at(0)->result();
  }

  // emit invoke code
  bool optimized = x->target_is_loaded() && x->target_is_final();
  assert(receiver->is_illegal() || receiver->is_equal(LIR_Assembler::receiverOpr()), "must match");

  switch (x->code()) {
    case Bytecodes::_invokestatic:
      __ call_static(x->target(), result_register,
                     SharedRuntime::get_resolve_static_call_stub(),
                     arg_list, info);
      break;
    case Bytecodes::_invokespecial:
    case Bytecodes::_invokevirtual:
    case Bytecodes::_invokeinterface:
      // for final target we still produce an inline cache, in order
      // to be able to call mixed mode
      if (x->code() == Bytecodes::_invokespecial || optimized) {
        __ call_opt_virtual(x->target(), receiver, result_register,
                            SharedRuntime::get_resolve_opt_virtual_call_stub(),
                            arg_list, info);
      } else if (x->vtable_index() < 0) {
        __ call_icvirtual(x->target(), receiver, result_register,
                          SharedRuntime::get_resolve_virtual_call_stub(),
                          arg_list, info);
      } else {
        int entry_offset = instanceKlass::vtable_start_offset() + x->vtable_index() * vtableEntry::size();
        int vtable_offset = entry_offset * wordSize + vtableEntry::method_offset_in_bytes();
        __ call_virtual(x->target(), receiver, result_register, vtable_offset, arg_list, info);
      }
      break;
    default:
      ShouldNotReachHere();
      break;
  }

  if (x->type()->is_float() || x->type()->is_double()) {
    // Force rounding of results from non-strictfp when in strictfp
    // scope (or when we don't know the strictness of the callee, to
    // be safe.)
    if (method()->is_strict()) {
      if (!x->target_is_loaded() || !x->target_is_strictfp()) {
        result_register = round_item(result_register);
      }
    }
  }

  if (result_register->is_valid()) {
    LIR_Opr result = rlock_result(x);
    __ move(result_register, result);
  }
}
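
// Editorial worked example (illustrative numbers only): for vtable_index 2,
//
//   entry_offset  = instanceKlass::vtable_start_offset() + 2 * vtableEntry::size()
//   vtable_offset = entry_offset * wordSize + vtableEntry::method_offset_in_bytes()
//
// so __ call_virtual() can fetch the target methodOop at a constant offset
// from the receiver's klass without resolving anything at run time.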


void LIRGenerator::do_FPIntrinsics(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");
  LIRItem value(x->argument_at(0), this);
  LIR_Opr reg = rlock_result(x);
  value.load_item();
  LIR_Opr tmp = force_to_spill(value.result(), as_BasicType(x->type()));
  __ move(tmp, reg);
}



// Code for:  x->x() {x->cond()} x->y() ? x->tval() : x->fval()
void LIRGenerator::do_IfOp(IfOp* x) {
#ifdef ASSERT
  {
    ValueTag xtag = x->x()->type()->tag();
    ValueTag ttag = x->tval()->type()->tag();
    assert(xtag == intTag || xtag == objectTag, "cannot handle others");
    assert(ttag == addressTag || ttag == intTag || ttag == objectTag || ttag == longTag, "cannot handle others");
    assert(ttag == x->fval()->type()->tag(), "cannot handle others");
  }
#endif

  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  left.load_item();
  if (can_inline_as_constant(right.value())) {
    right.dont_load_item();
  } else {
    right.load_item();
  }

  LIRItem t_val(x->tval(), this);
  LIRItem f_val(x->fval(), this);
  t_val.dont_load_item();
  f_val.dont_load_item();
  LIR_Opr reg = rlock_result(x);

  __ cmp(lir_cond(x->cond()), left.result(), right.result());
  __ cmove(lir_cond(x->cond()), t_val.result(), f_val.result(), reg);
}
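
// Editorial sketch (hypothetical Java input): a conditional expression
//
//   z = (a < b) ? x : y;
//
// lowers through do_IfOp() to the branch-free pair
//
//   cmp   less, a, b
//   cmove less, x, y -> z
//
// with x and y deliberately left unloaded (dont_load_item) so the backend
// may still use them as constants or stack operands.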


void LIRGenerator::do_Intrinsic(Intrinsic* x) {
  switch (x->id()) {
  case vmIntrinsics::_intBitsToFloat      :
  case vmIntrinsics::_doubleToRawLongBits :
  case vmIntrinsics::_longBitsToDouble    :
  case vmIntrinsics::_floatToRawIntBits   : {
    do_FPIntrinsics(x);
    break;
  }

  case vmIntrinsics::_currentTimeMillis: {
    assert(x->number_of_arguments() == 0, "wrong type");
    LIR_Opr reg = result_register_for(x->type());
    __ call_runtime_leaf(CAST_FROM_FN_PTR(address, os::javaTimeMillis), getThreadTemp(),
                         reg, new LIR_OprList());
    LIR_Opr result = rlock_result(x);
    __ move(reg, result);
    break;
  }

  case vmIntrinsics::_nanoTime: {
    assert(x->number_of_arguments() == 0, "wrong type");
    LIR_Opr reg = result_register_for(x->type());
    __ call_runtime_leaf(CAST_FROM_FN_PTR(address, os::javaTimeNanos), getThreadTemp(),
                         reg, new LIR_OprList());
    LIR_Opr result = rlock_result(x);
    __ move(reg, result);
    break;
  }

  case vmIntrinsics::_Object_init:   do_RegisterFinalizer(x); break;
  case vmIntrinsics::_getClass:      do_getClass(x);          break;
  case vmIntrinsics::_currentThread: do_currentThread(x);     break;

  case vmIntrinsics::_dlog:   // fall through
  case vmIntrinsics::_dlog10: // fall through
  case vmIntrinsics::_dabs:   // fall through
  case vmIntrinsics::_dsqrt:  // fall through
  case vmIntrinsics::_dtan:   // fall through
  case vmIntrinsics::_dsin:   // fall through
  case vmIntrinsics::_dcos:   do_MathIntrinsic(x); break;
  case vmIntrinsics::_arraycopy: do_ArrayCopy(x);  break;

  // java.nio.Buffer.checkIndex
  case vmIntrinsics::_checkIndex: do_NIOCheckIndex(x); break;

  case vmIntrinsics::_compareAndSwapObject:
    do_CompareAndSwap(x, objectType);
    break;
  case vmIntrinsics::_compareAndSwapInt:
    do_CompareAndSwap(x, intType);
    break;
  case vmIntrinsics::_compareAndSwapLong:
    do_CompareAndSwap(x, longType);
    break;

  // sun.misc.AtomicLongCSImpl.attemptUpdate
  case vmIntrinsics::_attemptUpdate:
    do_AttemptUpdate(x);
    break;

  default: ShouldNotReachHere(); break;
  }
}


void LIRGenerator::do_ProfileCall(ProfileCall* x) {
  // Need recv in a temporary register so it interferes with the other temporaries
  LIR_Opr recv = LIR_OprFact::illegalOpr;
  LIR_Opr mdo = new_register(T_OBJECT);
  LIR_Opr tmp = new_register(T_INT);
  if (x->recv() != NULL) {
    LIRItem value(x->recv(), this);
    value.load_item();
    recv = new_register(T_OBJECT);
    __ move(value.result(), recv);
  }
  __ profile_call(x->method(), x->bci_of_invoke(), mdo, recv, tmp, x->known_holder());
}


void LIRGenerator::do_ProfileCounter(ProfileCounter* x) {
  LIRItem mdo(x->mdo(), this);
  mdo.load_item();

  increment_counter(new LIR_Address(mdo.result(), x->offset(), T_INT), x->increment());
}


LIR_Opr LIRGenerator::call_runtime(Value arg1, address entry, ValueType* result_type, CodeEmitInfo* info) {
  LIRItemList args(1);
  LIRItem value(arg1, this);
  args.append(&value);
  BasicTypeList signature;
  signature.append(as_BasicType(arg1->type()));

  return call_runtime(&signature, &args, entry, result_type, info);
}


LIR_Opr LIRGenerator::call_runtime(Value arg1, Value arg2, address entry, ValueType* result_type, CodeEmitInfo* info) {
  LIRItemList args(2);
  LIRItem value1(arg1, this);
  LIRItem value2(arg2, this);
  args.append(&value1);
  args.append(&value2);
  BasicTypeList signature;
  signature.append(as_BasicType(arg1->type()));
  signature.append(as_BasicType(arg2->type()));

  return call_runtime(&signature, &args, entry, result_type, info);
}


LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIR_OprList* args,
                                   address entry, ValueType* result_type, CodeEmitInfo* info) {
  // get a result register
  LIR_Opr phys_reg = LIR_OprFact::illegalOpr;
  LIR_Opr result = LIR_OprFact::illegalOpr;
  if (result_type->tag() != voidTag) {
    result = new_register(result_type);
    phys_reg = result_register_for(result_type);
  }

  // move the arguments into the correct location
  CallingConvention* cc = frame_map()->c_calling_convention(signature);
  assert(cc->length() == args->length(), "argument mismatch");
  for (int i = 0; i < args->length(); i++) {
    LIR_Opr arg = args->at(i);
    LIR_Opr loc = cc->at(i);
    if (loc->is_register()) {
      __ move(arg, loc);
    } else {
      LIR_Address* addr = loc->as_address_ptr();
//      if (!can_store_as_constant(arg)) {
//        LIR_Opr tmp = new_register(arg->type());
//        __ move(arg, tmp);
//        arg = tmp;
//      }
      if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
        __ unaligned_move(arg, addr);
      } else {
        __ move(arg, addr);
      }
    }
  }

  if (info) {
    __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info);
  } else {
    __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args());
  }
  if (result->is_valid()) {
    __ move(phys_reg, result);
  }
  return result;
}
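
// Editorial usage sketch (hedged; the runtime entry shown is illustrative):
//
//   BasicTypeList sig;
//   sig.append(T_DOUBLE);
//   LIR_OprList* rt_args = new LIR_OprList();
//   rt_args->append(value_opr);
//   LIR_Opr res = call_runtime(&sig, rt_args,
//                              CAST_FROM_FN_PTR(address, SharedRuntime::dsin),
//                              doubleType, NULL);   // info == NULL -> leaf call
//
// the helper moves arguments per c_calling_convention() and copies the
// physical result register into a fresh virtual register.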


LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIRItemList* args,
                                   address entry, ValueType* result_type, CodeEmitInfo* info) {
  // get a result register
  LIR_Opr phys_reg = LIR_OprFact::illegalOpr;
  LIR_Opr result = LIR_OprFact::illegalOpr;
  if (result_type->tag() != voidTag) {
    result = new_register(result_type);
    phys_reg = result_register_for(result_type);
  }

  // move the arguments into the correct location
  CallingConvention* cc = frame_map()->c_calling_convention(signature);

  assert(cc->length() == args->length(), "argument mismatch");
  for (int i = 0; i < args->length(); i++) {
    LIRItem* arg = args->at(i);
    LIR_Opr loc = cc->at(i);
    if (loc->is_register()) {
      arg->load_item_force(loc);
    } else {
      LIR_Address* addr = loc->as_address_ptr();
      arg->load_for_store(addr->type());
      if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
        __ unaligned_move(arg->result(), addr);
      } else {
        __ move(arg->result(), addr);
      }
    }
  }

  if (info) {
    __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info);
  } else {
    __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args());
  }
  if (result->is_valid()) {
    __ move(phys_reg, result);
  }
  return result;
}



void LIRGenerator::increment_invocation_counter(CodeEmitInfo* info, bool backedge) {
#ifdef TIERED
  if (_compilation->env()->comp_level() == CompLevel_fast_compile &&
      (method()->code_size() >= Tier1BytecodeLimit || backedge)) {
    int limit = InvocationCounter::Tier1InvocationLimit;
    int offset = in_bytes(methodOopDesc::invocation_counter_offset() +
                          InvocationCounter::counter_offset());
    if (backedge) {
      limit = InvocationCounter::Tier1BackEdgeLimit;
      offset = in_bytes(methodOopDesc::backedge_counter_offset() +
                        InvocationCounter::counter_offset());
    }

    LIR_Opr meth = new_register(T_OBJECT);
    __ oop2reg(method()->encoding(), meth);
    LIR_Opr result = increment_and_return_counter(meth, offset, InvocationCounter::count_increment);
    __ cmp(lir_cond_aboveEqual, result, LIR_OprFact::intConst(limit));
    CodeStub* overflow = new CounterOverflowStub(info, info->bci());
    __ branch(lir_cond_aboveEqual, T_INT, overflow);
    __ branch_destination(overflow->continuation());
  }
#endif
}
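
// Editorial note (sketch, TIERED builds only): the generated pattern is
//
//   counter += InvocationCounter::count_increment;
//   if (counter >= limit) goto CounterOverflowStub;  // request recompilation
//   continuation:
//
// with limit chosen from Tier1InvocationLimit or Tier1BackEdgeLimit
// depending on whether a method entry or a backedge is being counted.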