hotspot/src/share/vm/c1/c1_LIRAssembler.cpp

rev 611 : Merge
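
--- old/hotspot/src/share/vm/c1/c1_LIRAssembler.cpp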
   1 #ifdef USE_PRAGMA_IDENT_SRC
   2 #pragma ident "@(#)c1_LIRAssembler.cpp  1.135 07/07/02 16:50:41 JVM"
   3 #endif
   4 /*
   5  * Copyright 2000-2006 Sun Microsystems, Inc.  All Rights Reserved.
   6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   7  *
   8  * This code is free software; you can redistribute it and/or modify it
   9  * under the terms of the GNU General Public License version 2 only, as
  10  * published by the Free Software Foundation.
  11  *
  12  * This code is distributed in the hope that it will be useful, but WITHOUT
  13  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  14  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  15  * version 2 for more details (a copy is included in the LICENSE file that
  16  * accompanied this code).
  17  *
  18  * You should have received a copy of the GNU General Public License version
  19  * 2 along with this work; if not, write to the Free Software Foundation,
  20  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  21  *
  22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
  23  * CA 95054 USA or visit www.sun.com if you need additional information or
  24  * have any questions.
  25  *  


  60       case Bytecodes::_checkcast:
  61       case Bytecodes::_ldc:
  62       case Bytecodes::_ldc_w:
  63         break;
  64       default:
  65         ShouldNotReachHere();
  66     }
  67   } else {
  68     ShouldNotReachHere();
  69   }
  70 #endif
  71 }
  72 
  73 
  74 //---------------------------------------------------------------
  75 
  76 
  77 LIR_Assembler::LIR_Assembler(Compilation* c): 
  78    _compilation(c)
  79  , _masm(c->masm())

  80  , _frame_map(c->frame_map())
  81  , _current_block(NULL)
  82  , _pending_non_safepoint(NULL)
  83  , _pending_non_safepoint_offset(0)
  84 {
  85   _slow_case_stubs = new CodeStubList();
  86 }
  87 
  88 
  89 LIR_Assembler::~LIR_Assembler() {
  90 }
  91 
  92 
  93 void LIR_Assembler::append_patching_stub(PatchingStub* stub) {
  94   _slow_case_stubs->append(stub);
  95 }
  96 
  97 
  98 void LIR_Assembler::check_codespace() {
  99   CodeSection* cs = _masm->code_section();


 201   if (block->is_set(BlockBegin::backward_branch_target_flag)) {
 202     align_backward_branch_target(); 
 203   }
 204 
 205   // if this block is the start of an exception handler, record the
 206   // PC offset of the first instruction for later construction of
 207   // the ExceptionHandlerTable
 208   if (block->is_set(BlockBegin::exception_entry_flag)) {
 209     block->set_exception_handler_pco(code_offset());
 210   }
 211 
 212 #ifndef PRODUCT
 213   if (PrintLIRWithAssembly) {
 214     // don't print Phis
 215     InstructionPrinter ip(false);
 216     block->print(ip);
 217   }
 218 #endif /* PRODUCT */
 219 
 220   assert(block->lir() != NULL, "must have LIR");
 221   IA32_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));
 222 
 223 #ifndef PRODUCT
 224   if (CommentedAssembly) {
 225     stringStream st;
 226     st.print_cr(" block B%d [%d, %d]", block->block_id(), block->bci(), block->end()->bci());
 227     _masm->block_comment(st.as_string());
 228   }
 229 #endif
 230 
 231   emit_lir_list(block->lir());
 232 
 233   IA32_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));
 234 }
 235 
 236 
 237 void LIR_Assembler::emit_lir_list(LIR_List* list) {
 238   peephole(list);
 239 
 240   int n = list->length();
 241   for (int i = 0; i < n; i++) {
 242     LIR_Op* op = list->at(i);
 243 
 244     check_codespace();
 245     CHECK_BAILOUT();
 246 
 247 #ifndef PRODUCT
 248     if (CommentedAssembly) {
 249       // Don't print out every op, since that's too verbose.  Print
 250       // branches, since they include block and stub names, and also
 251       // patching moves, since they generate funny-looking code.
 252       if (op->code() == lir_branch ||
 253           (op->code() == lir_move && op->as_Op1()->patch_code() != lir_patch_none)) {


 420   }
 421 
 422   // emit the static call stub out of line
 423   emit_static_call_stub();
 424 
 425   switch (op->code()) {
 426   case lir_static_call:  
 427     call(op->addr(), relocInfo::static_call_type, op->info());
 428     break;
 429   case lir_optvirtual_call: 
 430     call(op->addr(), relocInfo::opt_virtual_call_type, op->info());
 431     break;
 432   case lir_icvirtual_call:
 433     ic_call(op->addr(), op->info());
 434     break;
 435   case lir_virtual_call:
 436     vtable_call(op->vtable_offset(), op->info());
 437     break;
 438   default: ShouldNotReachHere();
 439   }
 440 #if defined(IA32) && defined(TIERED)
 441   // C2 leaves the FPU stack dirty; clean it up
 442   if (UseSSE < 2) {
 443     int i;
 444     for (i = 1; i <= 7; i++) {
 445       ffree(i);
 446     }
 447     if (!op->result_opr()->is_float_kind()) {
 448       ffree(0);
 449     }
 450   }
 451 #endif // IA32 && TIERED
 452 }
 453 
 454 
 455 void LIR_Assembler::emit_opLabel(LIR_OpLabel* op) {
 456   _masm->bind (*(op->label()));
 457 }
 458 
 459 
 460 void LIR_Assembler::emit_op1(LIR_Op1* op) {
 461   switch (op->code()) {
 462     case lir_move:   
 463       if (op->move_kind() == lir_move_volatile) {
 464         assert(op->patch_code() == lir_patch_none, "can't patch volatiles");
 465         volatile_move_op(op->in_opr(), op->result_opr(), op->type(), op->info());
 466       } else {
 467         move_op(op->in_opr(), op->result_opr(), op->type(),
 468                 op->patch_code(), op->info(), op->pop_fpu_stack(), op->move_kind() == lir_move_unaligned);
 469       }
 470       break;
 471 
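
+++ new/hotspot/src/share/vm/c1/c1_LIRAssembler.cpp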


   1 #ifdef USE_PRAGMA_IDENT_SRC
   2 #pragma ident "@(#)c1_LIRAssembler.cpp  1.135 07/07/02 16:50:41 JVM"
   3 #endif
   4 /*
   5  * Copyright 2000-2008 Sun Microsystems, Inc.  All Rights Reserved.
   6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   7  *
   8  * This code is free software; you can redistribute it and/or modify it
   9  * under the terms of the GNU General Public License version 2 only, as
  10  * published by the Free Software Foundation.
  11  *
  12  * This code is distributed in the hope that it will be useful, but WITHOUT
  13  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  14  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  15  * version 2 for more details (a copy is included in the LICENSE file that
  16  * accompanied this code).
  17  *
  18  * You should have received a copy of the GNU General Public License version
  19  * 2 along with this work; if not, write to the Free Software Foundation,
  20  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  21  *
  22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
  23  * CA 95054 USA or visit www.sun.com if you need additional information or
  24  * have any questions.
  25  *  


  60       case Bytecodes::_checkcast:
  61       case Bytecodes::_ldc:
  62       case Bytecodes::_ldc_w:
  63         break;
  64       default:
  65         ShouldNotReachHere();
  66     }
  67   } else {
  68     ShouldNotReachHere();
  69   }
  70 #endif
  71 }
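
Note: the switch above is the tail of a debug-only (ASSERT-build) consistency
check: it verifies that a patching site maps back to a bytecode that can
legitimately require patching, with field accesses handled in one arm and
klass-loading bytecodes such as _checkcast, _ldc, and _ldc_w in the arm shown
here. Any other bytecode indicates a compiler bug, hence ShouldNotReachHere().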
  72 
  73 
  74 //---------------------------------------------------------------
  75 
  76 
  77 LIR_Assembler::LIR_Assembler(Compilation* c): 
  78    _compilation(c)
  79  , _masm(c->masm())
  80  , _bs(Universe::heap()->barrier_set())
  81  , _frame_map(c->frame_map())
  82  , _current_block(NULL)
  83  , _pending_non_safepoint(NULL)
  84  , _pending_non_safepoint_offset(0)
  85 {
  86   _slow_case_stubs = new CodeStubList();
  87 }
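
Note: the functional change in this constructor is the new _bs field,
initialized from Universe::heap()->barrier_set(). The assembler now caches
the heap's barrier set once at construction so that later code-emission
routines can consult it without repeating the lookup. A minimal standalone
sketch of the caching pattern follows; every name in it is an illustrative
stand-in, not the VM's real API:

    // bs_sketch.cpp : illustrative stand-ins only, not HotSpot types
    struct BarrierSet {
      // in the VM, the GC write-barrier emission hooks live here
    };

    struct Heap {
      BarrierSet  _bs;
      BarrierSet* barrier_set() { return &_bs; }
    };

    Heap g_heap;  // stand-in for the Universe::heap() singleton

    class AssemblerSketch {
      BarrierSet* _bs;  // cached once, like LIR_Assembler::_bs above
     public:
      AssemblerSketch() : _bs(g_heap.barrier_set()) {}
      BarrierSet* bs() const { return _bs; }
    };

    int main() { AssemblerSketch a; return a.bs() ? 0 : 1; }

Presumably the cached pointer is consumed by the store and allocation
emitters when they need to emit GC write barriers; this hunk only
introduces the field.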
  88 
  89 
  90 LIR_Assembler::~LIR_Assembler() {
  91 }
  92 
  93 
  94 void LIR_Assembler::append_patching_stub(PatchingStub* stub) {
  95   _slow_case_stubs->append(stub);
  96 }
  97 
  98 
  99 void LIR_Assembler::check_codespace() {
 100   CodeSection* cs = _masm->code_section();
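
Note: check_codespace() is cut off by the excerpt at this point. Its role is
to check how much room remains in the current CodeSection and to bail out
the compilation when the buffer is nearly full, which is why each call to it
in emit_lir_list() below is paired with CHECK_BAILOUT(). The body is not
shown here, so treat that description as surrounding context rather than as
part of this change.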


 202   if (block->is_set(BlockBegin::backward_branch_target_flag)) {
 203     align_backward_branch_target(); 
 204   }
 205 
 206   // if this block is the start of an exception handler, record the
 207   // PC offset of the first instruction for later construction of
 208   // the ExceptionHandlerTable
 209   if (block->is_set(BlockBegin::exception_entry_flag)) {
 210     block->set_exception_handler_pco(code_offset());
 211   }
 212 
 213 #ifndef PRODUCT
 214   if (PrintLIRWithAssembly) {
 215     // don't print Phis
 216     InstructionPrinter ip(false);
 217     block->print(ip);
 218   }
 219 #endif /* PRODUCT */
 220 
 221   assert(block->lir() != NULL, "must have LIR");
 222   X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));
 223 
 224 #ifndef PRODUCT
 225   if (CommentedAssembly) {
 226     stringStream st;
 227     st.print_cr(" block B%d [%d, %d]", block->block_id(), block->bci(), block->end()->bci());
 228     _masm->block_comment(st.as_string());
 229   }
 230 #endif
 231 
 232   emit_lir_list(block->lir());
 233 
 234   X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));
 235 }
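
Note: two develop-time flags appear in this function. PrintLIRWithAssembly
dumps each block's LIR alongside the generated code, and CommentedAssembly
interleaves the disassembly with block comments. Given the print_cr format
above, the comment for block 2 spanning bytecodes 0..11 would read roughly
(illustrative output only):

     block B2 [0, 11]

Also visible in this hunk is the platform-define rename carried by this
revision: the rsp_offset assertions are now guarded by X86_ONLY instead of
IA32_ONLY.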
 236 
 237 
 238 void LIR_Assembler::emit_lir_list(LIR_List* list) {
 239   peephole(list);
 240 
 241   int n = list->length();
 242   for (int i = 0; i < n; i++) {
 243     LIR_Op* op = list->at(i);
 244 
 245     check_codespace();
 246     CHECK_BAILOUT();
 247 
 248 #ifndef PRODUCT
 249     if (CommentedAssembly) {
 250       // Don't print out every op, since that's too verbose.  Print
 251       // branches, since they include block and stub names, and also
 252       // patching moves, since they generate funny-looking code.
 253       if (op->code() == lir_branch ||
 254           (op->code() == lir_move && op->as_Op1()->patch_code() != lir_patch_none)) {


 421   }
 422 
 423   // emit the static call stub out of line
 424   emit_static_call_stub();
 425 
 426   switch (op->code()) {
 427   case lir_static_call:  
 428     call(op->addr(), relocInfo::static_call_type, op->info());
 429     break;
 430   case lir_optvirtual_call: 
 431     call(op->addr(), relocInfo::opt_virtual_call_type, op->info());
 432     break;
 433   case lir_icvirtual_call:
 434     ic_call(op->addr(), op->info());
 435     break;
 436   case lir_virtual_call:
 437     vtable_call(op->vtable_offset(), op->info());
 438     break;
 439   default: ShouldNotReachHere();
 440   }
 441 #if defined(X86) && defined(TIERED)
 442   // C2 leaves the FPU stack dirty; clean it up
 443   if (UseSSE < 2) {
 444     int i;
 445     for (i = 1; i <= 7; i++) {
 446       ffree(i);
 447     }
 448     if (!op->result_opr()->is_float_kind()) {
 449       ffree(0);
 450     }
 451   }
 452 #endif // X86 && TIERED
 453 }
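
Note: the #if block above only matters for x87 code (UseSSE < 2), where a
C2-compiled callee can return with stale values parked on the FPU register
stack. Registers st(1) through st(7) can never hold the call's result, so
they are always freed; st(0) is freed as well unless the call returns a
float or double there. A self-contained sketch of that policy, with ffree()
stubbed out (in the assembler it emits the x87 FFREE instruction):

    #include <cstdio>

    // stand-in: the real ffree(i) emits FFREE st(i)
    static void ffree(int i) { std::printf("ffree st(%d)\n", i); }

    static void clean_fpu_stack(bool result_is_float_kind) {
      for (int i = 1; i <= 7; i++) {
        ffree(i);                  // st(1)..st(7) never carry the result
      }
      if (!result_is_float_kind) {
        ffree(0);                  // keep st(0) only for an FP result
      }
    }

    int main() { clean_fpu_stack(false); return 0; }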
 454 
 455 
 456 void LIR_Assembler::emit_opLabel(LIR_OpLabel* op) {
 457   _masm->bind (*(op->label()));
 458 }
 459 
 460 
 461 void LIR_Assembler::emit_op1(LIR_Op1* op) {
 462   switch (op->code()) {
 463     case lir_move:   
 464       if (op->move_kind() == lir_move_volatile) {
 465         assert(op->patch_code() == lir_patch_none, "can't patch volatiles");
 466         volatile_move_op(op->in_opr(), op->result_opr(), op->type(), op->info());
 467       } else {
 468         move_op(op->in_opr(), op->result_opr(), op->type(),
 469                 op->patch_code(), op->info(), op->pop_fpu_stack(), op->move_kind() == lir_move_unaligned);
 470       }
 471       break;
 472
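
Note: the excerpt ends inside emit_op1's lir_move case. That case splits
moves two ways: volatile moves go through volatile_move_op() and must not be
patchable (see the assert), while all other moves go through move_op() with
the patch code, debug info, FPU-stack-pop flag, and an unaligned flag
(move_kind() == lir_move_unaligned) threaded through.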