/*
 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "asm/codeBuffer.hpp"
#include "runtime/icache.hpp"
#include "runtime/os.hpp"
#ifdef TARGET_ARCH_x86
# include "assembler_x86.inline.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "assembler_sparc.inline.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "assembler_zero.inline.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "assembler_arm.inline.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "assembler_ppc.inline.hpp"
#endif


// Implementation of AbstractAssembler
//
// The AbstractAssembler generates code into a CodeBuffer. To make code generation faster,
// the assembler keeps a copy of the code buffer's boundaries and modifies them when
// emitting bytes rather than going through the code buffer's accessor functions all the time.
// The code buffer is updated via set_code_end(...) after emitting a whole instruction.
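//
// A rough sketch of the fast path this enables (illustrative only; the real
// emit_byte helper lives in the assembler headers and may differ in detail):
//
//   void AbstractAssembler::emit_byte(int x) {
//     *(unsigned char*)_code_pos = (unsigned char) x;  // write through the cached cursor
//     _code_pos += sizeof(unsigned char);
//     sync();                                          // publish _code_pos via set_code_end()
//   }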

AbstractAssembler::AbstractAssembler(CodeBuffer* code) {
  if (code == NULL) return;
  CodeSection* cs = code->insts();
  cs->clear_mark();   // new assembler kills old mark
  _code_section = cs;
  _code_begin  = cs->start();
  _code_limit  = cs->limit();
  _code_pos    = cs->end();
  _oop_recorder = code->oop_recorder();
  if (_code_begin == NULL) {
    vm_exit_out_of_memory(0, err_msg("CodeCache: no room for %s",
                                     code->name()));
  }
}

void AbstractAssembler::set_code_section(CodeSection* cs) {
  assert(cs->outer() == code_section()->outer(), "sanity");
  assert(cs->is_allocated(), "need to pre-allocate this section");
  cs->clear_mark();  // new assembly into this section kills old mark
  _code_section = cs;
  _code_begin  = cs->start();
  _code_limit  = cs->limit();
  _code_pos    = cs->end();
}

// Inform CodeBuffer that incoming code and relocation will be for stubs
address AbstractAssembler::start_a_stub(int required_space) {
  CodeBuffer*  cb = code();
  CodeSection* cs = cb->stubs();
  assert(_code_section == cb->insts(), "not in insts?");
  sync();
  if (cs->maybe_expand_to_ensure_remaining(required_space)
      && cb->blob() == NULL) {
    return NULL;
  }
  set_code_section(cs);
  return pc();
}

// Inform CodeBuffer that incoming code and relocation will be code
// Should not be called if start_a_stub() returned NULL
void AbstractAssembler::end_a_stub() {
  assert(_code_section == code()->stubs(), "not in stubs?");
  sync();
  set_code_section(code()->insts());
}

// Inform CodeBuffer that incoming code and relocation will be for constants
address AbstractAssembler::start_a_const(int required_space, int required_align) {
  CodeBuffer*  cb = code();
  CodeSection* cs = cb->consts();
  assert(_code_section == cb->insts(), "not in insts?");
  sync();
  address end = cs->end();
  int pad = -(intptr_t)end & (required_align-1);
  if (cs->maybe_expand_to_ensure_remaining(pad + required_space)) {
    if (cb->blob() == NULL)  return NULL;
    end = cs->end();  // refresh pointer
  }
  if (pad > 0) {
    while (--pad >= 0) { *end++ = 0; }
    cs->set_end(end);
  }
  set_code_section(cs);
  return end;
}

// Inform CodeBuffer that incoming code and relocation will be code
// Should not be called if start_a_const() returned NULL
void AbstractAssembler::end_a_const() {
  assert(_code_section == code()->consts(), "not in consts?");
  sync();
  set_code_section(code()->insts());
}
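
// Both bracketing pairs above follow the same pattern: switch the assembler
// into the stubs (or consts) section, emit, then switch back to insts.  A
// minimal, hypothetical usage sketch (stub size and contents are up to the
// caller):
//
//   address base = __ start_a_stub(stub_size);
//   if (base == NULL) return;     // CodeBuffer could not make room
//   ... emit stub code and relocations ...
//   __ end_a_stub();              // resume emitting into the insts section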


void AbstractAssembler::flush() {
  sync();
  ICache::invalidate_range(addr_at(0), offset());
}


void AbstractAssembler::a_byte(int x) {
  emit_byte(x);
}


void AbstractAssembler::a_long(jint x) {
  emit_long(x);
}

// Labels refer to positions in the (to be) generated code.  There are bound
// and unbound labels.
//
// Bound labels refer to known positions in the already generated code.
// offset() is the position the label refers to.
//
// Unbound labels refer to unknown positions in the code to be generated; they
// may contain a list of unresolved displacements that refer to them.
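//
// A minimal usage sketch (the branch mnemonic is platform-specific and only
// illustrative here):
//
//   Label done;
//   __ jcc(Assembler::zero, done);  // forward branch: 'done' is still unbound, so
//                                   // the branch locator is recorded in the label
//   ...
//   __ bind(done);                  // binding patches all recorded branches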
#ifndef PRODUCT
void AbstractAssembler::print(Label& L) {
  if (L.is_bound()) {
    tty->print_cr("bound label to %d|%d", L.loc_pos(), L.loc_sect());
  } else if (L.is_unbound()) {
    L.print_instructions((MacroAssembler*)this);
  } else {
    tty->print_cr("label in inconsistent state (loc = %d)", L.loc());
  }
}
#endif // PRODUCT


void AbstractAssembler::bind(Label& L) {
  if (L.is_bound()) {
    // Assembler can bind a label more than once to the same place.
    guarantee(L.loc() == locator(), "attempt to redefine label");
    return;
  }
  L.bind_loc(locator());
  L.patch_instructions((MacroAssembler*)this);
}

void AbstractAssembler::generate_stack_overflow_check(int frame_size_in_bytes) {
  if (UseStackBanging) {
    // Each code entry causes one stack bang n pages down the stack, where n
    // is configurable via StackShadowPages.  The setting depends on the maximum
    // depth of the VM call stack or native code before going back into Java code,
    // since only Java code can raise a stack overflow exception using the
    // stack banging mechanism.  The VM and native code do not detect stack
    // overflow.
    // The code in JavaCalls::call() checks that there are at least n pages
    // available, so all entry code needs to do is bang once for the end of
    // this shadow zone.
    // The entry code may need to bang additional pages if the framesize
    // is greater than a page.
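    //
    // For example, assuming a 4K page size and StackShadowPages == 20: the
    // shadow zone ends 80K below the current stack pointer, so a frame
    // smaller than one page gets a single bang at offset 80K.  A 10K frame
    // extends bang_end to 90K, and the loop below bangs at 80K, 84K and 88K.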

    const int page_size = os::vm_page_size();
    int bang_end = StackShadowPages*page_size;

    // This is how far the previous frame's stack banging extended.
    const int bang_end_safe = bang_end;

    if (frame_size_in_bytes > page_size) {
      bang_end += frame_size_in_bytes;
    }

    int bang_offset = bang_end_safe;
    while (bang_offset <= bang_end) {
      // Need at least one stack bang at end of shadow zone.
      bang_stack_with_offset(bang_offset);
      bang_offset += page_size;
    }
  } // end (UseStackBanging)
}

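// Unresolved branches to a still-unbound label are remembered as branch
// locators: the first PatchCacheSize of them live in the label's small inline
// _patches array, and any further ones overflow into a growable array
// obtained from the CodeBuffer.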
void Label::add_patch_at(CodeBuffer* cb, int branch_loc) {
  assert(_loc == -1, "Label is unbound");
  if (_patch_index < PatchCacheSize) {
    _patches[_patch_index] = branch_loc;
  } else {
    if (_patch_overflow == NULL) {
      _patch_overflow = cb->create_patch_overflow();
    }
    _patch_overflow->push(branch_loc);
  }
  ++_patch_index;
}

void Label::patch_instructions(MacroAssembler* masm) {
  assert(is_bound(), "Label is bound");
  CodeBuffer* cb = masm->code();
  int target_sect = CodeBuffer::locator_sect(loc());
  address target = cb->locator_address(loc());
  while (_patch_index > 0) {
    --_patch_index;
    int branch_loc;
    if (_patch_index >= PatchCacheSize) {
      branch_loc = _patch_overflow->pop();
    } else {
      branch_loc = _patches[_patch_index];
    }
    int branch_sect = CodeBuffer::locator_sect(branch_loc);
    address branch = cb->locator_address(branch_loc);
    if (branch_sect == CodeBuffer::SECT_CONSTS) {
      // The thing to patch is a constant word.
      *(address*)branch = target;
      continue;
    }

#ifdef ASSERT
    // Cross-section branches only work if the
    // intermediate section boundaries are frozen.
    if (target_sect != branch_sect) {
      for (int n = MIN2(target_sect, branch_sect),
               nlimit = (target_sect + branch_sect) - n;
           n < nlimit; n++) {
        CodeSection* cs = cb->code_section(n);
        assert(cs->is_frozen(), "cross-section branch needs stable offsets");
      }
    }
#endif //ASSERT

    // Push the target offset into the branch instruction.
    masm->pd_patch_instruction(branch, target);
  }
}

struct DelayedConstant {
  typedef void (*value_fn_t)();
  BasicType type;
  intptr_t value;
  value_fn_t value_fn;
  // This limit of 20 is generous for initial uses.
  // The limit needs to be large enough to store the field offsets
  // into classes which do not have statically fixed layouts.
  // (Initial use is for method handle object offsets.)
  // Look for uses of "delayed_value" in the source code
  // and make sure this number is generous enough to handle all of them.
  enum { DC_LIMIT = 20 };
  static DelayedConstant delayed_constants[DC_LIMIT];
  static DelayedConstant* add(BasicType type, value_fn_t value_fn);
  bool match(BasicType t, value_fn_t cfn) {
    return type == t && value_fn == cfn;
  }
  static void update_all();
};

DelayedConstant DelayedConstant::delayed_constants[DC_LIMIT];
// Default C structure initialization rules have the following effect here:
// = { { (BasicType)0, (intptr_t)NULL }, ... };

DelayedConstant* DelayedConstant::add(BasicType type,
                                      DelayedConstant::value_fn_t cfn) {
  for (int i = 0; i < DC_LIMIT; i++) {
    DelayedConstant* dcon = &delayed_constants[i];
    if (dcon->match(type, cfn))
      return dcon;
    if (dcon->value_fn == NULL) {
      // (cmpxchg not because this is multi-threaded but because I'm paranoid)
      if (Atomic::cmpxchg_ptr(CAST_FROM_FN_PTR(void*, cfn), &dcon->value_fn, NULL) == NULL) {
        dcon->type = type;
        return dcon;
      }
    }
  }
  // If this assert is hit (in pre-integration testing!) then re-evaluate
  // the comment on the definition of DC_LIMIT.
  guarantee(false, "too many delayed constants");
  return NULL;
}

void DelayedConstant::update_all() {
  for (int i = 0; i < DC_LIMIT; i++) {
    DelayedConstant* dcon = &delayed_constants[i];
    if (dcon->value_fn != NULL && dcon->value == 0) {
      typedef int     (*int_fn_t)();
      typedef address (*address_fn_t)();
      switch (dcon->type) {
      case T_INT:     dcon->value = (intptr_t) ((int_fn_t)    dcon->value_fn)(); break;
      case T_ADDRESS: dcon->value = (intptr_t) ((address_fn_t)dcon->value_fn)(); break;
      }
    }
  }
}

intptr_t* AbstractAssembler::delayed_value_addr(int(*value_fn)()) {
  DelayedConstant* dcon = DelayedConstant::add(T_INT, (DelayedConstant::value_fn_t) value_fn);
  return &dcon->value;
}
intptr_t* AbstractAssembler::delayed_value_addr(address(*value_fn)()) {
  DelayedConstant* dcon = DelayedConstant::add(T_ADDRESS, (DelayedConstant::value_fn_t) value_fn);
  return &dcon->value;
}
void AbstractAssembler::update_delayed_values() {
  DelayedConstant::update_all();
}
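
// A hypothetical usage sketch of the delayed-value machinery above: code can
// embed the address of a constant slot before the value it holds is
// computable, and have the slot filled in later.
//
//   intptr_t* slot = delayed_value_addr(some_offset_fn);  // some_offset_fn: int (*)()
//   ... emit code that loads indirectly through 'slot' ...
//   // later, once some_offset_fn can produce its real value:
//   update_delayed_values();                              // fills in every registered slot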




void AbstractAssembler::block_comment(const char* comment) {
  if (sect() == CodeBuffer::SECT_INSTS) {
    code_section()->outer()->block_comment(offset(), comment);
  }
}

bool MacroAssembler::needs_explicit_null_check(intptr_t offset) {
  // Exception handler checks the nmethod's implicit null checks table
  // only when this method returns false.
#ifdef _LP64
  if (UseCompressedOops && Universe::narrow_oop_base() != NULL) {
    assert(Universe::heap() != NULL, "java heap should be initialized");
    // The first page after heap_base is unmapped and
    // the 'offset' is equal to [heap_base + offset] for
    // narrow oop implicit null checks.
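    //
    // A worked example with hypothetical numbers: if narrow_oop_base is
    // 0x0000000800000000 and a field load off a null narrow oop faults at
    // base + 24, the normalization below reduces 'offset' to 24, which is
    // within the first (unmapped) page, so the implicit null check suffices
    // and this method returns false.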
    uintptr_t base = (uintptr_t)Universe::narrow_oop_base();
    if ((uintptr_t)offset >= base) {
      // Normalize offset for the next check.
      offset = (intptr_t)(pointer_delta((void*)offset, (void*)base, 1));
    }
  }
#endif
  return offset < 0 || os::vm_page_size() <= offset;
}

#ifndef PRODUCT
void Label::print_instructions(MacroAssembler* masm) const {
  CodeBuffer* cb = masm->code();
  for (int i = 0; i < _patch_index; ++i) {
    int branch_loc;
    if (i >= PatchCacheSize) {
      branch_loc = _patch_overflow->at(i - PatchCacheSize);
    } else {
      branch_loc = _patches[i];
    }
    int branch_pos  = CodeBuffer::locator_pos(branch_loc);
    int branch_sect = CodeBuffer::locator_sect(branch_loc);
    address branch = cb->locator_address(branch_loc);
    tty->print_cr("unbound label");
    tty->print("@ %d|%d ", branch_pos, branch_sect);
    if (branch_sect == CodeBuffer::SECT_CONSTS) {
      tty->print_cr(PTR_FORMAT, *(address*)branch);
      continue;
    }
    masm->pd_print_patched_instruction(branch);
    tty->cr();
  }
}
#endif // ndef PRODUCT