src/cpu/sparc/vm/icBuffer_sparc.cpp (old version shown first, updated version second)

   1 /*
   2  * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.inline.hpp"
  27 #include "code/icBuffer.hpp"
  28 #include "gc/shared/collectedHeap.inline.hpp"
  29 #include "interpreter/bytecodes.hpp"
  30 #include "memory/resourceArea.hpp"
  31 #include "nativeInst_sparc.hpp"
  32 #include "oops/oop.inline.hpp"
  33 
  34 int InlineCacheBuffer::ic_stub_code_size() {
  35 #ifdef _LP64
  36   return (NativeMovConstReg::instruction_size +  // sethi;add
  37           NativeJump::instruction_size +          // sethi; jmp; delay slot
  38           (1*BytesPerInstWord) + 1);            // flush + 1 extra byte
  39 #else
  40   return (2+2+ 1) * wordSize + 1; // set/jump_to/nop + 1 byte so that code_end can be set in CodeBuffer
  41 #endif
  42 }
  43 
  44 void InlineCacheBuffer::assemble_ic_buffer_code(address code_begin, void* cached_value, address entry_point) {
  45   ResourceMark rm;
  46   CodeBuffer     code(code_begin, ic_stub_code_size());
  47   MacroAssembler* masm            = new MacroAssembler(&code);
  48   // note: even though the code contains an embedded metadata, we do not need reloc info
  49   // because
  50   // (1) the metadata is old (i.e., doesn't matter for scavenges)
  51   // (2) these ICStubs are removed *before* a GC happens, so the roots disappear
  52   AddressLiteral cached_value_addrlit((address)cached_value, relocInfo::none);
  53   // Force the set to generate the fixed sequence so next_instruction_address works
  54   masm->patchable_set(cached_value_addrlit, G5_inline_cache_reg);
  55   assert(G3_scratch != G5_method, "Do not clobber the method oop in the transition stub");
  56   assert(G3_scratch != G5_inline_cache_reg, "Do not clobber the inline cache register in the transition stub");
  57   AddressLiteral entry(entry_point);
  58   masm->JUMP(entry, G3_scratch, 0);
  59   masm->delayed()->nop();
  60   masm->flush();
  61 }
   1 /*
   2  * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.inline.hpp"
  27 #include "code/icBuffer.hpp"
  28 #include "gc/shared/collectedHeap.inline.hpp"
  29 #include "interpreter/bytecodes.hpp"
  30 #include "memory/resourceArea.hpp"
  31 #include "nativeInst_sparc.hpp"
  32 #include "oops/oop.inline.hpp"
  33 
  34 int InlineCacheBuffer::ic_stub_code_size() {

  35   return (NativeMovConstReg::instruction_size +  // sethi;add
  36           NativeJump::instruction_size +          // sethi; jmp; delay slot
  37           (1*BytesPerInstWord) + 1);            // flush + 1 extra byte



  38 }
  39 
  40 void InlineCacheBuffer::assemble_ic_buffer_code(address code_begin, void* cached_value, address entry_point) {
  41   ResourceMark rm;
  42   CodeBuffer     code(code_begin, ic_stub_code_size());
  43   MacroAssembler* masm            = new MacroAssembler(&code);
  44   // note: even though the code contains an embedded metadata, we do not need reloc info
  45   // because
  46   // (1) the metadata is old (i.e., doesn't matter for scavenges)
  47   // (2) these ICStubs are removed *before* a GC happens, so the roots disappear
  48   AddressLiteral cached_value_addrlit((address)cached_value, relocInfo::none);
  49   // Force the set to generate the fixed sequence so next_instruction_address works
  50   masm->patchable_set(cached_value_addrlit, G5_inline_cache_reg);
  51   assert(G3_scratch != G5_method, "Do not clobber the method oop in the transition stub");
  52   assert(G3_scratch != G5_inline_cache_reg, "Do not clobber the inline cache register in the transition stub");
  53   AddressLiteral entry(entry_point);
  54   masm->JUMP(entry, G3_scratch, 0);
  55   masm->delayed()->nop();
  56   masm->flush();
  57 }
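
For context, here is a minimal sketch of how a stub produced by assemble_ic_buffer_code() is meant to be read back. The functions below are illustrative stand-ins and are not part of this webrev excerpt; they assume the NativeMovConstReg/NativeJump accessors declared in nativeInst_sparc.hpp, and they illustrate why the code above forces patchable_set() to emit its fixed-length sequence: next_instruction_address() can only skip over the constant load if its length is known in advance.

// Hedged sketch, not part of this webrev: parsing the fixed-length stub layout
// (patchable set of G5_inline_cache_reg, then a jump with a delay-slot nop).
// The function names are hypothetical; the accessors are those declared in
// nativeInst_sparc.hpp.
#include "nativeInst_sparc.hpp"

static address sketch_ic_buffer_entry_point(address code_begin) {
  // The constant load was emitted with patchable_set(), so it always occupies
  // NativeMovConstReg::instruction_size bytes and can be skipped reliably.
  NativeMovConstReg* move = nativeMovConstReg_at(code_begin);
  NativeJump*        jump = nativeJump_at(move->next_instruction_address());
  return jump->jump_destination();   // the entry_point passed to assemble_ic_buffer_code()
}

static void* sketch_ic_buffer_cached_value(address code_begin) {
  // The cached metadata is the immediate materialized into G5_inline_cache_reg.
  NativeMovConstReg* move = nativeMovConstReg_at(code_begin);
  return (void*)move->data();
}

Because the constant load always expands to the same number of instructions, the size returned by ic_stub_code_size() covers every stub, and the jump target and cached value can be recovered without any relocation metadata.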