1 /*
   2  * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2015-2018, Azul Systems, Inc. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "c1/c1_Compilation.hpp"
  28 #include "c1/c1_Compiler.hpp"
  29 #include "c1/c1_FrameMap.hpp"
  30 #include "c1/c1_GraphBuilder.hpp"
  31 #include "c1/c1_LinearScan.hpp"
  32 #include "c1/c1_MacroAssembler.hpp"
  33 #include "c1/c1_Runtime1.hpp"
  34 #include "c1/c1_ValueType.hpp"
  35 #include "compiler/compileBroker.hpp"
  36 #include "interpreter/linkResolver.hpp"
  37 #include "jfr/support/jfrIntrinsics.hpp"
  38 #include "memory/allocation.hpp"
  39 #include "memory/allocation.inline.hpp"
  40 #include "memory/resourceArea.hpp"
  41 #include "prims/nativeLookup.hpp"
  42 #include "runtime/arguments.hpp"
  43 #include "runtime/interfaceSupport.inline.hpp"
  44 #include "runtime/sharedRuntime.hpp"
  45 #include "utilities/bitMap.inline.hpp"
  46 #include "utilities/macros.hpp"
  47 
  48 
  49 Compiler::Compiler() : AbstractCompiler(compiler_c1) {
  50 }
  51 
  52 void Compiler::init_c1_runtime() {
  53   BufferBlob* buffer_blob = CompilerThread::current()->get_buffer_blob();
  54   Arena* arena = new (mtCompiler) Arena(mtCompiler);
  55   Runtime1::initialize(buffer_blob);
  56   FrameMap::initialize();
  57   // initialize data structures
  58   ValueType::initialize(arena);
  59   GraphBuilder::initialize();
  60   // note: to use more than one instance of LinearScan at a time this function call has to
  61   //       be moved somewhere outside of this constructor:
  62   Interval::initialize(arena);
  63 }
  64 
  65 
  66 void Compiler::initialize() {
  67   // Buffer blob must be allocated per C1 compiler thread at startup
  68   BufferBlob* buffer_blob = init_buffer_blob();
  69 
  70   if (should_perform_init()) {
  71     if (buffer_blob == NULL) {
  72       // When we come here we are in state 'initializing'; entire C1 compilation
  73       // can be shut down.
  74       set_state(failed);
  75     } else {
  76       init_c1_runtime();
  77       set_state(initialized);
  78     }
  79   }
  80 }
  81 
  82 int Compiler::code_buffer_size() {
  83   assert(SegmentedCodeCache, "Should be only used with a segmented code cache");
  84   return Compilation::desired_max_code_buffer_size() + Compilation::desired_max_constant_size();
  85 }
  86 
  87 BufferBlob* Compiler::init_buffer_blob() {
  88   // Allocate buffer blob once at startup since allocation for each
  89   // compilation seems to be too expensive (at least on Intel win32).
  90   assert (CompilerThread::current()->get_buffer_blob() == NULL, "Should initialize only once");
  91 
  92   // setup CodeBuffer.  Preallocate a BufferBlob of size
  93   // NMethodSizeLimit plus some extra space for constants.
  94   int code_buffer_size = Compilation::desired_max_code_buffer_size() +
  95     Compilation::desired_max_constant_size();
  96 
  97   BufferBlob* buffer_blob = BufferBlob::create("C1 temporary CodeBuffer", code_buffer_size);
  98   if (buffer_blob != NULL) {
  99     CompilerThread::current()->set_buffer_blob(buffer_blob);
 100   }
 101 
 102   return buffer_blob;
 103 }
 104 
// Whitelist of the VM intrinsics that C1 can expand on this platform /
// configuration. Intrinsic ids not listed below are rejected. Some
// entries are additionally gated on CPU capabilities (VM_Version) or on
// platform #ifdefs.
bool Compiler::is_intrinsic_supported(const methodHandle& method) {
  vmIntrinsics::ID id = method->intrinsic_id();
  assert(id != vmIntrinsics::_none, "must be a VM intrinsic");

  if (method->is_synchronized()) {
    // C1 does not support intrinsification of synchronized methods.
    return false;
  }

  switch (id) {
  // Atomic read-modify-write intrinsics: only supported when the CPU
  // provides the matching primitive, as reported by VM_Version.
  case vmIntrinsics::_compareAndSetLong:
    if (!VM_Version::supports_cx8()) return false;
    break;
  case vmIntrinsics::_getAndAddInt:
    if (!VM_Version::supports_atomic_getadd4()) return false;
    break;
  case vmIntrinsics::_getAndAddLong:
    if (!VM_Version::supports_atomic_getadd8()) return false;
    break;
  case vmIntrinsics::_getAndSetInt:
    if (!VM_Version::supports_atomic_getset4()) return false;
    break;
  case vmIntrinsics::_getAndSetLong:
    if (!VM_Version::supports_atomic_getset8()) return false;
    break;
  case vmIntrinsics::_getAndSetObject:
#ifdef _LP64
    // On 64-bit, the reference exchange is 4 or 8 bytes wide depending
    // on whether compressed oops are in use.
    if (!UseCompressedOops && !VM_Version::supports_atomic_getset8()) return false;
    if (UseCompressedOops && !VM_Version::supports_atomic_getset4()) return false;
#else
    if (!VM_Version::supports_atomic_getset4()) return false;
#endif
    break;
  case vmIntrinsics::_onSpinWait:
    if (!VM_Version::supports_on_spin_wait()) return false;
    break;
  // The remaining intrinsics are unconditionally supported (within the
  // platform #ifdefs below); they all fall through to the same break.
  case vmIntrinsics::_arraycopy:
  case vmIntrinsics::_currentTimeMillis:
  case vmIntrinsics::_nanoTime:
  case vmIntrinsics::_Reference_get:
    // Use the intrinsic version of Reference.get() so that the value in
    // the referent field can be registered by the G1 pre-barrier code.
    // Also to prevent commoning reads from this field across safepoint
    // since GC can change its value.
  case vmIntrinsics::_loadFence:
  case vmIntrinsics::_storeFence:
  case vmIntrinsics::_fullFence:
  case vmIntrinsics::_floatToRawIntBits:
  case vmIntrinsics::_intBitsToFloat:
  case vmIntrinsics::_doubleToRawLongBits:
  case vmIntrinsics::_longBitsToDouble:
  case vmIntrinsics::_getClass:
  case vmIntrinsics::_isInstance:
  case vmIntrinsics::_isPrimitive:
  case vmIntrinsics::_currentThread:
  // java.lang.Math intrinsics.
  case vmIntrinsics::_dabs:
  case vmIntrinsics::_dsqrt:
  case vmIntrinsics::_dsin:
  case vmIntrinsics::_dcos:
  case vmIntrinsics::_dtan:
  case vmIntrinsics::_dlog:
  case vmIntrinsics::_dlog10:
  case vmIntrinsics::_dexp:
  case vmIntrinsics::_dpow:
  case vmIntrinsics::_fmaD:
  case vmIntrinsics::_fmaF:
  // Unsafe plain accessors.
  case vmIntrinsics::_getObject:
  case vmIntrinsics::_getBoolean:
  case vmIntrinsics::_getByte:
  case vmIntrinsics::_getShort:
  case vmIntrinsics::_getChar:
  case vmIntrinsics::_getInt:
  case vmIntrinsics::_getLong:
  case vmIntrinsics::_getFloat:
  case vmIntrinsics::_getDouble:
  case vmIntrinsics::_putObject:
  case vmIntrinsics::_putBoolean:
  case vmIntrinsics::_putByte:
  case vmIntrinsics::_putShort:
  case vmIntrinsics::_putChar:
  case vmIntrinsics::_putInt:
  case vmIntrinsics::_putLong:
  case vmIntrinsics::_putFloat:
  case vmIntrinsics::_putDouble:
  // Unsafe volatile accessors.
  case vmIntrinsics::_getObjectVolatile:
  case vmIntrinsics::_getBooleanVolatile:
  case vmIntrinsics::_getByteVolatile:
  case vmIntrinsics::_getShortVolatile:
  case vmIntrinsics::_getCharVolatile:
  case vmIntrinsics::_getIntVolatile:
  case vmIntrinsics::_getLongVolatile:
  case vmIntrinsics::_getFloatVolatile:
  case vmIntrinsics::_getDoubleVolatile:
  case vmIntrinsics::_putObjectVolatile:
  case vmIntrinsics::_putBooleanVolatile:
  case vmIntrinsics::_putByteVolatile:
  case vmIntrinsics::_putShortVolatile:
  case vmIntrinsics::_putCharVolatile:
  case vmIntrinsics::_putIntVolatile:
  case vmIntrinsics::_putLongVolatile:
  case vmIntrinsics::_putFloatVolatile:
  case vmIntrinsics::_putDoubleVolatile:
  // Unsafe unaligned accessors.
  case vmIntrinsics::_getShortUnaligned:
  case vmIntrinsics::_getCharUnaligned:
  case vmIntrinsics::_getIntUnaligned:
  case vmIntrinsics::_getLongUnaligned:
  case vmIntrinsics::_putShortUnaligned:
  case vmIntrinsics::_putCharUnaligned:
  case vmIntrinsics::_putIntUnaligned:
  case vmIntrinsics::_putLongUnaligned:
  case vmIntrinsics::_checkIndex:
  // CRC32 intrinsics; the CRC32C variants only on the platforms below.
  case vmIntrinsics::_updateCRC32:
  case vmIntrinsics::_updateBytesCRC32:
  case vmIntrinsics::_updateByteBufferCRC32:
#if defined(SPARC) || defined(S390) || defined(PPC64) || defined(AARCH64) || defined(AARCH32)
  case vmIntrinsics::_updateBytesCRC32C:
  case vmIntrinsics::_updateDirectByteBufferCRC32C:
#endif
#ifdef AARCH32
  // Crypto and math intrinsics only wired up for the AARCH32 port.
  case vmIntrinsics::_aescrypt_encryptBlock:
  case vmIntrinsics::_aescrypt_decryptBlock:
  case vmIntrinsics::_sha_implCompress:
  case vmIntrinsics::_sha2_implCompress:
  case vmIntrinsics::_sha5_implCompress:
  case vmIntrinsics::_montgomeryMultiply:
  case vmIntrinsics::_montgomerySquare:
#endif
  case vmIntrinsics::_vectorizedMismatch:
  case vmIntrinsics::_compareAndSetInt:
  case vmIntrinsics::_compareAndSetObject:
  case vmIntrinsics::_getCharStringU:
  case vmIntrinsics::_putCharStringU:
#ifdef JFR_HAVE_INTRINSICS
  // JFR intrinsics, only when JFR support is compiled in.
  case vmIntrinsics::_counterTime:
  case vmIntrinsics::_getEventWriter:
#if defined(_LP64) || !defined(TRACE_ID_SHIFT)
  case vmIntrinsics::_getClassId:
#endif
#endif
    break;
  default:
    return false; // Intrinsics not on the previous list are not available.
  }

  return true;
}
 251 
 252 void Compiler::compile_method(ciEnv* env, ciMethod* method, int entry_bci, DirectiveSet* directive) {
 253   BufferBlob* buffer_blob = CompilerThread::current()->get_buffer_blob();
 254   assert(buffer_blob != NULL, "Must exist");
 255   // invoke compilation
 256   {
 257     // We are nested here because we need for the destructor
 258     // of Compilation to occur before we release the any
 259     // competing compiler thread
 260     ResourceMark rm;
 261     Compilation c(this, env, method, entry_bci, buffer_blob, directive);
 262   }
 263 }
 264 
 265 
// Print C1 phase timing statistics; the counters themselves are kept
// and formatted by Compilation.
void Compiler::print_timers() {
  Compilation::print_timers();
}