1 /* 2 * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_Compiler.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_GraphBuilder.hpp"
#include "c1/c1_LinearScan.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueType.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compilerOracle.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "prims/nativeLookup.hpp"
#include "runtime/arguments.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/sharedRuntime.hpp"


// Register this compiler instance with the shared AbstractCompiler
// framework under the 'c1' compiler id.
Compiler::Compiler() : AbstractCompiler(c1) {
}

// One-time initialization of the C1 runtime subsystems (stubs, frame map,
// value types, graph builder, linear-scan intervals). Called exactly once
// from initialize() by the thread that wins should_perform_init().
void Compiler::init_c1_runtime() {
  BufferBlob* buffer_blob = CompilerThread::current()->get_buffer_blob();
  // Arena is allocated here and handed to ValueType/Interval below; it is
  // never freed in this function — it backs global C1 data for the VM's
  // lifetime. NOTE(review): presumably intentional (process-lifetime data).
  Arena* arena = new (mtCompiler) Arena(mtCompiler);
  Runtime1::initialize(buffer_blob);
  FrameMap::initialize();
  // initialize data structures
  ValueType::initialize(arena);
  GraphBuilder::initialize();
  // note: to use more than one instance of LinearScan at a time this function call has to
  // be moved somewhere outside of this constructor:
  Interval::initialize(arena);
}


// Per-thread C1 startup: every compiler thread allocates its own buffer
// blob; the first thread through should_perform_init() additionally
// initializes the shared C1 runtime and flips the compiler state.
void Compiler::initialize() {
  // Buffer blob must be allocated per C1 compiler thread at startup
  BufferBlob* buffer_blob = init_buffer_blob();

  if (should_perform_init()) {
    if (buffer_blob == NULL) {
      // When we come here we are in state 'initializing'; entire C1 compilation
      // can be shut down.
      set_state(failed);
    } else {
      init_c1_runtime();
      set_state(initialized);
    }
  }
}

// Size (in bytes) of the per-thread code buffer: max code size plus max
// constant-section size. Only meaningful with a segmented code cache.
int Compiler::code_buffer_size() {
  assert(SegmentedCodeCache, "Should be only used with a segmented code cache");
  return Compilation::desired_max_code_buffer_size() + Compilation::desired_max_constant_size();
}

// Allocate this compiler thread's scratch BufferBlob and cache it on the
// thread. Returns NULL if the code cache cannot satisfy the allocation;
// the caller (initialize()) treats that as a fatal C1 startup failure.
BufferBlob* Compiler::init_buffer_blob() {
  // Allocate buffer blob once at startup since allocation for each
  // compilation seems to be too expensive (at least on Intel win32).
  assert (CompilerThread::current()->get_buffer_blob() == NULL, "Should initialize only once");

  // setup CodeBuffer.  Preallocate a BufferBlob of size
  // NMethodSizeLimit plus some extra space for constants.
  int code_buffer_size = Compilation::desired_max_code_buffer_size() +
                         Compilation::desired_max_constant_size();

  BufferBlob* buffer_blob = BufferBlob::create("C1 temporary CodeBuffer", code_buffer_size);
  if (buffer_blob != NULL) {
    CompilerThread::current()->set_buffer_blob(buffer_blob);
  }

  return buffer_blob;
}

// Returns true if C1 has an intrinsic implementation for the given
// method's vmIntrinsics ID on the current platform. Platform capability
// checks (cx8, atomic get-add/get-set) gate the CAS/atomic intrinsics;
// everything not on the explicit list below falls through to 'false'.
bool Compiler::is_intrinsic_supported(methodHandle method) {
  vmIntrinsics::ID id = method->intrinsic_id();
  assert(id != vmIntrinsics::_none, "must be a VM intrinsic");

  if (method->is_synchronized()) {
    // C1 does not support intrinsification of synchronized methods.
    return false;
  }

  switch (id) {
  // Atomic intrinsics are only supported where the CPU provides the
  // matching primitive (8-byte CAS, 4/8-byte atomic add/exchange).
  case vmIntrinsics::_compareAndSwapLong:
    if (!VM_Version::supports_cx8()) return false;
    break;
  case vmIntrinsics::_getAndAddInt:
    if (!VM_Version::supports_atomic_getadd4()) return false;
    break;
  case vmIntrinsics::_getAndAddLong:
    if (!VM_Version::supports_atomic_getadd8()) return false;
    break;
  case vmIntrinsics::_getAndSetInt:
    if (!VM_Version::supports_atomic_getset4()) return false;
    break;
  case vmIntrinsics::_getAndSetLong:
    if (!VM_Version::supports_atomic_getset8()) return false;
    break;
  case vmIntrinsics::_getAndSetObject:
#ifdef _LP64
    // On 64-bit, an oop exchange is 4 bytes with compressed oops and
    // 8 bytes otherwise — require the matching atomic-exchange width.
    if (!UseCompressedOops && !VM_Version::supports_atomic_getset8()) return false;
    if (UseCompressedOops && !VM_Version::supports_atomic_getset4()) return false;
#else
    if (!VM_Version::supports_atomic_getset4()) return false;
#endif
    break;
  // The remaining intrinsics are unconditionally supported by C1.
  case vmIntrinsics::_arraycopy:
  case vmIntrinsics::_currentTimeMillis:
  case vmIntrinsics::_nanoTime:
  case vmIntrinsics::_Reference_get:
    // Use the intrinsic version of Reference.get() so that the value in
    // the referent field can be registered by the G1 pre-barrier code.
    // Also to prevent commoning reads from this field across safepoint
    // since GC can change its value.
  case vmIntrinsics::_loadFence:
  case vmIntrinsics::_storeFence:
  case vmIntrinsics::_fullFence:
  case vmIntrinsics::_floatToRawIntBits:
  case vmIntrinsics::_intBitsToFloat:
  case vmIntrinsics::_doubleToRawLongBits:
  case vmIntrinsics::_longBitsToDouble:
  case vmIntrinsics::_getClass:
  case vmIntrinsics::_isInstance:
  case vmIntrinsics::_currentThread:
  case vmIntrinsics::_dabs:
  case vmIntrinsics::_dsqrt:
  case vmIntrinsics::_dsin:
  case vmIntrinsics::_dcos:
  case vmIntrinsics::_dtan:
  case vmIntrinsics::_dlog:
  case vmIntrinsics::_dlog10:
  case vmIntrinsics::_dexp:
  case vmIntrinsics::_dpow:
  // sun.misc.Unsafe field accessors (plain, volatile, raw-address,
  // ordered, and unaligned variants).
  case vmIntrinsics::_getObject:
  case vmIntrinsics::_getBoolean:
  case vmIntrinsics::_getByte:
  case vmIntrinsics::_getShort:
  case vmIntrinsics::_getChar:
  case vmIntrinsics::_getInt:
  case vmIntrinsics::_getLong:
  case vmIntrinsics::_getFloat:
  case vmIntrinsics::_getDouble:
  case vmIntrinsics::_putObject:
  case vmIntrinsics::_putBoolean:
  case vmIntrinsics::_putByte:
  case vmIntrinsics::_putShort:
  case vmIntrinsics::_putChar:
  case vmIntrinsics::_putInt:
  case vmIntrinsics::_putLong:
  case vmIntrinsics::_putFloat:
  case vmIntrinsics::_putDouble:
  case vmIntrinsics::_getObjectVolatile:
  case vmIntrinsics::_getBooleanVolatile:
  case vmIntrinsics::_getByteVolatile:
  case vmIntrinsics::_getShortVolatile:
  case vmIntrinsics::_getCharVolatile:
  case vmIntrinsics::_getIntVolatile:
  case vmIntrinsics::_getLongVolatile:
  case vmIntrinsics::_getFloatVolatile:
  case vmIntrinsics::_getDoubleVolatile:
  case vmIntrinsics::_putObjectVolatile:
  case vmIntrinsics::_putBooleanVolatile:
  case vmIntrinsics::_putByteVolatile:
  case vmIntrinsics::_putShortVolatile:
  case vmIntrinsics::_putCharVolatile:
  case vmIntrinsics::_putIntVolatile:
  case vmIntrinsics::_putLongVolatile:
  case vmIntrinsics::_putFloatVolatile:
  case vmIntrinsics::_putDoubleVolatile:
  case vmIntrinsics::_getByte_raw:
  case vmIntrinsics::_getShort_raw:
  case vmIntrinsics::_getChar_raw:
  case vmIntrinsics::_getInt_raw:
  case vmIntrinsics::_getLong_raw:
  case vmIntrinsics::_getFloat_raw:
  case vmIntrinsics::_getDouble_raw:
  case vmIntrinsics::_putByte_raw:
  case vmIntrinsics::_putShort_raw:
  case vmIntrinsics::_putChar_raw:
  case vmIntrinsics::_putInt_raw:
  case vmIntrinsics::_putLong_raw:
  case vmIntrinsics::_putFloat_raw:
  case vmIntrinsics::_putDouble_raw:
  case vmIntrinsics::_putOrderedObject:
  case vmIntrinsics::_putOrderedInt:
  case vmIntrinsics::_putOrderedLong:
  case vmIntrinsics::_getShortUnaligned:
  case vmIntrinsics::_getCharUnaligned:
  case vmIntrinsics::_getIntUnaligned:
  case vmIntrinsics::_getLongUnaligned:
  case vmIntrinsics::_putShortUnaligned:
  case vmIntrinsics::_putCharUnaligned:
  case vmIntrinsics::_putIntUnaligned:
  case vmIntrinsics::_putLongUnaligned:
  case vmIntrinsics::_checkIndex:
  case vmIntrinsics::_updateCRC32:
  case vmIntrinsics::_updateBytesCRC32:
  case vmIntrinsics::_updateByteBufferCRC32:
  case vmIntrinsics::_compareAndSwapInt:
  case vmIntrinsics::_compareAndSwapObject:
#ifdef TRACE_HAVE_INTRINSICS
  case vmIntrinsics::_classID:
  case vmIntrinsics::_threadID:
  case vmIntrinsics::_counterTime:
#endif
    break;
  default:
    return false; // Intrinsics not on the previous list are not available.
  }

  return true;
}

// C1-specific flag gating for intrinsics: -XX:-InlineNatives suppresses
// all intrinsics except Reference.get(), and -XX:-InlineClassNatives
// suppresses Object.getClass(). Returns true if the intrinsic is disabled.
// (Generic per-intrinsic flag checks are handled separately by
// vmIntrinsics::is_disabled_by_flags — see is_intrinsic_available below.)
bool Compiler::is_intrinsic_disabled_by_flag(methodHandle method) {
  vmIntrinsics::ID id = method->intrinsic_id();
  assert(id != vmIntrinsics::_none, "must be a VM intrinsic");

  if (!InlineNatives && id != vmIntrinsics::_Reference_get) {
    return true;
  }

  if (!InlineClassNatives && id == vmIntrinsics::_getClass) {
    return true;
  }

  return false;
}

// Compile 'method' (at 'entry_bci' for OSR) using this thread's
// preallocated buffer blob. Constructing the Compilation object runs the
// entire C1 pipeline; results are reported via 'env'.
void Compiler::compile_method(ciEnv* env, ciMethod* method, int entry_bci) {
  BufferBlob* buffer_blob = CompilerThread::current()->get_buffer_blob();
  assert(buffer_blob != NULL, "Must exist");
  // invoke compilation
  {
    // We are nested here because we need for the destructor
    // of Compilation to occur before we release the any
    // competing compiler thread
    ResourceMark rm;
    Compilation c(this, env, method, entry_bci, buffer_blob);
  }
}


// Print accumulated C1 phase timers (delegates to Compilation).
void Compiler::print_timers() {
  Compilation::print_timers();
}

// An intrinsic is available iff C1 supports it on this platform AND it is
// disabled neither by the generic per-intrinsic flags nor by the
// C1-specific Inline* flags.
bool Compiler::is_intrinsic_available(methodHandle method) {
  return is_intrinsic_supported(method) &&
         !vmIntrinsics::is_disabled_by_flags(method->intrinsic_id()) &&
         !is_intrinsic_disabled_by_flag(method);
}