/*
 * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_CALLGENERATOR_HPP
#define SHARE_VM_OPTO_CALLGENERATOR_HPP

#include "compiler/compileBroker.hpp"
#include "opto/callnode.hpp"
#include "opto/compile.hpp"
#include "opto/type.hpp"
#include "runtime/deoptimization.hpp"

//---------------------------CallGenerator-------------------------------------
// The subclasses of this class handle generation of ideal nodes for
// call sites and method entry points.

class CallGenerator : public ResourceObj {
 public:
  enum {
    xxxunusedxxx
  };

 private:
  ciMethod*             _method;                // The method being called.

 protected:
  CallGenerator(ciMethod* method) : _method(method) {}

 public:
  // Accessors
  ciMethod*         method() const              { return _method; }

  // is_inline: At least some code implementing the method is copied here.
  virtual bool      is_inline() const           { return false; }
  // is_intrinsic: There's a method-specific way of generating the inline code.
  virtual bool      is_intrinsic() const        { return false; }
  // is_parse: Bytecodes implementing the specific method are copied here.
  virtual bool      is_parse() const            { return false; }
  // is_virtual: The call uses the receiver type to select or check the method.
  virtual bool      is_virtual() const          { return false; }
  // is_deferred: The decision whether to inline or not is deferred.
  virtual bool      is_deferred() const         { return false; }
  // is_predicted: Uses an explicit check against a predicted type.
  virtual bool      is_predicted() const        { return false; }
  // is_trap: Does not return to the caller.  (E.g., uncommon trap.)
  virtual bool      is_trap() const             { return false; }

  // is_late_inline: supports conversion of the call into an inline version
  virtual bool      is_late_inline() const      { return false; }
  // Replace the call with an inline version of the code
  virtual void do_late_inline() { ShouldNotReachHere(); }

  virtual CallStaticJavaNode* call_node() const { ShouldNotReachHere(); return NULL; }

  // Note:  It is possible for a CG to be both inline and virtual.
  // (The hashCode intrinsic does a vtable check and an inlined fast path.)

  // Utilities:
  const TypeFunc*   tf() const;
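
  // Illustrative sketch (hypothetical, not a class declared in this file):
  // a minimal concrete generator overrides generate() below plus whichever
  // query methods describe its behavior, e.g.:
  //
  //   class MyParseGenerator : public CallGenerator {
  //    public:
  //     MyParseGenerator(ciMethod* m) : CallGenerator(m) {}
  //     virtual bool is_parse() const  { return true; }
  //     virtual bool is_inline() const { return true; }
  //     virtual JVMState* generate(JVMState* jvms);  // parse bytecodes in place
  //   };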

  // The given jvms has state and arguments for a call to my method.
  // Edges after jvms->argoff() carry all (pre-popped) argument values.
  //
  // Update the map with state and return values (if any) and return it.
  // The return values (0, 1, or 2) must be pushed on the map's stack,
  // and the sp of the jvms incremented accordingly.
  //
  // The jvms is returned on success.  Alternatively, a copy of the
  // given jvms, suitably updated, may be returned, in which case the
  // caller should discard the original jvms.
  //
  // The non-Parm edges of the returned map will contain updated global state,
  // and one or two edges before jvms->sp() will carry any return values.
  // Other map edges may contain locals or monitors, and should not
  // be changed in meaning.
  //
  // If the call traps, the returned map must have a control edge of top.
  // If the call can throw, the returned map must report has_exceptions().
  //
  // If the result is NULL, it means that this CallGenerator was unable
  // to handle the given call, and another CallGenerator should be consulted.
  virtual JVMState* generate(JVMState* jvms) = 0;

  // How to generate a call site that is inlined:
  static CallGenerator* for_inline(ciMethod* m, float expected_uses = -1);
  // How to generate code for an on-stack replacement handler.
  static CallGenerator* for_osr(ciMethod* m, int osr_bci);

  // How to generate vanilla out-of-line call sites:
  static CallGenerator* for_direct_call(ciMethod* m, bool separate_io_projs = false);  // static, special
  static CallGenerator* for_virtual_call(ciMethod* m, int vtable_index);               // virtual, interface
  static CallGenerator* for_dynamic_call(ciMethod* m);                                 // invokedynamic

  static CallGenerator* for_method_handle_call(  JVMState* jvms, ciMethod* caller, ciMethod* callee);
  static CallGenerator* for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee);

  // How to generate a direct call that can later be replaced with an inline version:
  static CallGenerator* for_late_inline(ciMethod* m, CallGenerator* inline_cg);
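
  // Illustrative sketch (not part of this header's API): a caller can chain
  // generators using the NULL-return contract of generate() above.  The
  // helper name try_with_fallback is hypothetical.
  //
  //   static JVMState* try_with_fallback(CallGenerator* cg,
  //                                      CallGenerator* fallback_cg,
  //                                      JVMState* jvms) {
  //     JVMState* new_jvms = cg->generate(jvms);
  //     if (new_jvms == NULL) {
  //       // cg could not handle this call site; consult the next generator.
  //       new_jvms = fallback_cg->generate(jvms);
  //     }
  //     return new_jvms;
  //   }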

  // How to make a call but defer the decision whether to inline or not.
  static CallGenerator* for_warm_call(WarmCallInfo* ci,
                                      CallGenerator* if_cold,
                                      CallGenerator* if_hot);

  // How to make a call that optimistically assumes a receiver type:
  static CallGenerator* for_predicted_call(ciKlass* predicted_receiver,
                                           CallGenerator* if_missed,
                                           CallGenerator* if_hit,
                                           float hit_prob);

  // How to make a call that optimistically assumes a MethodHandle target:
  static CallGenerator* for_predicted_dynamic_call(ciMethodHandle* predicted_method_handle,
                                                   CallGenerator* if_missed,
                                                   CallGenerator* if_hit,
                                                   float hit_prob);

  // How to make a call that gives up and goes back to the interpreter:
  static CallGenerator* for_uncommon_trap(ciMethod* m,
                                          Deoptimization::DeoptReason reason,
                                          Deoptimization::DeoptAction action);

  // Registry for intrinsics:
  static CallGenerator* for_intrinsic(ciMethod* m);
  static void           register_intrinsic(ciMethod* m, CallGenerator* cg);
  static CallGenerator* for_predicted_intrinsic(CallGenerator* intrinsic,
                                                CallGenerator* cg);
  virtual Node* generate_predicate(JVMState* jvms) { return NULL; }

  static void print_inlining(Compile* C, ciMethod* callee, int inline_level, int bci, const char* msg) {
    if (PrintInlining)
      C->print_inlining(callee, inline_level, bci, msg);
  }
};


//------------------------InlineCallGenerator----------------------------------
class InlineCallGenerator : public CallGenerator {
 protected:
  InlineCallGenerator(ciMethod* method) : CallGenerator(method) {}

 public:
  virtual bool      is_inline() const           { return true; }
};
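
// Illustrative sketch (not VM code): the factory methods above compose.
// For a profile-guided monomorphic site, one plausible combination is a
// predicted call whose hit path inlines and whose miss path stays virtual.
// The identifiers m, vtable_index, receiver_klass and hit_prob are
// hypothetical placeholders.
//
//   CallGenerator* if_hit    = CallGenerator::for_inline(m);
//   CallGenerator* if_missed = CallGenerator::for_virtual_call(m, vtable_index);
//   CallGenerator* cg        = CallGenerator::for_predicted_call(receiver_klass,
//                                                                if_missed,
//                                                                if_hit,
//                                                                hit_prob);
//   // cg->generate(jvms) emits a receiver-type check: the hit path runs the
//   // inlined body, the miss path falls back to ordinary virtual dispatch.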

//---------------------------WarmCallInfo--------------------------------------
// A struct to collect information about a given call site.
// Helps sort call sites into "hot", "medium", and "cold".
// Participates in the queueing of "medium" call sites for possible inlining.
class WarmCallInfo : public ResourceObj {
 private:

  CallNode*      _call;    // The CallNode which may be inlined.
  CallGenerator* _hot_cg;  // CG for expanding the call node

  // These are the metrics we use to evaluate call sites:

  float          _count;   // How often do we expect to reach this site?
  float          _profit;  // How much time do we expect to save by inlining?
  float          _work;    // How long do we expect the average call to take?
  float          _size;    // How big do we expect the inlined code to be?

  float          _heat;    // Combined score inducing total order on call sites.
  WarmCallInfo*  _next;    // Next cooler call info in pending queue.

  // Count is the number of times this call site is expected to be executed.
  // Large count is favorable for inlining, because the extra compilation
  // work will be amortized more completely.

  // Profit is a rough measure of the amount of time we expect to save
  // per execution of this site if we inline it.  (1.0 == call overhead)
  // Large profit favors inlining.  Negative profit disables inlining.

  // Work is a rough measure of the amount of time a typical out-of-line
  // call from this site is expected to take.  (1.0 == call, no-op, return)
  // Small work is somewhat favorable for inlining, since methods with
  // short "hot" traces are more likely to inline smoothly.

  // Size is the number of graph nodes we expect this method to produce,
  // not counting the inlining of any further warm calls it may include.
  // Small size favors inlining, since small methods are more likely to
  // inline smoothly.  The size is estimated by examining the native code
  // if available.  The method bytecodes are also examined, assuming
  // empirically observed node counts for each kind of bytecode.

  // Heat is the combined "goodness" of a site's inlining.  If we were
  // omniscient, it would be the difference of two sums of future execution
  // times of code emitted for this site (amortized across multiple sites if
  // sharing applies).  The two sums are for versions of this call site with
  // and without inlining.

  // We approximate this mythical quantity by playing with averages,
  // rough estimates, and assumptions that history repeats itself.
  // The basic formula count * profit is heuristically adjusted
  // by looking at the expected compilation and execution times of
  // the inlined call.

  // Note:  Some of these metrics may not be present in the final product,
  // but exist in development builds to experiment with inline policy tuning.

  // This heuristic framework does not model well the very significant
  // effects of multiple-level inlining.  It is possible to see no immediate
  // profit from inlining X->Y, but to get great profit from a subsequent
  // inlining X->Y->Z.

  // This framework does not take well into account the problem of N**2 code
  // size in a clique of mutually inlinable methods.

  WarmCallInfo*  next() const          { return _next; }
  void       set_next(WarmCallInfo* n) { _next = n; }

  static WarmCallInfo _always_hot;
  static WarmCallInfo _always_cold;

  // Constructor initialization of always_hot and always_cold
  WarmCallInfo(float c, float p, float w, float s) {
    _call = NULL;
    _hot_cg = NULL;
    _next = NULL;
    _count = c;
    _profit = p;
    _work = w;
    _size = s;
    _heat = 0;
  }

 public:
  // Because WarmCallInfo objects live over the entire lifetime of the
  // Compile object, they are allocated into the comp_arena, which
  // does not get resource marked or reset during the compile process.
  void* operator new(size_t x, Compile* C) { return C->comp_arena()->Amalloc(x); }
  void  operator delete(void*) { }  // fast deallocation

  static WarmCallInfo* always_hot();
  static WarmCallInfo* always_cold();

  WarmCallInfo() {
    _call = NULL;
    _hot_cg = NULL;
    _next = NULL;
    _count = _profit = _work = _size = _heat = 0;
  }

  CallNode*  call()   const { return _call; }
  float      count()  const { return _count; }
  float      size()   const { return _size; }
  float      work()   const { return _work; }
  float      profit() const { return _profit; }
  float      heat()   const { return _heat; }

  void set_count(float x)  { _count = x; }
  void set_size(float x)   { _size = x; }
  void set_work(float x)   { _work = x; }
  void set_profit(float x) { _profit = x; }
  void set_heat(float x)   { _heat = x; }

  // Load initial heuristics from profiles, etc.
  // The heuristics can be tweaked further by the caller.
  void init(JVMState* call_site, ciMethod* call_method, ciCallProfile& profile, float prof_factor);

  static float MAX_VALUE() { return +1.0e10; }
  static float MIN_VALUE() { return -1.0e10; }

  float compute_heat() const;

  void set_call(CallNode* call)      { _call = call; }
  void set_hot_cg(CallGenerator* cg) { _hot_cg = cg; }
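
  // Illustrative sketch (an assumption, not the actual implementation): per
  // the comments above, heat starts from count * profit and is then damped
  // by the expected compilation cost.  A made-up shape, with a hypothetical
  // size_scale constant, might look like:
  //
  //   float heat = count() * profit();
  //   heat = heat / (1.0f + size() / size_scale);  // penalize large bodies
  //
  // The authoritative formula is the one in the definition of compute_heat().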

  // Do not queue very hot or very cold calls.
  // Make very cold ones out of line immediately.
  // Inline very hot ones immediately.
  // These queries apply various tunable limits
  // to the above metrics in a systematic way.
  // Test for coldness before testing for hotness.
  bool is_cold() const;
  bool is_hot() const;

  // Force a warm call to be hot.  This worklists the call node for inlining.
  void make_hot();

  // Force a warm call to be cold.  This worklists the call node for out-of-lining.
  void make_cold();

  // A reproducible total ordering, in which heat is the major key.
  bool warmer_than(WarmCallInfo* that);

  // List management.  These methods are called with the list head,
  // and return the new list head, inserting or removing the receiver.
  WarmCallInfo* insert_into(WarmCallInfo* head);
  WarmCallInfo* remove_from(WarmCallInfo* head);

#ifndef PRODUCT
  void print() const;
  void print_all() const;
  int count_all() const;
#endif
};

#endif // SHARE_VM_OPTO_CALLGENERATOR_HPP