/*
 * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_CALLGENERATOR_HPP
#define SHARE_VM_OPTO_CALLGENERATOR_HPP

#include "compiler/compileBroker.hpp"
#include "opto/callnode.hpp"
#include "opto/compile.hpp"
#include "opto/type.hpp"
#include "runtime/deoptimization.hpp"

class Parse;

//---------------------------CallGenerator-------------------------------------
// The subclasses of this class handle generation of ideal nodes for
// call sites and method entry points.

class CallGenerator : public ResourceObj {
 public:
  enum {
    xxxunusedxxx
  };

 private:
  ciMethod* _method;  // The method being called.

 protected:
  CallGenerator(ciMethod* method) : _method(method) {}

 public:
  // Accessors
  ciMethod* method() const { return _method; }

  // is_inline: At least some code implementing the method is copied here.
  virtual bool is_inline() const { return false; }
  // is_intrinsic: There's a method-specific way of generating the inline code.
  virtual bool is_intrinsic() const { return false; }
  // is_parse: Bytecodes implementing the specific method are copied here.
  virtual bool is_parse() const { return false; }
  // is_virtual: The call uses the receiver type to select or check the method.
  virtual bool is_virtual() const { return false; }
  // is_deferred: The decision whether to inline or not is deferred.
  virtual bool is_deferred() const { return false; }
  // is_predicted: Uses an explicit check against a predicted type.
  virtual bool is_predicted() const { return false; }
  // is_trap: Does not return to the caller.  (E.g., uncommon trap.)
  virtual bool is_trap() const { return false; }
  // does_virtual_dispatch: Should try inlining as a normal method first.
  virtual bool does_virtual_dispatch() const { return false; }

  // is_late_inline: supports conversion of the call into an inline
  virtual bool is_late_inline() const { return false; }
  // same, but for method handle calls
  virtual bool is_mh_late_inline() const { return false; }

  // for method handle calls: have we tried inlining the call already?
  virtual bool already_attempted() const { ShouldNotReachHere(); return false; }

  // Replace the call with an inline version of the code
  virtual void do_late_inline() { ShouldNotReachHere(); }

  virtual CallStaticJavaNode* call_node() const { ShouldNotReachHere(); return NULL; }

  // Note: It is possible for a CG to be both inline and virtual.
  // (The hashCode intrinsic does a vtable check and an inlined fast path.)

  // Utilities:
  const TypeFunc* tf() const;

  // The given jvms has state and arguments for a call to my method.
  // Edges after jvms->argoff() carry all (pre-popped) argument values.
  //
  // Update the map with state and return values (if any) and return it.
  // The return values (0, 1, or 2) must be pushed on the map's stack,
  // and the sp of the jvms incremented accordingly.
  //
  // The jvms is returned on success.  Alternatively, a copy of the
  // given jvms, suitably updated, may be returned, in which case the
  // caller should discard the original jvms.
  //
  // The non-Parm edges of the returned map will contain updated global state,
  // and one or two edges before jvms->sp() will carry any return values.
  // Other map edges may contain locals or monitors, and should not
  // be changed in meaning.
  //
  // If the call traps, the returned map must have a control edge of top.
  // If the call can throw, the returned map must report has_exceptions().
  //
  // If the result is NULL, it means that this CallGenerator was unable
  // to handle the given call, and another CallGenerator should be consulted.
  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) = 0;

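  // Illustrative (non-normative) sketch of the caller-side protocol described
  // above.  The names 'cg', 'jvms' and 'parent_parser' are placeholders, and
  // the fallback generator shown on failure is only an example:
  //
  //   JVMState* new_jvms = cg->generate(jvms, parent_parser);
  //   if (new_jvms == NULL) {
  //     // This generator could not handle the site; consult another one,
  //     // e.g. a plain out-of-line call from for_direct_call(callee).
  //   } else {
  //     // new_jvms (possibly a copy of jvms) now carries the updated global
  //     // state, with any return value(s) pushed on its stack; the caller
  //     // adopts it and discards the original jvms if a copy was returned.
  //   }
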
  // How to generate a call site that is inlined:
  static CallGenerator* for_inline(ciMethod* m, float expected_uses = -1);
  // How to generate code for an on-stack replacement handler.
  static CallGenerator* for_osr(ciMethod* m, int osr_bci);

  // How to generate vanilla out-of-line call sites:
  static CallGenerator* for_direct_call(ciMethod* m, bool separate_io_projs = false);  // static, special
  static CallGenerator* for_virtual_call(ciMethod* m, int vtable_index);               // virtual, interface
  static CallGenerator* for_dynamic_call(ciMethod* m);                                 // invokedynamic

  static CallGenerator* for_method_handle_call(  JVMState* jvms, ciMethod* caller, ciMethod* callee, bool delayed_forbidden);
  static CallGenerator* for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool& input_not_const);

  // How to replace a direct call with an inline version of the code:
  static CallGenerator* for_late_inline(ciMethod* m, CallGenerator* inline_cg);
  static CallGenerator* for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const);
  static CallGenerator* for_string_late_inline(ciMethod* m, CallGenerator* inline_cg);
  static CallGenerator* for_boxing_late_inline(ciMethod* m, CallGenerator* inline_cg);

  // How to make a call but defer the decision whether to inline or not.
  static CallGenerator* for_warm_call(WarmCallInfo* ci,
                                      CallGenerator* if_cold,
                                      CallGenerator* if_hot);

  // How to make a call that optimistically assumes a receiver type:
  static CallGenerator* for_predicted_call(ciKlass* predicted_receiver,
                                           CallGenerator* if_missed,
                                           CallGenerator* if_hit,
                                           float hit_prob);

  // How to make a call that optimistically assumes a MethodHandle target:
  static CallGenerator* for_predicted_dynamic_call(ciMethodHandle* predicted_method_handle,
                                                   CallGenerator* if_missed,
                                                   CallGenerator* if_hit,
                                                   float hit_prob);

  // How to make a call that gives up and goes back to the interpreter:
  static CallGenerator* for_uncommon_trap(ciMethod* m,
                                          Deoptimization::DeoptReason reason,
                                          Deoptimization::DeoptAction action);

  // Registry for intrinsics:
  static CallGenerator* for_intrinsic(ciMethod* m);
  static void register_intrinsic(ciMethod* m, CallGenerator* cg);
  static CallGenerator* for_predicted_intrinsic(CallGenerator* intrinsic,
                                                CallGenerator* cg);

  virtual Node* generate_predicate(JVMState* jvms) { return NULL; }

  virtual void print_inlining_late(const char* msg) { ShouldNotReachHere(); }

  static void print_inlining(Compile* C, ciMethod* callee, int inline_level, int bci, const char* msg) {
    if (C->print_inlining()) {
      C->print_inlining(callee, inline_level, bci, msg);
    }
  }
};


//------------------------InlineCallGenerator----------------------------------
class InlineCallGenerator : public CallGenerator {
 protected:
  InlineCallGenerator(ciMethod* method) : CallGenerator(method) {}

 public:
  virtual bool is_inline() const { return true; }
};


//---------------------------WarmCallInfo--------------------------------------
// A struct to collect information about a given call site.
// Helps sort call sites into "hot", "medium", and "cold".
// Participates in the queueing of "medium" call sites for possible inlining.
class WarmCallInfo : public ResourceObj {
 private:

  CallNode*      _call;    // The CallNode which may be inlined.
  CallGenerator* _hot_cg;  // CG for expanding the call node

  // These are the metrics we use to evaluate call sites:

  float _count;   // How often do we expect to reach this site?
  float _profit;  // How much time do we expect to save by inlining?
  float _work;    // How long do we expect the average call to take?
  float _size;    // How big do we expect the inlined code to be?

  float _heat;          // Combined score inducing total order on call sites.
  WarmCallInfo* _next;  // Next cooler call info in pending queue.

  // Count is the number of times this call site is expected to be executed.
  // Large count is favorable for inlining, because the extra compilation
  // work will be amortized more completely.

  // Profit is a rough measure of the amount of time we expect to save
  // per execution of this site if we inline it.  (1.0 == call overhead)
  // Large profit favors inlining.  Negative profit disables inlining.

  // Work is a rough measure of the amount of time a typical out-of-line
  // call from this site is expected to take.  (1.0 == call, no-op, return)
  // Small work is somewhat favorable for inlining, since methods with
  // short "hot" traces are more likely to inline smoothly.

  // Size is the number of graph nodes we expect this method to produce,
  // not counting the inlining of any further warm calls it may include.
  // Small size favors inlining, since small methods are more likely to
  // inline smoothly.  The size is estimated by examining the native code
  // if available.  The method bytecodes are also examined, assuming
  // empirically observed node counts for each kind of bytecode.

  // Heat is the combined "goodness" of a site's inlining.  If we were
  // omniscient, it would be the difference of two sums of future execution
  // times of code emitted for this site (amortized across multiple sites if
  // sharing applies).  The two sums are for versions of this call site with
  // and without inlining.

  // We approximate this mythical quantity by playing with averages,
  // rough estimates, and assumptions that history repeats itself.
  // The basic formula count * profit is heuristically adjusted
  // by looking at the expected compilation and execution times of
  // the inlined call.

  // Note: Some of these metrics may not be present in the final product,
  // but exist in development builds to experiment with inline policy tuning.

  // This heuristic framework does not model well the very significant
  // effects of multiple-level inlining.  It is possible to see no immediate
  // profit from inlining X->Y, but to get great profit from a subsequent
  // inlining X->Y->Z.

  // This framework does not take well into account the problem of N**2 code
  // size in a clique of mutually inlinable methods.

  WarmCallInfo* next() const { return _next; }
  void set_next(WarmCallInfo* n) { _next = n; }

  static WarmCallInfo _always_hot;
  static WarmCallInfo _always_cold;

  // Constructor initialization of always_hot and always_cold
  WarmCallInfo(float c, float p, float w, float s) {
    _call   = NULL;
    _hot_cg = NULL;
    _next   = NULL;
    _count  = c;
    _profit = p;
    _work   = w;
    _size   = s;
    _heat   = 0;
  }

 public:
  // Because WarmCallInfo objects live over the entire lifetime of the
  // Compile object, they are allocated into the comp_arena, which
  // does not get resource marked or reset during the compile process
  void* operator new(size_t x, Compile* C) throw() { return C->comp_arena()->Amalloc(x); }
  void  operator delete(void*) { }  // fast deallocation

  static WarmCallInfo* always_hot();
  static WarmCallInfo* always_cold();

  WarmCallInfo() {
    _call   = NULL;
    _hot_cg = NULL;
    _next   = NULL;
    _count = _profit = _work = _size = _heat = 0;
  }

  CallNode* call()   const { return _call; }
  float     count()  const { return _count; }
  float     size()   const { return _size; }
  float     work()   const { return _work; }
  float     profit() const { return _profit; }
  float     heat()   const { return _heat; }

  void set_count(float x)  { _count = x; }
  void set_size(float x)   { _size = x; }
  void set_work(float x)   { _work = x; }
  void set_profit(float x) { _profit = x; }
  void set_heat(float x)   { _heat = x; }

  // Load initial heuristics from profiles, etc.
  // The heuristics can be tweaked further by the caller.
  void init(JVMState* call_site, ciMethod* call_method, ciCallProfile& profile, float prof_factor);

  static float MAX_VALUE() { return +1.0e10; }
  static float MIN_VALUE() { return -1.0e10; }

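  // Illustrative example of the heuristic described above (the numbers are
  // hypothetical, not tuned defaults): a site with count = 1000 and
  // profit = 0.5 call-overheads saved per execution has a base heat of
  // 1000 * 0.5 = 500.  compute_heat() then adjusts that figure using the
  // work and size estimates, so a bulky callee (large size) or one whose
  // typical out-of-line call is long-running (large work) ends up cooler
  // than the raw product suggests.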
  float compute_heat() const;

  void set_call(CallNode* call)      { _call = call; }
  void set_hot_cg(CallGenerator* cg) { _hot_cg = cg; }

  // Do not queue very hot or very cold calls.
  // Make very cold ones out of line immediately.
  // Inline very hot ones immediately.
  // These queries apply various tunable limits
  // to the above metrics in a systematic way.
  // Test for coldness before testing for hotness.
  bool is_cold() const;
  bool is_hot() const;

  // Force a warm call to be hot.  This worklists the call node for inlining.
  void make_hot();

  // Force a warm call to be cold.  This worklists the call node for out-of-lining.
  void make_cold();

  // A reproducible total ordering, in which heat is the major key.
  bool warmer_than(WarmCallInfo* that);

  // List management.  These methods are called with the list head,
  // and return the new list head, inserting or removing the receiver.
  WarmCallInfo* insert_into(WarmCallInfo* head);
  WarmCallInfo* remove_from(WarmCallInfo* head);

#ifndef PRODUCT
  void print() const;
  void print_all() const;
  int  count_all() const;
#endif
};

#endif // SHARE_VM_OPTO_CALLGENERATOR_HPP