/*
 * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OPTO_CALLGENERATOR_HPP
#define SHARE_OPTO_CALLGENERATOR_HPP

#include "compiler/compileBroker.hpp"
#include "opto/callnode.hpp"
#include "opto/compile.hpp"
#include "opto/type.hpp"
#include "runtime/deoptimization.hpp"

//---------------------------CallGenerator-------------------------------------
// The subclasses of this class handle generation of ideal nodes for
// call sites and method entry points.

class CallGenerator : public ResourceObj {
 public:
  enum {
    xxxunusedxxx
  };

 private:
  ciMethod* _method;  // The method being called.

 protected:
  CallGenerator(ciMethod* method) : _method(method) {}

 public:
  // Accessors
  ciMethod* method() const { return _method; }

  // is_inline: At least some code implementing the method is copied here.
  virtual bool is_inline() const { return false; }
  // is_intrinsic: There's a method-specific way of generating the inline code.
  virtual bool is_intrinsic() const { return false; }
  // is_parse: Bytecodes implementing the specific method are copied here.
  virtual bool is_parse() const { return false; }
  // is_virtual: The call uses the receiver type to select or check the method.
  virtual bool is_virtual() const { return false; }
  // is_deferred: The decision whether to inline or not is deferred.
  virtual bool is_deferred() const { return false; }
  // is_predicated: Uses an explicit check (predicate).
  virtual bool is_predicated() const { return false; }
  virtual int predicates_count() const { return 0; }
  // is_trap: Does not return to the caller.  (E.g., uncommon trap.)
  virtual bool is_trap() const { return false; }
  // does_virtual_dispatch: Should try inlining as normal method first.
  virtual bool does_virtual_dispatch() const { return false; }

  // is_late_inline: supports conversion of call into an inline
  virtual bool is_late_inline() const { return false; }
  // same but for method handle calls
  virtual bool is_mh_late_inline() const { return false; }
  virtual bool is_string_late_inline() const { return false; }
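
  // For orientation: a concrete generator overrides one or more of the
  // queries above together with generate() (declared below).  A hypothetical
  // minimal subclass; real ones, e.g. ParseGenerator, live in
  // callGenerator.cpp:
  //
  //   class MyInlineCG : public CallGenerator {   // name is illustrative
  //   public:
  //     MyInlineCG(ciMethod* m) : CallGenerator(m) {}
  //     virtual bool is_inline() const { return true; }
  //     virtual JVMState* generate(JVMState* jvms) { /* build nodes */ return jvms; }
  //   };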

  // for method handle calls: have we tried inlining the call already?
  virtual bool already_attempted() const { ShouldNotReachHere(); return false; }

  // Replace the call with an inline version of the code
  virtual void do_late_inline() { ShouldNotReachHere(); }

  virtual CallStaticJavaNode* call_node() const { ShouldNotReachHere(); return NULL; }

  virtual void set_unique_id(jlong id) { fatal("unique id only for late inlines"); }
  virtual jlong unique_id() const { fatal("unique id only for late inlines"); return 0; }

  // Note: It is possible for a CG to be both inline and virtual.
  // (The hashCode intrinsic does a vtable check and an inlined fast path.)

  // Utilities:
  const TypeFunc* tf() const;

  // The given jvms has state and arguments for a call to my method.
  // Edges after jvms->argoff() carry all (pre-popped) argument values.
  //
  // Update the map with state and return values (if any) and return it.
  // The return values (0, 1, or 2) must be pushed on the map's stack,
  // and the sp of the jvms incremented accordingly.
  //
  // The jvms is returned on success.  Alternatively, a copy of the
  // given jvms, suitably updated, may be returned, in which case the
  // caller should discard the original jvms.
  //
  // The non-Parm edges of the returned map will contain updated global state,
  // and one or two edges before jvms->sp() will carry any return values.
  // Other map edges may contain locals or monitors, and should not
  // be changed in meaning.
  //
  // If the call traps, the returned map must have a control edge of top.
  // If the call can throw, the returned map must report has_exceptions().
  //
  // If the result is NULL, it means that this CallGenerator was unable
  // to handle the given call, and another CallGenerator should be consulted.
  virtual JVMState* generate(JVMState* jvms) = 0;
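
  // A sketch of how a caller consumes the contract above (simplified and
  // hypothetical; the real call-site selection logic lives in
  // Parse::do_call() in doCall.cpp):
  //
  //   CallGenerator* cg = ...;                  // chosen by inlining policy
  //   JVMState* new_jvms = cg->generate(jvms);
  //   if (new_jvms == NULL) {
  //     // cg declined this call site; consult another CallGenerator
  //   } else {
  //     // the map now holds updated state, plus any return value on stack
  //   }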

  // How to generate a call site that is inlined:
  static CallGenerator* for_inline(ciMethod* m, float expected_uses = -1);
  // How to generate code for an on-stack replacement handler.
  static CallGenerator* for_osr(ciMethod* m, int osr_bci);

  // How to generate vanilla out-of-line call sites:
  static CallGenerator* for_direct_call(ciMethod* m, bool separate_io_projs = false);  // static, special
  static CallGenerator* for_virtual_call(ciMethod* m, int vtable_index);               // virtual, interface

  static CallGenerator* for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool delayed_forbidden);
  static CallGenerator* for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool& input_not_const);

  // How to replace a direct call with an inline version:
  static CallGenerator* for_late_inline(ciMethod* m, CallGenerator* inline_cg);
  static CallGenerator* for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const);
  static CallGenerator* for_string_late_inline(ciMethod* m, CallGenerator* inline_cg);
  static CallGenerator* for_boxing_late_inline(ciMethod* m, CallGenerator* inline_cg);

  // How to make a call but defer the decision whether to inline or not.
  static CallGenerator* for_warm_call(WarmCallInfo* ci,
                                      CallGenerator* if_cold,
                                      CallGenerator* if_hot);

  // How to make a call that optimistically assumes a receiver type:
  static CallGenerator* for_predicted_call(ciKlass* predicted_receiver,
                                           CallGenerator* if_missed,
                                           CallGenerator* if_hit,
                                           float hit_prob);

  // How to make a call that optimistically assumes a MethodHandle target:
  static CallGenerator* for_predicted_dynamic_call(ciMethodHandle* predicted_method_handle,
                                                   CallGenerator* if_missed,
                                                   CallGenerator* if_hit,
                                                   float hit_prob);

  // How to make a call that gives up and goes back to the interpreter:
  static CallGenerator* for_uncommon_trap(ciMethod* m,
                                          Deoptimization::DeoptReason reason,
                                          Deoptimization::DeoptAction action);

  // Registry for intrinsics:
  static CallGenerator* for_intrinsic(ciMethod* m);
  static void register_intrinsic(ciMethod* m, CallGenerator* cg);
  static CallGenerator* for_predicated_intrinsic(CallGenerator* intrinsic,
                                                 CallGenerator* cg);
  virtual Node* generate_predicate(JVMState* jvms, int predicate) { return NULL; }

  virtual void print_inlining_late(const char* msg) { ShouldNotReachHere(); }

  static void print_inlining(Compile* C, ciMethod* callee, int inline_level, int bci, const char* msg) {
    if (C->print_inlining()) {
      C->print_inlining(callee, inline_level, bci, msg);
    }
  }

  static void print_inlining_failure(Compile* C, ciMethod* callee, int inline_level, int bci, const char* msg) {
    print_inlining(C, callee, inline_level, bci, msg);
    C->log_inline_failure(msg);
  }

  static bool is_inlined_method_handle_intrinsic(JVMState* jvms, ciMethod* m);
  static bool is_inlined_method_handle_intrinsic(ciMethod* caller, int bci, ciMethod* m);
  static bool is_inlined_method_handle_intrinsic(ciMethod* symbolic_info, ciMethod* m);
};


//------------------------InlineCallGenerator----------------------------------
class InlineCallGenerator : public CallGenerator {
 protected:
  InlineCallGenerator(ciMethod* method) : CallGenerator(method) {}

 public:
  virtual bool is_inline() const { return true; }
};


//---------------------------WarmCallInfo--------------------------------------
// A struct to collect information about a given call site.
// Helps sort call sites into "hot", "medium", and "cold".
// Participates in the queueing of "medium" call sites for possible inlining.
class WarmCallInfo : public ResourceObj {
 private:

  CallNode*      _call;    // The CallNode which may be inlined.
  CallGenerator* _hot_cg;  // CG for expanding the call node

  // These are the metrics we use to evaluate call sites:

  float _count;   // How often do we expect to reach this site?
  float _profit;  // How much time do we expect to save by inlining?
  float _work;    // How long do we expect the average call to take?
  float _size;    // How big do we expect the inlined code to be?

  float _heat;          // Combined score inducing total order on call sites.
  WarmCallInfo* _next;  // Next cooler call info in pending queue.

  // Count is the number of times this call site is expected to be executed.
  // Large count is favorable for inlining, because the extra compilation
  // work will be amortized more completely.

  // Profit is a rough measure of the amount of time we expect to save
  // per execution of this site if we inline it.  (1.0 == call overhead)
  // Large profit favors inlining.  Negative profit disables inlining.

  // Work is a rough measure of the amount of time a typical out-of-line
  // call from this site is expected to take.  (1.0 == call, no-op, return)
  // Small work is somewhat favorable for inlining, since methods with
  // short "hot" traces are more likely to inline smoothly.

  // Size is the number of graph nodes we expect this method to produce,
  // not counting the inlining of any further warm calls it may include.
  // Small size favors inlining, since small methods are more likely to
  // inline smoothly.  The size is estimated by examining the native code
  // if available.  The method bytecodes are also examined, assuming
  // empirically observed node counts for each kind of bytecode.

  // Heat is the combined "goodness" of a site's inlining.  If we were
  // omniscient, it would be the difference of two sums of future execution
  // times of code emitted for this site (amortized across multiple sites if
  // sharing applies).  The two sums are for versions of this call site with
  // and without inlining.

  // We approximate this mythical quantity by playing with averages,
  // rough estimates, and assumptions that history repeats itself.
  // The basic formula count * profit is heuristically adjusted
  // by looking at the expected compilation and execution times
  // of the inlined call.

  // Note: Some of these metrics may not be present in the final product,
  // but exist in development builds to experiment with inline policy tuning.

  // This heuristic framework does not model well the very significant
  // effects of multiple-level inlining.  It is possible to see no immediate
  // profit from inlining X->Y, but to get great profit from a subsequent
  // inlining X->Y->Z.

  // This framework does not take well into account the problem of N**2 code
  // size in a clique of mutually inlinable methods.
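
  // To make the basic formula concrete, a first-order sketch (illustrative
  // only; the actual computation is WarmCallInfo::compute_heat() in
  // callGenerator.cpp, which scales the product by a size-dependent factor):
  //
  //   float heat = count() * profit();  // expected time saved over all calls
  //   heat *= size_factor;              // hypothetical: smaller estimated
  //                                     // size() yields a larger factor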

  WarmCallInfo* next() const { return _next; }
  void set_next(WarmCallInfo* n) { _next = n; }

  static WarmCallInfo _always_hot;
  static WarmCallInfo _always_cold;

  // Constructor initialization of always_hot and always_cold
  WarmCallInfo(float c, float p, float w, float s) {
    _call   = NULL;
    _hot_cg = NULL;
    _next   = NULL;
    _count  = c;
    _profit = p;
    _work   = w;
    _size   = s;
    _heat   = 0;
  }

 public:
  // Because WarmCallInfo objects live over the entire lifetime of the
  // Compile object, they are allocated into the comp_arena, which
  // does not get resource-marked or reset during the compile process.
  void* operator new(size_t x, Compile* C) throw() { return C->comp_arena()->Amalloc(x); }
  void operator delete(void*) {}  // fast deallocation

  static WarmCallInfo* always_hot();
  static WarmCallInfo* always_cold();

  WarmCallInfo() {
    _call   = NULL;
    _hot_cg = NULL;
    _next   = NULL;
    _count = _profit = _work = _size = _heat = 0;
  }

  CallNode* call()   const { return _call; }
  float     count()  const { return _count; }
  float     size()   const { return _size; }
  float     work()   const { return _work; }
  float     profit() const { return _profit; }
  float     heat()   const { return _heat; }

  void set_count(float x)  { _count = x; }
  void set_size(float x)   { _size = x; }
  void set_work(float x)   { _work = x; }
  void set_profit(float x) { _profit = x; }
  void set_heat(float x)   { _heat = x; }

  // Load initial heuristics from profiles, etc.
  // The heuristics can be tweaked further by the caller.
  void init(JVMState* call_site, ciMethod* call_method, ciCallProfile& profile, float prof_factor);

  static float MAX_VALUE() { return +1.0e10; }
  static float MIN_VALUE() { return -1.0e10; }

  float compute_heat() const;

  void set_call(CallNode* call)      { _call = call; }
  void set_hot_cg(CallGenerator* cg) { _hot_cg = cg; }

  // Do not queue very hot or very cold calls.
  // Make very cold ones out of line immediately.
  // Inline very hot ones immediately.
  // These queries apply various tunable limits
  // to the above metrics in a systematic way.
  // Test for coldness before testing for hotness.
  bool is_cold() const;
  bool is_hot() const;

  // Force a warm call to be hot.  This worklists the call node for inlining.
  void make_hot();

  // Force a warm call to be cold.  This worklists the call node for out-of-lining.
  void make_cold();

  // A reproducible total ordering, in which heat is the major key.
  bool warmer_than(WarmCallInfo* that);

  // List management.  These methods are called with the list head,
  // and return the new list head, inserting or removing the receiver.
  WarmCallInfo* insert_into(WarmCallInfo* head);
  WarmCallInfo* remove_from(WarmCallInfo* head);

#ifndef PRODUCT
  void print() const;
  void print_all() const;
  int count_all() const;
#endif
};

#endif // SHARE_OPTO_CALLGENERATOR_HPP