--- old/src/share/vm/opto/parse.hpp
+++ new/src/share/vm/opto/parse.hpp
1 1 /*
2 2 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 #ifndef SHARE_VM_OPTO_PARSE_HPP
26 26 #define SHARE_VM_OPTO_PARSE_HPP
27 27
28 28 #include "ci/ciMethodData.hpp"
29 29 #include "ci/ciTypeFlow.hpp"
30 30 #include "compiler/methodLiveness.hpp"
31 31 #include "libadt/vectset.hpp"
32 32 #include "oops/generateOopMap.hpp"
33 33 #include "opto/graphKit.hpp"
34 34 #include "opto/subnode.hpp"
35 35
36 36 class BytecodeParseHistogram;
37 37 class InlineTree;
38 38 class Parse;
39 39 class SwitchRange;
40 40
41 41
42 42 //------------------------------InlineTree-------------------------------------
43 43 class InlineTree : public ResourceObj {
44 44 Compile* C; // cache
45 45 JVMState* _caller_jvms; // state of caller
46 46 ciMethod* _method; // method being called by the caller_jvms
47 47 InlineTree* _caller_tree;
48 48 uint _count_inline_bcs; // Accumulated count of inlined bytecodes
49 49 // Call-site count / interpreter invocation count, scaled recursively.
50 50 // Always between 0.0 and 1.0. Represents the percentage of the method's
51 51 // total execution time used at this call site.
52 52 const float _site_invoke_ratio;
53 53 const int _site_depth_adjust;
54 54 float compute_callee_frequency( int caller_bci ) const;
55 55
56 56 GrowableArray<InlineTree*> _subtrees;
57 57 friend class Compile;
58 58
59 59 protected:
60 60 InlineTree(Compile* C,
61 61 const InlineTree* caller_tree,
62 62 ciMethod* callee_method,
63 63 JVMState* caller_jvms,
64 64 int caller_bci,
65 65 float site_invoke_ratio,
66 66 int site_depth_adjust);
67 67 InlineTree *build_inline_tree_for_callee(ciMethod* callee_method,
68 68 JVMState* caller_jvms,
69 69 int caller_bci);
70 70 const char* try_to_inline(ciMethod* callee_method, ciMethod* caller_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result);
71 - const char* shouldInline(ciMethod* callee_method, ciMethod* caller_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result) const;
72 - const char* shouldNotInline(ciMethod* callee_method, ciMethod* caller_method, WarmCallInfo* wci_result) const;
71 + const char* should_inline(ciMethod* callee_method, ciMethod* caller_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result) const;
72 + const char* should_not_inline(ciMethod* callee_method, ciMethod* caller_method, WarmCallInfo* wci_result) const;
73 73 void print_inlining(ciMethod *callee_method, int caller_bci, const char *failure_msg) const;
74 74
75 75 InlineTree *caller_tree() const { return _caller_tree; }
76 76 InlineTree* callee_at(int bci, ciMethod* m) const;
77 77 int inline_depth() const { return stack_depth() + _site_depth_adjust; }
78 78 int stack_depth() const { return _caller_jvms ? _caller_jvms->depth() : 0; }
79 79
80 80 public:
81 81 static InlineTree* build_inline_tree_root();
82 82 static InlineTree* find_subtree_from_root(InlineTree* root, JVMState* jvms, ciMethod* callee, bool create_if_not_found = false);
83 83
84 84 // For temporary (stack-allocated, stateless) InlineTrees (ilts):
85 85 InlineTree(Compile* c, ciMethod* callee_method, JVMState* caller_jvms, float site_invoke_ratio, int site_depth_adjust);
86 86
87 87 // InlineTree enum
88 88 enum InlineStyle {
89 89 Inline_do_not_inline = 0, //
90 90 Inline_cha_is_monomorphic = 1, //
91 91 Inline_type_profile_monomorphic = 2 //
92 92 };
93 93
94 94 // See if it is OK to inline.
95 95 // The receiver is the inline tree for the caller.
96 96 //
97 97 // The result is a temperature indication. If it is hot or cold,
98 98 // inlining is immediate or undesirable. Otherwise, the info block
99 99 // returned is newly allocated and may be enqueued.
100 100 //
101 101 // If the method is inlinable, a new inline subtree is created on the fly,
102 102 // and may be accessed by find_subtree_from_root.
103 103 // The call_method is the dest_method for a special or static invocation.
104 104 // The call_method is an optimized virtual method candidate otherwise.
105 105 WarmCallInfo* ok_to_inline(ciMethod *call_method, JVMState* caller_jvms, ciCallProfile& profile, WarmCallInfo* wci);
106 106
107 107 // Information about inlined method
108 108 JVMState* caller_jvms() const { return _caller_jvms; }
109 109 ciMethod *method() const { return _method; }
110 110 int caller_bci() const { return _caller_jvms ? _caller_jvms->bci() : InvocationEntryBci; }
111 111 uint count_inline_bcs() const { return _count_inline_bcs; }
112 112 float site_invoke_ratio() const { return _site_invoke_ratio; };
113 113
114 114 #ifndef PRODUCT
115 115 private:
116 116 uint _count_inlines; // Count of inlined methods
117 117 public:
118 118 // Debug information collected during parse
119 119 uint count_inlines() const { return _count_inlines; };
120 120 #endif
121 121 GrowableArray<InlineTree*> subtrees() { return _subtrees; }
122 122 };
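
The rename above (shouldInline/shouldNotInline to should_inline/should_not_inline) is purely stylistic, but the decision chain these predicates form is worth seeing in miniature. Below is a minimal, self-contained sketch — hypothetical names and heuristics, not HotSpot's actual implementation — of how a veto check and a positive size heuristic can compose inside try_to_inline, with each predicate returning a failure reason or NULL:

#include <cstdio>

namespace sketch {
  // Hypothetical stand-in for ciMethod; not a HotSpot type.
  struct Callee { int size; bool never_inline; };

  // Veto check: a non-NULL return is a reason we must not inline.
  const char* should_not_inline(const Callee& c) {
    if (c.never_inline) return "inlining disallowed for this method";
    return NULL;
  }

  // Positive heuristic: a non-NULL return is a reason we prefer not to.
  const char* should_inline(const Callee& c, int max_size) {
    if (c.size > max_size) return "callee is too large";
    return NULL;
  }

  // First non-NULL failure reason wins; NULL means "go ahead and inline".
  const char* try_to_inline(const Callee& c, int max_size) {
    if (const char* fail = should_not_inline(c))      return fail;
    if (const char* fail = should_inline(c, max_size)) return fail;
    return NULL;
  }
}

int main() {
  sketch::Callee small_callee = { 20, false };
  sketch::Callee huge_callee  = { 400, false };
  std::printf("small: %s\n",
              sketch::try_to_inline(small_callee, 35) ? "rejected" : "inlined");
  std::printf("huge:  %s\n",
              sketch::try_to_inline(huge_callee, 35) ? "rejected" : "inlined");
  return 0;
}
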
123 123
124 124
125 125 //-----------------------------------------------------------------------------
126 126 //------------------------------Parse------------------------------------------
127 127 // Parse bytecodes, build a Graph
128 128 class Parse : public GraphKit {
129 129 public:
130 130 // Per-block information needed by the parser:
131 131 class Block {
132 132 private:
133 133 ciTypeFlow::Block* _flow;
134 134 int _pred_count; // how many predecessors in CFG?
135 135 int _preds_parsed; // how many of these have been parsed?
136 136 uint _count; // how many times executed? Currently only set by _goto's
137 137 bool _is_parsed; // has this block been parsed yet?
138 138 bool _is_handler; // is this block an exception handler?
139 139 bool _has_merged_backedge; // does this block have a merged backedge?
140 140 SafePointNode* _start_map; // all values flowing into this block
141 141 MethodLivenessResult _live_locals; // lazily initialized liveness bitmap
142 142
143 143 int _num_successors; // Includes only normal control flow.
144 144 int _all_successors; // Include exception paths also.
145 145 Block** _successors;
146 146
147 147 // Use init_node/init_graph to initialize Blocks.
148 148 // Block() : _live_locals((uintptr_t*)NULL,0) { ShouldNotReachHere(); }
149 149 Block() : _live_locals(NULL,0) { ShouldNotReachHere(); }
150 150
151 151 public:
152 152
153 153 // Set up the block data structure itself.
154 154 void init_node(Parse* outer, int po);
155 155 // Set up the block's relations to other blocks.
156 156 void init_graph(Parse* outer);
157 157
158 158 ciTypeFlow::Block* flow() const { return _flow; }
159 159 int pred_count() const { return _pred_count; }
160 160 int preds_parsed() const { return _preds_parsed; }
161 161 bool is_parsed() const { return _is_parsed; }
162 162 bool is_handler() const { return _is_handler; }
163 163 void set_count( uint x ) { _count = x; }
164 164 uint count() const { return _count; }
165 165
166 166 SafePointNode* start_map() const { assert(is_merged(),""); return _start_map; }
167 167 void set_start_map(SafePointNode* m) { assert(!is_merged(), ""); _start_map = m; }
168 168
169 169 // True after any predecessor flows control into this block
170 170 bool is_merged() const { return _start_map != NULL; }
171 171
172 172 #ifdef ASSERT
173 173 // True after backedge predecessor flows control into this block
174 174 bool has_merged_backedge() const { return _has_merged_backedge; }
175 175 void mark_merged_backedge(Block* pred) {
176 176 assert(is_SEL_head(), "should be loop head");
177 177 if (pred != NULL && is_SEL_backedge(pred)) {
178 178 assert(is_parsed(), "block should be parsed before merging backedges");
179 179 _has_merged_backedge = true;
180 180 }
181 181 }
182 182 #endif
183 183
184 184 // True when all non-exception predecessors have been parsed.
185 185 bool is_ready() const { return preds_parsed() == pred_count(); }
186 186
187 187 int num_successors() const { return _num_successors; }
188 188 int all_successors() const { return _all_successors; }
189 189 Block* successor_at(int i) const {
190 190 assert((uint)i < (uint)all_successors(), "");
191 191 return _successors[i];
192 192 }
193 193 Block* successor_for_bci(int bci);
194 194
195 195 int start() const { return flow()->start(); }
196 196 int limit() const { return flow()->limit(); }
197 197 int rpo() const { return flow()->rpo(); }
198 198 int start_sp() const { return flow()->stack_size(); }
199 199
200 200 bool is_loop_head() const { return flow()->is_loop_head(); }
201 201 bool is_SEL_head() const { return flow()->is_single_entry_loop_head(); }
202 202 bool is_SEL_backedge(Block* pred) const{ return is_SEL_head() && pred->rpo() >= rpo(); }
203 203 bool is_invariant_local(uint i) const {
204 204 const JVMState* jvms = start_map()->jvms();
205 205 if (!jvms->is_loc(i) || flow()->outer()->has_irreducible_entry()) return false;
206 206 return flow()->is_invariant_local(i - jvms->locoff());
207 207 }
208 208 bool can_elide_SEL_phi(uint i) const { assert(is_SEL_head(),""); return is_invariant_local(i); }
209 209
210 210 const Type* peek(int off=0) const { return stack_type_at(start_sp() - (off+1)); }
211 211
212 212 const Type* stack_type_at(int i) const;
213 213 const Type* local_type_at(int i) const;
214 214 static const Type* get_type(ciType* t) { return Type::get_typeflow_type(t); }
215 215
216 216 bool has_trap_at(int bci) const { return flow()->has_trap() && flow()->trap_bci() == bci; }
217 217
218 218 // Call this just before parsing a block.
219 219 void mark_parsed() {
220 220 assert(!_is_parsed, "must parse each block exactly once");
221 221 _is_parsed = true;
222 222 }
223 223
224 224 // Return the phi/region input index for the "current" pred,
225 225 // and bump the pred number. For historical reasons these index
226 226 // numbers are handed out in descending order. The last index is
227 227 // always PhiNode::Input (i.e., 1). The value returned is known
228 228 // as a "path number" because it distinguishes by which path we are
229 229 // entering the block.
230 230 int next_path_num() {
231 231 assert(preds_parsed() < pred_count(), "too many preds?");
232 232 return pred_count() - _preds_parsed++;
233 233 }
234 234
235 235 // Add a previously unaccounted predecessor to this block.
236 236 // This operates by increasing the size of the block's region
237 237 // and all its phi nodes (if any). The value returned is a
238 238 // path number ("pnum").
239 239 int add_new_path();
240 240
241 241 // Initialize me by recording the parser's map. My own map must be NULL.
242 242 void record_state(Parse* outer);
243 243 };
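
The next_path_num comment in Block above (path numbers handed out in descending order, ending at PhiNode::Input, i.e. 1) is easy to check with a tiny standalone model. This is a hypothetical sketch of just the predecessor-counting state, not the real Block, though next_path_num uses the same arithmetic as the source:

#include <cassert>

// Toy model of Block's predecessor bookkeeping (hypothetical, not HotSpot code).
struct BlockModel {
  int _pred_count;
  int _preds_parsed;
  explicit BlockModel(int preds) : _pred_count(preds), _preds_parsed(0) {}

  int  pred_count()   const { return _pred_count; }
  int  preds_parsed() const { return _preds_parsed; }
  bool is_ready()     const { return _preds_parsed == _pred_count; }

  // Same arithmetic as Block::next_path_num: descending path numbers.
  int next_path_num() {
    assert(preds_parsed() < pred_count() && "too many preds?");
    return pred_count() - _preds_parsed++;
  }
};

int main() {
  BlockModel b(3);                  // a merge point with three predecessors
  assert(b.next_path_num() == 3);   // first pred to arrive gets path 3
  assert(b.next_path_num() == 2);
  assert(b.next_path_num() == 1);   // last path number is PhiNode::Input (1)
  assert(b.is_ready());             // all non-exception preds now parsed
  return 0;
}
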
244 244
245 245 #ifndef PRODUCT
246 246 // BytecodeParseHistogram collects number of bytecodes parsed, nodes constructed, and transformations.
247 247 class BytecodeParseHistogram : public ResourceObj {
248 248 private:
249 249 enum BPHType {
250 250 BPH_transforms,
251 251 BPH_values
252 252 };
253 253 static bool _initialized;
254 254 static uint _bytecodes_parsed [Bytecodes::number_of_codes];
255 255 static uint _nodes_constructed[Bytecodes::number_of_codes];
256 256 static uint _nodes_transformed[Bytecodes::number_of_codes];
257 257 static uint _new_values [Bytecodes::number_of_codes];
258 258
259 259 Bytecodes::Code _initial_bytecode;
260 260 int _initial_node_count;
261 261 int _initial_transforms;
262 262 int _initial_values;
263 263
264 264 Parse *_parser;
265 265 Compile *_compiler;
266 266
267 267 // Initialization
268 268 static void reset();
269 269
270 270 // Return info being collected, select with global flag 'BytecodeParseInfo'
271 271 int current_count(BPHType info_selector);
272 272
273 273 public:
274 274 BytecodeParseHistogram(Parse *p, Compile *c);
275 275 static bool initialized();
276 276
277 277 // Record info when starting to parse one bytecode
278 278 void set_initial_state( Bytecodes::Code bc );
279 279 // Record results of parsing one bytecode
280 280 void record_change();
281 281
282 282 // Profile printing
283 283 static void print(float cutoff = 0.01F); // cutoff in percent
284 284 };
285 285
286 286 public:
287 287 // Record work done during parsing
288 288 BytecodeParseHistogram* _parse_histogram;
289 289 void set_parse_histogram(BytecodeParseHistogram *bph) { _parse_histogram = bph; }
290 290 BytecodeParseHistogram* parse_histogram() { return _parse_histogram; }
291 291 #endif
292 292
293 293 private:
294 294 friend class Block;
295 295
296 296 // Variables which characterize this compilation as a whole:
297 297
298 298 JVMState* _caller; // JVMS which carries incoming args & state.
299 299 float _expected_uses; // expected number of calls to this code
300 300 float _prof_factor; // discount applied to my profile counts
301 301 int _depth; // Inline tree depth, for debug printouts
302 302 const TypeFunc*_tf; // My kind of function type
303 303 int _entry_bci; // the osr bci or InvocationEntryBci
304 304
305 305 ciTypeFlow* _flow; // Results of previous flow pass.
306 306 Block* _blocks; // Array of basic-block structs.
307 307 int _block_count; // Number of elements in _blocks.
308 308
309 309 GraphKit _exits; // Record all normal returns and throws here.
310 310 bool _wrote_final; // Did we write a final field?
311 311 bool _count_invocations; // update and test invocation counter
312 312 bool _method_data_update; // update method data oop
313 313
314 314 // Variables which track Java semantics during bytecode parsing:
315 315
316 316 Block* _block; // block currently getting parsed
317 317 ciBytecodeStream _iter; // stream of this method's bytecodes
318 318
319 319 int _blocks_merged; // Progress meter: state merges from BB preds
320 320 int _blocks_parsed; // Progress meter: BBs actually parsed
321 321
322 322 const FastLockNode* _synch_lock; // FastLockNode for synchronized method
323 323
324 324 #ifndef PRODUCT
325 325 int _max_switch_depth; // Debugging SwitchRanges.
326 326 int _est_switch_depth; // Debugging SwitchRanges.
327 327 #endif
328 328
329 329 public:
330 330 // Constructor
331 331 Parse(JVMState* caller, ciMethod* parse_method, float expected_uses);
332 332
333 333 virtual Parse* is_Parse() const { return (Parse*)this; }
334 334
335 335 public:
336 336 // Accessors.
337 337 JVMState* caller() const { return _caller; }
338 338 float expected_uses() const { return _expected_uses; }
339 339 float prof_factor() const { return _prof_factor; }
340 340 int depth() const { return _depth; }
341 341 const TypeFunc* tf() const { return _tf; }
342 342 // entry_bci() -- see osr_bci, etc.
343 343
344 344 ciTypeFlow* flow() const { return _flow; }
345 345 // blocks() -- see rpo_at, start_block, etc.
346 346 int block_count() const { return _block_count; }
347 347
348 348 GraphKit& exits() { return _exits; }
349 349 bool wrote_final() const { return _wrote_final; }
350 350 void set_wrote_final(bool z) { _wrote_final = z; }
351 351 bool count_invocations() const { return _count_invocations; }
352 352 bool method_data_update() const { return _method_data_update; }
353 353
354 354 Block* block() const { return _block; }
355 355 ciBytecodeStream& iter() { return _iter; }
356 356 Bytecodes::Code bc() const { return _iter.cur_bc(); }
357 357
358 358 void set_block(Block* b) { _block = b; }
359 359
360 360 // Derived accessors:
361 361 bool is_normal_parse() const { return _entry_bci == InvocationEntryBci; }
362 362 bool is_osr_parse() const { return _entry_bci != InvocationEntryBci; }
363 363 int osr_bci() const { assert(is_osr_parse(),""); return _entry_bci; }
364 364
365 365 void set_parse_bci(int bci);
366 366
367 367 // Must this parse be aborted?
368 368 bool failing() { return C->failing(); }
369 369
370 370 Block* rpo_at(int rpo) {
371 371 assert(0 <= rpo && rpo < _block_count, "oob");
372 372 return &_blocks[rpo];
373 373 }
374 374 Block* start_block() {
375 375 return rpo_at(flow()->start_block()->rpo());
376 376 }
377 377 // Can return NULL if the flow pass did not complete a block.
378 378 Block* successor_for_bci(int bci) {
379 379 return block()->successor_for_bci(bci);
380 380 }
381 381
382 382 private:
383 383 // Create a JVMS & map for the initial state of this method.
384 384 SafePointNode* create_entry_map();
385 385
386 386 // OSR helpers
387 387 Node *fetch_interpreter_state(int index, BasicType bt, Node *local_addrs, Node *local_addrs_base);
388 388 Node* check_interpreter_type(Node* l, const Type* type, SafePointNode* &bad_type_exit);
389 389 void load_interpreter_state(Node* osr_buf);
390 390
391 391 // Functions for managing basic blocks:
392 392 void init_blocks();
393 393 void load_state_from(Block* b);
394 394 void store_state_to(Block* b) { b->record_state(this); }
395 395
396 396 // Parse all the basic blocks.
397 397 void do_all_blocks();
398 398
399 399 // Parse the current basic block
400 400 void do_one_block();
401 401
402 402 // Raise an error if we get a bad ciTypeFlow CFG.
403 403 void handle_missing_successor(int bci);
404 404
405 405 // first actions (before BCI 0)
406 406 void do_method_entry();
407 407
408 408 // implementation of monitorenter/monitorexit
409 409 void do_monitor_enter();
410 410 void do_monitor_exit();
411 411
412 412 // Eagerly create phis throughout the state, to cope with back edges.
413 413 void ensure_phis_everywhere();
414 414
415 415 // Merge the current mapping into the basic block starting at bci
416 416 void merge( int target_bci);
417 417 // Same as plain merge, except that it allocates a new path number.
418 418 void merge_new_path( int target_bci);
419 419 // Merge the current mapping into an exception handler.
420 420 void merge_exception(int target_bci);
421 421 // Helper: Merge the current mapping into the given basic block
422 422 void merge_common(Block* target, int pnum);
423 423 // Helper functions for merging individual cells.
424 424 PhiNode *ensure_phi( int idx, bool nocreate = false);
425 425 PhiNode *ensure_memory_phi(int idx, bool nocreate = false);
426 426 // Helper to merge the current memory state into the given basic block
427 427 void merge_memory_edges(MergeMemNode* n, int pnum, bool nophi);
428 428
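
To make the division of labor between merge_common and ensure_phi concrete, here is a toy model — hypothetical types, not HotSpot's Node graph — of merging one value cell: the first predecessor to reach a block simply records its state, and a later, disagreeing path promotes the cell to a phi with one input slot per path number (slot 0 unused, since path numbers start at 1):

#include <cstdio>
#include <vector>

// Toy model of one value cell at a merge point (hypothetical, not Node*).
struct CellSketch {
  bool merged;
  bool is_phi;
  int  value;                        // the single known value, pre-phi
  std::vector<int> phi_inputs;       // indexed by path number once promoted

  CellSketch() : merged(false), is_phi(false), value(0) {}

  void merge(int pnum, int v, int pred_count) {
    if (!merged) {                   // first pred in: just record the state
      value = v; merged = true; return;
    }
    if (!is_phi && value != v) {     // later path disagrees: create the phi
      is_phi = true;
      phi_inputs.assign(pred_count + 1, value);  // slot 0 unused; paths 1..n
    }
    if (is_phi) phi_inputs[pnum] = v;
  }
};

int main() {
  CellSketch local;                  // one local's cell, two predecessors
  local.merge(2, 7, 2);              // path 2 arrives first (descending order)
  local.merge(1, 9, 2);              // path 1 disagrees: phi inputs {9, 7}
  std::printf("is_phi=%d inputs=[%d, %d]\n",
              local.is_phi, local.phi_inputs[1], local.phi_inputs[2]);
  return 0;
}
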
429 429 // Parse this bytecode, and alter the Parser's JVM->Node mapping
430 430 void do_one_bytecode();
431 431
432 432 // helper function to generate array store check
433 433 void array_store_check();
434 434 // Helper function to generate array load
435 435 void array_load(BasicType etype);
436 436 // Helper function to generate array store
437 437 void array_store(BasicType etype);
438 438 // Helper function to compute array addressing
439 439 Node* array_addressing(BasicType type, int vals, const Type* *result2=NULL);
440 440
441 441 // Pass current map to exits
442 442 void return_current(Node* value);
443 443
444 444 // Register finalizers on return from Object.<init>
445 445 void call_register_finalizer();
446 446
447 447 // Insert a compiler safepoint into the graph
448 448 void add_safepoint();
449 449
450 450 // Insert a compiler safepoint into the graph, if there is a back-branch.
451 451 void maybe_add_safepoint(int target_bci) {
452 452 if (UseLoopSafepoints && target_bci <= bci()) {
453 453 add_safepoint();
454 454 }
455 455 }
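
The test in maybe_add_safepoint is just bci ordering: a branch whose target bci is at or before the current bci is taken to be a loop back edge, and only those branches get a safepoint. A small illustration with a hypothetical driver, not HotSpot code:

#include <cstdio>

// Same comparison as maybe_add_safepoint: backward (or self) branch?
static bool is_back_branch(int current_bci, int target_bci) {
  return target_bci <= current_bci;
}

int main() {
  // A goto at bci 40 jumping back to a loop head at bci 8: safepoint.
  std::printf("bci 40 -> 8:  %s\n",
              is_back_branch(40, 8) ? "safepoint" : "skip");
  // A forward branch from bci 8 to bci 40: no safepoint needed.
  std::printf("bci 8  -> 40: %s\n",
              is_back_branch(8, 40) ? "safepoint" : "skip");
  return 0;
}
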
456 456
457 457 // Note: Intrinsic generation routines may be found in library_call.cpp.
458 458
459 459 // Helper function to setup Ideal Call nodes
460 460 void do_call();
461 461
462 462 // Helper function to uncommon-trap or bailout for non-compilable call-sites
463 463 bool can_not_compile_call_site(ciMethod *dest_method, ciInstanceKlass *klass);
464 464
465 465 // Helper function to identify inlining potential at call-site
466 466 ciMethod* optimize_inlining(ciMethod* caller, int bci, ciInstanceKlass* klass,
467 467 ciMethod *dest_method, const TypeOopPtr* receiver_type);
468 468
469 469 // Helper function to setup for type-profile based inlining
470 470 bool prepare_type_profile_inline(ciInstanceKlass* prof_klass, ciMethod* prof_method);
471 471
472 472 // Helper functions for type checking bytecodes:
473 473 void do_checkcast();
474 474 void do_instanceof();
475 475
476 476 // Helper functions for shifting & arithmetic
477 477 void modf();
478 478 void modd();
479 479 void l2f();
480 480
481 481 void do_irem();
482 482
483 483 // implementation of _get* and _put* bytecodes
484 484 void do_getstatic() { do_field_access(true, false); }
485 485 void do_getfield () { do_field_access(true, true); }
486 486 void do_putstatic() { do_field_access(false, false); }
487 487 void do_putfield () { do_field_access(false, true); }
488 488
489 489 // common code for making initial checks and forming addresses
490 490 void do_field_access(bool is_get, bool is_field);
491 491 bool static_field_ok_in_clinit(ciField *field, ciMethod *method);
492 492
493 493 // common code for actually performing the load or store
494 494 void do_get_xxx(Node* obj, ciField* field, bool is_field);
495 495 void do_put_xxx(Node* obj, ciField* field, bool is_field);
496 496
497 497 // loading from a constant field or the constant pool
498 498 // returns false if push failed (non-perm field constants only, not ldcs)
499 499 bool push_constant(ciConstant con, bool require_constant = false);
500 500
501 501 // implementation of object creation bytecodes
502 502 void emit_guard_for_new(ciInstanceKlass* klass);
503 503 void do_new();
504 504 void do_newarray(BasicType elemtype);
505 505 void do_anewarray();
506 506 void do_multianewarray();
507 507 Node* expand_multianewarray(ciArrayKlass* array_klass, Node* *lengths, int ndimensions, int nargs);
508 508
509 509 // implementation of jsr/ret
510 510 void do_jsr();
511 511 void do_ret();
512 512
513 513 float dynamic_branch_prediction(float &cnt);
514 514 float branch_prediction(float &cnt, BoolTest::mask btest, int target_bci);
515 515 bool seems_never_taken(float prob);
516 516 bool seems_stable_comparison(BoolTest::mask btest, Node* c);
517 517
518 518 void do_ifnull(BoolTest::mask btest, Node* c);
519 519 void do_if(BoolTest::mask btest, Node* c);
520 520 int repush_if_args();
521 521 void adjust_map_after_if(BoolTest::mask btest, Node* c, float prob,
522 522 Block* path, Block* other_path);
523 523 IfNode* jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask);
524 524 Node* jump_if_join(Node* iffalse, Node* iftrue);
525 525 void jump_if_true_fork(IfNode *ifNode, int dest_bci_if_true, int prof_table_index);
526 526 void jump_if_false_fork(IfNode *ifNode, int dest_bci_if_false, int prof_table_index);
527 527 void jump_if_always_fork(int dest_bci_if_true, int prof_table_index);
528 528
529 529 friend class SwitchRange;
530 530 void do_tableswitch();
531 531 void do_lookupswitch();
532 532 void jump_switch_ranges(Node* a, SwitchRange* lo, SwitchRange* hi, int depth = 0);
533 533 bool create_jump_tables(Node* a, SwitchRange* lo, SwitchRange* hi);
534 534
535 535 // helper functions for methodData style profiling
536 536 void test_counter_against_threshold(Node* cnt, int limit);
537 537 void increment_and_test_invocation_counter(int limit);
538 538 void test_for_osr_md_counter_at(ciMethodData* md, ciProfileData* data, ByteSize offset, int limit);
539 539 Node* method_data_addressing(ciMethodData* md, ciProfileData* data, ByteSize offset, Node* idx = NULL, uint stride = 0);
540 540 void increment_md_counter_at(ciMethodData* md, ciProfileData* data, ByteSize offset, Node* idx = NULL, uint stride = 0);
541 541 void set_md_flag_at(ciMethodData* md, ciProfileData* data, int flag_constant);
542 542
543 543 void profile_method_entry();
544 544 void profile_taken_branch(int target_bci, bool force_update = false);
545 545 void profile_not_taken_branch(bool force_update = false);
546 546 void profile_call(Node* receiver);
547 547 void profile_generic_call();
548 548 void profile_receiver_type(Node* receiver);
549 549 void profile_ret(int target_bci);
550 550 void profile_null_checkcast();
551 551 void profile_switch_case(int table_index);
552 552
553 553 // helper function for call statistics
554 554 void count_compiled_calls(bool at_method_entry, bool is_inline) PRODUCT_RETURN;
555 555
556 556 Node_Notes* make_node_notes(Node_Notes* caller_nn);
557 557
558 558 // Helper functions for handling normal and abnormal exits.
559 559 void build_exits();
560 560
561 561 // Fix up all exceptional control flow exiting a single bytecode.
562 562 void do_exceptions();
563 563
564 564 // Fix up all exiting control flow at the end of the parse.
565 565 void do_exits();
566 566
567 567 // Add Catch/CatchProjs
568 568 // The call is either a Java call or the VM's rethrow stub
569 569 void catch_call_exceptions(ciExceptionHandlerStream&);
570 570
571 571 // Handle all exceptions thrown by the inlined method.
572 572 // Also handles exceptions for individual bytecodes.
573 573 void catch_inline_exceptions(SafePointNode* ex_map);
574 574
575 575 // Merge the given map into correct exceptional exit state.
576 576 // Assumes that there is no applicable local handler.
577 577 void throw_to_exit(SafePointNode* ex_map);
578 578
579 579 public:
580 580 #ifndef PRODUCT
581 581 // Handle PrintOpto, etc.
582 582 void show_parse_info();
583 583 void dump_map_adr_mem() const;
584 584 static void print_statistics(); // Print some performance counters
585 585 void dump();
586 586 void dump_bci(int bci);
587 587 #endif
588 588 };
589 589
590 590 #endif // SHARE_VM_OPTO_PARSE_HPP