407 // Must this parse be aborted?
408 bool failing() { return C->failing(); }
409
410 Block* rpo_at(int rpo) {
411 assert(0 <= rpo && rpo < _block_count, "oob");
412 return &_blocks[rpo];
413 }
414 Block* start_block() {
415 return rpo_at(flow()->start_block()->rpo());
416 }
417 // Can return NULL if the flow pass did not complete a block.
418 Block* successor_for_bci(int bci) {
419 return block()->successor_for_bci(bci);
420 }
421
 private:
  // Create a JVMS & map for the initial state of this method.
  SafePointNode* create_entry_map();

  // OSR helpers: pull locals/stack values out of the interpreter's OSR buffer
  // and type-check them against what ciTypeFlow expects.
  Node *fetch_interpreter_state(int index, BasicType bt, Node *local_addrs, Node *local_addrs_base);
  Node* check_interpreter_type(Node* l, const Type* type, SafePointNode* &bad_type_exit);
  void load_interpreter_state(Node* osr_buf);

  // Functions for managing basic blocks:
  void init_blocks();
  void load_state_from(Block* b);
  // Snapshot the parser's current JVM state into the given block.
  void store_state_to(Block* b) { b->record_state(this); }

  // Parse all the basic blocks.
  void do_all_blocks();

  // Parse the current basic block.
  void do_one_block();

  // Raise an error if we get a bad ciTypeFlow CFG.
  void handle_missing_successor(int bci);

  // First actions (before BCI 0), e.g. method-entry bookkeeping.
  void do_method_entry();
  // Make sure phis are available at every merge point that may need them
  // (presumably called before merging paths — confirm against parse1.cpp).
  void ensure_phis_everywhere();

  // Merge the current mapping into the basic block starting at bci
  void merge( int target_bci);
  // Same as plain merge, except that it allocates a new path number.
  void merge_new_path( int target_bci);
  // Merge the current mapping into an exception handler.
  void merge_exception(int target_bci);
  // Helper: Merge the current mapping into the given basic block
  void merge_common(Block* target, int pnum);
  // Helper functions for merging individual cells.
  PhiNode *ensure_phi( int idx, bool nocreate = false);
  PhiNode *ensure_memory_phi(int idx, bool nocreate = false);
  // Helper to merge the current memory state into the given basic block
  void merge_memory_edges(MergeMemNode* n, int pnum, bool nophi);

  // Parse this bytecode, and alter the Parsers JVM->Node mapping
  void do_one_bytecode();

  // helper function to generate array store check
  void array_store_check();
  // Helper function to generate array load
  void array_load(BasicType etype);
  // Helper function to generate array store
  void array_store(BasicType etype);
  // Helper function to compute array addressing
  Node* array_addressing(BasicType type, int vals, const Type* *result2=NULL);

  // RTM (Restricted Transactional Memory) related deoptimization support —
  // NOTE(review): body not visible here; confirm semantics in parse1.cpp.
  void rtm_deopt();

  // Pass current map to exits
  void return_current(Node* value);

  // Register finalizers on return from Object.<init>
  void call_register_finalizer();

  // Insert a compiler safepoint into the graph
  void add_safepoint();
493 void maybe_add_safepoint(int target_bci) {

  // Special handling for the irem/lrem family of bytecodes
  // (presumably the min_jint % -1 corner case — confirm in parse2.cpp).
  void do_irem();

  // implementation of _get* and _put* bytecodes
  void do_getstatic() { do_field_access(true, false); }
  void do_getfield () { do_field_access(true, true); }
  void do_putstatic() { do_field_access(false, false); }
  void do_putfield () { do_field_access(false, true); }

  // common code for making initial checks and forming addresses
  void do_field_access(bool is_get, bool is_field);
  bool static_field_ok_in_clinit(ciField *field, ciMethod *method);

  // common code for actually performing the load or store
  void do_get_xxx(Node* obj, ciField* field, bool is_field);
  void do_put_xxx(Node* obj, ciField* field, bool is_field);

  // implementation of object creation bytecodes
  void emit_guard_for_new(ciInstanceKlass* klass);
  void do_new();
  void do_newarray(BasicType elemtype);
  void do_anewarray();
  void do_multianewarray();
  Node* expand_multianewarray(ciArrayKlass* array_klass, Node* *lengths, int ndimensions, int nargs);

  // implementation of jsr/ret
  void do_jsr();
  void do_ret();

  // Branch-frequency estimation from profile data; used to bias if/goto parsing.
  float dynamic_branch_prediction(float &cnt, BoolTest::mask btest, Node* test);
  float branch_prediction(float &cnt, BoolTest::mask btest, int target_bci, Node* test);
  bool seems_never_taken(float prob) const;
  bool path_is_suitable_for_uncommon_trap(float prob) const;
  bool seems_stable_comparison() const;

  // Parsing of conditional branches and the map adjustments that follow them.
  void do_ifnull(BoolTest::mask btest, Node* c);
  void do_if(BoolTest::mask btest, Node* c);
  int repush_if_args();
  void adjust_map_after_if(BoolTest::mask btest, Node* c, float prob,
                           Block* path, Block* other_path);
  void sharpen_type_after_if(BoolTest::mask btest,
                             Node* con, const Type* tcon,
                             Node* val, const Type* tval);
  void maybe_add_predicate_after_if(Block* path);
  IfNode* jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask, float prob, float cnt);
  Node* jump_if_join(Node* iffalse, Node* iftrue);
  void jump_if_true_fork(IfNode *ifNode, int dest_bci_if_true, int prof_table_index, bool unc);
  void jump_if_false_fork(IfNode *ifNode, int dest_bci_if_false, int prof_table_index, bool unc);
  void jump_if_always_fork(int dest_bci_if_true, int prof_table_index, bool unc);

  // Switch bytecodes: lowered to range checks, binary search, or jump tables.
  friend class SwitchRange;
  void do_tableswitch();
  void do_lookupswitch();
  void jump_switch_ranges(Node* a, SwitchRange* lo, SwitchRange* hi, int depth = 0);
  bool create_jump_tables(Node* a, SwitchRange* lo, SwitchRange* hi);
  void linear_search_switch_ranges(Node* key_val, SwitchRange*& lo, SwitchRange*& hi);

  // Method-age bookkeeping (presumably for tiered/aging heuristics — confirm).
  void decrement_age();
  // helper functions for methodData style profiling
  void test_counter_against_threshold(Node* cnt, int limit);
|
407 // Must this parse be aborted?
408 bool failing() { return C->failing(); }
409
410 Block* rpo_at(int rpo) {
411 assert(0 <= rpo && rpo < _block_count, "oob");
412 return &_blocks[rpo];
413 }
414 Block* start_block() {
415 return rpo_at(flow()->start_block()->rpo());
416 }
417 // Can return NULL if the flow pass did not complete a block.
418 Block* successor_for_bci(int bci) {
419 return block()->successor_for_bci(bci);
420 }
421
 private:
  // Create a JVMS & map for the initial state of this method.
  SafePointNode* create_entry_map();

  // OSR helpers: pull locals/stack values out of the interpreter's OSR buffer
  // and type-check them against what ciTypeFlow expects.
  Node* fetch_interpreter_state(int index, const Type* type, Node* local_addrs, Node* local_addrs_base);
  Node* check_interpreter_type(Node* l, const Type* type, SafePointNode* &bad_type_exit);
  void load_interpreter_state(Node* osr_buf);

  // Functions for managing basic blocks:
  void init_blocks();
  void load_state_from(Block* b);
  // Snapshot the parser's current JVM state into the given block.
  void store_state_to(Block* b) { b->record_state(this); }

  // Parse all the basic blocks.
  void do_all_blocks();

  // Parse the current basic block.
  void do_one_block();

  // Raise an error if we get a bad ciTypeFlow CFG.
  void handle_missing_successor(int bci);

  // First actions (before BCI 0), e.g. method-entry bookkeeping.
  void do_method_entry();
  // Make sure phis are available at every merge point that may need them
  // (presumably called before merging paths — confirm against parse1.cpp).
  void ensure_phis_everywhere();

  // Merge the current mapping into the basic block starting at bci
  void merge( int target_bci);
  // Same as plain merge, except that it allocates a new path number.
  void merge_new_path( int target_bci);
  // Merge the current mapping into an exception handler.
  void merge_exception(int target_bci);
  // Helper: Merge the current mapping into the given basic block
  void merge_common(Block* target, int pnum);
  // Helper functions for merging individual cells.
  PhiNode *ensure_phi( int idx, bool nocreate = false);
  PhiNode *ensure_memory_phi(int idx, bool nocreate = false);
  // Helper to merge the current memory state into the given basic block
  void merge_memory_edges(MergeMemNode* n, int pnum, bool nophi);

  // Parse this bytecode, and alter the Parsers JVM->Node mapping
  void do_one_bytecode();

  // helper function to generate array store check; returns the possibly-casted store value
  Node* array_store_check();
  // Helper function to generate array load
  void array_load(BasicType etype);
  // Helper function to generate array store
  void array_store(BasicType etype);
  // Helper function to compute array addressing
  Node* array_addressing(BasicType type, int vals, const Type* *result2=NULL);

  // RTM (Restricted Transactional Memory) related deoptimization support —
  // NOTE(review): body not visible here; confirm semantics in parse1.cpp.
  void rtm_deopt();

  // Pass current map to exits
  void return_current(Node* value);

  // Register finalizers on return from Object.<init>
  void call_register_finalizer();

  // Insert a compiler safepoint into the graph
  void add_safepoint();
493 void maybe_add_safepoint(int target_bci) {

  // Special handling for the irem/lrem family of bytecodes
  // (presumably the min_jint % -1 corner case — confirm in parse2.cpp).
  void do_irem();

  // implementation of _get* and _put* bytecodes
  void do_getstatic() { do_field_access(true, false); }
  void do_getfield () { do_field_access(true, true); }
  void do_putstatic() { do_field_access(false, false); }
  void do_putfield () { do_field_access(false, true); }

  // common code for making initial checks and forming addresses
  void do_field_access(bool is_get, bool is_field);
  bool static_field_ok_in_clinit(ciField *field, ciMethod *method);

  // common code for actually performing the load or store
  void do_get_xxx(Node* obj, ciField* field, bool is_field);
  void do_put_xxx(Node* obj, ciField* field, bool is_field);

  // implementation of object creation bytecodes
  void emit_guard_for_new(ciInstanceKlass* klass);
  void do_new();
  // Valhalla inline/value-type creation bytecodes (defaultvalue/withfield) —
  // NOTE(review): bodies not visible here; confirm against the Valhalla parser.
  void do_defaultvalue();
  void do_withfield();
  void do_newarray(BasicType elemtype);
  // Overload without an element type — presumably handles anewarray; confirm.
  void do_newarray();
  void do_multianewarray();
  Node* expand_multianewarray(ciArrayKlass* array_klass, Node* *lengths, int ndimensions, int nargs);

  // implementation of jsr/ret
  void do_jsr();
  void do_ret();

  // Branch-frequency estimation from profile data; used to bias if/goto parsing.
  float dynamic_branch_prediction(float &cnt, BoolTest::mask btest, Node* test);
  float branch_prediction(float &cnt, BoolTest::mask btest, int target_bci, Node* test);
  bool seems_never_taken(float prob) const;
  bool path_is_suitable_for_uncommon_trap(float prob) const;
  bool seems_stable_comparison() const;

  // Parsing of conditional branches and the map adjustments that follow them.
  void do_ifnull(BoolTest::mask btest, Node* c);
  void do_if(BoolTest::mask btest, Node* c, bool new_path = false, Node** ctrl_taken = NULL);
  // acmp on possibly-value operands — presumably a substitutability test; confirm.
  void do_acmp(BoolTest::mask btest, Node* a, Node* b);
  int repush_if_args();
  void adjust_map_after_if(BoolTest::mask btest, Node* c, float prob, Block* path);
  void sharpen_type_after_if(BoolTest::mask btest,
                             Node* con, const Type* tcon,
                             Node* val, const Type* tval);
  void maybe_add_predicate_after_if(Block* path);
  IfNode* jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask, float prob, float cnt);
  Node* jump_if_join(Node* iffalse, Node* iftrue);
  void jump_if_true_fork(IfNode *ifNode, int dest_bci_if_true, int prof_table_index, bool unc);
  void jump_if_false_fork(IfNode *ifNode, int dest_bci_if_false, int prof_table_index, bool unc);
  void jump_if_always_fork(int dest_bci_if_true, int prof_table_index, bool unc);

  // Switch bytecodes: lowered to range checks, binary search, or jump tables.
  friend class SwitchRange;
  void do_tableswitch();
  void do_lookupswitch();
  void jump_switch_ranges(Node* a, SwitchRange* lo, SwitchRange* hi, int depth = 0);
  bool create_jump_tables(Node* a, SwitchRange* lo, SwitchRange* hi);
  void linear_search_switch_ranges(Node* key_val, SwitchRange*& lo, SwitchRange*& hi);

  // Method-age bookkeeping (presumably for tiered/aging heuristics — confirm).
  void decrement_age();
  // helper functions for methodData style profiling
  void test_counter_against_threshold(Node* cnt, int limit);
|