src/share/vm/opto/output.cpp



old/src/share/vm/opto/output.cpp
1870     Pipeline_Use::elaborated_elements,
1871     sizeof(Pipeline_Use::elaborated_elements));
1872 }
1873 
1874 // Perform instruction scheduling and bundling over the sequence of
1875 // instructions in backwards order.
1876 void Compile::ScheduleAndBundle() {
1877 
1878   // Don't optimize this if it isn't a method
1879   if (!_method)
1880     return;
1881 
1882   // Don't optimize this if scheduling is disabled
1883   if (!do_scheduling())
1884     return;
1885 
1886   // Scheduling code works only with pairs (8 bytes) maximum.
1887   if (max_vector_size() > 8)
1888     return;
1889 
1890   NOT_PRODUCT( TracePhase t2("isched", &_t_instrSched, TimeCompiler); )
1891 
1892   // Create a data structure for all the scheduling information
1893   Scheduling scheduling(Thread::current()->resource_area(), *this);
1894 
1895   // Walk backwards over each basic block, computing the needed alignment
1896   // Walk over all the basic blocks
1897   scheduling.DoScheduling();
1898 }
1899 
1900 // Compute the latency of all the instructions.  This is fairly simple,
1901 // because we already have a legal ordering.  Walk over the instructions
1902 // from first to last, and compute the latency of the instruction based
1903 // on the latency of the preceding instruction(s).
1904 void Scheduling::ComputeLocalLatenciesForward(const Block *bb) {
1905 #ifndef PRODUCT
1906   if (_cfg->C->trace_opto_output())
1907     tty->print("# -> ComputeLocalLatenciesForward\n");
1908 #endif
1909 
1910   // Walk over all the schedulable instructions

new/src/share/vm/opto/output.cpp

1870     Pipeline_Use::elaborated_elements,
1871     sizeof(Pipeline_Use::elaborated_elements));
1872 }
1873 
1874 // Perform instruction scheduling and bundling over the sequence of
1875 // instructions in backwards order.
1876 void Compile::ScheduleAndBundle() {
1877 
1878   // Don't optimize this if it isn't a method
1879   if (!_method)
1880     return;
1881 
1882   // Don't optimize this if scheduling is disabled
1883   if (!do_scheduling())
1884     return;
1885 
1886   // Scheduling code works only with pairs (8 bytes) maximum.
1887   if (max_vector_size() > 8)
1888     return;
1889 
1890   TracePhase t2("isched", &timers[_t_instrSched]);
1891 
1892   // Create a data structure for all the scheduling information
1893   Scheduling scheduling(Thread::current()->resource_area(), *this);
1894 
1895   // Walk backwards over each basic block, computing the needed alignment
1896   // Walk over all the basic blocks
1897   scheduling.DoScheduling();
1898 }
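
The only change in this hunk is on line 1890: the new code drops the NOT_PRODUCT wrapper and the TimeCompiler argument, and it now accumulates into the shared timers[] array (indexed by _t_instrSched) instead of taking the address of _t_instrSched directly. In both forms, TracePhase is a scope-bound (RAII) timer: constructing it starts the clock, and its destructor, which runs when ScheduleAndBundle returns, stops the clock and folds the elapsed time into the accumulator. Below is a minimal sketch of that pattern, using assumed names (ScopedPhaseTimer, AccumulatingTimer, phase_timers), not HotSpot's real TracePhase implementation:

#include <chrono>

// Hypothetical stand-ins for an elapsedTimer and the timers[] array seen on
// line 1890; this sketches only the RAII pattern, not the real TracePhase.
struct AccumulatingTimer {
  std::chrono::nanoseconds total{0};
};

class ScopedPhaseTimer {
  AccumulatingTimer* _slot;
  std::chrono::steady_clock::time_point _start;
 public:
  ScopedPhaseTimer(const char* /*name*/, AccumulatingTimer* slot)
    : _slot(slot), _start(std::chrono::steady_clock::now()) {}
  ~ScopedPhaseTimer() {
    // Stop on scope exit and fold the elapsed time into the shared slot.
    _slot->total += std::chrono::duration_cast<std::chrono::nanoseconds>(
        std::chrono::steady_clock::now() - _start);
  }
};

static AccumulatingTimer phase_timers[8];   // analogous to a timers[] array
enum { t_instrSched_demo = 0 };             // analogous to the _t_instrSched index

void schedule_and_bundle_demo() {
  ScopedPhaseTimer t2("isched", &phase_timers[t_instrSched_demo]);
  // ... phase work; the destructor records the time when this scope exits ...
}
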
1899 
1900 // Compute the latency of all the instructions.  This is fairly simple,
1901 // because we already have a legal ordering.  Walk over the instructions
1902 // from first to last, and compute the latency of the instruction based
1903 // on the latency of the preceding instruction(s).
1904 void Scheduling::ComputeLocalLatenciesForward(const Block *bb) {
1905 #ifndef PRODUCT
1906   if (_cfg->C->trace_opto_output())
1907     tty->print("# -> ComputeLocalLatenciesForward\n");
1908 #endif
1909 
1910   // Walk over all the schedulable instructions
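
The comment block at lines 1900-1903 describes a simple forward recurrence: because the block already has a legal ordering, one pass from first to last instruction suffices, and each instruction can start no earlier than the latest of its inputs' start cycles plus those inputs' own latencies. A minimal sketch of that recurrence, assuming simplified stand-in types (SimpleNode, compute_local_latencies_forward) rather than the real Node, Block, and Scheduling structures:

#include <algorithm>
#include <vector>

// Simplified stand-in for a scheduled instruction; not C2's Node class.
struct SimpleNode {
  std::vector<const SimpleNode*> inputs;  // the defs this instruction uses
  unsigned def_latency = 1;               // cycles until this node's result is ready
  unsigned start_latency = 0;             // earliest issue cycle, computed below
};

// One forward pass over a block in its existing (legal) order: a node can
// issue no earlier than every input's start cycle plus that input's latency.
void compute_local_latencies_forward(std::vector<SimpleNode*>& block) {
  for (SimpleNode* n : block) {
    unsigned latest = 0;
    for (const SimpleNode* in : n->inputs) {
      latest = std::max(latest, in->start_latency + in->def_latency);
    }
    n->start_latency = latest;
  }
}

The real pass derives per-edge latencies from the generated pipeline description and records the result per node inside the Scheduling object; the sketch keeps only the max-over-inputs recurrence that the comment describes.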