< prev index next >

src/share/vm/opto/output.cpp

Print this page




1863   _bundle_instr_count = 0;
1864   _bundle_use.reset();
1865 
1866   memcpy(_bundle_use_elements,
1867     Pipeline_Use::elaborated_elements,
1868     sizeof(Pipeline_Use::elaborated_elements));
1869 }
1870 
1871 // Perform instruction scheduling and bundling over the sequence of
1872 // instructions in backwards order.
1873   void Compile::ScheduleAndBundle() {
1874 
1875   // Don't optimize this if it isn't a method (e.g. a stub or snippet has no _method)
1876   if (!_method)
1877     return;
1878 
1879   // Don't optimize this if scheduling is disabled
1880   if (!do_scheduling())
1881     return;
1882 
1883   // Scheduling code works only with pairs (8 bytes) maximum;
1884   if (max_vector_size() > 8)   // wider vector operands would break the bundling assumptions, so bail out
1885     return;
1886 
1887   TracePhase tp("isched", &timers[_t_instrSched]);   // attribute time spent here to the instruction-scheduling phase timer
1888 
1889   // Create a data structure for all the scheduling information;
1890   Scheduling scheduling(Thread::current()->resource_area(), *this);   // resource-area allocated: reclaimed automatically when the enclosing ResourceMark unwinds
1891 
1892   // Walk backwards over each basic block, computing the needed alignment,
1893   // and schedule/bundle the instructions within each block
1894   scheduling.DoScheduling();
1895 }
1896 
1897 // Compute the latency of all the instructions.  This is fairly simple,
1898 // because we already have a legal ordering.  Walk over the instructions
1899 // from first to last, and compute the latency of the instruction based
1900 // on the latency of the preceding instruction(s).
1901 void Scheduling::ComputeLocalLatenciesForward(const Block *bb) {
1902 #ifndef PRODUCT
1903   if (_cfg->C->trace_opto_output())
1904     tty->print("# -> ComputeLocalLatenciesForward\n");




1863   _bundle_instr_count = 0;
1864   _bundle_use.reset();
1865 
1866   memcpy(_bundle_use_elements,
1867     Pipeline_Use::elaborated_elements,
1868     sizeof(Pipeline_Use::elaborated_elements));
1869 }
1870 
1871 // Perform instruction scheduling and bundling over the sequence of
1872 // instructions in backwards order.
1873   void Compile::ScheduleAndBundle() {
1874 
1875   // Don't optimize this if it isn't a method (e.g. a stub or snippet has no _method)
1876   if (!_method)
1877     return;
1878 
1879   // Don't optimize this if scheduling is disabled
1880   if (!do_scheduling())
1881     return;
1882 
1883   // Scheduling code works only with pairs (16 bytes) maximum;
1884   if (max_vector_size() > 16)   // wider vector operands would break the bundling assumptions, so bail out
1885     return;
1886 
1887   TracePhase tp("isched", &timers[_t_instrSched]);   // attribute time spent here to the instruction-scheduling phase timer
1888 
1889   // Create a data structure for all the scheduling information;
1890   Scheduling scheduling(Thread::current()->resource_area(), *this);   // resource-area allocated: reclaimed automatically when the enclosing ResourceMark unwinds
1891 
1892   // Walk backwards over each basic block, computing the needed alignment,
1893   // and schedule/bundle the instructions within each block
1894   scheduling.DoScheduling();
1895 }
1896 
1897 // Compute the latency of all the instructions.  This is fairly simple,
1898 // because we already have a legal ordering.  Walk over the instructions
1899 // from first to last, and compute the latency of the instruction based
1900 // on the latency of the preceding instruction(s).
1901 void Scheduling::ComputeLocalLatenciesForward(const Block *bb) {
1902 #ifndef PRODUCT
1903   if (_cfg->C->trace_opto_output())
1904     tty->print("# -> ComputeLocalLatenciesForward\n");


< prev index next >