1 /*
2 * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
1089 uint *inct_starts = NEW_RESOURCE_ARRAY(uint, nblocks+1);
1090
1091 // Count and start of calls
1092 uint *call_returns = NEW_RESOURCE_ARRAY(uint, nblocks+1);
1093
1094 uint return_offset = 0;
1095 int nop_size = (new MachNopNode())->size(_regalloc);
1096
1097 int previous_offset = 0;
1098 int current_offset = 0;
1099 int last_call_offset = -1;
1100 int last_avoid_back_to_back_offset = -1;
1101 #ifdef ASSERT
1102 uint* jmp_target = NEW_RESOURCE_ARRAY(uint,nblocks);
1103 uint* jmp_offset = NEW_RESOURCE_ARRAY(uint,nblocks);
1104 uint* jmp_size = NEW_RESOURCE_ARRAY(uint,nblocks);
1105 uint* jmp_rule = NEW_RESOURCE_ARRAY(uint,nblocks);
1106 #endif
1107
1108   // Create an array of node offsets, one slot per node, if printing is enabled
1109 #ifndef PRODUCT
1110 int *node_offsets = NULL;
1111 uint node_offset_limit = unique();
1112
1113 if (print_assembly())
1114 node_offsets = NEW_RESOURCE_ARRAY(int, node_offset_limit);
1115 #endif
1116
1117 NonSafepointEmitter non_safepoints(this); // emit non-safepoints lazily
1118
1119 // Emit the constant table.
1120 if (has_mach_constant_base_node()) {
1121 constant_table().emit(*cb);
1122 }
1123
1124 // Create an array of labels, one for each basic block
1125 Label *blk_labels = NEW_RESOURCE_ARRAY(Label, nblocks+1);
1126 for (uint i=0; i <= nblocks; i++) {
1127 blk_labels[i].init();
1128 }
1129
1130 // ------------------
1131 // Now fill in the code buffer
1132 Node *delay_slot = NULL;
1133
1134 for (uint i = 0; i < nblocks; i++) {
1364 // it's followed by a flag-kill and a null-check. Happens on
1365 // Intel all the time, with add-to-memory kind of opcodes.
1366 previous_offset = current_offset;
1367 }
1368
1369 // Not an else-if!
1370 // If this is a trap based cmp then add its offset to the list.
1371 if (mach->is_TrapBasedCheckNode()) {
1372 inct_starts[inct_cnt++] = current_offset;
1373 }
1374 }
1375
1376 // Verify that there is sufficient space remaining
1377 cb->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
1378 if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
1379 C->record_failure("CodeCache is full");
1380 return;
1381 }
1382
1383 // Save the offset for the listing
1384 #ifndef PRODUCT
1385 if (node_offsets && n->_idx < node_offset_limit)
1386 node_offsets[n->_idx] = cb->insts_size();
1387 #endif
1388
1389 // "Normal" instruction case
1390 DEBUG_ONLY( uint instr_offset = cb->insts_size(); )
1391 n->emit(*cb, _regalloc);
1392 current_offset = cb->insts_size();
1393
1394 // Above we only verified that there is enough space in the instruction section.
1395 // However, the instruction may emit stubs that cause code buffer expansion.
1396 // Bail out here if expansion failed due to a lack of code cache space.
1397 if (failing()) {
1398 return;
1399 }
1400
1401 #ifdef ASSERT
1402 if (n->size(_regalloc) < (current_offset-instr_offset)) {
1403 n->dump();
1404 assert(false, "wrong size of mach node");
1405 }
1406 #endif
1413 // in the case that return address is not actually at current_offset.
1414 // This is a small price to pay.
1415
1416 if (is_mcall) {
1417 last_call_offset = current_offset;
1418 }
1419
1420 if (n->is_Mach() && n->as_Mach()->avoid_back_to_back(MachNode::AVOID_AFTER)) {
1421 // Avoid back to back some instructions.
1422 last_avoid_back_to_back_offset = current_offset;
1423 }
1424
1425 // See if this instruction has a delay slot
1426 if (valid_bundle_info(n) && node_bundling(n)->use_unconditional_delay()) {
1427 guarantee(delay_slot != NULL, "expecting delay slot node");
1428
1429 // Back up 1 instruction
1430 cb->set_insts_end(cb->insts_end() - Pipeline::instr_unit_size());
1431
1432 // Save the offset for the listing
1433 #ifndef PRODUCT
1434 if (node_offsets && delay_slot->_idx < node_offset_limit)
1435 node_offsets[delay_slot->_idx] = cb->insts_size();
1436 #endif
1437
1438 // Support a SafePoint in the delay slot
1439 if (delay_slot->is_MachSafePoint()) {
1440 MachNode *mach = delay_slot->as_Mach();
1441 // !!!!! Stubs only need an oopmap right now, so bail out
1442 if (!mach->is_MachCall() && mach->as_MachSafePoint()->jvms()->method() == NULL) {
1443 // Write the oopmap directly to the code blob??!!
1444 delay_slot = NULL;
1445 continue;
1446 }
1447
1448 int adjusted_offset = current_offset - Pipeline::instr_unit_size();
1449 non_safepoints.observe_safepoint(mach->as_MachSafePoint()->jvms(),
1450 adjusted_offset);
1451 // Generate an OopMap entry
1452 Process_OopMap_Node(mach, adjusted_offset);
1453 }
1454
1455 // Insert the delay slot instruction
1524 if (failing()) {
1525 return; // CodeBuffer::expand failed
1526 }
1527 // Emit the deopt handler code.
1528 _code_offsets.set_value(CodeOffsets::Deopt, HandlerImpl::emit_deopt_handler(*cb));
1529
1530 // Emit the MethodHandle deopt handler code (if required).
1531 if (has_method_handle_invokes() && !failing()) {
1532 // We can use the same code as for the normal deopt handler, we
1533 // just need a different entry point address.
1534 _code_offsets.set_value(CodeOffsets::DeoptMH, HandlerImpl::emit_deopt_handler(*cb));
1535 }
1536 }
1537
1538 // One last check for failed CodeBuffer::expand:
1539 if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
1540 C->record_failure("CodeCache is full");
1541 return;
1542 }
1543
1544 #ifndef PRODUCT
1545 // Dump the assembly code, including basic-block numbers
1546 if (print_assembly()) {
1547 ttyLocker ttyl; // keep the following output all in one block
1548 if (!VMThread::should_terminate()) { // test this under the tty lock
1549 // This output goes directly to the tty, not the compiler log.
1550 // To enable tools to match it up with the compilation activity,
1551 // be sure to tag this tty output with the compile ID.
1552 if (xtty != NULL) {
1553 xtty->head("opto_assembly compile_id='%d'%s", compile_id(),
1554 is_osr_compilation() ? " compile_kind='osr'" :
1555 "");
1556 }
1557 if (method() != NULL) {
1558 method()->print_metadata();
1559 } else if (stub_name() != NULL) {
1560 tty->print_cr("Generating RuntimeStub - %s", stub_name());
1561 }
1562 dump_asm(node_offsets, node_offset_limit);
1563 if (xtty != NULL) {
1564         // print_metadata and dump_asm above may safepoint, which makes us lose the ttylock.
1565         // Retake the lock to make sure the end tag is coherent, and that xmlStream->pop_tag is
1566         // done in a thread-safe manner
1567 ttyLocker ttyl2;
1568 xtty->tail("opto_assembly");
1569 }
1570 }
1571 }
1572 #endif
1573
1574 }
1575
1576 void Compile::FillExceptionTables(uint cnt, uint *call_returns, uint *inct_starts, Label *blk_labels) {
1577 _inc_table.set_size(cnt);
1578
1579 uint inct_cnt = 0;
1580 for (uint i = 0; i < _cfg->number_of_blocks(); i++) {
1581 Block* block = _cfg->get_block(i);
1582 Node *n = NULL;
1583 int j;
1584
1585 // Find the branch; ignore trailing NOPs.
1586 for (j = block->number_of_nodes() - 1; j >= 0; j--) {
1587 n = block->get_node(j);
1588 if (!n->is_Mach() || n->as_Mach()->ideal_Opcode() != Op_Con) {
1589 break;
1590 }
1591 }
1592
1593 // If we didn't find anything, continue
|
1 /*
2 * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
1089 uint *inct_starts = NEW_RESOURCE_ARRAY(uint, nblocks+1);
1090
1091 // Count and start of calls
1092 uint *call_returns = NEW_RESOURCE_ARRAY(uint, nblocks+1);
1093
1094 uint return_offset = 0;
1095 int nop_size = (new MachNopNode())->size(_regalloc);
1096
1097 int previous_offset = 0;
1098 int current_offset = 0;
1099 int last_call_offset = -1;
1100 int last_avoid_back_to_back_offset = -1;
1101 #ifdef ASSERT
1102 uint* jmp_target = NEW_RESOURCE_ARRAY(uint,nblocks);
1103 uint* jmp_offset = NEW_RESOURCE_ARRAY(uint,nblocks);
1104 uint* jmp_size = NEW_RESOURCE_ARRAY(uint,nblocks);
1105 uint* jmp_rule = NEW_RESOURCE_ARRAY(uint,nblocks);
1106 #endif
1107
1108   // Create an array of node offsets, one slot per node, if printing is enabled
1109 #if defined(SUPPORT_OPTO_ASSEMBLY)
1110 int *node_offsets = NULL;
1111 uint node_offset_limit = unique();
1112
1113 if (print_assembly()) {
1114 node_offsets = NEW_RESOURCE_ARRAY(int, node_offset_limit);
1115 }
1116 if (node_offsets != NULL) {
1117 // We need to initialize. Unused array elements may contain garbage and mess up PrintOptoAssembly.
1118 memset(node_offsets, 0, node_offset_limit*sizeof(int));
1119 }
1120 #endif
1121
1122 NonSafepointEmitter non_safepoints(this); // emit non-safepoints lazily
1123
1124 // Emit the constant table.
1125 if (has_mach_constant_base_node()) {
1126 constant_table().emit(*cb);
1127 }
1128
1129 // Create an array of labels, one for each basic block
1130 Label *blk_labels = NEW_RESOURCE_ARRAY(Label, nblocks+1);
1131 for (uint i=0; i <= nblocks; i++) {
1132 blk_labels[i].init();
1133 }
1134
1135 // ------------------
1136 // Now fill in the code buffer
1137 Node *delay_slot = NULL;
1138
1139 for (uint i = 0; i < nblocks; i++) {
1369 // it's followed by a flag-kill and a null-check. Happens on
1370 // Intel all the time, with add-to-memory kind of opcodes.
1371 previous_offset = current_offset;
1372 }
1373
1374 // Not an else-if!
1375 // If this is a trap based cmp then add its offset to the list.
1376 if (mach->is_TrapBasedCheckNode()) {
1377 inct_starts[inct_cnt++] = current_offset;
1378 }
1379 }
1380
1381 // Verify that there is sufficient space remaining
1382 cb->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
1383 if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
1384 C->record_failure("CodeCache is full");
1385 return;
1386 }
1387
1388 // Save the offset for the listing
1389 #if defined(SUPPORT_OPTO_ASSEMBLY)
1390 if ((node_offsets != NULL) && (n->_idx < node_offset_limit)) {
1391 node_offsets[n->_idx] = cb->insts_size();
1392 }
1393 #endif
1394
1395 // "Normal" instruction case
1396 DEBUG_ONLY( uint instr_offset = cb->insts_size(); )
1397 n->emit(*cb, _regalloc);
1398 current_offset = cb->insts_size();
1399
1400 // Above we only verified that there is enough space in the instruction section.
1401 // However, the instruction may emit stubs that cause code buffer expansion.
1402 // Bail out here if expansion failed due to a lack of code cache space.
1403 if (failing()) {
1404 return;
1405 }
1406
1407 #ifdef ASSERT
1408 if (n->size(_regalloc) < (current_offset-instr_offset)) {
1409 n->dump();
1410 assert(false, "wrong size of mach node");
1411 }
1412 #endif
1419 // in the case that return address is not actually at current_offset.
1420 // This is a small price to pay.
1421
1422 if (is_mcall) {
1423 last_call_offset = current_offset;
1424 }
1425
1426 if (n->is_Mach() && n->as_Mach()->avoid_back_to_back(MachNode::AVOID_AFTER)) {
1427 // Avoid back to back some instructions.
1428 last_avoid_back_to_back_offset = current_offset;
1429 }
1430
1431 // See if this instruction has a delay slot
1432 if (valid_bundle_info(n) && node_bundling(n)->use_unconditional_delay()) {
1433 guarantee(delay_slot != NULL, "expecting delay slot node");
1434
1435 // Back up 1 instruction
1436 cb->set_insts_end(cb->insts_end() - Pipeline::instr_unit_size());
1437
1438 // Save the offset for the listing
1439 #if defined(SUPPORT_OPTO_ASSEMBLY)
1440 if ((node_offsets != NULL) && (delay_slot->_idx < node_offset_limit)) {
1441 node_offsets[delay_slot->_idx] = cb->insts_size();
1442 }
1443 #endif
1444
1445 // Support a SafePoint in the delay slot
1446 if (delay_slot->is_MachSafePoint()) {
1447 MachNode *mach = delay_slot->as_Mach();
1448 // !!!!! Stubs only need an oopmap right now, so bail out
1449 if (!mach->is_MachCall() && mach->as_MachSafePoint()->jvms()->method() == NULL) {
1450 // Write the oopmap directly to the code blob??!!
1451 delay_slot = NULL;
1452 continue;
1453 }
1454
1455 int adjusted_offset = current_offset - Pipeline::instr_unit_size();
1456 non_safepoints.observe_safepoint(mach->as_MachSafePoint()->jvms(),
1457 adjusted_offset);
1458 // Generate an OopMap entry
1459 Process_OopMap_Node(mach, adjusted_offset);
1460 }
1461
1462 // Insert the delay slot instruction
1531 if (failing()) {
1532 return; // CodeBuffer::expand failed
1533 }
1534 // Emit the deopt handler code.
1535 _code_offsets.set_value(CodeOffsets::Deopt, HandlerImpl::emit_deopt_handler(*cb));
1536
1537 // Emit the MethodHandle deopt handler code (if required).
1538 if (has_method_handle_invokes() && !failing()) {
1539 // We can use the same code as for the normal deopt handler, we
1540 // just need a different entry point address.
1541 _code_offsets.set_value(CodeOffsets::DeoptMH, HandlerImpl::emit_deopt_handler(*cb));
1542 }
1543 }
1544
1545 // One last check for failed CodeBuffer::expand:
1546 if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
1547 C->record_failure("CodeCache is full");
1548 return;
1549 }
1550
1551 #if defined(SUPPORT_ABSTRACT_ASSEMBLY) || defined(SUPPORT_ASSEMBLY) || defined(SUPPORT_OPTO_ASSEMBLY)
1552 if (print_assembly()) {
1553 tty->cr();
1554 tty->print_cr("============================= C2-compiled nmethod ==============================");
1555 }
1556 #endif
1557
1558 #if defined(SUPPORT_OPTO_ASSEMBLY)
1559 // Dump the assembly code, including basic-block numbers
1560 if (print_assembly()) {
1561 ttyLocker ttyl; // keep the following output all in one block
1562 if (!VMThread::should_terminate()) { // test this under the tty lock
1563 // This output goes directly to the tty, not the compiler log.
1564 // To enable tools to match it up with the compilation activity,
1565 // be sure to tag this tty output with the compile ID.
1566 if (xtty != NULL) {
1567 xtty->head("opto_assembly compile_id='%d'%s", compile_id(),
1568 is_osr_compilation() ? " compile_kind='osr'" :
1569 "");
1570 }
1571 if (method() != NULL) {
1572 tty->print_cr("----------------------------------- MetaData -----------------------------------");
1573 method()->print_metadata();
1574 } else if (stub_name() != NULL) {
1575 tty->print_cr("----------------------------- RuntimeStub %s -------------------------------", stub_name());
1576 }
1577 tty->cr();
1578 tty->print_cr("--------------------------------- OptoAssembly ---------------------------------");
1579 dump_asm(node_offsets, node_offset_limit);
1580 tty->print_cr("--------------------------------------------------------------------------------");
1581 if (xtty != NULL) {
1582           // print_metadata and dump_asm above may safepoint, which makes us lose the ttylock.
1583           // Retake the lock to make sure the end tag is coherent, and that xmlStream->pop_tag is
1584           // done in a thread-safe manner
1585 ttyLocker ttyl2;
1586 xtty->tail("opto_assembly");
1587 }
1588 }
1589 }
1590 #endif
1591 }
1592
1593 void Compile::FillExceptionTables(uint cnt, uint *call_returns, uint *inct_starts, Label *blk_labels) {
1594 _inc_table.set_size(cnt);
1595
1596 uint inct_cnt = 0;
1597 for (uint i = 0; i < _cfg->number_of_blocks(); i++) {
1598 Block* block = _cfg->get_block(i);
1599 Node *n = NULL;
1600 int j;
1601
1602 // Find the branch; ignore trailing NOPs.
1603 for (j = block->number_of_nodes() - 1; j >= 0; j--) {
1604 n = block->get_node(j);
1605 if (!n->is_Mach() || n->as_Mach()->ideal_Opcode() != Op_Con) {
1606 break;
1607 }
1608 }
1609
1610 // If we didn't find anything, continue
|