1378 }
1379 }
1380
1381 if (needs_check) {
1382 // Perform the registration of finalizable objects.
1383 ValueStack* state_before = copy_state_for_exception();
1384 load_local(objectType, 0);
1385 append_split(new Intrinsic(voidType, vmIntrinsics::_Object_init,
1386 state()->pop_arguments(1),
1387 true, state_before, true));
1388 }
1389 }
1390
1391
// Emit IR for a return from the method currently being parsed.
// x is the value being returned, or NULL for a void return.
// Two cases: if this method is being inlined (continuation() != NULL),
// the return becomes a Goto into the caller's continuation block;
// otherwise a real Return instruction terminates the method.
1392 void GraphBuilder::method_return(Value x) {
// Returning from Object.<init>: if enabled, make sure finalizable
// receivers get registered with the runtime (via call_register_finalizer)
// before the constructor completes.
1393 if (RegisterFinalizersAtInit &&
1394 method()->intrinsic_id() == vmIntrinsics::_Object_init) {
1395 call_register_finalizer();
1396 }
1397
1398 // Check to see whether we are inlining. If so, Return
1399 // instructions become Gotos to the continuation point.
1400 if (continuation() != NULL) {
1401 assert(!method()->is_synchronized() || InlineSynchronizedMethods, "can not inline synchronized methods yet");
1402
1403 if (compilation()->env()->dtrace_method_probes()) {
1404 // Report exit from inline methods
1405 Values* args = new Values(1);
1406 args->push(append(new Constant(new ObjectConstant(method()))));
1407 append(new RuntimeCall(voidType, "dtrace_method_exit", CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), args));
1408 }
1409
1410 // If the inlined method is synchronized, the monitor must be
1411 // released before we jump to the continuation block.
1412 if (method()->is_synchronized()) {
1413 assert(state()->locks_size() == 1, "receiver must be locked here");
1414 monitorexit(state()->lock_at(0), SynchronizationEntryBCI);
1415 }
1416
1417 // State at end of inlined method is the state of the caller
1418 // without the method parameters on stack, including the
1419 // return value, if any, of the inlined method on operand stack.
1420 set_state(state()->caller_state()->copy_for_parsing());
1421 if (x != NULL) {
// Push the return value onto the caller's operand stack, typed.
1422 state()->push(x->type(), x);
1423 }
1424 Goto* goto_callee = new Goto(continuation(), false);
1425
1426 // See whether this is the first return; if so, store off some
1427 // of the state for later examination
1428 if (num_returns() == 0) {
1429 set_inline_cleanup_info();
1430 }
1431
1432 // The current bci() is in the wrong scope, so use the bci() of
1433 // the continuation point.
1434 append_with_bci(goto_callee, scope_data()->continuation()->bci());
1435 incr_num_returns();
1436
1437 return;
1438 }
1439
// Not inlining: emit a real Return. The operand stack is dead past
// this point, so clear it before unlocking and returning.
1440 state()->truncate_stack(0);
1441 if (method()->is_synchronized()) {
1442 // perform the unlocking before exiting the method
1443 Value receiver;
1444 if (!method()->is_static()) {
// Instance methods lock the receiver: local 0 of the initial state.
1445 receiver = _initial_state->local_at(0);
1446 } else {
// Static synchronized methods lock the holder class mirror instead.
1447 receiver = append(new Constant(new ClassConstant(method()->holder())));
1448 }
1449 append_split(new MonitorExit(receiver, state()->unlock()));
1450 }
1451
1452 append(new Return(x));
1453 }
1454
1455
1456 void GraphBuilder::access_field(Bytecodes::Code code) {
1457 bool will_link;
1458 ciField* field = stream()->get_field(will_link);
1459 ciInstanceKlass* holder = field->holder();
1460 BasicType field_type = field->type()->basic_type();
1461 ValueType* type = as_ValueType(field_type);
1462 // call will_link again to determine if the field is valid.
1463 const bool needs_patching = !holder->is_loaded() ||
1464 !field->will_link(method()->holder(), code) ||
1465 PatchALot;
1466
1467 ValueStack* state_before = NULL;
1468 if (!holder->is_initialized() || needs_patching) {
1469 // save state before instruction for debug info when
1470 // deoptimization happens during patching
1471 state_before = copy_state_before();
1472 }
1473
1474 Value obj = NULL;
1475 if (code == Bytecodes::_getstatic || code == Bytecodes::_putstatic) {
1476 if (state_before != NULL) {
1477 // build a patching constant
1478 obj = new Constant(new InstanceConstant(holder->java_mirror()), state_before);
1479 } else {
1480 obj = new Constant(new InstanceConstant(holder->java_mirror()));
1481 }
1482 }
1483
1484
1485 const int offset = !needs_patching ? field->offset() : -1;
1486 switch (code) {
1487 case Bytecodes::_getstatic: {
1488 // check for compile-time constants, i.e., initialized static final fields
1489 Instruction* constant = NULL;
1490 if (field->is_constant() && !PatchALot) {
1491 ciConstant field_val = field->constant_value();
1492 BasicType field_type = field_val.basic_type();
1493 switch (field_type) {
1494 case T_ARRAY:
1495 case T_OBJECT:
1496 if (field_val.as_object()->should_be_constant()) {
1497 constant = new Constant(as_ValueType(field_val));
1498 }
1499 break;
1500
1501 default:
1502 constant = new Constant(as_ValueType(field_val));
1503 }
|
1378 }
1379 }
1380
1381 if (needs_check) {
1382 // Perform the registration of finalizable objects.
1383 ValueStack* state_before = copy_state_for_exception();
1384 load_local(objectType, 0);
1385 append_split(new Intrinsic(voidType, vmIntrinsics::_Object_init,
1386 state()->pop_arguments(1),
1387 true, state_before, true));
1388 }
1389 }
1390
1391
// Emit IR for a return from the method currently being parsed.
// x is the value being returned, or NULL for a void return.
// Two cases: if this method is being inlined (continuation() != NULL),
// the return becomes a Goto into the caller's continuation block;
// otherwise a real Return instruction terminates the method.
1392 void GraphBuilder::method_return(Value x) {
// Returning from Object.<init>: if enabled, make sure finalizable
// receivers get registered with the runtime (via call_register_finalizer)
// before the constructor completes.
1393 if (RegisterFinalizersAtInit &&
1394 method()->intrinsic_id() == vmIntrinsics::_Object_init) {
1395 call_register_finalizer();
1396 }
1397
// A constructor that wrote at least one final field (scope()->wrote_final())
// needs a store-store barrier on every return path, so the final-field
// stores are ordered before any subsequent publication of the object
// reference — presumably this implements JMM final-field semantics
// (JSR-133); see the MemBar appends on both exit paths below.
1398 bool need_mem_bar = false;
1399 if (method()->name() == ciSymbol::object_initializer_name() &&
1400 scope()->wrote_final()) {
1401 need_mem_bar = true;
1402 }
1403
1404 // Check to see whether we are inlining. If so, Return
1405 // instructions become Gotos to the continuation point.
1406 if (continuation() != NULL) {
1407 assert(!method()->is_synchronized() || InlineSynchronizedMethods, "can not inline synchronized methods yet");
1408
1409 if (compilation()->env()->dtrace_method_probes()) {
1410 // Report exit from inline methods
1411 Values* args = new Values(1);
1412 args->push(append(new Constant(new ObjectConstant(method()))));
1413 append(new RuntimeCall(voidType, "dtrace_method_exit", CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), args));
1414 }
1415
1416 // If the inlined method is synchronized, the monitor must be
1417 // released before we jump to the continuation block.
1418 if (method()->is_synchronized()) {
1419 assert(state()->locks_size() == 1, "receiver must be locked here");
1420 monitorexit(state()->lock_at(0), SynchronizationEntryBCI);
1421 }
1422
// Barrier for the inlined-constructor exit path.
1423 if (need_mem_bar) {
1424 append(new MemBar(lir_membar_storestore));
1425 }
1426
1427 // State at end of inlined method is the state of the caller
1428 // without the method parameters on stack, including the
1429 // return value, if any, of the inlined method on operand stack.
1430 set_state(state()->caller_state()->copy_for_parsing());
1431 if (x != NULL) {
// Push the return value onto the caller's operand stack, typed.
1432 state()->push(x->type(), x);
1433 }
1434 Goto* goto_callee = new Goto(continuation(), false);
1435
1436 // See whether this is the first return; if so, store off some
1437 // of the state for later examination
1438 if (num_returns() == 0) {
1439 set_inline_cleanup_info();
1440 }
1441
1442 // The current bci() is in the wrong scope, so use the bci() of
1443 // the continuation point.
1444 append_with_bci(goto_callee, scope_data()->continuation()->bci());
1445 incr_num_returns();
1446 return;
1447 }
1448
// Not inlining: emit a real Return. The operand stack is dead past
// this point, so clear it before unlocking and returning.
1449 state()->truncate_stack(0);
1450 if (method()->is_synchronized()) {
1451 // perform the unlocking before exiting the method
1452 Value receiver;
1453 if (!method()->is_static()) {
// Instance methods lock the receiver: local 0 of the initial state.
1454 receiver = _initial_state->local_at(0);
1455 } else {
// Static synchronized methods lock the holder class mirror instead.
1456 receiver = append(new Constant(new ClassConstant(method()->holder())));
1457 }
1458 append_split(new MonitorExit(receiver, state()->unlock()));
1459 }
1460
// Barrier for the non-inlined constructor exit path.
1461 if (need_mem_bar) {
1462 append(new MemBar(lir_membar_storestore));
1463 }
1464
1465 append(new Return(x));
1466 }
1467
1468
1469 void GraphBuilder::access_field(Bytecodes::Code code) {
1470 bool will_link;
1471 ciField* field = stream()->get_field(will_link);
1472 ciInstanceKlass* holder = field->holder();
1473 BasicType field_type = field->type()->basic_type();
1474 ValueType* type = as_ValueType(field_type);
1475 // call will_link again to determine if the field is valid.
1476 const bool needs_patching = !holder->is_loaded() ||
1477 !field->will_link(method()->holder(), code) ||
1478 PatchALot;
1479
1480 ValueStack* state_before = NULL;
1481 if (!holder->is_initialized() || needs_patching) {
1482 // save state before instruction for debug info when
1483 // deoptimization happens during patching
1484 state_before = copy_state_before();
1485 }
1486
1487 Value obj = NULL;
1488 if (code == Bytecodes::_getstatic || code == Bytecodes::_putstatic) {
1489 if (state_before != NULL) {
1490 // build a patching constant
1491 obj = new Constant(new InstanceConstant(holder->java_mirror()), state_before);
1492 } else {
1493 obj = new Constant(new InstanceConstant(holder->java_mirror()));
1494 }
1495 }
1496
1497 if (field->is_final() && (code == Bytecodes::_putfield)) {
1498 scope()->set_wrote_final();
1499 }
1500
1501 const int offset = !needs_patching ? field->offset() : -1;
1502 switch (code) {
1503 case Bytecodes::_getstatic: {
1504 // check for compile-time constants, i.e., initialized static final fields
1505 Instruction* constant = NULL;
1506 if (field->is_constant() && !PatchALot) {
1507 ciConstant field_val = field->constant_value();
1508 BasicType field_type = field_val.basic_type();
1509 switch (field_type) {
1510 case T_ARRAY:
1511 case T_OBJECT:
1512 if (field_val.as_object()->should_be_constant()) {
1513 constant = new Constant(as_ValueType(field_val));
1514 }
1515 break;
1516
1517 default:
1518 constant = new Constant(as_ValueType(field_val));
1519 }
|