< prev index next >

src/hotspot/share/runtime/synchronizer.cpp

Print this page
rev 54621 : imported patch 8221734-v1


1297   event->commit();
1298 }
1299 
1300 // Fast path code shared by multiple functions.
     // Ensures the object ends up with an inflated (ObjectMonitor-backed) lock:
     // if the mark word already points at a monitor, sanity-check it and return;
     // otherwise fall through to the full inflate() path on the current thread,
     // tagged with the internal-VM inflation cause.
1301 void ObjectSynchronizer::inflate_helper(oop obj) {
1302   markOop mark = obj->mark();
1303   if (mark->has_monitor()) {
     // Already inflated: the monitor must live in the monitor pool and must
     // have cached a neutral (unlocked-shape) displaced header for the object.
1304     assert(ObjectSynchronizer::verify_objmon_isinpool(mark->monitor()), "monitor is invalid");
1305     assert(mark->monitor()->header()->is_neutral(), "monitor must record a good object header");
1306     return;
1307   }
1308   inflate(Thread::current(), obj, inflate_cause_vm_internal);
1309 }
1310 
1311 ObjectMonitor* ObjectSynchronizer::inflate(Thread * Self,
1312                                            oop object,
1313                                            const InflateCause cause) {
1314   // Inflate mutates the heap ...
1315   // Relaxing assertion for bug 6320749.
1316   assert(Universe::verify_in_progress() ||
1317          !SafepointSynchronize::is_at_safepoint(), "invariant");
1318 
1319   EventJavaMonitorInflate event;
1320 
1321   for (;;) {
1322     const markOop mark = object->mark();
1323     assert(!mark->has_bias_pattern(), "invariant");
1324 
1325     // The mark can be in one of the following states:
1326     // *  Inflated     - just return
1327     // *  Stack-locked - coerce it to inflated
1328     // *  INFLATING    - busy wait for conversion to complete
1329     // *  Neutral      - aggressively inflate the object.
1330     // *  BIASED       - Illegal.  We should never see this
1331 
1332     // CASE: inflated
1333     if (mark->has_monitor()) {
1334       ObjectMonitor * inf = mark->monitor();
1335       markOop dmw = inf->header();
1336       assert(dmw->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(dmw));
1337       assert(oopDesc::equals((oop) inf->object(), object), "invariant");


1426       m->set_header(dmw);
1427 
1428       // Optimization: if the mark->locker stack address is associated
1429       // with this thread we could simply set m->_owner = Self.
1430       // Note that a thread can inflate an object
1431       // that it has stack-locked -- as might happen in wait() -- directly
1432       // with CAS.  That is, we can avoid the xchg-NULL .... ST idiom.
1433       m->set_owner(mark->locker());
1434       m->set_object(object);
1435       // TODO-FIXME: assert BasicLock->dhw != 0.
1436 
1437       // Must preserve store ordering. The monitor state must
1438       // be stable at the time of publishing the monitor address.
1439       guarantee(object->mark() == markOopDesc::INFLATING(), "invariant");
1440       object->release_set_mark(markOopDesc::encode(m));
1441 
1442       // Hopefully the performance counters are allocated on distinct cache lines
1443       // to avoid false sharing on MP systems ...
1444       OM_PERFDATA_OP(Inflations, inc());
1445       if (log_is_enabled(Trace, monitorinflation)) {
1446         ResourceMark rm(Self);
1447         lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT ", mark="
1448                      INTPTR_FORMAT ", type='%s'", p2i(object),
1449                      p2i(object->mark()), object->klass()->external_name());
1450       }
1451       if (event.should_commit()) {
1452         post_monitor_inflate_event(&event, object, cause);
1453       }
1454       return m;
1455     }
1456 
1457     // CASE: neutral
1458     // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
1459     // If we know we're inflating for entry it's better to inflate by swinging a
1460     // pre-locked objectMonitor pointer into the object header.   A successful
1461     // CAS inflates the object *and* confers ownership to the inflating thread.
1462     // In the current implementation we use a 2-step mechanism where we CAS()
1463     // to inflate and then CAS() again to try to swing _owner from NULL to Self.
1464     // An inflateTry() method that we could call from fast_enter() and slow_enter()
1465     // would be useful.
1466 


1476     m->_recursions   = 0;
1477     m->_Responsible  = NULL;
1478     m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;       // consider: keep metastats by type/class
1479 
1480     if (object->cas_set_mark(markOopDesc::encode(m), mark) != mark) {
1481       m->set_header(NULL);
1482       m->set_object(NULL);
1483       m->Recycle();
1484       omRelease(Self, m, true);
1485       m = NULL;
1486       continue;
1487       // interference - the markword changed - just retry.
1488       // The state-transitions are one-way, so there's no chance of
1489       // live-lock -- "Inflated" is an absorbing state.
1490     }
1491 
1492     // Hopefully the performance counters are allocated on distinct
1493     // cache lines to avoid false sharing on MP systems ...
1494     OM_PERFDATA_OP(Inflations, inc());
1495     if (log_is_enabled(Trace, monitorinflation)) {
1496       ResourceMark rm(Self);
1497       lsh.print_cr("inflate(neutral): object=" INTPTR_FORMAT ", mark="
1498                    INTPTR_FORMAT ", type='%s'", p2i(object),
1499                    p2i(object->mark()), object->klass()->external_name());
1500     }
1501     if (event.should_commit()) {
1502       post_monitor_inflate_event(&event, object, cause);
1503     }
1504     return m;
1505   }
1506 }
1507 
1508 
1509 // We maintain a list of in-use monitors for each thread.
1510 //
1511 // deflate_thread_local_monitors() scans a single thread's in-use list, while
1512 // deflate_idle_monitors() scans only a global list of in-use monitors which
1513 // is populated only as a thread dies (see omFlush()).
1514 //
1515 // These operations are called at all safepoints, immediately after mutators
1516 // are stopped, but before any objects have moved. Collectively they traverse




1297   event->commit();
1298 }
1299 
1300 // Fast path code shared by multiple functions.
     // Ensures the object ends up with an inflated (ObjectMonitor-backed) lock:
     // if the mark word already points at a monitor, sanity-check it and return;
     // otherwise fall through to the full inflate() path on the current thread,
     // tagged with the internal-VM inflation cause.
1301 void ObjectSynchronizer::inflate_helper(oop obj) {
1302   markOop mark = obj->mark();
1303   if (mark->has_monitor()) {
     // Already inflated: the monitor must live in the monitor pool and must
     // have cached a neutral (unlocked-shape) displaced header for the object.
1304     assert(ObjectSynchronizer::verify_objmon_isinpool(mark->monitor()), "monitor is invalid");
1305     assert(mark->monitor()->header()->is_neutral(), "monitor must record a good object header");
1306     return;
1307   }
1308   inflate(Thread::current(), obj, inflate_cause_vm_internal);
1309 }
1310 
1311 ObjectMonitor* ObjectSynchronizer::inflate(Thread * Self,
1312                                            oop object,
1313                                            const InflateCause cause) {
1314   // Inflate mutates the heap ...
1315   // Relaxing assertion for bug 6320749.
1316   assert(Universe::verify_in_progress() ||
1317          !Universe::heap()->is_gc_active(), "invariant");
1318 
1319   EventJavaMonitorInflate event;
1320 
1321   for (;;) {
1322     const markOop mark = object->mark();
1323     assert(!mark->has_bias_pattern(), "invariant");
1324 
1325     // The mark can be in one of the following states:
1326     // *  Inflated     - just return
1327     // *  Stack-locked - coerce it to inflated
1328     // *  INFLATING    - busy wait for conversion to complete
1329     // *  Neutral      - aggressively inflate the object.
1330     // *  BIASED       - Illegal.  We should never see this
1331 
1332     // CASE: inflated
1333     if (mark->has_monitor()) {
1334       ObjectMonitor * inf = mark->monitor();
1335       markOop dmw = inf->header();
1336       assert(dmw->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(dmw));
1337       assert(oopDesc::equals((oop) inf->object(), object), "invariant");


1426       m->set_header(dmw);
1427 
1428       // Optimization: if the mark->locker stack address is associated
1429       // with this thread we could simply set m->_owner = Self.
1430       // Note that a thread can inflate an object
1431       // that it has stack-locked -- as might happen in wait() -- directly
1432       // with CAS.  That is, we can avoid the xchg-NULL .... ST idiom.
1433       m->set_owner(mark->locker());
1434       m->set_object(object);
1435       // TODO-FIXME: assert BasicLock->dhw != 0.
1436 
1437       // Must preserve store ordering. The monitor state must
1438       // be stable at the time of publishing the monitor address.
1439       guarantee(object->mark() == markOopDesc::INFLATING(), "invariant");
1440       object->release_set_mark(markOopDesc::encode(m));
1441 
1442       // Hopefully the performance counters are allocated on distinct cache lines
1443       // to avoid false sharing on MP systems ...
1444       OM_PERFDATA_OP(Inflations, inc());
1445       if (log_is_enabled(Trace, monitorinflation)) {
1446         ResourceMark rm;
1447         lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT ", mark="
1448                      INTPTR_FORMAT ", type='%s'", p2i(object),
1449                      p2i(object->mark()), object->klass()->external_name());
1450       }
1451       if (event.should_commit()) {
1452         post_monitor_inflate_event(&event, object, cause);
1453       }
1454       return m;
1455     }
1456 
1457     // CASE: neutral
1458     // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
1459     // If we know we're inflating for entry it's better to inflate by swinging a
1460     // pre-locked objectMonitor pointer into the object header.   A successful
1461     // CAS inflates the object *and* confers ownership to the inflating thread.
1462     // In the current implementation we use a 2-step mechanism where we CAS()
1463     // to inflate and then CAS() again to try to swing _owner from NULL to Self.
1464     // An inflateTry() method that we could call from fast_enter() and slow_enter()
1465     // would be useful.
1466 


1476     m->_recursions   = 0;
1477     m->_Responsible  = NULL;
1478     m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;       // consider: keep metastats by type/class
1479 
1480     if (object->cas_set_mark(markOopDesc::encode(m), mark) != mark) {
1481       m->set_header(NULL);
1482       m->set_object(NULL);
1483       m->Recycle();
1484       omRelease(Self, m, true);
1485       m = NULL;
1486       continue;
1487       // interference - the markword changed - just retry.
1488       // The state-transitions are one-way, so there's no chance of
1489       // live-lock -- "Inflated" is an absorbing state.
1490     }
1491 
1492     // Hopefully the performance counters are allocated on distinct
1493     // cache lines to avoid false sharing on MP systems ...
1494     OM_PERFDATA_OP(Inflations, inc());
1495     if (log_is_enabled(Trace, monitorinflation)) {
1496       ResourceMark rm;
1497       lsh.print_cr("inflate(neutral): object=" INTPTR_FORMAT ", mark="
1498                    INTPTR_FORMAT ", type='%s'", p2i(object),
1499                    p2i(object->mark()), object->klass()->external_name());
1500     }
1501     if (event.should_commit()) {
1502       post_monitor_inflate_event(&event, object, cause);
1503     }
1504     return m;
1505   }
1506 }
1507 
1508 
1509 // We maintain a list of in-use monitors for each thread.
1510 //
1511 // deflate_thread_local_monitors() scans a single thread's in-use list, while
1512 // deflate_idle_monitors() scans only a global list of in-use monitors which
1513 // is populated only as a thread dies (see omFlush()).
1514 //
1515 // These operations are called at all safepoints, immediately after mutators
1516 // are stopped, but before any objects have moved. Collectively they traverse


< prev index next >