< prev index next >

src/hotspot/share/runtime/synchronizer.cpp

Print this page
rev 54838 : [mq]: 8221734-v2
rev 54839 : [mq]: 8221734-v3


1298   event->commit();
1299 }
1300 
1301 // Fast path code shared by multiple functions
     // Ensures |obj| ends up with an inflated ObjectMonitor: returns at once if
     // the mark word already points at a monitor, otherwise falls through to the
     // general inflate() path on behalf of the calling thread.
1302 void ObjectSynchronizer::inflate_helper(oop obj) {
1303   markOop mark = obj->mark();
     // Already inflated: sanity-check that the monitor is a member of the global
     // monitor pool and that it records a neutral (unlocked, unbiased) displaced
     // header, then return without doing any work.
1304   if (mark->has_monitor()) {
1305     assert(ObjectSynchronizer::verify_objmon_isinpool(mark->monitor()), "monitor is invalid");
1306     assert(mark->monitor()->header()->is_neutral(), "monitor must record a good object header");
1307     return;
1308   }
     // Not inflated yet (mark is neutral or stack-locked): inflate now, tagging
     // the cause as vm-internal for the JFR inflation event.
1309   inflate(Thread::current(), obj, inflate_cause_vm_internal);
1310 }
1311 
1312 ObjectMonitor* ObjectSynchronizer::inflate(Thread * Self,
1313                                            oop object,
1314                                            const InflateCause cause) {
1315   // Inflate mutates the heap ...
1316   // Relaxing assertion for bug 6320749.
1317   assert(Universe::verify_in_progress() ||
1318          !Universe::heap()->is_gc_active(), "invariant");
1319 
1320   EventJavaMonitorInflate event;
1321 
1322   for (;;) {
1323     const markOop mark = object->mark();
1324     assert(!mark->has_bias_pattern(), "invariant");
1325 
1326     // The mark can be in one of the following states:
1327     // *  Inflated     - just return
1328     // *  Stack-locked - coerce it to inflated
1329     // *  INFLATING    - busy wait for conversion to complete
1330     // *  Neutral      - aggressively inflate the object.
1331     // *  BIASED       - Illegal.  We should never see this
1332 
1333     // CASE: inflated
1334     if (mark->has_monitor()) {
1335       ObjectMonitor * inf = mark->monitor();
1336       markOop dmw = inf->header();
1337       assert(dmw->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(dmw));
1338       assert(oopDesc::equals((oop) inf->object(), object), "invariant");


1427       m->set_header(dmw);
1428 
1429       // Optimization: if the mark->locker stack address is associated
1430       // with this thread we could simply set m->_owner = Self.
1431       // Note that a thread can inflate an object
1432       // that it has stack-locked -- as might happen in wait() -- directly
1433       // with CAS.  That is, we can avoid the xchg-NULL .... ST idiom.
1434       m->set_owner(mark->locker());
1435       m->set_object(object);
1436       // TODO-FIXME: assert BasicLock->dhw != 0.
1437 
1438       // Must preserve store ordering. The monitor state must
1439       // be stable at the time of publishing the monitor address.
1440       guarantee(object->mark() == markOopDesc::INFLATING(), "invariant");
1441       object->release_set_mark(markOopDesc::encode(m));
1442 
1443       // Hopefully the performance counters are allocated on distinct cache lines
1444       // to avoid false sharing on MP systems ...
1445       OM_PERFDATA_OP(Inflations, inc());
1446       if (log_is_enabled(Trace, monitorinflation)) {
1447         ResourceMark rm;
1448         lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT ", mark="
1449                      INTPTR_FORMAT ", type='%s'", p2i(object),
1450                      p2i(object->mark()), object->klass()->external_name());
1451       }
1452       if (event.should_commit()) {
1453         post_monitor_inflate_event(&event, object, cause);
1454       }
1455       return m;
1456     }
1457 
1458     // CASE: neutral
1459     // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
1460     // If we know we're inflating for entry it's better to inflate by swinging a
1461     // pre-locked objectMonitor pointer into the object header.   A successful
1462     // CAS inflates the object *and* confers ownership to the inflating thread.
1463     // In the current implementation we use a 2-step mechanism where we CAS()
1464     // to inflate and then CAS() again to try to swing _owner from NULL to Self.
1465     // An inflateTry() method that we could call from fast_enter() and slow_enter()
1466     // would be useful.
1467 


1477     m->_recursions   = 0;
1478     m->_Responsible  = NULL;
1479     m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;       // consider: keep metastats by type/class
1480 
1481     if (object->cas_set_mark(markOopDesc::encode(m), mark) != mark) {
1482       m->set_header(NULL);
1483       m->set_object(NULL);
1484       m->Recycle();
1485       omRelease(Self, m, true);
1486       m = NULL;
1487       continue;
1488       // interference - the markword changed - just retry.
1489       // The state-transitions are one-way, so there's no chance of
1490       // live-lock -- "Inflated" is an absorbing state.
1491     }
1492 
1493     // Hopefully the performance counters are allocated on distinct
1494     // cache lines to avoid false sharing on MP systems ...
1495     OM_PERFDATA_OP(Inflations, inc());
1496     if (log_is_enabled(Trace, monitorinflation)) {
1497       ResourceMark rm;
1498       lsh.print_cr("inflate(neutral): object=" INTPTR_FORMAT ", mark="
1499                    INTPTR_FORMAT ", type='%s'", p2i(object),
1500                    p2i(object->mark()), object->klass()->external_name());
1501     }
1502     if (event.should_commit()) {
1503       post_monitor_inflate_event(&event, object, cause);
1504     }
1505     return m;
1506   }
1507 }
1508 
1509 
1510 // We maintain a list of in-use monitors for each thread.
1511 //
1512 // deflate_thread_local_monitors() scans a single thread's in-use list, while
1513 // deflate_idle_monitors() scans only a global list of in-use monitors which
1514 // is populated only as a thread dies (see omFlush()).
1515 //
1516 // These operations are called at all safepoints, immediately after mutators
1517 // are stopped, but before any objects have moved. Collectively they traverse




1298   event->commit();
1299 }
1300 
1301 // Fast path code shared by multiple functions
     // Ensures |obj| ends up with an inflated ObjectMonitor: returns at once if
     // the mark word already points at a monitor, otherwise falls through to the
     // general inflate() path on behalf of the calling thread.
1302 void ObjectSynchronizer::inflate_helper(oop obj) {
1303   markOop mark = obj->mark();
     // Already inflated: sanity-check that the monitor is a member of the global
     // monitor pool and that it records a neutral (unlocked, unbiased) displaced
     // header, then return without doing any work.
1304   if (mark->has_monitor()) {
1305     assert(ObjectSynchronizer::verify_objmon_isinpool(mark->monitor()), "monitor is invalid");
1306     assert(mark->monitor()->header()->is_neutral(), "monitor must record a good object header");
1307     return;
1308   }
     // Not inflated yet (mark is neutral or stack-locked): inflate now, tagging
     // the cause as vm-internal for the JFR inflation event.
1309   inflate(Thread::current(), obj, inflate_cause_vm_internal);
1310 }
1311 
1312 ObjectMonitor* ObjectSynchronizer::inflate(Thread * Self,
1313                                            oop object,
1314                                            const InflateCause cause) {
1315   // Inflate mutates the heap ...
1316   // Relaxing assertion for bug 6320749.
1317   assert(Universe::verify_in_progress() ||
1318          !SafepointSynchronize::is_at_safepoint(), "invariant");
1319 
1320   EventJavaMonitorInflate event;
1321 
1322   for (;;) {
1323     const markOop mark = object->mark();
1324     assert(!mark->has_bias_pattern(), "invariant");
1325 
1326     // The mark can be in one of the following states:
1327     // *  Inflated     - just return
1328     // *  Stack-locked - coerce it to inflated
1329     // *  INFLATING    - busy wait for conversion to complete
1330     // *  Neutral      - aggressively inflate the object.
1331     // *  BIASED       - Illegal.  We should never see this
1332 
1333     // CASE: inflated
1334     if (mark->has_monitor()) {
1335       ObjectMonitor * inf = mark->monitor();
1336       markOop dmw = inf->header();
1337       assert(dmw->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(dmw));
1338       assert(oopDesc::equals((oop) inf->object(), object), "invariant");


1427       m->set_header(dmw);
1428 
1429       // Optimization: if the mark->locker stack address is associated
1430       // with this thread we could simply set m->_owner = Self.
1431       // Note that a thread can inflate an object
1432       // that it has stack-locked -- as might happen in wait() -- directly
1433       // with CAS.  That is, we can avoid the xchg-NULL .... ST idiom.
1434       m->set_owner(mark->locker());
1435       m->set_object(object);
1436       // TODO-FIXME: assert BasicLock->dhw != 0.
1437 
1438       // Must preserve store ordering. The monitor state must
1439       // be stable at the time of publishing the monitor address.
1440       guarantee(object->mark() == markOopDesc::INFLATING(), "invariant");
1441       object->release_set_mark(markOopDesc::encode(m));
1442 
1443       // Hopefully the performance counters are allocated on distinct cache lines
1444       // to avoid false sharing on MP systems ...
1445       OM_PERFDATA_OP(Inflations, inc());
1446       if (log_is_enabled(Trace, monitorinflation)) {
1447         ResourceMark rm(Self);
1448         lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT ", mark="
1449                      INTPTR_FORMAT ", type='%s'", p2i(object),
1450                      p2i(object->mark()), object->klass()->external_name());
1451       }
1452       if (event.should_commit()) {
1453         post_monitor_inflate_event(&event, object, cause);
1454       }
1455       return m;
1456     }
1457 
1458     // CASE: neutral
1459     // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
1460     // If we know we're inflating for entry it's better to inflate by swinging a
1461     // pre-locked objectMonitor pointer into the object header.   A successful
1462     // CAS inflates the object *and* confers ownership to the inflating thread.
1463     // In the current implementation we use a 2-step mechanism where we CAS()
1464     // to inflate and then CAS() again to try to swing _owner from NULL to Self.
1465     // An inflateTry() method that we could call from fast_enter() and slow_enter()
1466     // would be useful.
1467 


1477     m->_recursions   = 0;
1478     m->_Responsible  = NULL;
1479     m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;       // consider: keep metastats by type/class
1480 
1481     if (object->cas_set_mark(markOopDesc::encode(m), mark) != mark) {
1482       m->set_header(NULL);
1483       m->set_object(NULL);
1484       m->Recycle();
1485       omRelease(Self, m, true);
1486       m = NULL;
1487       continue;
1488       // interference - the markword changed - just retry.
1489       // The state-transitions are one-way, so there's no chance of
1490       // live-lock -- "Inflated" is an absorbing state.
1491     }
1492 
1493     // Hopefully the performance counters are allocated on distinct
1494     // cache lines to avoid false sharing on MP systems ...
1495     OM_PERFDATA_OP(Inflations, inc());
1496     if (log_is_enabled(Trace, monitorinflation)) {
1497       ResourceMark rm(Self);
1498       lsh.print_cr("inflate(neutral): object=" INTPTR_FORMAT ", mark="
1499                    INTPTR_FORMAT ", type='%s'", p2i(object),
1500                    p2i(object->mark()), object->klass()->external_name());
1501     }
1502     if (event.should_commit()) {
1503       post_monitor_inflate_event(&event, object, cause);
1504     }
1505     return m;
1506   }
1507 }
1508 
1509 
1510 // We maintain a list of in-use monitors for each thread.
1511 //
1512 // deflate_thread_local_monitors() scans a single thread's in-use list, while
1513 // deflate_idle_monitors() scans only a global list of in-use monitors which
1514 // is populated only as a thread dies (see omFlush()).
1515 //
1516 // These operations are called at all safepoints, immediately after mutators
1517 // are stopped, but before any objects have moved. Collectively they traverse


< prev index next >