 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"

#include "runtime/threadCritical.hpp"
#include "services/virtualMemoryTracker.hpp"

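// Backing store for the summary snapshot. A raw size_t array is used so the
// snapshot object can be constructed with placement new in initialize(),
// keeping construction timing under NMT's control instead of relying on
// static initialization order.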
size_t VirtualMemorySummary::_snapshot[CALC_OBJ_SIZE_IN_TYPE(VirtualMemorySnapshot, size_t)];

void VirtualMemorySummary::initialize() {
  assert(sizeof(_snapshot) >= sizeof(VirtualMemorySnapshot), "Sanity check");
  // Use placement new to initialize the static data area.
  ::new ((void*)_snapshot) VirtualMemorySnapshot();
}

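// All reserved regions, kept sorted by base address. The list is allocated
// lazily in late_initialize() below.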
SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>* VirtualMemoryTracker::_reserved_regions;

int compare_committed_region(const CommittedMemoryRegion& r1, const CommittedMemoryRegion& r2) {
  return r1.compare(r2);
}

int compare_reserved_region_base(const ReservedMemoryRegion& r1, const ReservedMemoryRegion& r2) {
  return r1.compare(r2);
}

46
47 bool ReservedMemoryRegion::add_committed_region(address addr, size_t size, const NativeCallStack& stack) {
48 assert(addr != NULL, "Invalid address");
49 assert(size > 0, "Invalid size");
50 assert(contain_region(addr, size), "Not contain this region");
51
52 if (all_committed()) return true;
53
54 CommittedMemoryRegion committed_rgn(addr, size, stack);
55 LinkedListNode<CommittedMemoryRegion>* node = _committed_regions.find_node(committed_rgn);
56 if (node != NULL) {
57 CommittedMemoryRegion* rgn = node->data();
266 return committed;
267 }
268 }
269
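// Re-type this region. A change of memory type moves both the reserved and
// the committed counters from the old type to the new one in the summary.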
void ReservedMemoryRegion::set_flag(MEMFLAGS f) {
  assert((flag() == mtNone || flag() == f), "Overwrite memory type");
  if (flag() != f) {
    VirtualMemorySummary::move_reserved_memory(flag(), f, size());
    VirtualMemorySummary::move_committed_memory(flag(), f, committed_size());
    _flag = f;
  }
}

bool VirtualMemoryTracker::initialize(NMT_TrackingLevel level) {
  if (level >= NMT_summary) {
    VirtualMemorySummary::initialize();
  }
  return true;
}

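// The region list lives on the C heap and is allocated here rather than in
// initialize(); an allocation failure is reported to the caller instead of
// being asserted.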
bool VirtualMemoryTracker::late_initialize(NMT_TrackingLevel level) {
  if (level >= NMT_summary) {
    _reserved_regions = new (std::nothrow, ResourceObj::C_HEAP, mtNMT)
      SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>();
    return (_reserved_regions != NULL);
  }
  return true;
}

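// Register a reservation. Four cases are handled: a brand-new region, an
// exact re-reservation of a known region (its call stack and type are
// refreshed), a reservation adjacent to a known region (the region is
// expanded), and an overlapping reservation (the old region is replaced).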
bool VirtualMemoryTracker::add_reserved_region(address base_addr, size_t size,
    const NativeCallStack& stack, MEMFLAGS flag, bool all_committed) {
  assert(base_addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != NULL, "Sanity check");

  ReservedMemoryRegion rgn(base_addr, size, stack, flag);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);
  LinkedListNode<ReservedMemoryRegion>* node;
  if (reserved_rgn == NULL) {
    VirtualMemorySummary::record_reserved_memory(size, flag);
    node = _reserved_regions->add(rgn);
    if (node != NULL) {
      node->data()->set_all_committed(all_committed);
      return true;
    } else {
      return false;
    }
  } else {
    if (reserved_rgn->same_region(base_addr, size)) {
      reserved_rgn->set_call_stack(stack);
      reserved_rgn->set_flag(flag);
      return true;
    } else if (reserved_rgn->adjacent_to(base_addr, size)) {
      VirtualMemorySummary::record_reserved_memory(size, flag);
      reserved_rgn->expand_region(base_addr, size);
      reserved_rgn->set_call_stack(stack);
      return true;
    } else {
      // Overlapped reservation. This can happen when the regions are thread
      // stacks: a JNI thread does not detach from the VM before it exits,
      // which leads to
      // [... elided ...]

        // Release the old region
        VirtualMemorySummary::record_uncommitted_memory(reserved_rgn->committed_size(), reserved_rgn->flag());
        VirtualMemorySummary::record_released_memory(reserved_rgn->size(), reserved_rgn->flag());

        // Add the new region
        VirtualMemorySummary::record_reserved_memory(rgn.size(), flag);

        *reserved_rgn = rgn;
        return true;
      } else {
        ShouldNotReachHere();
        return false;
      }
    }
  }
}

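// Tag an existing reservation with its memory type once the type is known.
// Only an untyped (mtNone) region may change type.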
void VirtualMemoryTracker::set_reserved_region_type(address addr, MEMFLAGS flag) {
  assert(addr != NULL, "Invalid address");
  assert(_reserved_regions != NULL, "Sanity check");

  ReservedMemoryRegion rgn(addr, 1);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);
  if (reserved_rgn != NULL) {
    assert(reserved_rgn->contain_address(addr), "Containment");
    if (reserved_rgn->flag() != flag) {
      assert(reserved_rgn->flag() == mtNone, "Overwrite memory type");
      reserved_rgn->set_flag(flag);
    }
  }
}

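// Commit a range inside an existing reservation. The range must be fully
// contained in a single reserved region, which then does the bookkeeping.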
bool VirtualMemoryTracker::add_committed_region(address addr, size_t size,
    const NativeCallStack& stack) {
  assert(addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != NULL, "Sanity check");

  ReservedMemoryRegion rgn(addr, size);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);

  assert(reserved_rgn != NULL, "No reserved region");
  assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
  return reserved_rgn->add_committed_region(addr, size, stack);
}

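// Uncommit a range inside an existing reservation; the reservation itself
// remains in place.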
bool VirtualMemoryTracker::remove_uncommitted_region(address addr, size_t size) {
  assert(addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != NULL, "Sanity check");

  ReservedMemoryRegion rgn(addr, size);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);
  assert(reserved_rgn != NULL, "No reserved region");
  assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
  return reserved_rgn->remove_uncommitted_region(addr, size);
}

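// Release part or all of a reservation. Three shapes are possible: the whole
// region (the entry is removed), a range touching either end (the region is
// trimmed), or a range in the middle (the region is split in two, and the
// committed sub-regions above the split move to the new upper region).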
bool VirtualMemoryTracker::remove_released_region(address addr, size_t size) {
  assert(addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != NULL, "Sanity check");

  ReservedMemoryRegion rgn(addr, size);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);

  assert(reserved_rgn != NULL, "No reserved region");

  // Uncommit committed regions within the released region
  if (!reserved_rgn->remove_uncommitted_region(addr, size)) {
    return false;
  }

  VirtualMemorySummary::record_released_memory(size, reserved_rgn->flag());

  if (reserved_rgn->same_region(addr, size)) {
    return _reserved_regions->remove(rgn);
  } else {
    assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
    if (reserved_rgn->base() == addr ||
        reserved_rgn->end() == addr + size) {
      reserved_rgn->exclude_region(addr, size);
      return true;
    } else {
      address top = reserved_rgn->end();
      address high_base = addr + size;
      ReservedMemoryRegion high_rgn(high_base, top - high_base,
        *reserved_rgn->call_stack(), reserved_rgn->flag());

      // Use the original region for the lower part of the split
      reserved_rgn->exclude_region(addr, top - addr);
      LinkedListNode<ReservedMemoryRegion>* new_rgn = _reserved_regions->add(high_rgn);
      if (new_rgn == NULL) {
        return false;
      } else {
        reserved_rgn->move_committed_regions(addr, *new_rgn->data());
        return true;
      }
    }
  }
}

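// Walk all reserved regions under ThreadCritical, stopping early when the
// walker returns false.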
bool VirtualMemoryTracker::walk_virtual_memory(VirtualMemoryWalker* walker) {
  assert(_reserved_regions != NULL, "Sanity check");
  ThreadCritical tc;
  LinkedListNode<ReservedMemoryRegion>* head = _reserved_regions->head();
  while (head != NULL) {
    const ReservedMemoryRegion* rgn = head->peek();
    if (!walker->do_allocation_site(rgn)) {
      return false;
    }
    head = head->next();
  }
  return true;
}
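
// Illustrative sketch only, not part of this file: a minimal walker that
// tallies the total reserved size. It assumes VirtualMemoryWalker (declared
// in virtualMemoryTracker.hpp) exposes a virtual
// bool do_allocation_site(const ReservedMemoryRegion*), matching the call in
// walk_virtual_memory() above.
//
//   class TotalReservedWalker : public VirtualMemoryWalker {
//    public:
//     TotalReservedWalker() : _total(0) { }
//     bool do_allocation_site(const ReservedMemoryRegion* rgn) {
//       _total += rgn->size();
//       return true;   // keep walking
//     }
//     size_t total() const { return _total; }
//    private:
//     size_t _total;
//   };
//
//   // Usage:
//   //   TotalReservedWalker walker;
//   //   VirtualMemoryTracker::walk_virtual_memory(&walker);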

// Transition the virtual memory tracking level. Dropping to minimal discards
// the tracking data structures; leaving minimal resets the summary counters.
bool VirtualMemoryTracker::transition(NMT_TrackingLevel from, NMT_TrackingLevel to) {
  if (from == NMT_minimal) {
    assert(to == NMT_summary || to == NMT_detail, "Just check");
    VirtualMemorySummary::reset();
  } else if (to == NMT_minimal) {
    assert(from == NMT_summary || from == NMT_detail, "Just check");
    // Clean up virtual memory tracking data structures.
    ThreadCritical tc;
    if (_reserved_regions != NULL) {
      delete _reserved_regions;
      _reserved_regions = NULL;
    }
  }

  return true;
}