620 OSThread* osthread = thread->osthread();
621 osthread->set_caller_sigmask(caller_sigmask);
622
623 pthread_sigmask(SIG_UNBLOCK, os::Bsd::unblocked_signals(), NULL);
624
625 if (!ReduceSignalUsage) {
626 if (thread->is_VM_thread()) {
627 // Only the VM thread handles BREAK_SIGNAL ...
628 pthread_sigmask(SIG_UNBLOCK, vm_signals(), NULL);
629 } else {
630 // ... all other threads block BREAK_SIGNAL
631 pthread_sigmask(SIG_BLOCK, vm_signals(), NULL);
632 }
633 }
634 }
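// A minimal sketch (an added example under stated assumptions, not part of
// the original file): pthread_sigmask() with a NULL 'set' is a pure query,
// and BREAK_SIGNAL is SIGQUIT in HotSpot, so a thread can verify the mask
// installed above like this:
static bool break_signal_blocked_sketch() {
  sigset_t current;
  pthread_sigmask(SIG_BLOCK, NULL, &current);  // NULL set => query only
  return sigismember(&current, SIGQUIT) == 1;
}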
635
636
637 //////////////////////////////////////////////////////////////////////////////
638 // create new thread
639
640 // Check if it's safe to start a new thread (always true on BSD, which has no fixed-stack thread library)
641 static bool _thread_safety_check(Thread* thread) {
642 return true;
643 }
644
645 #ifdef __APPLE__
646 // library handle for calling objc_registerThreadWithCollector()
647 // without static linking to the libobjc library
648 #define OBJC_LIB "/usr/lib/libobjc.dylib"
649 #define OBJC_GCREGISTER "objc_registerThreadWithCollector"
650 typedef void (*objc_registerThreadWithCollector_t)();
651 extern "C" objc_registerThreadWithCollector_t objc_registerThreadWithCollectorFunction;
652 objc_registerThreadWithCollector_t objc_registerThreadWithCollectorFunction = NULL;
653 #endif
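// The function pointer above is populated during VM startup by loading
// libobjc dynamically. Roughly this pattern, using the macros defined above
// (a sketch, not the verbatim initialization code):
//
//   void* handle = dlopen(OBJC_LIB, RTLD_LAZY);
//   if (handle != NULL) {
//     objc_registerThreadWithCollectorFunction =
//         (objc_registerThreadWithCollector_t)dlsym(handle, OBJC_GCREGISTER);
//   }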
654
655 #ifdef __APPLE__
656 static uint64_t locate_unique_thread_id(mach_port_t mach_thread_port) {
657 // Additional thread_id used to correlate threads in the SA (Serviceability Agent)
658 thread_identifier_info_data_t m_ident_info;
659 mach_msg_type_number_t count = THREAD_IDENTIFIER_INFO_COUNT;
660
661 thread_info(mach_thread_port, THREAD_IDENTIFIER_INFO,
662 (thread_info_t) &m_ident_info, &count);
663
664 return m_ident_info.thread_id;
665 }
666 #endif
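// Hypothetical usage sketch for the helper above: fetch the current thread's
// Mach port with pthread_mach_thread_np() (a macOS pthreads extension) and
// ask the kernel for the system-wide unique id behind it:
//
//   mach_port_t port = pthread_mach_thread_np(pthread_self());
//   uint64_t tid = locate_unique_thread_id(port);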
667
668 // Thread start routine for all newly created threads
669 static void *java_start(Thread *thread) {
670 // Try to randomize the cache line index of hot stack frames.
671 // This helps when threads with the same stack traces evict each other's
672 // cache lines. The threads can be from the same JVM instance or from
673 // different JVM instances. The benefit is especially pronounced on
674 // processors with hyperthreading technology.
675 static int counter = 0;
676 int pid = os::current_process_id();
677 alloca(((pid ^ counter++) & 7) * 128);
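// Worked example of the line above: (pid ^ counter) & 7 lies in [0, 7], so
// alloca() shifts this frame by 0..896 bytes in 128-byte steps, i.e. by
// whole pairs of 64-byte cache lines.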
678
679 ThreadLocalStorage::set_thread(thread);
680
681 OSThread* osthread = thread->osthread();
682 Monitor* sync = osthread->startThread_lock();
683
684 // non-floating-stack BsdThreads need an extra check, see _thread_safety_check() above
685 if (!_thread_safety_check(thread)) {
686 // notify parent thread
687 MutexLockerEx ml(sync, Mutex::_no_safepoint_check_flag);
688 osthread->set_state(ZOMBIE);
689 sync->notify_all();
690 return NULL;
691 }
692
693 osthread->set_thread_id(os::Bsd::gettid());
694
695 #ifdef __APPLE__
696 uint64_t unique_thread_id = locate_unique_thread_id(osthread->thread_id());
697 guarantee(unique_thread_id != 0, "unique thread id was not found");
698 osthread->set_unique_thread_id(unique_thread_id);
699 #endif
700 // initialize signal mask for this thread
701 os::Bsd::hotspot_sigmask(thread);
702
703 // initialize floating point control register
704 os::Bsd::init_thread_fpu_state();
705
706 #ifdef __APPLE__
707 // register thread with objc gc
708 if (objc_registerThreadWithCollectorFunction != NULL) {
709 objc_registerThreadWithCollectorFunction();
710 }
711 #endif
712
2259 #ifdef __OpenBSD__
2260 // XXX: Temporary workaround for an mmap/MAP_FIXED bug on OpenBSD
2261 return ::mprotect(addr, size, PROT_NONE) == 0;
2262 #else
2263 uintptr_t res = (uintptr_t) ::mmap(addr, size, PROT_NONE,
2264 MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE|MAP_ANONYMOUS, -1, 0);
2265 return res != (uintptr_t) MAP_FAILED;
2266 #endif
2267 }
2268
2269 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
2270 return os::commit_memory(addr, size, !ExecMem);
2271 }
2272
2273 // If this is a growable mapping, remove the guard pages entirely by
2274 // munmap()ping them. If not, just call uncommit_memory().
2275 bool os::remove_stack_guard_pages(char* addr, size_t size) {
2276 return os::uncommit_memory(addr, size);
2277 }
2278
2279 static address _highest_vm_reserved_address = NULL;
2280
2281 // If 'fixed' is true, anon_mmap() will attempt to reserve anonymous memory
2282 // at 'requested_addr'. If there are existing memory mappings at the same
2283 // location, however, they will be overwritten. If 'fixed' is false,
2284 // 'requested_addr' is only treated as a hint, the return value may or
2285 // may not start from the requested address. Unlike Bsd mmap(), this
2286 // function returns NULL to indicate failure.
2287 static char* anon_mmap(char* requested_addr, size_t bytes, bool fixed) {
2288 char * addr;
2289 int flags;
2290
2291 flags = MAP_PRIVATE | MAP_NORESERVE | MAP_ANONYMOUS;
2292 if (fixed) {
2293 assert((uintptr_t)requested_addr % os::Bsd::page_size() == 0, "unaligned address");
2294 flags |= MAP_FIXED;
2295 }
2296
2297 // Map reserved/uncommitted pages PROT_NONE so we fail early if we
2298 // touch an uncommitted page. Otherwise, the read/write might
2299 // succeed if we have enough swap space to back the physical page.
2300 addr = (char*)::mmap(requested_addr, bytes, PROT_NONE,
2301 flags, -1, 0);
2302
2303 if (addr != MAP_FAILED) {
2304 // anon_mmap() should only get called during VM initialization, so no
2305 // lock is needed (in fact locking can be skipped even if it is called
2306 // from multiple threads, because _highest_vm_reserved_address is just a
2307 // hint about the upper limit of non-stack memory regions).
2308 if ((address)addr + bytes > _highest_vm_reserved_address) {
2309 _highest_vm_reserved_address = (address)addr + bytes;
2310 }
2311 }
2312
2313 return addr == MAP_FAILED ? NULL : addr;
2314 }
2315
2316 // Don't update _highest_vm_reserved_address, because there might be memory
2317 // regions above addr + size. If so, releasing a memory region only creates
2318 // a hole in the address space; it doesn't help prevent heap-stack collisions.
2319 //
2320 static int anon_munmap(char * addr, size_t size) {
2321 return ::munmap(addr, size) == 0;
2322 }
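// A minimal usage sketch under the contract documented above (the size
// 64 * 1024 is an arbitrary assumption; any multiple of the page size works):
//
//   char* p = anon_mmap(NULL, 64 * 1024, false);  // hint-less reservation
//   if (p != NULL) {
//     // ... later, when the region is no longer needed:
//     anon_munmap(p, 64 * 1024);
//   }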
2323
2324 char* os::pd_reserve_memory(size_t bytes, char* requested_addr,
2325 size_t alignment_hint) {
2326 return anon_mmap(requested_addr, bytes, (requested_addr != NULL));
2327 }
2328
2329 bool os::pd_release_memory(char* addr, size_t size) {
2330 return anon_munmap(addr, size);
2331 }
2332
2333 static bool bsd_mprotect(char* addr, size_t size, int prot) {
2334 // Bsd wants the mprotect address argument to be page aligned.
2335 char* bottom = (char*)align_size_down((intptr_t)addr, os::Bsd::page_size());
2336
2337 // According to SUSv3, mprotect() should only be used with mappings
2338 // established by mmap(), and mmap() always maps whole pages. Unaligned
2339 // 'addr' likely indicates a problem in the VM (e.g. trying to change
2473 return UseHugeTLBFS;
2474 }
2475
2476 // Reserve memory at an arbitrary address, only if that area is
2477 // available (and not reserved for something else).
2478
2479 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
2480 const int max_tries = 10;
2481 char* base[max_tries];
2482 size_t size[max_tries];
2483 const size_t gap = 0x000000;
2484
2485 // Assert only that the size is a multiple of the page size, since
2486 // that's all that mmap requires, and since that's all we really know
2487 // about at this low abstraction level. If we need higher alignment,
2488 // we can either pass an alignment to this method or verify alignment
2489 // in one of the methods further up the call chain. See bug 5044738.
2490 assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block");
2491
2492 // Repeatedly allocate blocks until the block is allocated at the
2493 // right spot. Give up after max_tries. Note that reserve_memory() will
2494 // automatically update _highest_vm_reserved_address if the call is
2495 // successful. The variable tracks the highest memory address ever reserved
2496 // by the JVM. It is used to detect heap-stack collisions when running with
2497 // fixed-stack BsdThreads. Because we may attempt to reserve more space
2498 // than needed here, it could confuse the collision-detecting code. To
2499 // avoid that, save the current _highest_vm_reserved_address and restore
2500 // the correct value before returning.
2501 address old_highest = _highest_vm_reserved_address;
2502
2503 // Bsd mmap allows the caller to pass an address as a hint; try it first,
2504 // and if the kernel honors the hint we can return immediately.
2505 char * addr = anon_mmap(requested_addr, bytes, false);
2506 if (addr == requested_addr) {
2507 return requested_addr;
2508 }
2509
2510 if (addr != NULL) {
2511 // mmap() succeeded but did not reserve memory at the requested address
2512 anon_munmap(addr, bytes);
2513 }
2514
2515 int i;
2516 for (i = 0; i < max_tries; ++i) {
2517 base[i] = reserve_memory(bytes);
2518
2519 if (base[i] != NULL) {
2520 // Is this the block we wanted?
2521 if (base[i] == requested_addr) {
2535 size_t bottom_overlap = base[i] + bytes - requested_addr;
2536 if (bottom_overlap < bytes) { // unsigned: a "negative" overlap wraps past 'bytes'
2537 unmap_memory(requested_addr, bottom_overlap);
2538 size[i] = bytes - bottom_overlap;
2539 } else {
2540 size[i] = bytes;
2541 }
2542 }
2543 }
2544 }
2545
2546 // Give back the unused reserved pieces.
2547
2548 for (int j = 0; j < i; ++j) {
2549 if (base[j] != NULL) {
2550 unmap_memory(base[j], size[j]);
2551 }
2552 }
2553
2554 if (i < max_tries) {
2555 _highest_vm_reserved_address = MAX2(old_highest, (address)requested_addr + bytes);
2556 return requested_addr;
2557 } else {
2558 _highest_vm_reserved_address = old_highest;
2559 return NULL;
2560 }
2561 }
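// Sketch of the intended calling pattern (requested_base is a hypothetical
// page-aligned address; bytes must be a multiple of the page size):
//
//   char* base = os::pd_attempt_reserve_memory_at(bytes, requested_base);
//   if (base == NULL) {
//     // the exact spot is taken; fall back to letting the OS choose
//     base = os::pd_reserve_memory(bytes, NULL, 0);
//   }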
2562
2563 size_t os::read(int fd, void *buf, unsigned int nBytes) {
2564 RESTARTABLE_RETURN_INT(::read(fd, buf, nBytes));
2565 }
2566
2567 size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
2568 RESTARTABLE_RETURN_INT(::pread(fd, buf, nBytes, offset));
2569 }
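// RESTARTABLE_RETURN_INT retries the wrapped call while it fails with
// EINTR, so callers never see a spuriously interrupted syscall. Roughly,
// os::read() above expands to (simplified from the macro's definition):
//
//   int result;
//   do {
//     result = ::read(fd, buf, nBytes);
//   } while (result == OS_ERR && errno == EINTR);
//   return result;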
2570
2571 void os::naked_short_sleep(jlong ms) {
2572 struct timespec req;
2573
2574 assert(ms < 1000, "Un-interruptible sleep, short time use only");
2575 req.tv_sec = 0;
2576 if (ms > 0) {
2577 req.tv_nsec = (ms % 1000) * 1000000;
2578 } else {
3698 if (osthread->ucontext() != NULL) {
3699 _epc = os::Bsd::ucontext_get_pc((ucontext_t *) context.ucontext());
3700 } else {
3701 // NULL context is unexpected, double-check this is the VMThread
3702 guarantee(thread->is_VM_thread(), "can only be called for VMThread");
3703 }
3704 }
3705
3706 // Suspends the target using the signal mechanism and then grabs the PC before
3707 // resuming the target. Used by the flat-profiler only
3708 ExtendedPC os::get_thread_pc(Thread* thread) {
3709 // Make sure that it is called by the watcher for the VMThread
3710 assert(Thread::current()->is_Watcher_thread(), "Must be watcher");
3711 assert(thread->is_VM_thread(), "Can only be called for VMThread");
3712
3713 PcFetcher fetcher(thread);
3714 fetcher.run();
3715 return fetcher.result();
3716 }
3717
3718 int os::Bsd::safe_cond_timedwait(pthread_cond_t *_cond,
3719 pthread_mutex_t *_mutex,
3720 const struct timespec *_abstime) {
3721 return pthread_cond_timedwait(_cond, _mutex, _abstime);
3722 }
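// Note: on BSD this wrapper adds nothing over pthread_cond_timedwait()
// itself; it is kept for interface parity with the Linux port, where the
// old fixed-stack thread library needed extra handling around the wait.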
3723
3724 ////////////////////////////////////////////////////////////////////////////////
3725 // debug support
3726
3727 bool os::find(address addr, outputStream* st) {
3728 Dl_info dlinfo;
3729 memset(&dlinfo, 0, sizeof(dlinfo));
3730 if (dladdr(addr, &dlinfo) != 0) {
3731 st->print(PTR_FORMAT ": ", addr);
3732 if (dlinfo.dli_sname != NULL && dlinfo.dli_saddr != NULL) {
3733 st->print("%s+%#x", dlinfo.dli_sname,
3734 addr - (intptr_t)dlinfo.dli_saddr);
3735 } else if (dlinfo.dli_fbase != NULL) {
3736 st->print("<offset %#x>", addr - (intptr_t)dlinfo.dli_fbase);
3737 } else {
3738 st->print("<absolute address>");
3739 }
3740 if (dlinfo.dli_fname != NULL) {
3741 st->print(" in %s", dlinfo.dli_fname);
3742 }
3743 if (dlinfo.dli_fbase != NULL) {
4269 int ret = OS_TIMEOUT;
4270 int status = pthread_mutex_lock(_mutex);
4271 assert_status(status == 0, status, "mutex_lock");
4272 guarantee(_nParked == 0, "invariant");
4273 ++_nParked;
4274
4275 // Object.wait(timo) will return because of
4276 // (a) notification
4277 // (b) timeout
4278 // (c) thread.interrupt
4279 //
4280 // Thread.interrupt and object.notify{All} both call Event::set.
4281 // That is, we treat thread.interrupt as a special case of notification.
4282 // We ignore spurious OS wakeups unless FilterSpuriousWakeups is false.
4283 // We assume all ETIME returns are valid.
4284 //
4285 // TODO: properly differentiate simultaneous notify+interrupt.
4286 // In that case, we should propagate the notify to another waiter.
4287
4288 while (_Event < 0) {
4289 status = os::Bsd::safe_cond_timedwait(_cond, _mutex, &abst);
4290 if (status != 0 && WorkAroundNPTLTimedWaitHang) {
4291 pthread_cond_destroy(_cond);
4292 pthread_cond_init(_cond, NULL);
4293 }
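// Destroying and re-initializing the condvar after a failed wait is the
// workaround named by the WorkAroundNPTLTimedWaitHang flag; it is safe
// here because this thread holds _mutex and the guarantee above admits
// only one parked thread per event.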
4294 assert_status(status == 0 || status == EINTR ||
4295 status == ETIMEDOUT,
4296 status, "cond_timedwait");
4297 if (!FilterSpuriousWakeups) break; // previous semantics
4298 if (status == ETIMEDOUT) break;
4299 // We consume and ignore EINTR and spurious wakeups.
4300 }
4301 --_nParked;
4302 if (_Event >= 0) {
4303 ret = OS_OK;
4304 }
4305 _Event = 0;
4306 status = pthread_mutex_unlock(_mutex);
4307 assert_status(status == 0, status, "mutex_unlock");
4308 assert(_nParked == 0, "invariant");
4309 // Paranoia to ensure our locked and lock-free paths interact
4475 // correctly with each other and Java-level accesses.
4476 OrderAccess::fence();
4477 return;
4478 }
4479
4480 #ifdef ASSERT
4481 // Don't catch signals while blocked; let the running threads have the signals.
4482 // (This allows a debugger to break into the running thread.)
4483 sigset_t oldsigs;
4484 sigset_t* allowdebug_blocked = os::Bsd::allowdebug_blocked_signals();
4485 pthread_sigmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
4486 #endif
4487
4488 OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
4489 jt->set_suspend_equivalent();
4490 // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
4491
4492 if (time == 0) {
4493 status = pthread_cond_wait(_cond, _mutex);
4494 } else {
4495 status = os::Bsd::safe_cond_timedwait(_cond, _mutex, &absTime);
4496 if (status != 0 && WorkAroundNPTLTimedWaitHang) {
4497 pthread_cond_destroy(_cond);
4498 pthread_cond_init(_cond, NULL);
4499 }
4500 }
4501 assert_status(status == 0 || status == EINTR ||
4502 status == ETIMEDOUT,
4503 status, "cond_timedwait");
4504
4505 #ifdef ASSERT
4506 pthread_sigmask(SIG_SETMASK, &oldsigs, NULL);
4507 #endif
4508
4509 _counter = 0;
4510 status = pthread_mutex_unlock(_mutex);
4511 assert_status(status == 0, status, "invariant");
4512 // Paranoia to ensure our locked and lock-free paths interact
4513 // correctly with each other and Java-level accesses.
4514 OrderAccess::fence();
4515
//////////////////////////////////////////////////////////////////////////////
// The same region after cleanup: the fixed-stack BsdThreads scaffolding
// (_thread_safety_check, _highest_vm_reserved_address, safe_cond_timedwait)
// is removed below.
620 OSThread* osthread = thread->osthread();
621 osthread->set_caller_sigmask(caller_sigmask);
622
623 pthread_sigmask(SIG_UNBLOCK, os::Bsd::unblocked_signals(), NULL);
624
625 if (!ReduceSignalUsage) {
626 if (thread->is_VM_thread()) {
627 // Only the VM thread handles BREAK_SIGNAL ...
628 pthread_sigmask(SIG_UNBLOCK, vm_signals(), NULL);
629 } else {
630 // ... all other threads block BREAK_SIGNAL
631 pthread_sigmask(SIG_BLOCK, vm_signals(), NULL);
632 }
633 }
634 }
635
636
637 //////////////////////////////////////////////////////////////////////////////
638 // create new thread
639
640 #ifdef __APPLE__
641 // library handle for calling objc_registerThreadWithCollector()
642 // without static linking to the libobjc library
643 #define OBJC_LIB "/usr/lib/libobjc.dylib"
644 #define OBJC_GCREGISTER "objc_registerThreadWithCollector"
645 typedef void (*objc_registerThreadWithCollector_t)();
646 extern "C" objc_registerThreadWithCollector_t objc_registerThreadWithCollectorFunction;
647 objc_registerThreadWithCollector_t objc_registerThreadWithCollectorFunction = NULL;
648 #endif
649
650 #ifdef __APPLE__
651 static uint64_t locate_unique_thread_id(mach_port_t mach_thread_port) {
652 // Additional thread_id used to correlate threads in the SA (Serviceability Agent)
653 thread_identifier_info_data_t m_ident_info;
654 mach_msg_type_number_t count = THREAD_IDENTIFIER_INFO_COUNT;
655
656 thread_info(mach_thread_port, THREAD_IDENTIFIER_INFO,
657 (thread_info_t) &m_ident_info, &count);
658
659 return m_ident_info.thread_id;
660 }
661 #endif
662
663 // Thread start routine for all newly created threads
664 static void *java_start(Thread *thread) {
665 // Try to randomize the cache line index of hot stack frames.
666 // This helps when threads with the same stack traces evict each other's
667 // cache lines. The threads can be from the same JVM instance or from
668 // different JVM instances. The benefit is especially pronounced on
669 // processors with hyperthreading technology.
670 static int counter = 0;
671 int pid = os::current_process_id();
672 alloca(((pid ^ counter++) & 7) * 128);
673
674 ThreadLocalStorage::set_thread(thread);
675
676 OSThread* osthread = thread->osthread();
677 Monitor* sync = osthread->startThread_lock();
678
679 osthread->set_thread_id(os::Bsd::gettid());
680
681 #ifdef __APPLE__
682 uint64_t unique_thread_id = locate_unique_thread_id(osthread->thread_id());
683 guarantee(unique_thread_id != 0, "unique thread id was not found");
684 osthread->set_unique_thread_id(unique_thread_id);
685 #endif
686 // initialize signal mask for this thread
687 os::Bsd::hotspot_sigmask(thread);
688
689 // initialize floating point control register
690 os::Bsd::init_thread_fpu_state();
691
692 #ifdef __APPLE__
693 // register thread with objc gc
694 if (objc_registerThreadWithCollectorFunction != NULL) {
695 objc_registerThreadWithCollectorFunction();
696 }
697 #endif
698
2245 #ifdef __OpenBSD__
2246 // XXX: Temporary workaround for an mmap/MAP_FIXED bug on OpenBSD
2247 return ::mprotect(addr, size, PROT_NONE) == 0;
2248 #else
2249 uintptr_t res = (uintptr_t) ::mmap(addr, size, PROT_NONE,
2250 MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE|MAP_ANONYMOUS, -1, 0);
2251 return res != (uintptr_t) MAP_FAILED;
2252 #endif
2253 }
2254
2255 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
2256 return os::commit_memory(addr, size, !ExecMem);
2257 }
2258
2259 // If this is a growable mapping, remove the guard pages entirely by
2260 // munmap()ping them. If not, just call uncommit_memory().
2261 bool os::remove_stack_guard_pages(char* addr, size_t size) {
2262 return os::uncommit_memory(addr, size);
2263 }
2264
2265 // If 'fixed' is true, anon_mmap() will attempt to reserve anonymous memory
2266 // at 'requested_addr'. If there are existing memory mappings at the same
2267 // location, however, they will be overwritten. If 'fixed' is false,
2268 // 'requested_addr' is only treated as a hint, the return value may or
2269 // may not start from the requested address. Unlike Bsd mmap(), this
2270 // function returns NULL to indicate failure.
2271 static char* anon_mmap(char* requested_addr, size_t bytes, bool fixed) {
2272 char * addr;
2273 int flags;
2274
2275 flags = MAP_PRIVATE | MAP_NORESERVE | MAP_ANONYMOUS;
2276 if (fixed) {
2277 assert((uintptr_t)requested_addr % os::Bsd::page_size() == 0, "unaligned address");
2278 flags |= MAP_FIXED;
2279 }
2280
2281 // Map reserved/uncommitted pages PROT_NONE so we fail early if we
2282 // touch an uncommitted page. Otherwise, the read/write might
2283 // succeed if we have enough swap space to back the physical page.
2284 addr = (char*)::mmap(requested_addr, bytes, PROT_NONE,
2285 flags, -1, 0);
2286
2287 return addr == MAP_FAILED ? NULL : addr;
2288 }
2289
2290 static int anon_munmap(char * addr, size_t size) {
2291 return ::munmap(addr, size) == 0;
2292 }
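// As the block comment above warns, fixed == true maps over whatever is
// already at requested_addr (MAP_FIXED semantics). The safe probe this file
// itself uses (see pd_attempt_reserve_memory_at() below) treats the address
// as a hint and checks whether the kernel honored it:
//
//   char* probe = anon_mmap(candidate, bytes, false);  // hint, not MAP_FIXED
//   if (probe != NULL && probe != candidate) anon_munmap(probe, bytes);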
2293
2294 char* os::pd_reserve_memory(size_t bytes, char* requested_addr,
2295 size_t alignment_hint) {
2296 return anon_mmap(requested_addr, bytes, (requested_addr != NULL));
2297 }
2298
2299 bool os::pd_release_memory(char* addr, size_t size) {
2300 return anon_munmap(addr, size);
2301 }
2302
2303 static bool bsd_mprotect(char* addr, size_t size, int prot) {
2304 // Bsd wants the mprotect address argument to be page aligned.
2305 char* bottom = (char*)align_size_down((intptr_t)addr, os::Bsd::page_size());
2306
2307 // According to SUSv3, mprotect() should only be used with mappings
2308 // established by mmap(), and mmap() always maps whole pages. Unaligned
2309 // 'addr' likely indicates a problem in the VM (e.g. trying to change
2443 return UseHugeTLBFS;
2444 }
2445
2446 // Reserve memory at an arbitrary address, only if that area is
2447 // available (and not reserved for something else).
2448
2449 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
2450 const int max_tries = 10;
2451 char* base[max_tries];
2452 size_t size[max_tries];
2453 const size_t gap = 0x000000;
2454
2455 // Assert only that the size is a multiple of the page size, since
2456 // that's all that mmap requires, and since that's all we really know
2457 // about at this low abstraction level. If we need higher alignment,
2458 // we can either pass an alignment to this method or verify alignment
2459 // in one of the methods further up the call chain. See bug 5044738.
2460 assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block");
2461
2462 // Repeatedly allocate blocks until the block is allocated at the
2463 // right spot. Give up after max_tries.
2464
2465 // Bsd mmap allows the caller to pass an address as a hint; try it first,
2466 // and if the kernel honors the hint we can return immediately.
2467 char * addr = anon_mmap(requested_addr, bytes, false);
2468 if (addr == requested_addr) {
2469 return requested_addr;
2470 }
2471
2472 if (addr != NULL) {
2473 // mmap() succeeded but did not reserve memory at the requested address
2474 anon_munmap(addr, bytes);
2475 }
2476
2477 int i;
2478 for (i = 0; i < max_tries; ++i) {
2479 base[i] = reserve_memory(bytes);
2480
2481 if (base[i] != NULL) {
2482 // Is this the block we wanted?
2483 if (base[i] == requested_addr) {
2497 size_t bottom_overlap = base[i] + bytes - requested_addr;
2498 if (bottom_overlap < bytes) { // unsigned: a "negative" overlap wraps past 'bytes'
2499 unmap_memory(requested_addr, bottom_overlap);
2500 size[i] = bytes - bottom_overlap;
2501 } else {
2502 size[i] = bytes;
2503 }
2504 }
2505 }
2506 }
2507
2508 // Give back the unused reserved pieces.
2509
2510 for (int j = 0; j < i; ++j) {
2511 if (base[j] != NULL) {
2512 unmap_memory(base[j], size[j]);
2513 }
2514 }
2515
2516 if (i < max_tries) {
2517 return requested_addr;
2518 } else {
2519 return NULL;
2520 }
2521 }
2522
2523 size_t os::read(int fd, void *buf, unsigned int nBytes) {
2524 RESTARTABLE_RETURN_INT(::read(fd, buf, nBytes));
2525 }
2526
2527 size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
2528 RESTARTABLE_RETURN_INT(::pread(fd, buf, nBytes, offset));
2529 }
2530
2531 void os::naked_short_sleep(jlong ms) {
2532 struct timespec req;
2533
2534 assert(ms < 1000, "Un-interruptible sleep, short time use only");
2535 req.tv_sec = 0;
2536 if (ms > 0) {
2537 req.tv_nsec = (ms % 1000) * 1000000;
2538 } else {
3658 if (osthread->ucontext() != NULL) {
3659 _epc = os::Bsd::ucontext_get_pc((ucontext_t *) context.ucontext());
3660 } else {
3661 // NULL context is unexpected, double-check this is the VMThread
3662 guarantee(thread->is_VM_thread(), "can only be called for VMThread");
3663 }
3664 }
3665
3666 // Suspends the target using the signal mechanism and then grabs the PC before
3667 // resuming the target. Used by the flat-profiler only
3668 ExtendedPC os::get_thread_pc(Thread* thread) {
3669 // Make sure that it is called by the watcher for the VMThread
3670 assert(Thread::current()->is_Watcher_thread(), "Must be watcher");
3671 assert(thread->is_VM_thread(), "Can only be called for VMThread");
3672
3673 PcFetcher fetcher(thread);
3674 fetcher.run();
3675 return fetcher.result();
3676 }
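// PcFetcher::run() drives the fragment above: it suspends the target via
// the suspend/resume signal mechanism, reads the PC out of the suspended
// thread's ucontext, then resumes the target before returning the result.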
3677
3678 ////////////////////////////////////////////////////////////////////////////////
3679 // debug support
3680
3681 bool os::find(address addr, outputStream* st) {
3682 Dl_info dlinfo;
3683 memset(&dlinfo, 0, sizeof(dlinfo));
3684 if (dladdr(addr, &dlinfo) != 0) {
3685 st->print(PTR_FORMAT ": ", addr);
3686 if (dlinfo.dli_sname != NULL && dlinfo.dli_saddr != NULL) {
3687 st->print("%s+%#x", dlinfo.dli_sname,
3688 addr - (intptr_t)dlinfo.dli_saddr);
3689 } else if (dlinfo.dli_fbase != NULL) {
3690 st->print("<offset %#x>", addr - (intptr_t)dlinfo.dli_fbase);
3691 } else {
3692 st->print("<absolute address>");
3693 }
3694 if (dlinfo.dli_fname != NULL) {
3695 st->print(" in %s", dlinfo.dli_fname);
3696 }
3697 if (dlinfo.dli_fbase != NULL) {
4223 int ret = OS_TIMEOUT;
4224 int status = pthread_mutex_lock(_mutex);
4225 assert_status(status == 0, status, "mutex_lock");
4226 guarantee(_nParked == 0, "invariant");
4227 ++_nParked;
4228
4229 // Object.wait(timo) will return because of
4230 // (a) notification
4231 // (b) timeout
4232 // (c) thread.interrupt
4233 //
4234 // Thread.interrupt and object.notify{All} both call Event::set.
4235 // That is, we treat thread.interrupt as a special case of notification.
4236 // We ignore spurious OS wakeups unless FilterSpuriousWakeups is false.
4237 // We assume all ETIME returns are valid.
4238 //
4239 // TODO: properly differentiate simultaneous notify+interrupt.
4240 // In that case, we should propagate the notify to another waiter.
4241
4242 while (_Event < 0) {
4243 status = pthread_cond_timedwait(_cond, _mutex, &abst);
4244 if (status != 0 && WorkAroundNPTLTimedWaitHang) {
4245 pthread_cond_destroy(_cond);
4246 pthread_cond_init(_cond, NULL);
4247 }
4248 assert_status(status == 0 || status == EINTR ||
4249 status == ETIMEDOUT,
4250 status, "cond_timedwait");
4251 if (!FilterSpuriousWakeups) break; // previous semantics
4252 if (status == ETIMEDOUT) break;
4253 // We consume and ignore EINTR and spurious wakeups.
4254 }
4255 --_nParked;
4256 if (_Event >= 0) {
4257 ret = OS_OK;
4258 }
4259 _Event = 0;
4260 status = pthread_mutex_unlock(_mutex);
4261 assert_status(status == 0, status, "mutex_unlock");
4262 assert(_nParked == 0, "invariant");
4263 // Paranoia to ensure our locked and lock-free paths interact
4429 // correctly with each other and Java-level accesses.
4430 OrderAccess::fence();
4431 return;
4432 }
4433
4434 #ifdef ASSERT
4435 // Don't catch signals while blocked; let the running threads have the signals.
4436 // (This allows a debugger to break into the running thread.)
4437 sigset_t oldsigs;
4438 sigset_t* allowdebug_blocked = os::Bsd::allowdebug_blocked_signals();
4439 pthread_sigmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
4440 #endif
4441
4442 OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
4443 jt->set_suspend_equivalent();
4444 // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
4445
4446 if (time == 0) {
4447 status = pthread_cond_wait(_cond, _mutex);
4448 } else {
4449 status = pthread_cond_timedwait(_cond, _mutex, &absTime);
4450 if (status != 0 && WorkAroundNPTLTimedWaitHang) {
4451 pthread_cond_destroy(_cond);
4452 pthread_cond_init(_cond, NULL);
4453 }
4454 }
4455 assert_status(status == 0 || status == EINTR ||
4456 status == ETIMEDOUT,
4457 status, "cond_timedwait");
4458
4459 #ifdef ASSERT
4460 pthread_sigmask(SIG_SETMASK, &oldsigs, NULL);
4461 #endif
4462
4463 _counter = 0;
4464 status = pthread_mutex_unlock(_mutex);
4465 assert_status(status == 0, status, "invariant");
4466 // Paranoia to ensure our locked and lock-free paths interact
4467 // correctly with each other and Java-level accesses.
4468 OrderAccess::fence();
4469