pthread.go 16 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728
  1. // Copyright 2021 The Libc Authors. All rights reserved.
  2. // Use of this source code is governed by a BSD-style
  3. // license that can be found in the LICENSE file.
  4. //go:build !(linux && (amd64 || arm64 || loong64 || ppc64le || s390x || riscv64 || 386 || arm))
  5. package libc // import "modernc.org/libc"
  6. import (
  7. "runtime"
  8. "sync"
  9. "sync/atomic"
  10. "time"
  11. "unsafe"
  12. "modernc.org/libc/errno"
  13. "modernc.org/libc/pthread"
  14. "modernc.org/libc/sys/types"
  15. ctime "modernc.org/libc/time"
  16. )
var (
	// mutexes maps the C address of a pthread_mutex_t to its Go-side state.
	mutexes   = map[uintptr]*mutex{}
	mutexesMu sync.Mutex

	// threads maps a thread ID to its TLS; only joinable (non-detached)
	// threads are registered here (see pthreadData.init).
	threads   = map[int32]*TLS{}
	threadsMu sync.Mutex

	// threadKey is the most recently issued pthread_key_t.
	threadKey            pthread.Pthread_key_t
	threadKeyDestructors = map[pthread.Pthread_key_t][]uintptr{} // key: []destructor
	threadsKeysMu        sync.Mutex

	// conds maps the C address of a pthread_cond_t to its Go-side state.
	conds   = map[uintptr]*cond{}
	condsMu sync.Mutex
)
// Thread local storage.
type TLS struct {
	errnop      uintptr     // address of this thread's C errno cell; see newTLS
	allocaStack [][]uintptr // saved alloca frames, pushed/popped by FreeAlloca
	allocas     []uintptr   // blocks alloca'd in the current frame
	jumpBuffers []uintptr   // setjmp buffers, LIFO; see PushJumpBuffer/PopJumpBuffer
	lastError   uint32      // NOTE(review): presumably a GetLastError-style value — confirm
	pthreadData
	sp                 int // stack slots currently in use; see StackSlots
	stack              stackHeader
	ID                 int32 // unique thread ID, assigned in newTLS
	reentryGuard       int32 // memgrind
	stackHeaderBalance int32
}
// errno0 is a shared scratch errno cell newTLS points errnop at until the
// thread's own cell is allocated.
var errno0 int32 // Temp errno for NewTLS

// NewTLS returns a new thread local storage for a joinable (non-detached)
// thread.
func NewTLS() *TLS {
	return newTLS(false)
}
// newTLS creates a TLS with a fresh thread ID and its own heap-allocated
// errno cell. Detached threads are not registered in the global thread map.
func newTLS(detached bool) *TLS {
	id := atomic.AddInt32(&tid, 1)
	// Point errnop at the shared scratch cell first: t.Alloc below may need a
	// valid errno location before the real cell exists.
	t := &TLS{ID: id, errnop: uintptr(unsafe.Pointer(&errno0))}
	t.pthreadData.init(t, detached)
	if memgrind {
		atomic.AddInt32(&tlsBalance, 1)
	}
	t.errnop = t.Alloc(int(unsafe.Sizeof(int32(0))))
	*(*int32)(unsafe.Pointer(t.errnop)) = 0
	return t
}
// StackSlots reports the number of tls stack slots currently in use.
func (tls *TLS) StackSlots() int {
	return tls.sp
}

// alloca allocates n bytes via Xmalloc and records the block so the
// enclosing FreeAlloca frame can release it later.
func (t *TLS) alloca(n size_t) (r uintptr) {
	r = Xmalloc(t, n)
	t.allocas = append(t.allocas, r)
	return r
}
// FreeAlloca opens a new alloca frame and returns the function that closes
// it: the returned func frees every block alloca'd since this call and
// restores the previous frame. Frames must be closed strictly LIFO
// (typically via defer).
func (t *TLS) FreeAlloca() func() {
	t.allocaStack = append(t.allocaStack, t.allocas)
	t.allocas = nil
	return func() {
		// t.allocas is read at call time, so it holds exactly the blocks
		// allocated since the matching FreeAlloca call.
		for _, v := range t.allocas {
			Xfree(t, v)
		}
		n := len(t.allocaStack)
		t.allocas = t.allocaStack[n-1]
		t.allocaStack = t.allocaStack[:n-1]
	}
}
// PushJumpBuffer records jb as the most recently entered setjmp buffer.
func (tls *TLS) PushJumpBuffer(jb uintptr) {
	tls.jumpBuffers = append(tls.jumpBuffers, jb)
}

// LongjmpRetval is the panic payload used to unwind to a setjmp point.
type LongjmpRetval int32

// PopJumpBuffer removes jb, which must be the most recently pushed buffer;
// any other usage pattern is unsupported and panics.
func (tls *TLS) PopJumpBuffer(jb uintptr) {
	n := len(tls.jumpBuffers)
	if n == 0 || tls.jumpBuffers[n-1] != jb {
		panic(todo("unsupported setjmp/longjmp usage"))
	}
	tls.jumpBuffers = tls.jumpBuffers[:n-1]
}

// Longjmp unwinds to the setjmp point identified by jb by panicking with
// LongjmpRetval(val). As in C longjmp, a zero val is reported as 1.
func (tls *TLS) Longjmp(jb uintptr, val int32) {
	tls.PopJumpBuffer(jb)
	if val == 0 {
		val = 1
	}
	panic(LongjmpRetval(val))
}
// Xalloca implements alloca(3). The memory is heap-allocated and released
// when the enclosing FreeAlloca frame is closed, not on C function return.
func Xalloca(tls *TLS, size size_t) uintptr {
	if __ccgo_strace {
		trc("tls=%v size=%v, (%v:)", tls, size, origin(2))
	}
	return tls.alloca(size)
}

// X__builtin_alloca is the GCC builtin spelling of alloca.
func X__builtin_alloca(tls *TLS, size size_t) uintptr {
	if __ccgo_strace {
		trc("tls=%v size=%v, (%v:)", tls, size, origin(2))
	}
	return Xalloca(tls, size)
}
// Pthread specific part of a TLS.
type pthreadData struct {
	done     chan struct{}                     // closed on thread exit; pthread_join blocks on it (joinable threads only)
	kv       map[pthread.Pthread_key_t]uintptr // pthread_setspecific values; lazily allocated
	retVal   uintptr                           // value passed to pthread_exit, read by pthread_join
	wait     chan struct{}                     // cond var interaction
	detached bool
}

// init prepares d for thread t. Joinable threads get a done channel and are
// registered in the global threads map; detached threads are not tracked.
func (d *pthreadData) init(t *TLS, detached bool) {
	d.detached = detached
	// Buffered (size 1) so a cond signal never blocks the signaler.
	d.wait = make(chan struct{}, 1)
	if detached {
		return
	}
	d.done = make(chan struct{})
	threadsMu.Lock()
	defer threadsMu.Unlock()
	threads[t.ID] = t
}

// close removes t from the global thread registry.
func (d *pthreadData) close(t *TLS) {
	threadsMu.Lock()
	defer threadsMu.Unlock()
	delete(threads, t.ID)
}
// int pthread_attr_destroy(pthread_attr_t *attr);
//
// No Go-side state is attached to attribute objects, so destroying one is a
// no-op that always succeeds.
func Xpthread_attr_destroy(t *TLS, pAttr uintptr) int32 {
	if __ccgo_strace {
		trc("t=%v pAttr=%v, (%v:)", t, pAttr, origin(2))
	}
	return 0
}

// int pthread_attr_setscope(pthread_attr_t *attr, int contentionscope);
//
// Only PTHREAD_SCOPE_SYSTEM is supported; any other scope is unimplemented
// and panics.
func Xpthread_attr_setscope(t *TLS, pAttr uintptr, contentionScope int32) int32 {
	if __ccgo_strace {
		trc("t=%v pAttr=%v contentionScope=%v, (%v:)", t, pAttr, contentionScope, origin(2))
	}
	switch contentionScope {
	case pthread.PTHREAD_SCOPE_SYSTEM:
		return 0
	default:
		panic(todo("", contentionScope))
	}
}

// int pthread_attr_setstacksize(pthread_attr_t *attr, size_t stacksize);
//
// Goroutine stacks grow automatically, so the requested size is ignored.
func Xpthread_attr_setstacksize(t *TLS, attr uintptr, stackSize types.Size_t) int32 {
	if __ccgo_strace {
		trc("t=%v attr=%v stackSize=%v, (%v:)", t, attr, stackSize, origin(2))
	}
	return 0
}
// Go side data of pthread_cond_t.
type cond struct {
	sync.Mutex                   // protects waiters
	waiters    map[*TLS]struct{} // threads currently blocked on this condition variable
}

// newCond returns an initialized condition variable with no waiters.
func newCond() *cond {
	return &cond{
		waiters: map[*TLS]struct{}{},
	}
}
// signal wakes one waiter, or all of them when all is set. A nil receiver
// (unknown condition variable) reports EINVAL.
func (c *cond) signal(all bool) int32 {
	if c == nil {
		return errno.EINVAL
	}
	c.Lock()
	defer c.Unlock()
	// The pthread_cond_broadcast() and pthread_cond_signal() functions shall have
	// no effect if there are no threads currently blocked on cond.
	for tls := range c.waiters {
		// The wait channel is buffered (size 1), so this send does not block
		// even if the waiter has not reached its receive yet.
		tls.wait <- struct{}{}
		delete(c.waiters, tls)
		if !all {
			break
		}
	}
	return 0
}
// The pthread_cond_init() function shall initialize the condition variable
// referenced by cond with attributes referenced by attr. If attr is NULL, the
// default condition variable attributes shall be used; the effect is the same
// as passing the address of a default condition variable attributes object.
// Upon successful initialization, the state of the condition variable shall
// become initialized.
//
// If successful, the pthread_cond_destroy() and pthread_cond_init() functions
// shall return zero; otherwise, an error number shall be returned to indicate
// the error.
//
// int pthread_cond_init(pthread_cond_t *restrict cond, const pthread_condattr_t *restrict attr);
func Xpthread_cond_init(t *TLS, pCond, pAttr uintptr) int32 {
	if __ccgo_strace {
		trc("t=%v pAttr=%v, (%v:)", t, pAttr, origin(2))
	}
	if pCond == 0 {
		return errno.EINVAL
	}
	// Non-default attributes are not implemented.
	if pAttr != 0 {
		panic(todo("%#x %#x", pCond, pAttr))
	}
	condsMu.Lock()
	defer condsMu.Unlock()
	// Re-initializing an address replaces any previous Go-side state.
	conds[pCond] = newCond()
	return 0
}
// int pthread_cond_destroy(pthread_cond_t *cond);
//
// Destroys the Go-side state of the condition variable at pCond. Fails with
// EINVAL for a NULL or unknown cond and with EBUSY while threads are still
// blocked on it.
func Xpthread_cond_destroy(t *TLS, pCond uintptr) int32 {
	if __ccgo_strace {
		trc("t=%v pCond=%v, (%v:)", t, pCond, origin(2))
	}
	if pCond == 0 {
		return errno.EINVAL
	}
	condsMu.Lock()
	defer condsMu.Unlock()
	cond := conds[pCond]
	if cond == nil {
		return errno.EINVAL
	}
	cond.Lock()
	defer cond.Unlock()
	if len(cond.waiters) != 0 {
		return errno.EBUSY
	}
	delete(conds, pCond)
	return 0
}
// int pthread_cond_signal(pthread_cond_t *cond);
func Xpthread_cond_signal(t *TLS, pCond uintptr) int32 {
	if __ccgo_strace {
		trc("t=%v pCond=%v, (%v:)", t, pCond, origin(2))
	}
	return condSignal(pCond, false)
}

// int pthread_cond_broadcast(pthread_cond_t *cond);
func Xpthread_cond_broadcast(t *TLS, pCond uintptr) int32 {
	if __ccgo_strace {
		trc("t=%v pCond=%v, (%v:)", t, pCond, origin(2))
	}
	return condSignal(pCond, true)
}

// condSignal wakes one waiter of the cond at pCond, or all of them when all
// is set. An unknown pCond yields EINVAL via the nil-receiver check inside
// (*cond).signal.
func condSignal(pCond uintptr, all bool) int32 {
	if pCond == 0 {
		return errno.EINVAL
	}
	condsMu.Lock()
	cond := conds[pCond]
	condsMu.Unlock()
	return cond.signal(all)
}
// int pthread_cond_wait(pthread_cond_t *restrict cond, pthread_mutex_t *restrict mutex);
//
// Atomically releases the mutex at pMutex, blocks until the cond at pCond is
// signaled, then re-acquires the mutex before returning.
func Xpthread_cond_wait(t *TLS, pCond, pMutex uintptr) int32 {
	if __ccgo_strace {
		trc("t=%v pMutex=%v, (%v:)", t, pMutex, origin(2))
	}
	if pCond == 0 {
		return errno.EINVAL
	}
	condsMu.Lock()
	cond := conds[pCond]
	if cond == nil { // static initialized condition variables are valid
		cond = newCond()
		conds[pCond] = cond
	}
	cond.Lock()
	// Register as a waiter before dropping the user mutex so a signal issued
	// in between is not lost (the buffered wait channel holds it).
	cond.waiters[t] = struct{}{}
	cond.Unlock()
	condsMu.Unlock()
	mutexesMu.Lock()
	mu := mutexes[pMutex]
	mutexesMu.Unlock()
	mu.Unlock()
	<-t.wait // woken by signal/broadcast
	mu.Lock()
	return 0
}
  283. // int pthread_cond_timedwait(pthread_cond_t *restrict cond, pthread_mutex_t *restrict mutex, const struct timespec *restrict abstime);
  284. func Xpthread_cond_timedwait(t *TLS, pCond, pMutex, pAbsTime uintptr) int32 {
  285. if __ccgo_strace {
  286. trc("t=%v pAbsTime=%v, (%v:)", t, pAbsTime, origin(2))
  287. }
  288. if pCond == 0 {
  289. return errno.EINVAL
  290. }
  291. condsMu.Lock()
  292. cond := conds[pCond]
  293. if cond == nil { // static initialized condition variables are valid
  294. cond = newCond()
  295. conds[pCond] = cond
  296. }
  297. cond.Lock()
  298. cond.waiters[t] = struct{}{}
  299. cond.Unlock()
  300. condsMu.Unlock()
  301. mutexesMu.Lock()
  302. mu := mutexes[pMutex]
  303. mutexesMu.Unlock()
  304. deadlineSecs := (*ctime.Timespec)(unsafe.Pointer(pAbsTime)).Ftv_sec
  305. deadlineNsecs := (*ctime.Timespec)(unsafe.Pointer(pAbsTime)).Ftv_nsec
  306. deadline := time.Unix(int64(deadlineSecs), int64(deadlineNsecs))
  307. d := deadline.Sub(time.Now())
  308. switch {
  309. case d <= 0:
  310. return errno.ETIMEDOUT
  311. default:
  312. to := time.After(d)
  313. mu.Unlock()
  314. defer mu.Lock()
  315. select {
  316. case <-t.wait:
  317. return 0
  318. case <-to:
  319. cond.Lock()
  320. defer cond.Unlock()
  321. delete(cond.waiters, t)
  322. return errno.ETIMEDOUT
  323. }
  324. }
  325. }
// Go side data of pthread_mutex_t
type mutex struct {
	sync.Mutex            // for PTHREAD_MUTEX_NORMAL this IS the lock; for RECURSIVE it guards the fields below
	typ        int        // PTHREAD_MUTEX_NORMAL, ...
	wait       sync.Mutex // held while a recursive mutex is owned; other lockers block on it
	id         int32      // owner's t.ID
	cnt        int32      // recursion depth (PTHREAD_MUTEX_RECURSIVE only)
	robust     bool       // robust mutexes are not implemented; any use panics
}

// newMutex returns a mutex of the given pthread mutex type.
func newMutex(typ int) *mutex {
	return &mutex{
		typ: typ,
	}
}
// lock acquires m on behalf of the thread with the given id, honoring the
// mutex type. Unknown types are treated as PTHREAD_MUTEX_NORMAL.
func (m *mutex) lock(id int32) int32 {
	if m.robust {
		panic(todo(""))
	}
	// If successful, the pthread_mutex_lock() and pthread_mutex_unlock() functions
	// shall return zero; otherwise, an error number shall be returned to indicate
	// the error.
	switch m.typ {
	default:
		fallthrough
	case pthread.PTHREAD_MUTEX_NORMAL:
		// If the mutex type is PTHREAD_MUTEX_NORMAL, deadlock detection shall not be
		// provided. Attempting to relock the mutex causes deadlock. If a thread
		// attempts to unlock a mutex that it has not locked or a mutex which is
		// unlocked, undefined behavior results.
		m.Lock()
		m.id = id
		return 0
	case pthread.PTHREAD_MUTEX_RECURSIVE:
		for {
			m.Lock()
			switch m.id {
			case 0:
				// Unowned: take ownership and hold m.wait so other threads
				// block below until the final unlock releases it.
				m.cnt = 1
				m.id = id
				m.wait.Lock()
				m.Unlock()
				return 0
			case id:
				// Re-entry by the owner: just bump the recursion count.
				m.cnt++
				m.Unlock()
				return 0
			}
			// Owned by another thread: release the field guard, block until
			// the owner drops m.wait, then retry from the top.
			m.Unlock()
			m.wait.Lock()
			// intentional empty section - wake up other waiters
			m.wait.Unlock()
		}
	}
}
// tryLock attempts to acquire m without blocking on behalf of the thread
// with the given id, returning 0 on success or EBUSY.
func (m *mutex) tryLock(id int32) int32 {
	if m.robust {
		panic(todo(""))
	}
	switch m.typ {
	default:
		fallthrough
	case pthread.PTHREAD_MUTEX_NORMAL:
		// NOTE(review): trylock on a NORMAL mutex always reports EBUSY, even
		// when the mutex is free — confirm this limitation is intended.
		return errno.EBUSY
	case pthread.PTHREAD_MUTEX_RECURSIVE:
		m.Lock()
		switch m.id {
		case 0:
			// Unowned: take ownership, mirroring (*mutex).lock.
			m.cnt = 1
			m.id = id
			m.wait.Lock()
			m.Unlock()
			return 0
		case id:
			// Re-entry by the owner: bump the recursion count.
			m.cnt++
			m.Unlock()
			return 0
		}
		// Owned by another thread: fail immediately instead of waiting.
		m.Unlock()
		return errno.EBUSY
	}
}
// unlock releases m. For recursive mutexes ownership is only dropped (and
// waiters released via m.wait) when the recursion count reaches zero.
func (m *mutex) unlock() int32 {
	if m.robust {
		panic(todo(""))
	}
	// If successful, the pthread_mutex_lock() and pthread_mutex_unlock() functions
	// shall return zero; otherwise, an error number shall be returned to indicate
	// the error.
	switch m.typ {
	default:
		fallthrough
	case pthread.PTHREAD_MUTEX_NORMAL:
		// If the mutex type is PTHREAD_MUTEX_NORMAL, deadlock detection shall not be
		// provided. Attempting to relock the mutex causes deadlock. If a thread
		// attempts to unlock a mutex that it has not locked or a mutex which is
		// unlocked, undefined behavior results.
		m.id = 0
		m.Unlock()
		return 0
	case pthread.PTHREAD_MUTEX_RECURSIVE:
		m.Lock()
		m.cnt--
		if m.cnt == 0 {
			// Final unlock: clear ownership and wake blocked lockers.
			m.id = 0
			m.wait.Unlock()
		}
		m.Unlock()
		return 0
	}
}
// int pthread_mutex_destroy(pthread_mutex_t *mutex);
//
// Drops the Go-side state for the mutex at pMutex; the caller's C storage is
// untouched. Always succeeds.
func Xpthread_mutex_destroy(t *TLS, pMutex uintptr) int32 {
	if __ccgo_strace {
		trc("t=%v pMutex=%v, (%v:)", t, pMutex, origin(2))
	}
	mutexesMu.Lock()
	defer mutexesMu.Unlock()
	delete(mutexes, pMutex)
	return 0
}

// int pthread_mutex_lock(pthread_mutex_t *mutex);
func Xpthread_mutex_lock(t *TLS, pMutex uintptr) int32 {
	if __ccgo_strace {
		trc("t=%v pMutex=%v, (%v:)", t, pMutex, origin(2))
	}
	mutexesMu.Lock()
	mu := mutexes[pMutex]
	if mu == nil { // static initialized mutexes are valid
		mu = newMutex(int(X__ccgo_getMutexType(t, pMutex)))
		mutexes[pMutex] = mu
	}
	// Release the registry lock before (possibly) blocking on the mutex.
	mutexesMu.Unlock()
	return mu.lock(t.ID)
}
  460. // int pthread_mutex_trylock(pthread_mutex_t *mutex);
  461. func Xpthread_mutex_trylock(t *TLS, pMutex uintptr) int32 {
  462. if __ccgo_strace {
  463. trc("t=%v pMutex=%v, (%v:)", t, pMutex, origin(2))
  464. }
  465. mutexesMu.Lock()
  466. mu := mutexes[pMutex]
  467. if mu == nil { // static initialized mutexes are valid
  468. mu = newMutex(int(X__ccgo_getMutexType(t, pMutex)))
  469. mutexes[pMutex] = mu
  470. }
  471. mutexesMu.Unlock()
  472. return mu.tryLock(t.ID)
  473. }
  474. // int pthread_mutex_unlock(pthread_mutex_t *mutex);
  475. func Xpthread_mutex_unlock(t *TLS, pMutex uintptr) int32 {
  476. if __ccgo_strace {
  477. trc("t=%v pMutex=%v, (%v:)", t, pMutex, origin(2))
  478. }
  479. mutexesMu.Lock()
  480. defer mutexesMu.Unlock()
  481. return mutexes[pMutex].unlock()
  482. }
  483. // int pthread_key_create(pthread_key_t *key, void (*destructor)(void*));
  484. func Xpthread_key_create(t *TLS, pKey, destructor uintptr) int32 {
  485. threadsKeysMu.Lock()
  486. defer threadsKeysMu.Unlock()
  487. threadKey++
  488. r := threadKey
  489. if destructor != 0 {
  490. threadKeyDestructors[r] = append(threadKeyDestructors[r], destructor)
  491. }
  492. *(*pthread.Pthread_key_t)(unsafe.Pointer(pKey)) = pthread.Pthread_key_t(r)
  493. return 0
  494. }
// int pthread_key_delete(pthread_key_t key);
func Xpthread_key_delete(t *TLS, key pthread.Pthread_key_t) int32 {
	if __ccgo_strace {
		trc("t=%v key=%v, (%v:)", t, key, origin(2))
	}
	if _, ok := t.kv[key]; ok {
		delete(t.kv, key)
		return 0
	}
	// NOTE(review): deleting a key the calling thread never set panics here,
	// although POSIX permits it — confirm whether any caller hits this path.
	panic(todo(""))
}

// void *pthread_getspecific(pthread_key_t key);
//
// Returns the calling thread's value for key, or 0 (NULL) if none was set —
// reading a nil map yields the zero value.
func Xpthread_getspecific(t *TLS, key pthread.Pthread_key_t) uintptr {
	if __ccgo_strace {
		trc("t=%v key=%v, (%v:)", t, key, origin(2))
	}
	return t.kv[key]
}

// int pthread_setspecific(pthread_key_t key, const void *value);
//
// Associates value with key for the calling thread, allocating the per-thread
// map on first use. Always returns 0.
func Xpthread_setspecific(t *TLS, key pthread.Pthread_key_t, value uintptr) int32 {
	if __ccgo_strace {
		trc("t=%v key=%v value=%v, (%v:)", t, key, value, origin(2))
	}
	if t.kv == nil {
		t.kv = map[pthread.Pthread_key_t]uintptr{}
	}
	t.kv[key] = value
	return 0
}
// int pthread_create(pthread_t *thread, const pthread_attr_t *attr, void *(*start_routine) (void *), void *arg);
//
// Starts start_routine in a new goroutine with a fresh TLS and stores the new
// thread's ID at pThread. Always returns 0.
func Xpthread_create(t *TLS, pThread, pAttr, startRoutine, arg uintptr) int32 {
	if __ccgo_strace {
		trc("t=%v arg=%v, (%v:)", t, arg, origin(2))
	}
	// Reinterpret the C function pointer as a Go func value; ccgo-compiled
	// start routines have exactly this shape.
	fn := (*struct {
		f func(*TLS, uintptr) uintptr
	})(unsafe.Pointer(&struct{ uintptr }{startRoutine})).f
	detached := pAttr != 0 && X__ccgo_pthreadAttrGetDetachState(t, pAttr) == pthread.PTHREAD_CREATE_DETACHED
	tls := newTLS(detached)
	// Publish the new thread ID before the goroutine runs, so the creator can
	// immediately join/detach it.
	*(*pthread.Pthread_t)(unsafe.Pointer(pThread)) = pthread.Pthread_t(tls.ID)
	go func() {
		// Route the return value through pthread_exit so key destructors and
		// join bookkeeping run for normal returns too.
		Xpthread_exit(tls, fn(tls, arg))
	}()
	return 0
}
  540. // int pthread_detach(pthread_t thread);
  541. func Xpthread_detach(t *TLS, thread pthread.Pthread_t) int32 {
  542. if __ccgo_strace {
  543. trc("t=%v thread=%v, (%v:)", t, thread, origin(2))
  544. }
  545. threadsMu.Lock()
  546. threads[int32(thread)].detached = true
  547. threadsMu.Unlock()
  548. return 0
  549. }
  550. // int pthread_equal(pthread_t t1, pthread_t t2);
  551. func Xpthread_equal(t *TLS, t1, t2 pthread.Pthread_t) int32 {
  552. if __ccgo_strace {
  553. trc("t=%v t2=%v, (%v:)", t, t2, origin(2))
  554. }
  555. return Bool32(t1 == t2)
  556. }
// void pthread_exit(void *value_ptr);
//
// Terminates the calling thread: records the exit value for pthread_join,
// runs key-destructor bookkeeping, deregisters detached threads (or signals
// joiners via the done channel) and stops the goroutine. Never returns.
func Xpthread_exit(t *TLS, value uintptr) {
	if __ccgo_strace {
		trc("t=%v value=%v, (%v:)", t, value, origin(2))
	}
	t.retVal = value
	// At thread exit, if a key value has a non-NULL destructor pointer, and the
	// thread has a non-NULL value associated with that key, the value of the key
	// is set to NULL, and then the function pointed to is called with the
	// previously associated value as its sole argument. The order of destructor
	// calls is unspecified if more than one destructor exists for a thread when it
	// exits.
	for k, v := range t.kv {
		if v == 0 {
			continue
		}
		threadsKeysMu.Lock()
		destructors := threadKeyDestructors[k]
		threadsKeysMu.Unlock()
		// Actually invoking destructors is unimplemented: any thread that exits
		// with a live value under a destructor-bearing key panics here.
		for _, destructor := range destructors {
			delete(t.kv, k)
			panic(todo("%#x", destructor)) //TODO call destructor(v)
		}
	}
	switch {
	case t.detached:
		// Nobody will join a detached thread; drop it from the registry now.
		threadsMu.Lock()
		delete(threads, t.ID)
		threadsMu.Unlock()
	default:
		// Wake any pthread_join waiting on this thread.
		close(t.done)
	}
	runtime.Goexit()
}
  591. // int pthread_join(pthread_t thread, void **value_ptr);
  592. func Xpthread_join(t *TLS, thread pthread.Pthread_t, pValue uintptr) int32 {
  593. if __ccgo_strace {
  594. trc("t=%v thread=%v pValue=%v, (%v:)", t, thread, pValue, origin(2))
  595. }
  596. threadsMu.Lock()
  597. tls := threads[int32(thread)]
  598. delete(threads, int32(thread))
  599. threadsMu.Unlock()
  600. <-tls.done
  601. if pValue != 0 {
  602. *(*uintptr)(unsafe.Pointer(pValue)) = tls.retVal
  603. }
  604. return 0
  605. }
  606. // pthread_t pthread_self(void);
  607. func Xpthread_self(t *TLS) pthread.Pthread_t {
  608. if __ccgo_strace {
  609. trc("t=%v, (%v:)", t, origin(2))
  610. }
  611. return pthread.Pthread_t(t.ID)
  612. }