pthread_musl.go 12 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488
  1. // Copyright 2024 The Libc Authors. All rights reserved.
  2. // Use of this source code is governed by a BSD-style
  3. // license that can be found in the LICENSE file.
  4. //go:build linux && (amd64 || arm64 || loong64 || ppc64le || s390x || riscv64 || 386 || arm)
  5. package libc // import "modernc.org/libc"
  6. import (
  7. "runtime"
  8. "sync"
  9. "sync/atomic"
  10. "time"
  11. "unsafe"
  12. )
// pthreadAttr is the Go overlay of the pthread_attr_t storage used by this
// implementation. Only the detach state is tracked; all other attributes are
// accepted and ignored.
type pthreadAttr struct {
	detachState int32 // 0 = joinable (default), nonzero = create detached
}
// pthreadCleanupItem records one pthread_cleanup_push entry: the C cleanup
// routine and its argument. Items are popped/run in LIFO order by
// X_pthread_cleanup_pop and Xpthread_exit.
type pthreadCleanupItem struct {
	routine, arg uintptr
}
// C original, unpatched version
//
// include/alltypes.h.in:86:TYPEDEF struct {
// union {
// int __i[sizeof(long)==8?10:6];
// volatile int __vi[sizeof(long)==8?10:6];
// volatile void *volatile __p[sizeof(long)==8?5:6];
// } __u;
// } pthread_mutex_t;
//TODO(jnml) can remove __ccgo_room patches now.

// We overlay the C version with our version below. It must not be larger than
// the C version (see the compile-time size guards in the var block below).
//
// count is the recursion depth for PTHREAD_MUTEX_RECURSIVE mutexes, typ is
// the PTHREAD_MUTEX_* kind and owner is the tls.ID of the current owner
// (0 = unowned). The trailing comment columns are offset/size bookkeeping for
// the gc and tinygo compilers on 64/32-bit targets.
type pthreadMutex struct { // gc 64b 32b | tinygo 64b 32b
	sync.Mutex // 0 8 0 4 | 0 16 0 8
	count int32 // 8 4 4 4 | 16 4 8 4
	typ uint32 // 12 4 8 4 | 20 4 12 4
	owner int32 // 16 4 12 4 | 24 4 16 4
	// 20 16 | 28 20
}
// pthreadConds is the process-wide registry of condition-variable waiters,
// keyed by the pthread_cond_t address. Each waiter is represented by a
// channel that pthreadSignalN closes to wake it. The embedded Mutex guards
// the map and its slices.
type pthreadConds struct {
	sync.Mutex
	conds map[uintptr][]chan struct{}
}
var (
	// Compile-time guards: the Go overlay types must not be larger than the
	// C types they are superimposed on. A negative array length here fails
	// the build.
	_ [unsafe.Sizeof(Tpthread_mutex_t{}) - unsafe.Sizeof(pthreadMutex{})]byte
	_ [unsafe.Sizeof(Tpthread_attr_t{}) - unsafe.Sizeof(pthreadAttr{})]byte

	pthreadKeysMutex sync.Mutex // guards pthreadKeyDestructors and pthreadKeysFree
	// pthreadKeyDestructors maps a Tpthread_key_t (used as index) to its
	// destructor function pointer; 0 means no destructor.
	pthreadKeyDestructors []uintptr
	// pthreadKeysFree holds deleted keys available for reuse.
	pthreadKeysFree []Tpthread_key_t
	// conds is the global condition-variable waiter registry.
	conds = pthreadConds{conds: map[uintptr][]chan struct{}{}}
)
// _pthread_setcancelstate implements pthread_setcancelstate(3). The state is
// recorded in the pthread structure but not otherwise acted upon:
// cancellation is not implemented.
//
//TODO actually respect cancel state
func _pthread_setcancelstate(tls *TLS, new int32, old uintptr) int32 {
	// Values 0..2 are accepted (presumably ENABLE, DISABLE and a
	// musl-internal third state — TODO confirm); everything else is EINVAL.
	if uint32(new) > 2 {
		return EINVAL
	}
	p := tls.pthread + unsafe.Offsetof(t__pthread{}.Fcanceldisable)
	if old != 0 {
		// Report the previous state; only the low byte of the stored value
		// is meaningful (see the byte conversion).
		r := *(*int32)(unsafe.Pointer(p))
		*(*int32)(unsafe.Pointer(old)) = int32(byte(r))
	}
	*(*int32)(unsafe.Pointer(p)) = new
	return 0
}
  64. func Xpthread_getspecific(tls *TLS, k Tpthread_key_t) uintptr {
  65. return tls.pthreadKeyValues[k]
  66. }
  67. func Xpthread_setspecific(tls *TLS, k Tpthread_key_t, x uintptr) int32 {
  68. if tls.pthreadKeyValues == nil {
  69. tls.pthreadKeyValues = map[Tpthread_key_t]uintptr{}
  70. }
  71. tls.pthreadKeyValues[k] = x
  72. return 0
  73. }
  74. func Xpthread_key_create(tls *TLS, k uintptr, dtor uintptr) int32 {
  75. pthreadKeysMutex.Lock()
  76. defer pthreadKeysMutex.Unlock()
  77. var key Tpthread_key_t
  78. switch l := Tpthread_key_t(len(pthreadKeysFree)); {
  79. case l == 0:
  80. key = Tpthread_key_t(len(pthreadKeyDestructors))
  81. pthreadKeyDestructors = append(pthreadKeyDestructors, dtor)
  82. default:
  83. key = pthreadKeysFree[l-1]
  84. pthreadKeysFree = pthreadKeysFree[:l-1]
  85. pthreadKeyDestructors[key] = dtor
  86. }
  87. *(*Tpthread_key_t)(unsafe.Pointer(k)) = key
  88. return 0
  89. }
  90. func Xpthread_key_delete(tls *TLS, k Tpthread_key_t) int32 {
  91. pthreadKeysMutex.Lock()
  92. defer pthreadKeysMutex.Unlock()
  93. pthreadKeysFree = append(pthreadKeysFree, k)
  94. return 0
  95. }
// Xpthread_create implements pthread_create(3). The new "thread" is a
// goroutine with its own TLS; its pthread_t handle is stored at *res. The C
// start routine entry is invoked via an unsafe function-pointer conversion
// and its return value becomes the thread's exit result (via Xpthread_exit).
func Xpthread_create(tls *TLS, res, attrp, entry, arg uintptr) int32 {
	var attr pthreadAttr
	if attrp != 0 {
		attr = *(*pthreadAttr)(unsafe.Pointer(attrp))
	}
	// Any nonzero detachState in the attributes means "create detached".
	detachState := int32(_DT_JOINABLE)
	if attr.detachState != 0 {
		detachState = _DT_DETACHED
	}
	tls2 := NewTLS()
	// NOTE(review): presumably this keeps tls2.Close from releasing the
	// pthread storage that joiners still read — TODO confirm.
	tls2.ownsPthread = false
	*(*Tpthread_t)(unsafe.Pointer(res)) = tls2.pthread
	(*t__pthread)(unsafe.Pointer(tls2.pthread)).Fdetach_state = detachState
	if detachState == _DT_JOINABLE {
		// Held until the thread exits; Xpthread_join blocks by acquiring it.
		(*sync.Mutex)(unsafe.Pointer(tls2.pthread + unsafe.Offsetof(t__pthread{}.F__ccgo_join_mutex))).Lock()
	}
	go func() {
		Xpthread_exit(tls2, (*(*func(*TLS, uintptr) uintptr)(unsafe.Pointer(&struct{ uintptr }{entry})))(tls2, arg))
	}()
	return 0
}
// Xpthread_exit implements pthread_exit(3) for goroutine-backed threads: it
// publishes the exit result, runs outstanding cleanup handlers and TSD key
// destructors, releases a pending joiner and terminates the calling
// goroutine via runtime.Goexit.
func Xpthread_exit(tls *TLS, result uintptr) {
	state := atomic.LoadInt32((*int32)(unsafe.Pointer(tls.pthread + unsafe.Offsetof(t__pthread{}.Fdetach_state))))
	(*t__pthread)(unsafe.Pointer(tls.pthread)).Fresult = result
	switch state {
	case _DT_JOINABLE, _DT_DETACHED:
		// ok
	default:
		panic(todo("", state))
	}
	// Run remaining cleanup handlers in LIFO order, as if each were popped
	// with pthread_cleanup_pop(1).
	for len(tls.pthreadCleanupItems) != 0 {
		Xpthread_cleanup_pop(tls, 1)
	}
	// Run TSD destructors. The outer loop repeats because a destructor may
	// itself bind new non-zero values to keys. NOTE(review): POSIX caps this
	// at PTHREAD_DESTRUCTOR_ITERATIONS; this loop is unbounded, so a
	// destructor that always re-sets a value would spin forever.
	for {
		done := true
		for k, v := range tls.pthreadKeyValues {
			if v != 0 {
				delete(tls.pthreadKeyValues, k)
				pthreadKeysMutex.Lock()
				d := pthreadKeyDestructors[k]
				pthreadKeysMutex.Unlock()
				if d != 0 {
					done = false
					(*(*func(*TLS, uintptr))(unsafe.Pointer(&struct{ uintptr }{d})))(tls, v)
				}
			}
		}
		if done {
			break
		}
	}
	if state == _DT_JOINABLE {
		// Wake a joiner blocked in Xpthread_join. Fresult was published
		// above, before this release.
		(*sync.Mutex)(unsafe.Pointer(tls.pthread + unsafe.Offsetof(t__pthread{}.F__ccgo_join_mutex))).Unlock()
	}
	atomic.StoreInt32((*int32)(unsafe.Pointer(tls.pthread+unsafe.Offsetof(t__pthread{}.Fdetach_state))), _DT_EXITED)
	tls.Close()
	runtime.Goexit()
}
  154. func Xpthread_join(tls *TLS, t Tpthread_t, res uintptr) (r int32) {
  155. if (*t__pthread)(unsafe.Pointer(t)).Fdetach_state > _DT_JOINABLE {
  156. return EINVAL
  157. }
  158. (*sync.Mutex)(unsafe.Pointer(t + unsafe.Offsetof(t__pthread{}.F__ccgo_join_mutex))).Lock()
  159. if res != 0 {
  160. *(*uintptr)(unsafe.Pointer(res)) = (*t__pthread)(unsafe.Pointer(tls.pthread)).Fresult
  161. }
  162. return 0
  163. }
  164. func Xpthread_cleanup_push(tls *TLS, f, x uintptr) {
  165. X_pthread_cleanup_push(tls, 0, f, x)
  166. }
  167. func __pthread_cleanup_push(tls *TLS, _, f, x uintptr) {
  168. tls.pthreadCleanupItems = append(tls.pthreadCleanupItems, pthreadCleanupItem{f, x})
  169. }
  170. func X_pthread_cleanup_push(tls *TLS, _, f, x uintptr) {
  171. tls.pthreadCleanupItems = append(tls.pthreadCleanupItems, pthreadCleanupItem{f, x})
  172. }
  173. func Xpthread_cleanup_pop(tls *TLS, run int32) {
  174. X_pthread_cleanup_pop(tls, 0, run)
  175. }
  176. func __pthread_cleanup_pop(tls *TLS, _ uintptr, run int32) {
  177. X_pthread_cleanup_pop(tls, 0, run)
  178. }
  179. func X_pthread_cleanup_pop(tls *TLS, _ uintptr, run int32) {
  180. l := len(tls.pthreadCleanupItems)
  181. item := tls.pthreadCleanupItems[l-1]
  182. tls.pthreadCleanupItems = tls.pthreadCleanupItems[:l-1]
  183. if run != 0 {
  184. (*(*func(*TLS, uintptr))(unsafe.Pointer(&struct{ uintptr }{item.routine})))(tls, item.arg)
  185. }
  186. }
  187. func Xpthread_attr_init(tls *TLS, a uintptr) int32 {
  188. *(*Tpthread_attr_t)(unsafe.Pointer(a)) = Tpthread_attr_t{}
  189. return 0
  190. }
  191. func Xpthread_attr_setscope(tls *TLS, a uintptr, scope int32) int32 {
  192. switch scope {
  193. case PTHREAD_SCOPE_SYSTEM:
  194. return 0
  195. case PTHREAD_SCOPE_PROCESS:
  196. return ENOTSUP
  197. default:
  198. return EINVAL
  199. }
  200. }
  201. func Xpthread_attr_setstacksize(tls *TLS, a uintptr, stacksite Tsize_t) int32 {
  202. return 0
  203. }
  204. func Xpthread_attr_setdetachstate(tls *TLS, a uintptr, state int32) (r int32) {
  205. if uint32(state) > 1 {
  206. return EINVAL
  207. }
  208. (*pthreadAttr)(unsafe.Pointer(a)).detachState = state
  209. return 0
  210. }
  211. func Xpthread_attr_getdetachstate(tls *TLS, a uintptr, state uintptr) int32 {
  212. *(*int32)(unsafe.Pointer(state)) = (*pthreadAttr)(unsafe.Pointer(a)).detachState
  213. return 0
  214. }
  215. func Xpthread_attr_destroy(tls *TLS, a uintptr) int32 {
  216. return 0
  217. }
  218. func Xpthread_self(tls *TLS) uintptr {
  219. return tls.pthread
  220. }
  221. func Xpthread_mutex_init(tls *TLS, m, a uintptr) int32 {
  222. *(*Tpthread_mutex_t)(unsafe.Pointer(m)) = Tpthread_mutex_t{}
  223. if a != 0 {
  224. (*pthreadMutex)(unsafe.Pointer(m)).typ = (*Tpthread_mutexattr_t)(unsafe.Pointer(a)).F__attr
  225. }
  226. return 0
  227. }
  228. func Xpthread_mutex_destroy(tls *TLS, m uintptr) int32 {
  229. *(*Tpthread_mutex_t)(unsafe.Pointer(m)) = Tpthread_mutex_t{}
  230. return 0
  231. }
// Xpthread_mutex_lock implements pthread_mutex_lock(3) for normal and
// recursive mutexes; other types are not implemented and panic.
func Xpthread_mutex_lock(tls *TLS, m uintptr) int32 {
	switch typ := (*pthreadMutex)(unsafe.Pointer(m)).typ; typ {
	case PTHREAD_MUTEX_NORMAL:
		(*pthreadMutex)(unsafe.Pointer(m)).Lock()
		return 0
	case PTHREAD_MUTEX_RECURSIVE:
		// Fast path: the mutex is unowned — claim ownership via CAS, then
		// take the underlying sync.Mutex.
		if atomic.CompareAndSwapInt32(&((*pthreadMutex)(unsafe.Pointer(m)).owner), 0, tls.ID) {
			(*pthreadMutex)(unsafe.Pointer(m)).count = 1
			(*pthreadMutex)(unsafe.Pointer(m)).Lock()
			return 0
		}
		// Recursive acquisition by the current owner: just bump the depth.
		if atomic.LoadInt32(&((*pthreadMutex)(unsafe.Pointer(m)).owner)) == tls.ID {
			(*pthreadMutex)(unsafe.Pointer(m)).count++
			return 0
		}
		// Contended path: block on the sync.Mutex, then try to become the
		// owner; if another thread won the ownership race (fast path above),
		// release and retry.
		for {
			(*pthreadMutex)(unsafe.Pointer(m)).Lock()
			if atomic.CompareAndSwapInt32(&((*pthreadMutex)(unsafe.Pointer(m)).owner), 0, tls.ID) {
				(*pthreadMutex)(unsafe.Pointer(m)).count = 1
				return 0
			}
			(*pthreadMutex)(unsafe.Pointer(m)).Unlock()
		}
	default:
		panic(todo("", typ))
	}
}
  259. func Xpthread_mutex_trylock(tls *TLS, m uintptr) int32 {
  260. switch typ := (*pthreadMutex)(unsafe.Pointer(m)).typ; typ {
  261. case PTHREAD_MUTEX_NORMAL:
  262. if (*pthreadMutex)(unsafe.Pointer(m)).TryLock() {
  263. return 0
  264. }
  265. return EBUSY
  266. default:
  267. panic(todo("typ=%v", typ))
  268. }
  269. }
// Xpthread_mutex_unlock implements pthread_mutex_unlock(3). For recursive
// mutexes only the owning thread may unlock (EPERM otherwise); the
// underlying sync.Mutex is released when the recursion depth reaches zero.
func Xpthread_mutex_unlock(tls *TLS, m uintptr) int32 {
	switch typ := (*pthreadMutex)(unsafe.Pointer(m)).typ; typ {
	case PTHREAD_MUTEX_NORMAL:
		(*pthreadMutex)(unsafe.Pointer(m)).Unlock()
		return 0
	case PTHREAD_MUTEX_RECURSIVE:
		if atomic.LoadInt32(&((*pthreadMutex)(unsafe.Pointer(m)).owner)) != tls.ID {
			return EPERM
		}
		if atomic.AddInt32(&((*pthreadMutex)(unsafe.Pointer(m)).count), -1) == 0 {
			// Final unlock: drop ownership before releasing the sync.Mutex
			// so a waiter in Xpthread_mutex_lock can claim it.
			atomic.StoreInt32(&((*pthreadMutex)(unsafe.Pointer(m)).owner), 0)
			(*pthreadMutex)(unsafe.Pointer(m)).Unlock()
		}
		return 0
	default:
		panic(todo("", typ))
	}
}
  288. func Xpthread_cond_init(tls *TLS, c, a uintptr) int32 {
  289. *(*Tpthread_cond_t)(unsafe.Pointer(c)) = Tpthread_cond_t{}
  290. if a != 0 {
  291. panic(todo(""))
  292. }
  293. conds.Lock()
  294. delete(conds.conds, c)
  295. conds.Unlock()
  296. return 0
  297. }
// Xpthread_cond_timedwait implements pthread_cond_timedwait(3) on top of a
// per-condvar list of signal channels. ts, when non-zero, is an absolute
// deadline interpreted via time.Unix (NOTE(review): CLOCK_REALTIME is
// assumed; condattr clock settings are not consulted). Returns 0 when
// signalled or ETIMEDOUT on deadline expiry.
func Xpthread_cond_timedwait(tls *TLS, c, m, ts uintptr) (r int32) {
	var to <-chan time.Time
	if ts != 0 {
		deadlineSecs := (*Ttimespec)(unsafe.Pointer(ts)).Ftv_sec
		deadlineNsecs := (*Ttimespec)(unsafe.Pointer(ts)).Ftv_nsec
		deadline := time.Unix(deadlineSecs, int64(deadlineNsecs))
		d := deadline.Sub(time.Now())
		// A deadline already in the past times out immediately, without
		// releasing/re-acquiring the mutex.
		if d <= 0 {
			return ETIMEDOUT
		}
		to = time.After(d)
	}
	// Register this waiter while still holding the caller's mutex, so a
	// signal arriving between the unlock below and the select cannot be
	// missed: it will find (and close) our channel in the registry.
	conds.Lock()
	waiters := conds.conds[c]
	ch := make(chan struct{}, 1)
	waiters = append(waiters, ch)
	conds.conds[c] = waiters
	conds.Unlock()
	// On any exit, remove this waiter from the registry unless a signaller
	// already detached it (then the scan finds no matching channel).
	defer func() {
		conds.Lock()
		defer conds.Unlock()
		waiters = conds.conds[c]
		for i, v := range waiters {
			if v == ch {
				conds.conds[c] = append(waiters[:i], waiters[i+1:]...)
				return
			}
		}
	}()
	switch typ := (*pthreadMutex)(unsafe.Pointer(m)).typ; typ {
	case PTHREAD_MUTEX_NORMAL:
		(*pthreadMutex)(unsafe.Pointer(m)).Unlock()
		select {
		case <-ch:
			// ok — woken by pthreadSignalN closing our channel
		case <-to:
			r = ETIMEDOUT
		}
		// Per POSIX the mutex is re-acquired before returning, even on
		// timeout.
		(*pthreadMutex)(unsafe.Pointer(m)).Lock()
		return r
	default:
		panic(todo("", typ))
	}
}
  342. func Xpthread_cond_wait(tls *TLS, c, m uintptr) int32 {
  343. return Xpthread_cond_timedwait(tls, c, m, 0)
  344. }
  345. func Xpthread_cond_signal(tls *TLS, c uintptr) int32 {
  346. return pthreadSignalN(tls, c, false)
  347. }
// pthreadSignalN wakes waiters of condition variable c: the first one when
// all is false (signal), every registered one when all is true (broadcast).
// Waking is done by closing the waiter's channel, performed outside the
// registry lock.
func pthreadSignalN(tls *TLS, c uintptr, all bool) int32 {
	conds.Lock()
	waiters := conds.conds[c]
	handle := waiters
	if len(waiters) != 0 {
		switch {
		case all:
			// Broadcast: detach the whole list. A waiter's late
			// self-removal (timeout path) then finds no registry entry.
			delete(conds.conds, c)
		default:
			// Signal: take only the first waiter; the rest stay registered.
			handle = handle[:1]
			conds.conds[c] = waiters[1:]
		}
	}
	conds.Unlock()
	// Closing (rather than sending) also wakes a waiter that is concurrently
	// timing out, and is idempotent with respect to its select.
	for _, v := range handle {
		close(v)
	}
	return 0
}
  367. func Xpthread_cond_broadcast(tls *TLS, c uintptr) int32 {
  368. return pthreadSignalN(tls, c, true)
  369. }
  370. func Xpthread_cond_destroy(tls *TLS, c uintptr) int32 {
  371. return Xpthread_cond_broadcast(tls, c)
  372. }
  373. func Xpthread_atfork(tls *TLS, prepare, parent, child uintptr) int32 {
  374. // fork(2) not supported.
  375. return 0
  376. }
  377. func Xpthread_mutexattr_init(tls *TLS, a uintptr) int32 {
  378. *(*Tpthread_mutexattr_t)(unsafe.Pointer(a)) = Tpthread_mutexattr_t{}
  379. return 0
  380. }
  381. func Xpthread_mutexattr_destroy(tls *TLS, a uintptr) int32 {
  382. return 0
  383. }
  384. func Xpthread_mutexattr_settype(tls *TLS, a uintptr, typ int32) int32 {
  385. if uint32(typ) > 2 {
  386. return EINVAL
  387. }
  388. (*Tpthread_mutexattr_t)(unsafe.Pointer(a)).F__attr = uint32(typ) & 3
  389. return 0
  390. }
  391. func Xpthread_detach(tls *TLS, t uintptr) int32 {
  392. state := atomic.SwapInt32((*int32)(unsafe.Pointer(tls.pthread+unsafe.Offsetof(t__pthread{}.Fdetach_state))), _DT_DETACHED)
  393. switch state {
  394. case _DT_EXITED, _DT_DETACHED:
  395. return 0
  396. default:
  397. panic(todo("", tls.ID, state))
  398. }
  399. }
  400. // int pthread_equal(pthread_t, pthread_t);
  401. func Xpthread_equal(tls *TLS, t, u uintptr) int32 {
  402. return Bool32(t == u)
  403. }
  404. // int pthread_sigmask(int how, const sigset_t *restrict set, sigset_t *restrict old)
  405. func _pthread_sigmask(tls *TLS, now int32, set, old uintptr) int32 {
  406. // ignored
  407. return 0
  408. }
  409. // 202402251838 all_test.go:589: files=36 buildFails=30 execFails=2 pass=4
  410. // 202402262246 all_test.go:589: files=36 buildFails=26 execFails=2 pass=8
  411. // 202403041858 all_musl_test.go:640: files=36 buildFails=22 execFails=4 pass=10