etc.go 18 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928
  1. // Copyright 2020 The Libc Authors. All rights reserved.
  2. // Use of this source code is governed by a BSD-style
  3. // license that can be found in the LICENSE file.
  4. //go:build !(linux && (amd64 || arm64 || loong64 || ppc64le || s390x || riscv64 || 386 || arm))
  5. package libc // import "modernc.org/libc"
  6. import (
  7. "fmt"
  8. "io"
  9. "os"
  10. "path/filepath"
  11. "reflect"
  12. "runtime"
  13. "runtime/debug"
  14. "sort"
  15. "strconv"
  16. "strings"
  17. "sync"
  18. "sync/atomic"
  19. "time"
  20. "unsafe"
  21. "modernc.org/libc/errno"
  22. "modernc.org/libc/signal"
  23. "modernc.org/libc/sys/types"
  24. )
const (
	// allocatorPageOverhead is the allocator's per-page bookkeeping cost:
	// four machine words.
	allocatorPageOverhead = 4 * unsafe.Sizeof(int(0))
	// stackHeaderSize is the size of the header at the start of every TLS
	// stack segment (see stackHeader); init() asserts it is a multiple of 16.
	stackHeaderSize = unsafe.Sizeof(stackHeader{})
	// stackSegmentSize is chosen so a segment plus the allocator page
	// overhead fits exactly in a 4 KiB page.
	stackSegmentSize = 1<<12 - allocatorPageOverhead
	// uintptrSize is the size in bytes of a machine pointer.
	uintptrSize = unsafe.Sizeof(uintptr(0))
)
var (
	// Covered records PCs of Go code locations hit via Cover.
	Covered = map[uintptr]struct{}{}
	// CoveredC records C code locations hit via CoverC.
	CoveredC = map[string]struct{}{}
	// fToken is the monotonically increasing source of object tokens (token()).
	fToken uintptr
	// tid is a thread id counter. NOTE(review): not referenced in this chunk —
	// verify usage elsewhere in the package.
	tid int32

	atExit   []func()   // registered atexit handlers
	atExitMu sync.Mutex // guards atExit

	signals   [signal.NSIG]uintptr // registered signal handlers, indexed by signal number
	signalsMu sync.Mutex           // guards signals

	objectMu sync.Mutex // guards objects
	objects  = map[uintptr]interface{}{}

	tlsBalance int32 // memgrind accounting: decremented in TLS.Close

	// Keep origin/trc referenced even when unused, so they survive builds
	// that would otherwise flag them.
	_ = origin
	_ = trc
)
  46. func init() {
  47. if n := stackHeaderSize; n%16 != 0 {
  48. panic(fmt.Errorf("internal error: stackHeaderSize %v == %v (mod 16)", n, n%16))
  49. }
  50. }
  51. func origin(skip int) string {
  52. pc, fn, fl, _ := runtime.Caller(skip)
  53. f := runtime.FuncForPC(pc)
  54. var fns string
  55. if f != nil {
  56. fns = f.Name()
  57. if x := strings.LastIndex(fns, "."); x > 0 {
  58. fns = fns[x+1:]
  59. }
  60. }
  61. return fmt.Sprintf("%s:%d:%s", filepath.Base(fn), fl, fns)
  62. }
  63. func trc(s string, args ...interface{}) string { //TODO-
  64. switch {
  65. case s == "":
  66. s = fmt.Sprintf(strings.Repeat("%v ", len(args)), args...)
  67. default:
  68. s = fmt.Sprintf(s, args...)
  69. }
  70. r := fmt.Sprintf("%s: TRC %s", origin(2), s)
  71. fmt.Fprintf(os.Stdout, "%s\n", r)
  72. os.Stdout.Sync()
  73. return r
  74. }
  75. func todo(s string, args ...interface{}) string { //TODO-
  76. switch {
  77. case s == "":
  78. s = fmt.Sprintf(strings.Repeat("%v ", len(args)), args...)
  79. default:
  80. s = fmt.Sprintf(s, args...)
  81. }
  82. r := fmt.Sprintf("%s: TODOTODO %s", origin(2), s) //TODOOK
  83. if dmesgs {
  84. dmesg("%s", r)
  85. }
  86. fmt.Fprintf(os.Stdout, "%s\n", r)
  87. fmt.Fprintf(os.Stdout, "%s\n", debug.Stack()) //TODO-
  88. os.Stdout.Sync()
  89. os.Exit(1)
  90. panic("unrechable")
  91. }
// coverPCs is scratch space for Cover. //TODO not concurrent safe
var coverPCs [1]uintptr //TODO not concurrent safe

// Cover records the caller's program counter in Covered for later reporting
// via CoverReport.
func Cover() {
	runtime.Callers(2, coverPCs[:])
	Covered[coverPCs[0]] = struct{}{}
}
  97. func CoverReport(w io.Writer) error {
  98. var a []string
  99. pcs := make([]uintptr, 1)
  100. for pc := range Covered {
  101. pcs[0] = pc
  102. frame, _ := runtime.CallersFrames(pcs).Next()
  103. a = append(a, fmt.Sprintf("%s:%07d:%s", filepath.Base(frame.File), frame.Line, frame.Func.Name()))
  104. }
  105. sort.Strings(a)
  106. _, err := fmt.Fprintf(w, "%s\n", strings.Join(a, "\n"))
  107. return err
  108. }
// CoverC records the C code location s as covered, for later reporting via
// CoverCReport.
func CoverC(s string) {
	CoveredC[s] = struct{}{}
}
  112. func CoverCReport(w io.Writer) error {
  113. var a []string
  114. for k := range CoveredC {
  115. a = append(a, k)
  116. }
  117. sort.Strings(a)
  118. _, err := fmt.Fprintf(w, "%s\n", strings.Join(a, "\n"))
  119. return err
  120. }
  121. func token() uintptr { return atomic.AddUintptr(&fToken, 1) }
  122. func addObject(o interface{}) uintptr {
  123. t := token()
  124. objectMu.Lock()
  125. objects[t] = o
  126. objectMu.Unlock()
  127. return t
  128. }
  129. func getObject(t uintptr) interface{} {
  130. objectMu.Lock()
  131. o := objects[t]
  132. if o == nil {
  133. panic(todo("", t))
  134. }
  135. objectMu.Unlock()
  136. return o
  137. }
  138. func removeObject(t uintptr) {
  139. objectMu.Lock()
  140. if _, ok := objects[t]; !ok {
  141. panic(todo(""))
  142. }
  143. delete(objects, t)
  144. objectMu.Unlock()
  145. }
  146. func (t *TLS) setErrno(err interface{}) {
  147. if t == nil {
  148. panic("nil TLS")
  149. }
  150. if memgrind {
  151. if atomic.SwapInt32(&t.reentryGuard, 1) != 0 {
  152. panic(todo("concurrent use of TLS instance %p", t))
  153. }
  154. defer func() {
  155. if atomic.SwapInt32(&t.reentryGuard, 0) != 1 {
  156. panic(todo("concurrent use of TLS instance %p", t))
  157. }
  158. }()
  159. }
  160. // if dmesgs {
  161. // dmesg("%v: %T(%v)\n%s", origin(1), err, err, debug.Stack())
  162. // }
  163. again:
  164. switch x := err.(type) {
  165. case int:
  166. *(*int32)(unsafe.Pointer(t.errnop)) = int32(x)
  167. case int32:
  168. *(*int32)(unsafe.Pointer(t.errnop)) = x
  169. case *os.PathError:
  170. err = x.Err
  171. goto again
  172. case syscallErrno:
  173. *(*int32)(unsafe.Pointer(t.errnop)) = int32(x)
  174. case *os.SyscallError:
  175. err = x.Err
  176. goto again
  177. default:
  178. panic(todo("%T", x))
  179. }
  180. }
// Close frees the resources of t.
func (t *TLS) Close() {
	// Free the int32 the TLS holds for errno — presumably allocated when the
	// TLS was created; verify against the constructor.
	t.Free(int(unsafe.Sizeof(int32(0))))
	if memgrind {
		if t.stackHeaderBalance != 0 {
			panic(todo("non zero stack header balance: %d", t.stackHeaderBalance))
		}
		atomic.AddInt32(&tlsBalance, -1)
	}
	t.pthreadData.close(t)
	// Zero the struct so accidental reuse fails fast.
	*t = TLS{}
}
// Alloc allocates n bytes of thread-local storage. It must be paired with a
// call to t.Free(n), using the same n. The order matters. This is ok:
//
//	t.Alloc(11)
//	t.Alloc(22)
//	t.Free(22)
//	t.Free(11)
//
// This is not correct:
//
//	t.Alloc(11)
//	t.Alloc(22)
//	t.Free(11)
//	t.Free(22)
func (t *TLS) Alloc(n int) (r uintptr) {
	t.sp++
	if memgrind {
		// Guard against concurrent use of a single TLS instance.
		if atomic.SwapInt32(&t.reentryGuard, 1) != 0 {
			panic(todo("concurrent use of TLS instance %p", t))
		}
		defer func() {
			if atomic.SwapInt32(&t.reentryGuard, 0) != 1 {
				panic(todo("concurrent use of TLS instance %p", t))
			}
		}()
	}
	// Round n up to a multiple of 16 bytes (Free mirrors this).
	n += 15
	n &^= 15
	// Fast path: the active segment has room.
	if t.stack.free >= n {
		r = t.stack.sp
		t.stack.free -= n
		t.stack.sp += uintptr(n)
		return r
	}
	//if we have a next stack
	if nstack := t.stack.next; nstack != 0 {
		// Reuse the already-allocated next segment if it is big enough.
		if (*stackHeader)(unsafe.Pointer(nstack)).free >= n {
			// Flush the active header back to its page before switching.
			*(*stackHeader)(unsafe.Pointer(t.stack.page)) = t.stack
			t.stack = *(*stackHeader)(unsafe.Pointer(nstack))
			r = t.stack.sp
			t.stack.free -= n
			t.stack.sp += uintptr(n)
			return r
		}
		// Too small: free the entire chain of following segments; a new,
		// large-enough segment is allocated below.
		nstack := *(*stackHeader)(unsafe.Pointer(t.stack.next))
		for ; ; nstack = *(*stackHeader)(unsafe.Pointer(nstack.next)) {
			if memgrind {
				if atomic.AddInt32(&t.stackHeaderBalance, -1) < 0 {
					panic(todo("negative stack header balance"))
				}
			}
			Xfree(t, nstack.page)
			if nstack.next == 0 {
				break
			}
		}
		t.stack.next = 0
	}
	if t.stack.page != 0 {
		// Flush the active header back to its page before chaining a new one.
		*(*stackHeader)(unsafe.Pointer(t.stack.page)) = t.stack
	}
	// Size the request up to a whole number of stack segments.
	rq := n + int(stackHeaderSize)
	if rq%int(stackSegmentSize) != 0 {
		rq -= rq % int(stackSegmentSize)
		rq += int(stackSegmentSize)
	}
	t.stack.free = rq - int(stackHeaderSize)
	t.stack.prev = t.stack.page
	rq += 15
	rq &^= 15
	t.stack.page = Xmalloc(t, types.Size_t(rq))
	if t.stack.page == 0 {
		panic("OOM")
	}
	if memgrind {
		atomic.AddInt32(&t.stackHeaderBalance, 1)
	}
	// First usable byte is just past the in-page header.
	t.stack.sp = t.stack.page + stackHeaderSize
	r = t.stack.sp
	t.stack.free -= n
	t.stack.sp += uintptr(n)
	if t.stack.prev != 0 {
		// Link the previous segment forward to the new one.
		(*stackHeader)(unsafe.Pointer(t.stack.prev)).next = t.stack.page
	}
	return r
}
// stackFrameKeepalive declares how many stack segments are kept alive before
// being freed (see TLS.Free).
const stackFrameKeepalive = 2
// Free deallocates n bytes of thread-local storage. See TLS.Alloc for details
// on correct usage.
func (t *TLS) Free(n int) {
	t.sp--
	if memgrind {
		// Guard against concurrent use of a single TLS instance.
		if atomic.SwapInt32(&t.reentryGuard, 1) != 0 {
			panic(todo("concurrent use of TLS instance %p", t))
		}
		defer func() {
			if atomic.SwapInt32(&t.reentryGuard, 0) != 1 {
				panic(todo("concurrent use of TLS instance %p", t))
			}
		}()
	}
	// Round n up to a multiple of 16 bytes, mirroring Alloc.
	n += 15
	n &^= 15
	t.stack.free += n
	t.stack.sp -= uintptr(n)
	// Segment still has live allocations: done.
	if t.stack.sp != t.stack.page+stackHeaderSize {
		return
	}
	nstack := t.stack
	//if we are the first one, just free all of them
	if t.stack.prev == 0 {
		for ; ; nstack = *(*stackHeader)(unsafe.Pointer(nstack.next)) {
			if memgrind {
				if atomic.AddInt32(&t.stackHeaderBalance, -1) < 0 {
					panic(todo("negative stack header balance"))
				}
			}
			Xfree(t, nstack.page)
			if nstack.next == 0 {
				break
			}
		}
		t.stack = stackHeader{}
		return
	}
	//look if we are in the last n stackframes (n=stackFrameKeepalive)
	//if we find something just return and set the current stack pointer to the previous one
	for i := 0; i < stackFrameKeepalive; i++ {
		if nstack.next == 0 {
			// Keep this segment cached; flush header, step back to prev.
			*((*stackHeader)(unsafe.Pointer(t.stack.page))) = t.stack
			t.stack = *(*stackHeader)(unsafe.Pointer(t.stack.prev))
			return
		}
		nstack = *(*stackHeader)(unsafe.Pointer(nstack.next))
	}
	//else only free the last
	if memgrind {
		if atomic.AddInt32(&t.stackHeaderBalance, -1) < 0 {
			panic(todo("negative stack header balance"))
		}
	}
	Xfree(t, nstack.page)
	// Unlink the freed tail, flush the active header, step back to prev.
	(*stackHeader)(unsafe.Pointer(nstack.prev)).next = 0
	*(*stackHeader)(unsafe.Pointer(t.stack.page)) = t.stack
	t.stack = *(*stackHeader)(unsafe.Pointer(t.stack.prev))
}
// stackHeader is the bookkeeping record stored at the start of every TLS
// stack segment. The header of the active segment is cached in TLS.stack and
// flushed back to its page whenever Alloc/Free switch segments.
type stackHeader struct {
	free int     // bytes left in page
	page uintptr // stack page
	prev uintptr // prev stack page = prev stack header
	next uintptr // next stack page = next stack header
	sp   uintptr // next allocation address
	_    stackHeaderPadding
}
  348. func cString(t *TLS, s string) uintptr { //TODO-
  349. n := len(s)
  350. p := Xmalloc(t, types.Size_t(n)+1)
  351. if p == 0 {
  352. panic("OOM")
  353. }
  354. copy((*RawMem)(unsafe.Pointer(p))[:n:n], s)
  355. *(*byte)(unsafe.Pointer(p + uintptr(n))) = 0
  356. return p
  357. }
// VaList fills a varargs list at p with args and returns p. The list must
// have been allocated by caller and it must not be in Go managed memory, ie.
// it must be pinned. Caller is responsible for freeing the list.
//
// Individual arguments must be one of int, uint, int32, uint32, int64, uint64,
// float64, uintptr or Intptr. Other types will panic.
//
// This function supports code generated by ccgo/v3. For manually constructed
// var args it's recommended to use the NewVaList function instead.
//
// Note: The C translated to Go varargs ABI alignment for all types is 8 on all
// architectures.
func VaList(p uintptr, args ...interface{}) (r uintptr) {
	if p&7 != 0 {
		panic("internal error") // p must be 8-byte aligned
	}
	r = p
	for _, v := range args {
		switch x := v.(type) {
		case int:
			*(*int64)(unsafe.Pointer(p)) = int64(x)
		case int32:
			*(*int64)(unsafe.Pointer(p)) = int64(x)
		case int64:
			*(*int64)(unsafe.Pointer(p)) = x
		case uint:
			*(*uint64)(unsafe.Pointer(p)) = uint64(x)
		case uint16:
			*(*uint64)(unsafe.Pointer(p)) = uint64(x)
		case uint32:
			*(*uint64)(unsafe.Pointer(p)) = uint64(x)
		case uint64:
			*(*uint64)(unsafe.Pointer(p)) = x
		case float64:
			*(*float64)(unsafe.Pointer(p)) = x
		case uintptr:
			*(*uintptr)(unsafe.Pointer(p)) = x
		default:
			// Any other value is copied byte-wise out of the interface's
			// data word and advances p by its size rounded up to 8.
			sz := reflect.TypeOf(v).Size()
			copy(unsafe.Slice((*byte)(unsafe.Pointer(p)), sz), unsafe.Slice((*byte)(unsafe.Pointer((*[2]uintptr)(unsafe.Pointer(&v))[1])), sz))
			p += roundup(sz, 8)
			continue
		}
		// Every scalar slot is 8 bytes wide.
		p += 8
	}
	return r
}
// NewVaListN returns a newly allocated va_list for n items. The caller of
// NewVaListN is responsible for freeing the va_list.
func NewVaListN(n int) (va_list uintptr) {
	// Each scalar va_list slot is 8 bytes (see VaList).
	return Xmalloc(nil, types.Size_t(8*n))
}
// NewVaList is like VaList but automatically allocates the correct amount of
// memory for all of the items in args.
//
// The va_list return value is used to pass the constructed var args to var
// args accepting functions. The caller of NewVaList is responsible for freeing
// the va_list.
func NewVaList(args ...interface{}) (va_list uintptr) {
	return VaList(NewVaListN(len(args)), args...)
}
  419. func VaOther(app *uintptr, sz uint64) (r uintptr) {
  420. ap := *(*uintptr)(unsafe.Pointer(app))
  421. if ap == 0 {
  422. return 0
  423. }
  424. r = ap
  425. ap = roundup(ap+uintptr(sz), 8)
  426. *(*uintptr)(unsafe.Pointer(app)) = ap
  427. return r
  428. }
  429. func VaInt32(app *uintptr) int32 {
  430. ap := *(*uintptr)(unsafe.Pointer(app))
  431. if ap == 0 {
  432. return 0
  433. }
  434. ap = roundup(ap, 8)
  435. v := int32(*(*int64)(unsafe.Pointer(ap)))
  436. ap += 8
  437. *(*uintptr)(unsafe.Pointer(app)) = ap
  438. return v
  439. }
  440. func VaUint32(app *uintptr) uint32 {
  441. ap := *(*uintptr)(unsafe.Pointer(app))
  442. if ap == 0 {
  443. return 0
  444. }
  445. ap = roundup(ap, 8)
  446. v := uint32(*(*uint64)(unsafe.Pointer(ap)))
  447. ap += 8
  448. *(*uintptr)(unsafe.Pointer(app)) = ap
  449. return v
  450. }
  451. func VaInt64(app *uintptr) int64 {
  452. ap := *(*uintptr)(unsafe.Pointer(app))
  453. if ap == 0 {
  454. return 0
  455. }
  456. ap = roundup(ap, 8)
  457. v := *(*int64)(unsafe.Pointer(ap))
  458. ap += 8
  459. *(*uintptr)(unsafe.Pointer(app)) = ap
  460. return v
  461. }
  462. func VaUint64(app *uintptr) uint64 {
  463. ap := *(*uintptr)(unsafe.Pointer(app))
  464. if ap == 0 {
  465. return 0
  466. }
  467. ap = roundup(ap, 8)
  468. v := *(*uint64)(unsafe.Pointer(ap))
  469. ap += 8
  470. *(*uintptr)(unsafe.Pointer(app)) = ap
  471. return v
  472. }
  473. func VaFloat32(app *uintptr) float32 {
  474. ap := *(*uintptr)(unsafe.Pointer(app))
  475. if ap == 0 {
  476. return 0
  477. }
  478. ap = roundup(ap, 8)
  479. v := *(*float64)(unsafe.Pointer(ap))
  480. ap += 8
  481. *(*uintptr)(unsafe.Pointer(app)) = ap
  482. return float32(v)
  483. }
  484. func VaFloat64(app *uintptr) float64 {
  485. ap := *(*uintptr)(unsafe.Pointer(app))
  486. if ap == 0 {
  487. return 0
  488. }
  489. ap = roundup(ap, 8)
  490. v := *(*float64)(unsafe.Pointer(ap))
  491. ap += 8
  492. *(*uintptr)(unsafe.Pointer(app)) = ap
  493. return v
  494. }
  495. func VaUintptr(app *uintptr) uintptr {
  496. ap := *(*uintptr)(unsafe.Pointer(app))
  497. if ap == 0 {
  498. return 0
  499. }
  500. ap = roundup(ap, 8)
  501. v := *(*uintptr)(unsafe.Pointer(ap))
  502. ap += 8
  503. *(*uintptr)(unsafe.Pointer(app)) = ap
  504. return v
  505. }
  506. func getVaList(va uintptr) []string {
  507. r := []string{}
  508. for p := va; ; p += 8 {
  509. st := *(*uintptr)(unsafe.Pointer(p))
  510. if st == 0 {
  511. return r
  512. }
  513. r = append(r, GoString(st))
  514. }
  515. return r
  516. }
  517. func roundup(n, to uintptr) uintptr {
  518. if r := n % to; r != 0 {
  519. return n + to - r
  520. }
  521. return n
  522. }
  523. func Bool(v bool) bool { return v }
  524. func Bool32(b bool) int32 {
  525. if b {
  526. return 1
  527. }
  528. return 0
  529. }
  530. func Bool64(b bool) int64 {
  531. if b {
  532. return 1
  533. }
  534. return 0
  535. }
// sorter adapts a C-style comparison callback over a raw memory array to
// Go's sort.Interface.
type sorter struct {
	len  int                                // number of elements
	base uintptr                            // address of the first element
	sz   uintptr                            // element size in bytes
	f    func(*TLS, uintptr, uintptr) int32 // comparator; negative result means "less"
	t    *TLS
}
  543. func (s *sorter) Len() int { return s.len }
// Less implements sort.Interface by delegating to the C comparator f with
// the addresses of the i-th and j-th elements.
func (s *sorter) Less(i, j int) bool {
	return s.f(s.t, s.base+uintptr(i)*s.sz, s.base+uintptr(j)*s.sz) < 0
}
  547. func (s *sorter) Swap(i, j int) {
  548. p := uintptr(s.base + uintptr(i)*s.sz)
  549. q := uintptr(s.base + uintptr(j)*s.sz)
  550. for i := 0; i < int(s.sz); i++ {
  551. *(*byte)(unsafe.Pointer(p)), *(*byte)(unsafe.Pointer(q)) = *(*byte)(unsafe.Pointer(q)), *(*byte)(unsafe.Pointer(p))
  552. p++
  553. q++
  554. }
  555. }
  556. func CString(s string) (uintptr, error) {
  557. n := len(s)
  558. p := Xmalloc(nil, types.Size_t(n)+1)
  559. if p == 0 {
  560. return 0, fmt.Errorf("CString: cannot allocate %d bytes", n+1)
  561. }
  562. copy((*RawMem)(unsafe.Pointer(p))[:n:n], s)
  563. *(*byte)(unsafe.Pointer(p + uintptr(n))) = 0
  564. return p, nil
  565. }
  566. func GetEnviron() (r []string) {
  567. for p := Environ(); ; p += unsafe.Sizeof(p) {
  568. q := *(*uintptr)(unsafe.Pointer(p))
  569. if q == 0 {
  570. return r
  571. }
  572. r = append(r, GoString(q))
  573. }
  574. }
// strToUint64 parses an optionally signed integer in the given base (only 10
// and 16 are supported; others panic via todo) from the C string at s. It
// skips leading whitespace and an optional single '+'/'-'. It returns
// whether any digit was seen, the sign, the address of the first unconsumed
// byte, the magnitude, and 0 or errno.ERANGE. On overflow the value before
// the overflowing digit is returned with errno.ERANGE.
func strToUint64(t *TLS, s uintptr, base int32) (seenDigits, neg bool, next uintptr, n uint64, err int32) {
	var c byte
out:
	// Prefix: skip whitespace; a sign character ends the prefix.
	for {
		c = *(*byte)(unsafe.Pointer(s))
		switch c {
		case ' ', '\t', '\n', '\r', '\v', '\f':
			s++
		case '+':
			s++
			break out
		case '-':
			s++
			neg = true
			break out
		default:
			break out
		}
	}
	// Digits: accumulate until the first non-digit, which terminates parsing.
	for {
		c = *(*byte)(unsafe.Pointer(s))
		var digit uint64
		switch base {
		case 10:
			switch {
			case c >= '0' && c <= '9':
				seenDigits = true
				digit = uint64(c) - '0'
			default:
				return seenDigits, neg, s, n, 0
			}
		case 16:
			if c >= 'A' && c <= 'F' {
				// Normalize hex digits to lowercase.
				c = c + ('a' - 'A')
			}
			switch {
			case c >= '0' && c <= '9':
				seenDigits = true
				digit = uint64(c) - '0'
			case c >= 'a' && c <= 'f':
				seenDigits = true
				digit = uint64(c) - 'a' + 10
			default:
				return seenDigits, neg, s, n, 0
			}
		default:
			panic(todo("", base))
		}
		n0 := n
		n = uint64(base)*n + digit
		if n < n0 { // overflow
			return seenDigits, neg, s, n0, errno.ERANGE
		}
		s++
	}
}
// strToFloatt64 parses a floating point number from the C string at s,
// collecting its characters and converting them with strconv.ParseFloat at
// the given bit size (32 or 64) in a deferred block. Leading whitespace and
// an optional single sign are skipped. Parsing stops (returns) at the first
// character that legally ends the number inside the fraction or exponent
// digits; characters the grammar does not expect panic (via todo).
//
// NOTE(review): if parsing ends before any character is collected, the
// deferred ParseFloat("") fails and panics — callers presumably guarantee a
// well-formed prefix; verify at call sites.
func strToFloatt64(t *TLS, s uintptr, bits int) (n float64, errno int32) {
	var b []byte // collected characters of the number, sign excluded
	var neg bool
	defer func() {
		// Convert whatever was collected; apply the sign last.
		var err error
		if n, err = strconv.ParseFloat(string(b), bits); err != nil {
			panic(todo(""))
		}
		if neg {
			n = -n
		}
	}()
	var c byte
out:
	// Prefix: skip whitespace; a sign character ends the prefix.
	for {
		c = *(*byte)(unsafe.Pointer(s))
		switch c {
		case ' ', '\t', '\n', '\r', '\v', '\f':
			s++
		case '+':
			s++
			break out
		case '-':
			s++
			neg = true
			break out
		default:
			break out
		}
	}
	// Integer part: digits until the decimal point.
	for {
		c = *(*byte)(unsafe.Pointer(s))
		switch {
		case c >= '0' && c <= '9':
			b = append(b, c)
		case c == '.':
			// Fractional part.
			b = append(b, c)
			s++
			for {
				c = *(*byte)(unsafe.Pointer(s))
				switch {
				case c >= '0' && c <= '9':
					b = append(b, c)
				case c == 'e' || c == 'E':
					// Exponent marker; a sign is mandatory after it.
					b = append(b, c)
					s++
					for {
						c = *(*byte)(unsafe.Pointer(s))
						switch {
						case c == '+' || c == '-':
							b = append(b, c)
							s++
							// Exponent digits; any other byte ends the number.
							for {
								c = *(*byte)(unsafe.Pointer(s))
								switch {
								case c >= '0' && c <= '9':
									b = append(b, c)
								default:
									return
								}
								s++
							}
						default:
							panic(todo("%q %q", b, string(c)))
						}
					}
				default:
					// Any other byte ends the number after the fraction.
					return
				}
				s++
			}
		default:
			panic(todo("%q %q", b, string(c)))
		}
		s++
	}
}
// parseZone parses a timezone specification (a name followed by an offset,
// e.g. "CET-1") into the zone name and its offset in seconds. See
// parseZoneOffset/parseOffset for the accepted syntax.
func parseZone(s string) (name string, off int) {
	_, name, off, _ = parseZoneOffset(s, false)
	return name, off
}
// parseZoneOffset splits a timezone spec into (rest, name, offset seconds,
// ok). The name is a leading run of [A-Za-z_/] and must be at least 3 bytes,
// otherwise the function panics (via todo). When offOpt is true the numeric
// offset may be absent; in that case ok is false and off is 0. When the
// whole input is the name, ("", s, 0, true) is returned.
func parseZoneOffset(s string, offOpt bool) (string, string, int, bool) {
	s0 := s
	name := s
	for len(s) != 0 {
		switch c := s[0]; {
		case c >= 'A' && c <= 'Z', c >= 'a' && c <= 'z', c == '_', c == '/':
			s = s[1:]
		default:
			// End of the name part; trim name down to what was consumed.
			name = name[:len(name)-len(s)]
			if len(name) < 3 {
				panic(todo("%q", s0))
			}
			if offOpt {
				// Offset is optional: bail out when nothing numeric follows.
				if len(s) == 0 {
					return "", name, 0, false
				}
				if c := s[0]; (c < '0' || c > '9') && c != '+' && c != '-' {
					return s, name, 0, false
				}
			}
			s, off := parseOffset(s)
			return s, name, off, true
		}
	}
	return "", s0, 0, true
}
  738. // [+|-]hh[:mm[:ss]]
  739. func parseOffset(s string) (string, int) {
  740. if len(s) == 0 {
  741. panic(todo(""))
  742. }
  743. k := 1
  744. switch s[0] {
  745. case '+':
  746. // nop
  747. s = s[1:]
  748. case '-':
  749. k = -1
  750. s = s[1:]
  751. }
  752. s, hh, ok := parseUint(s)
  753. if !ok {
  754. panic(todo(""))
  755. }
  756. n := hh * 3600
  757. if len(s) == 0 || s[0] != ':' {
  758. return s, k * n
  759. }
  760. s = s[1:] // ':'
  761. if len(s) == 0 {
  762. panic(todo(""))
  763. }
  764. s, mm, ok := parseUint(s)
  765. if !ok {
  766. panic(todo(""))
  767. }
  768. n += mm * 60
  769. if len(s) == 0 || s[0] != ':' {
  770. return s, k * n
  771. }
  772. s = s[1:] // ':'
  773. if len(s) == 0 {
  774. panic(todo(""))
  775. }
  776. s, ss, _ := parseUint(s)
  777. return s, k * (n + ss)
  778. }
  779. func parseUint(s string) (string, int, bool) {
  780. var ok bool
  781. var r int
  782. for len(s) != 0 {
  783. switch c := s[0]; {
  784. case c >= '0' && c <= '9':
  785. ok = true
  786. r0 := r
  787. r = 10*r + int(c) - '0'
  788. if r < r0 {
  789. panic(todo(""))
  790. }
  791. s = s[1:]
  792. default:
  793. return s, r, ok
  794. }
  795. }
  796. return s, r, ok
  797. }
  798. // https://stackoverflow.com/a/53052382
  799. //
  800. // isTimeDST returns true if time t occurs within daylight saving time
  801. // for its time zone.
  802. func isTimeDST(t time.Time) bool {
  803. // If the most recent (within the last year) clock change
  804. // was forward then assume the change was from std to dst.
  805. hh, mm, _ := t.UTC().Clock()
  806. tClock := hh*60 + mm
  807. for m := -1; m > -12; m-- {
  808. // assume dst lasts for at least one month
  809. hh, mm, _ := t.AddDate(0, m, 0).UTC().Clock()
  810. clock := hh*60 + mm
  811. if clock != tClock {
  812. return clock > tClock
  813. }
  814. }
  815. // assume no dst
  816. return false
  817. }