// verify.go
1 //go:build !js && !wasm && !tinygo && !wasm32
2
3 package p256k1
4
5 import (
6 "crypto/sha256"
7 "hash"
8 "sync"
9 "unsafe"
10 )
11
12 // ============================================================================
13 // UTILITY FUNCTIONS
14 // ============================================================================
15
// secp256k1_read_be32 decodes a big-endian uint32 from the first 4 bytes of p.
// Panics if p holds fewer than 4 bytes.
func secp256k1_read_be32(p []byte) uint32 {
	if len(p) < 4 {
		panic("buffer too small")
	}
	var v uint32
	for _, b := range p[:4] {
		v = v<<8 | uint32(b)
	}
	return v
}
23
// secp256k1_write_be32 encodes x into the first 4 bytes of p, big endian.
// Panics if p holds fewer than 4 bytes.
func secp256k1_write_be32(p []byte, x uint32) {
	if len(p) < 4 {
		panic("buffer too small")
	}
	for i := 3; i >= 0; i-- {
		p[i] = byte(x)
		x >>= 8
	}
}
34
// secp256k1_read_be64 decodes a big-endian uint64 from the first 8 bytes of p.
// Panics if p holds fewer than 8 bytes.
func secp256k1_read_be64(p []byte) uint64 {
	if len(p) < 8 {
		panic("buffer too small")
	}
	var v uint64
	for _, b := range p[:8] {
		v = v<<8 | uint64(b)
	}
	return v
}
43
// secp256k1_write_be64 encodes x into the first 8 bytes of p, big endian.
// Panics if p holds fewer than 8 bytes.
func secp256k1_write_be64(p []byte, x uint64) {
	if len(p) < 8 {
		panic("buffer too small")
	}
	for i := 7; i >= 0; i-- {
		p[i] = byte(x)
		x >>= 8
	}
}
58
// secp256k1_memczero zeroes s if flag == 1 and leaves it unchanged if
// flag == 0, in constant time with respect to flag. flag MUST be 0 or 1.
//
// Fix: the previous version branched (early return) on flag, contradicting
// the documented constant-time contract; this version uses a byte mask,
// mirroring the upstream C implementation.
func secp256k1_memczero(s []byte, flag int) {
	// mask is 0x00 when flag == 1 (clear everything) and 0xFF when
	// flag == 0 (keep everything).
	mask := byte(flag) - 1
	for i := range s {
		s[i] &= mask
	}
}
68
// secp256k1_memzero_explicit zeroes len bytes at ptr to prevent secret data
// from lingering in memory; delegates to memclear, which is expected not to
// be optimized away.
func secp256k1_memzero_explicit(ptr unsafe.Pointer, len uintptr) {
	memclear(ptr, len)
}

// secp256k1_memclear_explicit cleanses memory to prevent leaking sensitive
// info. Identical to secp256k1_memzero_explicit; both names are kept to
// mirror the upstream C API.
func secp256k1_memclear_explicit(ptr unsafe.Pointer, len uintptr) {
	memclear(ptr, len)
}
78
// secp256k1_memcmp_var compares s1 and s2 with memcmp-like semantics:
// the result is the first non-zero byte difference, or the length
// difference when one slice is a prefix of the other. Variable time —
// never use on secret data.
func secp256k1_memcmp_var(s1, s2 []byte) int {
	n := len(s1)
	if m := len(s2); m < n {
		n = m
	}
	for i := range s1[:n] {
		if d := int(s1[i]) - int(s2[i]); d != 0 {
			return d
		}
	}
	return len(s1) - len(s2)
}
93
94 // ============================================================================
95 // SHA256 IMPLEMENTATION
96 // ============================================================================
97
// secp256k1_sha256 is a SHA-256 context mirroring the upstream C struct:
// s holds the 8-word midstate, buf the current partial 64-byte block, and
// bytes the total number of message bytes written so far.
type secp256k1_sha256 struct {
	s     [8]uint32
	buf   [64]byte
	bytes uint64
}
104
105 // secp256k1_sha256_initialize initializes a SHA-256 hash context
106 func secp256k1_sha256_initialize(hash *secp256k1_sha256) {
107 hash.s[0] = 0x6a09e667
108 hash.s[1] = 0xbb67ae85
109 hash.s[2] = 0x3c6ef372
110 hash.s[3] = 0xa54ff53a
111 hash.s[4] = 0x510e527f
112 hash.s[5] = 0x9b05688c
113 hash.s[6] = 0x1f83d9ab
114 hash.s[7] = 0x5be0cd19
115 hash.bytes = 0
116 }
117
118 // secp256k1_sha256_transform performs one SHA-256 transformation
119 func secp256k1_sha256_transform(s *[8]uint32, buf []byte) {
120 // Use standard library SHA256 for transformation
121 // This is a simplified implementation - full implementation would include
122 // the exact transformation from the C code
123 hasher := NewSHA256()
124 hasher.Write(buf)
125 var tmp [32]byte
126 hasher.Finalize(tmp[:])
127
128 // Convert back to state format (simplified)
129 for i := 0; i < 8; i++ {
130 s[i] = secp256k1_read_be32(tmp[i*4:])
131 }
132 }
133
134 // secp256k1_sha256_write writes data to the hash
135 func secp256k1_sha256_write(hash *secp256k1_sha256, data []byte, len int) {
136 // Simplified implementation using standard library
137 // Full implementation would match C code exactly
138 if len == 0 {
139 return
140 }
141
142 bufsize := int(hash.bytes & 0x3F)
143 hash.bytes += uint64(len)
144
145 // Process full blocks
146 i := 0
147 for len >= 64-bufsize {
148 chunkLen := 64 - bufsize
149 copy(hash.buf[bufsize:], data[i:i+chunkLen])
150 i += chunkLen
151 len -= chunkLen
152 secp256k1_sha256_transform(&hash.s, hash.buf[:])
153 bufsize = 0
154 }
155
156 // Copy remaining data
157 if len > 0 {
158 copy(hash.buf[bufsize:], data[i:i+len])
159 }
160 }
161
162 // secp256k1_sha256_finalize finalizes the hash
163 func secp256k1_sha256_finalize(hash *secp256k1_sha256, out32 []byte) {
164 if len(out32) < 32 {
165 panic("output buffer too small")
166 }
167
168 // Use standard library for finalization
169 hasher := NewSHA256()
170
171 // Write all buffered data
172 bufsize := int(hash.bytes & 0x3F)
173 if bufsize > 0 {
174 hasher.Write(hash.buf[:bufsize])
175 }
176
177 // Finalize
178 hasher.Finalize(out32)
179
180 // Clear hash state
181 hash.bytes = 0
182 for i := range hash.s {
183 hash.s[i] = 0
184 }
185 }
186
// secp256k1_sha256_initialize_tagged initializes hash to the BIP-340 tagged
// hash state: SHA256(SHA256(tag) || SHA256(tag) || ...). Writing the 32-byte
// tag hash twice fills exactly one 64-byte block, so subsequent writes start
// block-aligned.
func secp256k1_sha256_initialize_tagged(hash *secp256k1_sha256, tag []byte, taglen int) {
	var buf [32]byte
	// First pass: buf = SHA256(tag).
	secp256k1_sha256_initialize(hash)
	secp256k1_sha256_write(hash, tag, taglen)
	secp256k1_sha256_finalize(hash, buf[:])

	// Second pass: absorb SHA256(tag) twice into a fresh context.
	secp256k1_sha256_initialize(hash)
	secp256k1_sha256_write(hash, buf[:], 32)
	secp256k1_sha256_write(hash, buf[:], 32)
}
198
// secp256k1_sha256_clear wipes the whole hash context (midstate, buffer and
// byte count) so no message material lingers in memory.
func secp256k1_sha256_clear(hash *secp256k1_sha256) {
	secp256k1_memclear_explicit(unsafe.Pointer(hash), unsafe.Sizeof(*hash))
}
203
204 // ============================================================================
205 // SCALAR OPERATIONS
206 // ============================================================================
207
// secp256k1_scalar is a scalar modulo the group order, stored as four 64-bit
// limbs, least-significant limb first (d[0]).
type secp256k1_scalar struct {
	d [4]uint64
}
212
// secp256k1_scalar_check_overflow reports whether a >= N (the group order),
// using branch-free flag accumulation: yes/no hold 0/1 and ^no acts as a mask
// (^0 == -1 keeps a 0/1 value, ^1 clears its low bit). Limbs are compared
// from most to least significant; there is no d[3] > scalarN3 test because
// scalarN3 is 2^64-1, so d[3] can never exceed it.
func secp256k1_scalar_check_overflow(a *secp256k1_scalar) bool {
	yes := 0
	no := 0

	no |= boolToInt(a.d[3] < scalarN3)
	yes |= boolToInt(a.d[2] > scalarN2) & (^no)
	no |= boolToInt(a.d[2] < scalarN2)
	yes |= boolToInt(a.d[1] > scalarN1) & (^no)
	no |= boolToInt(a.d[1] < scalarN1)
	yes |= boolToInt(a.d[0] >= scalarN0) & (^no)

	return yes != 0
}
227
228 // secp256k1_scalar_reduce reduces scalar modulo order
229 func secp256k1_scalar_reduce(r *secp256k1_scalar, overflow int) {
230 if overflow < 0 || overflow > 1 {
231 panic("overflow must be 0 or 1")
232 }
233
234 var s Scalar
235 s.d = r.d
236 s.reduce(overflow)
237 r.d = s.d
238 }
239
// secp256k1_scalar_set_b32 sets r from 32 big-endian bytes, reducing modulo
// the group order. If overflow is non-nil it receives 1 when the input was
// >= the order (i.e. a reduction occurred), else 0.
func secp256k1_scalar_set_b32(r *secp256k1_scalar, b32 []byte, overflow *int) {
	var s Scalar
	over := s.setB32(b32)
	r.d = s.d

	if overflow != nil {
		*overflow = boolToInt(over)
	}
}

// secp256k1_scalar_get_b32 serializes a into bin as 32 big-endian bytes.
func secp256k1_scalar_get_b32(bin []byte, a *secp256k1_scalar) {
	var s Scalar
	s.d = a.d
	scalarGetB32(bin, &s)
}

// secp256k1_scalar_is_zero reports whether a is zero.
func secp256k1_scalar_is_zero(a *secp256k1_scalar) bool {
	var s Scalar
	s.d = a.d
	return scalarIsZero(&s)
}

// secp256k1_scalar_negate sets r = -a modulo the group order.
// NOTE(review): s is seeded from a.d but then fully overwritten by
// s.negate(&sa); the initial copy looks redundant — confirm before removing.
func secp256k1_scalar_negate(r *secp256k1_scalar, a *secp256k1_scalar) {
	var s Scalar
	s.d = a.d
	var sa Scalar
	sa.d = a.d
	s.negate(&sa)
	r.d = s.d
}

// secp256k1_scalar_add sets r = a + b modulo the group order and reports
// whether the raw addition wrapped (overflowed) before reduction.
func secp256k1_scalar_add(r *secp256k1_scalar, a *secp256k1_scalar, b *secp256k1_scalar) bool {
	var sa, sb Scalar
	sa.d = a.d
	sb.d = b.d
	var sr Scalar
	overflow := scalarAdd(&sr, &sa, &sb)
	r.d = sr.d
	return overflow
}

// secp256k1_scalar_mul sets r = a * b modulo the group order.
func secp256k1_scalar_mul(r *secp256k1_scalar, a *secp256k1_scalar, b *secp256k1_scalar) {
	var sa, sb Scalar
	sa.d = a.d
	sb.d = b.d
	var sr Scalar
	scalarMul(&sr, &sa, &sb)
	r.d = sr.d
}

// secp256k1_scalar_clear wipes r so secret scalars do not linger in memory.
func secp256k1_scalar_clear(r *secp256k1_scalar) {
	secp256k1_memclear_explicit(unsafe.Pointer(r), unsafe.Sizeof(*r))
}

// secp256k1_scalar_set_b32_seckey sets r from bin and reports whether bin is
// a valid secret key (delegates validity rules to Scalar.setB32Seckey).
func secp256k1_scalar_set_b32_seckey(r *secp256k1_scalar, bin []byte) bool {
	var s Scalar
	ret := s.setB32Seckey(bin)
	r.d = s.d
	return ret
}

// secp256k1_scalar_cmov conditionally assigns r = a depending on flag
// (delegates to Scalar.cmov, presumably constant time — confirm there).
func secp256k1_scalar_cmov(r *secp256k1_scalar, a *secp256k1_scalar, flag int) {
	var sr, sa Scalar
	sr.d = r.d
	sa.d = a.d
	sr.cmov(&sa, flag)
	r.d = sr.d
}

// secp256k1_scalar_get_bits_limb32 extracts count bits of a starting at bit
// offset, returned in the low bits of a uint32.
func secp256k1_scalar_get_bits_limb32(a *secp256k1_scalar, offset, count uint) uint32 {
	var s Scalar
	s.d = a.d
	return s.getBits(offset, count)
}

// Frequently used scalar constants.
var (
	secp256k1_scalar_one  = secp256k1_scalar{d: [4]uint64{1, 0, 0, 0}}
	secp256k1_scalar_zero = secp256k1_scalar{d: [4]uint64{0, 0, 0, 0}}
)
330
331 // ============================================================================
332 // FIELD OPERATIONS
333 // ============================================================================
334
// secp256k1_fe is a field element modulo the secp256k1 prime p, stored in the
// 5x52 representation: five 64-bit limbs of (up to) 52 bits each,
// least-significant limb first (n[0]).
type secp256k1_fe struct {
	n [5]uint64
}
339
// secp256k1_fe_clear wipes a field element (secret-data hygiene).
func secp256k1_fe_clear(a *secp256k1_fe) {
	secp256k1_memclear_explicit(unsafe.Pointer(a), unsafe.Sizeof(*a))
}

// secp256k1_fe_set_int sets r to the small integer a.
func secp256k1_fe_set_int(r *secp256k1_fe, a int) {
	var fe FieldElement
	fe.setInt(a)
	r.n = fe.n
}

// secp256k1_fe_is_zero reports whether a's limbs are all zero.
// Assumes a is normalized — an unnormalized element can represent zero
// without all limbs being zero.
func secp256k1_fe_is_zero(a *secp256k1_fe) bool {
	return (a.n[0] | a.n[1] | a.n[2] | a.n[3] | a.n[4]) == 0
}

// secp256k1_fe_is_odd reports whether a is odd. Assumes a is normalized.
func secp256k1_fe_is_odd(a *secp256k1_fe) bool {
	return a.n[0]&1 == 1
}
361
// secp256k1_fe_normalize_var fully normalizes r to its canonical
// representation.
// NOTE(review): despite the _var name this delegates to fieldNormalize, not a
// variable-time variant — confirm that is intended.
func secp256k1_fe_normalize_var(r *secp256k1_fe) {
	var fe FieldElement
	fe.n = r.n
	fieldNormalize(&fe)
	r.n = fe.n
}

// secp256k1_fe_normalize_weak reduces r's magnitude without producing the
// unique canonical representation.
func secp256k1_fe_normalize_weak(r *secp256k1_fe) {
	var fe FieldElement
	fe.n = r.n
	fe.normalizeWeak()
	r.n = fe.n
}

// secp256k1_fe_normalizes_to_zero reports whether r represents zero mod p,
// without modifying r.
func secp256k1_fe_normalizes_to_zero(r *secp256k1_fe) bool {
	var fe FieldElement
	fe.n = r.n
	return fe.normalizesToZeroVar()
}
384
// secp256k1_fe_negate sets r = -a, where m is the magnitude bound of a
// required by the underlying negate routine.
// NOTE(review): fe is seeded from a.n but then fully overwritten by
// fe.negate(&fea); the initial copy looks redundant — confirm before removing.
func secp256k1_fe_negate(r *secp256k1_fe, a *secp256k1_fe, m int) {
	var fe FieldElement
	fe.n = a.n
	var fea FieldElement
	fea.n = a.n
	fe.negate(&fea, m)
	r.n = fe.n
}

// secp256k1_fe_add adds a into r (r += a).
func secp256k1_fe_add(r *secp256k1_fe, a *secp256k1_fe) {
	var fe FieldElement
	fe.n = r.n
	var fea FieldElement
	fea.n = a.n
	fieldAdd(&fe, &fea)
	r.n = fe.n
}
404
405 // secp256k1_fe_add_int adds int to field element
406 func secp256k1_fe_add_int(r *secp256k1_fe, a int) {
407 var fe FieldElement
408 fe.n = r.n
409 fe.mulInt(a)
410 r.n = fe.n
411 }
412
// secp256k1_fe_set_b32_mod sets r from 32 big-endian bytes, reducing mod p.
func secp256k1_fe_set_b32_mod(r *secp256k1_fe, a []byte) {
	var fe FieldElement
	fe.setB32(a)
	r.n = fe.n
}

// secp256k1_fe_set_b32_limit sets r from 32 big-endian bytes and returns
// false when the encoded value is >= p (callers must reject such input).
// The limit test compares normalized 5x52 limbs against
// p = 2^256 - 2^32 - 977.
// NOTE(review): an error from setB32 is treated as out-of-range; if setB32
// already reduces mod p on decode, the manual >= p comparison below can never
// trigger — confirm setB32's contract.
func secp256k1_fe_set_b32_limit(r *secp256k1_fe, a []byte) bool {
	var fe FieldElement
	if err := fe.setB32(a); err != nil {
		return false
	}

	// Check if normalized value is within limit
	fe.normalize()
	r.n = fe.n

	// Check if r >= p (field modulus)
	// p = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F
	// In 5x52 limbs: top limb all-ones (48 bits), middle limbs all-ones
	// (52 bits each), bottom limb >= 0xFFFFEFFFFFC2F.
	limit := (r.n[4] == 0x0FFFFFFFFFFFF) &&
		((r.n[3] & r.n[2] & r.n[1]) == 0xFFFFFFFFFFFFF) &&
		(r.n[0] >= 0xFFFFEFFFFFC2F)

	return !limit
}

// secp256k1_fe_get_b32 serializes a into r as 32 big-endian bytes.
func secp256k1_fe_get_b32(r []byte, a *secp256k1_fe) {
	var fe FieldElement
	fe.n = a.n
	fieldGetB32(r, &fe)
}
449
450 // secp256k1_fe_equal checks if two field elements are equal
451 func secp256k1_fe_equal(a *secp256k1_fe, b *secp256k1_fe) bool {
452 var fea, feb FieldElement
453 fea.n = a.n
454 feb.n = b.n
455 // Normalize both to ensure consistent state since secp256k1_fe doesn't carry
456 // magnitude information. This ensures that the limbs correspond to a valid
457 // field element representation before we compute the comparison.
458 fea.normalize()
459 feb.normalize()
460
461 // Now compute the difference and check if it's zero: (a - b) ≡ 0 (mod p)
462 var na FieldElement
463 na.negate(&fea, 1)
464 na.add(&feb)
465 return na.normalizesToZeroVar()
466 }
467
// secp256k1_fe_sqrt sets r to a square root of a and reports whether a is a
// quadratic residue (i.e. a square root exists).
func secp256k1_fe_sqrt(r *secp256k1_fe, a *secp256k1_fe) bool {
	var fea, fer FieldElement
	fea.n = a.n
	ret := fer.sqrt(&fea)
	r.n = fer.n
	return ret
}

// secp256k1_fe_mul sets r = a * b.
// (copy() on the fixed-size limb arrays is equivalent to direct assignment as
// used by the neighboring wrappers.)
func secp256k1_fe_mul(r *secp256k1_fe, a *secp256k1_fe, b *secp256k1_fe) {
	var fea, feb, fer FieldElement
	copy(fea.n[:], a.n[:])
	copy(feb.n[:], b.n[:])
	fer.mul(&fea, &feb)
	copy(r.n[:], fer.n[:])
}

// secp256k1_fe_sqr sets r = a^2.
func secp256k1_fe_sqr(r *secp256k1_fe, a *secp256k1_fe) {
	var fea, fer FieldElement
	copy(fea.n[:], a.n[:])
	fer.sqr(&fea)
	copy(r.n[:], fer.n[:])
}

// secp256k1_fe_inv_var sets r = 1/x (x must be non-zero).
func secp256k1_fe_inv_var(r *secp256k1_fe, x *secp256k1_fe) {
	var fex, fer FieldElement
	fex.n = x.n
	fer.inv(&fex)
	r.n = fer.n
}
501
502 // ============================================================================
503 // GROUP OPERATIONS
504 // ============================================================================
505
// secp256k1_ge is a group element in affine (x, y) coordinates;
// infinity != 0 marks the point at infinity.
type secp256k1_ge struct {
	x, y     secp256k1_fe
	infinity int
}

// secp256k1_gej is a group element in Jacobian (x, y, z) coordinates,
// representing the affine point (x/z^2, y/z^3); infinity != 0 marks the
// point at infinity.
type secp256k1_gej struct {
	x, y, z  secp256k1_fe
	infinity int
}
517
// secp256k1_ge_set_infinity sets r to the point at infinity, zeroing its
// coordinates so no stale data remains.
func secp256k1_ge_set_infinity(r *secp256k1_ge) {
	r.infinity = 1
	secp256k1_fe_set_int(&r.x, 0)
	secp256k1_fe_set_int(&r.y, 0)
}

// secp256k1_ge_is_infinity reports whether a is the point at infinity.
func secp256k1_ge_is_infinity(a *secp256k1_ge) bool {
	return a.infinity != 0
}

// secp256k1_ge_set_xy sets r to the finite point (x, y); the coordinates are
// not validated against the curve equation here.
func secp256k1_ge_set_xy(r *secp256k1_ge, x *secp256k1_fe, y *secp256k1_fe) {
	r.infinity = 0
	r.x = *x
	r.y = *y
}
536
// secp256k1_ge_set_xo_var recovers the curve point with the given x
// coordinate and y parity (odd != 0 requests the odd-y solution, per
// setXOVar's convention). Returns false when no curve point has this x;
// in that case r is left unmodified.
func secp256k1_ge_set_xo_var(r *secp256k1_ge, x *secp256k1_fe, odd int) bool {
	var fex FieldElement
	fex.n = x.n

	var ge GroupElementAffine
	ret := ge.setXOVar(&fex, odd != 0)
	if ret {
		r.x.n = ge.x.n
		r.y.n = ge.y.n
		r.infinity = 0
	}
	return ret
}
551
// secp256k1_gej_set_infinity sets r to the point at infinity, zeroing its
// coordinates so no stale data remains.
func secp256k1_gej_set_infinity(r *secp256k1_gej) {
	r.infinity = 1
	secp256k1_fe_set_int(&r.x, 0)
	secp256k1_fe_set_int(&r.y, 0)
	secp256k1_fe_set_int(&r.z, 0)
}

// secp256k1_gej_is_infinity reports whether a is the point at infinity.
func secp256k1_gej_is_infinity(a *secp256k1_gej) bool {
	return a.infinity != 0
}

// secp256k1_gej_set_ge lifts affine a to Jacobian coordinates with z = 1.
func secp256k1_gej_set_ge(r *secp256k1_gej, a *secp256k1_ge) {
	r.infinity = a.infinity
	r.x = a.x
	r.y = a.y
	secp256k1_fe_set_int(&r.z, 1)
}

// secp256k1_gej_clear wipes a Jacobian group element (secret-data hygiene).
func secp256k1_gej_clear(r *secp256k1_gej) {
	secp256k1_memclear_explicit(unsafe.Pointer(r), unsafe.Sizeof(*r))
}
577
// secp256k1_ge_set_gej converts Jacobian a to affine r by delegating to
// GroupElementAffine.setGEJ (which performs the z-inversion).
func secp256k1_ge_set_gej(r *secp256k1_ge, a *secp256k1_gej) {
	var gej GroupElementJacobian
	gej.x.n = a.x.n
	gej.y.n = a.y.n
	gej.z.n = a.z.n
	gej.infinity = a.infinity != 0

	var ge GroupElementAffine
	ge.setGEJ(&gej)

	r.x.n = ge.x.n
	r.y.n = ge.y.n
	r.infinity = boolToInt(ge.infinity)
}

// secp256k1_ge_set_gej_var converts Jacobian a to affine r, short-circuiting
// the infinity case up front (variable time).
func secp256k1_ge_set_gej_var(r *secp256k1_ge, a *secp256k1_gej) {
	if secp256k1_gej_is_infinity(a) {
		secp256k1_ge_set_infinity(r)
		return
	}

	var gej GroupElementJacobian
	gej.x.n = a.x.n
	gej.y.n = a.y.n
	gej.z.n = a.z.n
	gej.infinity = false

	var ge GroupElementAffine
	ge.setGEJ(&gej)

	r.x.n = ge.x.n
	r.y.n = ge.y.n
	r.infinity = 0
}
614
// secp256k1_gej_double_var sets r = 2*a via GroupElementJacobian.double.
// If rzr is non-nil it receives 2*a.y, the z-ratio (r.z / a.z) produced by
// the doubling formula.
// NOTE(review): upstream C weakly normalizes a->y before doubling it into
// rzr; here the raw limbs are summed directly — confirm the magnitude bounds
// of a.y make this safe for all callers.
func secp256k1_gej_double_var(r *secp256k1_gej, a *secp256k1_gej, rzr *secp256k1_fe) {
	var geja, gejr GroupElementJacobian
	geja.x.n = a.x.n
	geja.y.n = a.y.n
	geja.z.n = a.z.n
	geja.infinity = a.infinity != 0

	gejr.double(&geja)

	r.x.n = gejr.x.n
	r.y.n = gejr.y.n
	r.z.n = gejr.z.n
	r.infinity = boolToInt(gejr.infinity)

	if rzr != nil {
		// rzr = a.y + a.y = 2*a.y (from double logic).
		rzr.n = a.y.n
		secp256k1_fe_add(rzr, &a.y)
	}
}
636
// secp256k1_gej_add_ge_var sets r = a + b, where b is affine (variable
// time). If rzr is non-nil it is passed through to addGEWithZR as an
// in/out z-ratio: seeded from rzr's current limbs and, on return, holding
// the ratio r.z / a.z computed by the addition.
// NOTE(review): seeding fezr from rzr's (possibly uninitialized) contents
// mirrors passing a scratch output in C — confirm addGEWithZR treats it as
// write-only.
func secp256k1_gej_add_ge_var(r *secp256k1_gej, a *secp256k1_gej, b *secp256k1_ge, rzr *secp256k1_fe) {
	var geja GroupElementJacobian
	geja.x.n = a.x.n
	geja.y.n = a.y.n
	geja.z.n = a.z.n
	geja.infinity = a.infinity != 0

	var geb GroupElementAffine
	geb.x.n = b.x.n
	geb.y.n = b.y.n
	geb.infinity = b.infinity != 0

	var fezr *FieldElement
	if rzr != nil {
		var tmp FieldElement
		tmp.n = rzr.n
		fezr = &tmp
	}

	var gejr GroupElementJacobian
	gejr.addGEWithZR(&geja, &geb, fezr)

	r.x.n = gejr.x.n
	r.y.n = gejr.y.n
	r.z.n = gejr.z.n
	r.infinity = boolToInt(gejr.infinity)

	if rzr != nil && fezr != nil {
		rzr.n = fezr.n
	}
}
669
670 // secp256k1_gej_add_zinv_var adds affine point to Jacobian with z inverse
671 func secp256k1_gej_add_zinv_var(r *secp256k1_gej, a *secp256k1_gej, b *secp256k1_ge, bzinv *secp256k1_fe) {
672 // Simplified implementation - full implementation would use zinv optimization
673 secp256k1_gej_add_ge_var(r, a, b, nil)
674 }
675
676 // ============================================================================
677 // GLOBAL PRE-ALLOCATED RESOURCES
678 // ============================================================================
679
// challengeHashPool reuses SHA-256 contexts across challenge computations to
// avoid a per-verification allocation; sync.Pool makes this safe for
// concurrent use.
var challengeHashPool = sync.Pool{
	New: func() any {
		return sha256.New()
	},
}

// getChallengeHashContext fetches a hash context from the pool. Callers must
// return it with putChallengeHashContext when done.
func getChallengeHashContext() hash.Hash {
	return challengeHashPool.Get().(hash.Hash)
}

// putChallengeHashContext resets h and returns it to the pool for reuse.
func putChallengeHashContext(h hash.Hash) {
	h.Reset()
	challengeHashPool.Put(h)
}
695
696 // ============================================================================
697 // EC MULTIPLICATION OPERATIONS
698 // ============================================================================
699
// secp256k1_ecmult_gen_context tracks whether the generator-multiplication
// precomputation has been built (built != 0).
type secp256k1_ecmult_gen_context struct {
	built int
}

// secp256k1_ecmult_gen_context_is_built reports whether ctx has been
// initialized.
func secp256k1_ecmult_gen_context_is_built(ctx *secp256k1_ecmult_gen_context) bool {
	return ctx.built != 0
}
709
710 // secp256k1_ecmult_gen computes generator multiplication
711 func secp256k1_ecmult_gen(ctx *secp256k1_ecmult_gen_context, r *secp256k1_gej, gn *secp256k1_scalar) {
712 var s Scalar
713 s.d = gn.d
714
715 var gejr GroupElementJacobian
716 EcmultGen(&gejr, &s)
717
718 r.x.n = gejr.x.n
719 r.y.n = gejr.y.n
720 r.z.n = gejr.z.n
721 r.infinity = boolToInt(gejr.infinity)
722 }
723
// secp256k1_ecmult computes r = na * a + ng * G.
//
// Fast paths handle the cases where one or both scalars are zero; otherwise
// a combined Strauss-style algorithm (EcmultCombined) shares the doubling
// steps between both multiplications, which is roughly twice as fast as
// computing and adding the two products separately.
func secp256k1_ecmult(r *secp256k1_gej, a *secp256k1_gej, na *secp256k1_scalar, ng *secp256k1_scalar) {
	// Convert input to the internal Go types.
	var geja GroupElementJacobian
	geja.x.n = a.x.n
	geja.y.n = a.y.n
	geja.z.n = a.z.n
	geja.infinity = a.infinity != 0

	var sna, sng Scalar
	sna.d = na.d
	sng.d = ng.d

	// Both scalars zero: the result is the point at infinity.
	if sna.isZero() && sng.isZero() {
		r.x.n = [5]uint64{0, 0, 0, 0, 0}
		r.y.n = [5]uint64{0, 0, 0, 0, 0}
		r.z.n = [5]uint64{0, 0, 0, 0, 0}
		r.infinity = 1
		return
	}

	// na == 0: reduces to a plain generator multiplication ng * G.
	if sna.isZero() {
		var ngg GroupElementJacobian
		EcmultGen(&ngg, &sng)
		r.x.n = ngg.x.n
		r.y.n = ngg.y.n
		r.z.n = ngg.z.n
		r.infinity = boolToInt(ngg.infinity)
		return
	}

	// ng == 0: reduces to a single point multiplication na * a.
	if sng.isZero() {
		var naa GroupElementJacobian
		Ecmult(&naa, &geja, &sna)
		r.x.n = naa.x.n
		r.y.n = naa.y.n
		r.z.n = naa.z.n
		r.infinity = boolToInt(naa.infinity)
		return
	}

	// General case: combined multiplication sharing doublings.
	var gejr GroupElementJacobian
	EcmultCombined(&gejr, &geja, &sna, &sng)

	r.x.n = gejr.x.n
	r.y.n = gejr.y.n
	r.z.n = gejr.z.n
	r.infinity = boolToInt(gejr.infinity)
}
781
782 // ============================================================================
783 // PUBKEY/KEYPAIR OPERATIONS
784 // ============================================================================
785
// secp256k1_context mirrors the upstream context struct: the generator
// multiplication context plus a declassify flag (only meaningful in VERIFY
// builds).
type secp256k1_context struct {
	ecmult_gen_ctx secp256k1_ecmult_gen_context
	declassify     int
}

// secp256k1_declassify marks data as no longer secret for constant-time
// analysis tooling; a no-op in non-VERIFY builds such as this one.
func secp256k1_declassify(ctx *secp256k1_context, p unsafe.Pointer, len uintptr) {
	// No-op
}

// secp256k1_pubkey is an opaque 64-byte public key in the library's internal
// encoding (see PublicKey / GroupElementAffine fromBytes/toBytes).
type secp256k1_pubkey struct {
	data [64]byte
}

// secp256k1_xonly_pubkey is an opaque 32-byte BIP-340 x-only public key.
type secp256k1_xonly_pubkey struct {
	data [32]byte
}

// secp256k1_keypair is an opaque 96-byte keypair: the 32-byte secret key
// followed by the 64-byte public key (see secp256k1_keypair_load).
type secp256k1_keypair struct {
	data [96]byte
}
811
// secp256k1_pubkey_load deserializes pubkey into an affine group element.
// Returns false for the point at infinity or when the (normalized) x
// coordinate is zero.
// NOTE(review): gep.fromBytes reports no error here — confirm it validates
// that the decoded point actually lies on the curve.
func secp256k1_pubkey_load(ctx *secp256k1_context, ge *secp256k1_ge, pubkey *secp256k1_pubkey) bool {
	var pub PublicKey
	copy(pub.data[:], pubkey.data[:])

	var gep GroupElementAffine
	gep.fromBytes(pub.data[:])

	if gep.isInfinity() {
		return false
	}

	ge.x.n = gep.x.n
	ge.y.n = gep.y.n
	ge.infinity = boolToInt(gep.infinity)

	// Reject a zero x coordinate (invalid public key).
	var fex FieldElement
	fex.n = ge.x.n
	fex.normalize()
	return !fex.isZero()
}
833
834 // secp256k1_pubkey_save saves public key
835 func secp256k1_pubkey_save(pubkey *secp256k1_pubkey, ge *secp256k1_ge) {
836 var gep GroupElementAffine
837 gep.x.n = ge.x.n
838 gep.y.n = ge.y.n
839 gep.infinity = ge.infinity != 0
840
841 var pub PublicKey
842 gep.toBytes(pub.data[:])
843 copy(pubkey.data[:], pub.data[:])
844 }
845
// secp256k1_xonly_pubkey_load reconstructs a full point from a 32-byte
// x-only public key, selecting the candidate given by setXOVar(..., false) —
// presumably the even-Y solution required by BIP-340; confirm setXOVar's
// parity convention. Returns false if x is out of range or not on the curve.
func secp256k1_xonly_pubkey_load(ctx *secp256k1_context, ge *secp256k1_ge, pubkey *secp256k1_xonly_pubkey) bool {
	// Reconstruct point from X coordinate (x-only pubkey only has X)
	var x FieldElement
	if err := x.setB32(pubkey.data[:]); err != nil {
		return false
	}

	// Try to recover Y coordinate (use even Y for BIP-340)
	var gep GroupElementAffine
	if !gep.setXOVar(&x, false) {
		return false
	}

	ge.x.n = gep.x.n
	ge.y.n = gep.y.n
	ge.infinity = boolToInt(gep.infinity)

	return true
}
866
// secp256k1_keypair_load splits keypair into its public point pk and, when sk
// is non-nil, its secret scalar. On failure it returns false and installs
// safe dummy values (pk = infinity, sk = 1) so callers never operate on
// uninitialized output.
func secp256k1_keypair_load(ctx *secp256k1_context, sk *secp256k1_scalar, pk *secp256k1_ge, keypair *secp256k1_keypair) bool {
	// Layout: keypair.data = seckey (32 bytes) || pubkey (64 bytes).
	var pubkey secp256k1_pubkey
	copy(pubkey.data[:], keypair.data[32:])

	// The public key is not secret; declassify for constant-time tooling.
	secp256k1_declassify(ctx, unsafe.Pointer(&pubkey.data[0]), 64)

	ret := secp256k1_pubkey_load(ctx, pk, &pubkey)
	if sk != nil {
		var s Scalar
		ret = ret && s.setB32Seckey(keypair.data[:32])
		if ret {
			sk.d = s.d
		}
	}

	if !ret {
		// Set to default values
		if pk != nil {
			secp256k1_ge_set_infinity(pk)
		}
		if sk != nil {
			*sk = secp256k1_scalar_one
		}
	}

	return ret
}
895
896 // ============================================================================
897 // SCHNORR SIGNATURE OPERATIONS
898 // ============================================================================
899
// secp256k1_schnorrsig_sha256_tagged sets sha to the precomputed midstate of
// the BIP-340 "BIP0340/challenge" tagged hash: the state after absorbing
// SHA256(tag) || SHA256(tag), which is exactly one 64-byte block — hence
// bytes = 64. The constants are the upstream precomputed midstate words.
func secp256k1_schnorrsig_sha256_tagged(sha *secp256k1_sha256) {
	secp256k1_sha256_initialize(sha)
	sha.s[0] = 0x9cecba11
	sha.s[1] = 0x23925381
	sha.s[2] = 0x11679112
	sha.s[3] = 0xd1627e0f
	sha.s[4] = 0x97c87550
	sha.s[5] = 0x003cc765
	sha.s[6] = 0x90f61164
	sha.s[7] = 0x33e9b66a
	sha.bytes = 64
}
913
914 // secp256k1_schnorrsig_challenge computes challenge hash
915 func secp256k1_schnorrsig_challenge(e *secp256k1_scalar, r32 []byte, msg []byte, msglen int, pubkey32 []byte) {
916 // Zero-allocation challenge computation
917 var challengeHash [32]byte
918
919 // Get hash context from pool (thread-safe)
920 h := getChallengeHashContext()
921 defer putChallengeHashContext(h)
922
923 // Use precomputed SHA256(tag) hash to avoid allocation
924 tagHash := getTaggedHashPrefix(bip340ChallengeTag)
925
926 // Second hash: SHA256(SHA256(tag) || SHA256(tag) || r32 || pubkey32 || msg)
927 h.Reset()
928 h.Write(tagHash[:]) // SHA256(tag)
929 h.Write(tagHash[:]) // SHA256(tag) again
930 h.Write(r32[:32]) // r32
931 h.Write(pubkey32[:32]) // pubkey32
932 h.Write(msg[:msglen]) // msg
933
934 // Sum into a temporary buffer, then copy
935 var temp [32]byte
936 h.Sum(temp[:0])
937 copy(challengeHash[:], temp[:])
938
939 // Convert hash to scalar directly - avoid intermediate Scalar by setting directly
940 e.d[0] = uint64(challengeHash[31]) | uint64(challengeHash[30])<<8 | uint64(challengeHash[29])<<16 | uint64(challengeHash[28])<<24 |
941 uint64(challengeHash[27])<<32 | uint64(challengeHash[26])<<40 | uint64(challengeHash[25])<<48 | uint64(challengeHash[24])<<56
942 e.d[1] = uint64(challengeHash[23]) | uint64(challengeHash[22])<<8 | uint64(challengeHash[21])<<16 | uint64(challengeHash[20])<<24 |
943 uint64(challengeHash[19])<<32 | uint64(challengeHash[18])<<40 | uint64(challengeHash[17])<<48 | uint64(challengeHash[16])<<56
944 e.d[2] = uint64(challengeHash[15]) | uint64(challengeHash[14])<<8 | uint64(challengeHash[13])<<16 | uint64(challengeHash[12])<<24 |
945 uint64(challengeHash[11])<<32 | uint64(challengeHash[10])<<40 | uint64(challengeHash[9])<<48 | uint64(challengeHash[8])<<56
946 e.d[3] = uint64(challengeHash[7]) | uint64(challengeHash[6])<<8 | uint64(challengeHash[5])<<16 | uint64(challengeHash[4])<<24 |
947 uint64(challengeHash[3])<<32 | uint64(challengeHash[2])<<40 | uint64(challengeHash[1])<<48 | uint64(challengeHash[0])<<56
948
949 // Check overflow inline (same logic as Scalar.checkOverflow) and reduce if needed
950 yes := 0
951 no := 0
952 no |= boolToInt(e.d[3] < scalarN3)
953 yes |= boolToInt(e.d[2] > scalarN2) & (^no)
954 no |= boolToInt(e.d[2] < scalarN2)
955 yes |= boolToInt(e.d[1] > scalarN1) & (^no)
956 no |= boolToInt(e.d[1] < scalarN1)
957 yes |= boolToInt(e.d[0] >= scalarN0) & (^no)
958
959 if yes != 0 {
960 // Reduce inline using secp256k1_scalar_reduce logic
961 secp256k1_scalar_reduce(e, 1)
962 }
963 }
964
965 // Direct array-based implementations to avoid struct allocations
966
// feSetB32Limit decodes 32 big-endian bytes into the 5x52-limb field-element
// representation (five limbs of up to 52 bits, least-significant first) and
// returns false when the value is >= the field modulus p = 2^256 - 2^32 - 977
// or the buffers are too small.
//
// Fixes two defects in the previous version:
//  1. It packed four full 64-bit words with r[4] = 0 instead of the 5x52
//     layout used by the field routines; as a side effect the
//     r[4] == 0x0FFFFFFFFFFFF branch of the limit check could never fire.
//  2. Its middle-limb mask was 0xFFFFFFFFFFFF (48 bits) instead of the
//     52-bit mask used by the sibling secp256k1_fe_set_b32_limit.
func feSetB32Limit(r []uint64, b []byte) bool {
	if len(r) < 5 || len(b) < 32 {
		return false
	}

	// Assemble the four big-endian 64-bit words of the input
	// (d[0] least significant).
	var d [4]uint64
	for i := 0; i < 4; i++ {
		w := b[24-8*i : 32-8*i]
		d[i] = uint64(w[7]) | uint64(w[6])<<8 | uint64(w[5])<<16 | uint64(w[4])<<24 |
			uint64(w[3])<<32 | uint64(w[2])<<40 | uint64(w[1])<<48 | uint64(w[0])<<56
	}

	// Repack into 5x52 limbs.
	const m52 = uint64(0xFFFFFFFFFFFFF) // 52-bit limb mask
	r[0] = d[0] & m52
	r[1] = (d[0]>>52 | d[1]<<12) & m52
	r[2] = (d[1]>>40 | d[2]<<24) & m52
	r[3] = (d[2]>>28 | d[3]<<36) & m52
	r[4] = d[3] >> 16

	// Reject values >= p (same test as secp256k1_fe_set_b32_limit).
	overflow := r[4] == 0x0FFFFFFFFFFFF &&
		(r[3]&r[2]&r[1]) == m52 &&
		r[0] >= 0xFFFFEFFFFFC2F
	return !overflow
}
985
986 // xonlyPubkeyLoad loads x-only public key into arrays
987 func xonlyPubkeyLoad(pkx, pky []uint64, pkInf *int, pubkey *secp256k1_xonly_pubkey) bool {
988 if len(pkx) < 5 || len(pky) < 5 {
989 return false
990 }
991
992 // Set x coordinate from pubkey data
993 if !feSetB32Limit(pkx, pubkey.data[:32]) {
994 return false
995 }
996
997 // Compute y^2 = x^3 + 7
998 var x2, x3, y2 [5]uint64
999 fieldSqr(x2[:], pkx)
1000 fieldMul(x3[:], x2[:], pkx)
1001 // Add 7 (which is 111 in binary, so add 1 seven times)
1002 x3[0] += 7
1003 fieldSqr(y2[:], x3[:])
1004
1005 // Check if y^2 is quadratic residue (has square root)
1006 if !fieldSqrt(pky, y2[:]) {
1007 return false
1008 }
1009
1010 *pkInf = 0
1011 return true
1012 }
1013
1014 // schnorrsigChallenge computes challenge directly into array
1015 func schnorrsigChallenge(e []uint64, r32 []byte, msg []byte, msglen int, pubkey32 []byte) {
1016 if len(e) < 4 {
1017 return
1018 }
1019
1020 // Zero-allocation challenge computation
1021 var challengeHash [32]byte
1022
1023 // Use precomputed SHA256(tag) hash to avoid allocation
1024 tagHash := getTaggedHashPrefix(bip340ChallengeTag)
1025
1026 // Second hash: SHA256(SHA256(tag) || SHA256(tag) || r32 || pubkey32 || msg)
1027 h := getChallengeHashContext()
1028 defer putChallengeHashContext(h)
1029 h.Reset()
1030 h.Write(tagHash[:]) // SHA256(tag)
1031 h.Write(tagHash[:]) // SHA256(tag) again
1032 h.Write(r32[:32]) // r32
1033 h.Write(pubkey32[:32]) // pubkey32
1034 h.Write(msg[:msglen]) // msg
1035
1036 // Sum into challengeHash
1037 var temp [32]byte
1038 h.Sum(temp[:0])
1039 copy(challengeHash[:], temp[:])
1040
1041 // Convert hash to scalar directly
1042 var tempScalar Scalar
1043 tempScalar.d[0] = uint64(challengeHash[31]) | uint64(challengeHash[30])<<8 | uint64(challengeHash[29])<<16 | uint64(challengeHash[28])<<24 |
1044 uint64(challengeHash[27])<<32 | uint64(challengeHash[26])<<40 | uint64(challengeHash[25])<<48 | uint64(challengeHash[24])<<56
1045 tempScalar.d[1] = uint64(challengeHash[23]) | uint64(challengeHash[22])<<8 | uint64(challengeHash[21])<<16 | uint64(challengeHash[20])<<24 |
1046 uint64(challengeHash[19])<<32 | uint64(challengeHash[18])<<40 | uint64(challengeHash[17])<<48 | uint64(challengeHash[16])<<56
1047 tempScalar.d[2] = uint64(challengeHash[15]) | uint64(challengeHash[14])<<8 | uint64(challengeHash[13])<<16 | uint64(challengeHash[12])<<24 |
1048 uint64(challengeHash[11])<<32 | uint64(challengeHash[10])<<40 | uint64(challengeHash[9])<<48 | uint64(challengeHash[8])<<56
1049 tempScalar.d[3] = uint64(challengeHash[7]) | uint64(challengeHash[6])<<8 | uint64(challengeHash[5])<<16 | uint64(challengeHash[4])<<24 |
1050 uint64(challengeHash[3])<<32 | uint64(challengeHash[2])<<40 | uint64(challengeHash[1])<<48 | uint64(challengeHash[0])<<56
1051
1052 // Check overflow and reduce if needed
1053 if tempScalar.checkOverflow() {
1054 tempScalar.reduce(1)
1055 }
1056
1057 // Copy back to array
1058 e[0], e[1], e[2], e[3] = tempScalar.d[0], tempScalar.d[1], tempScalar.d[2], tempScalar.d[3]
1059 }
1060
1061 // scalarSetB32 sets scalar from 32 bytes
1062 func scalarSetB32(r []uint64, bin []byte, overflow *int) {
1063 if len(r) < 4 || len(bin) < 32 {
1064 if overflow != nil {
1065 *overflow = 1
1066 }
1067 return
1068 }
1069
1070 r[0] = uint64(bin[31]) | uint64(bin[30])<<8 | uint64(bin[29])<<16 | uint64(bin[28])<<24 |
1071 uint64(bin[27])<<32 | uint64(bin[26])<<40 | uint64(bin[25])<<48 | uint64(bin[24])<<56
1072 r[1] = uint64(bin[23]) | uint64(bin[22])<<8 | uint64(bin[21])<<16 | uint64(bin[20])<<24 |
1073 uint64(bin[19])<<32 | uint64(bin[18])<<40 | uint64(bin[17])<<48 | uint64(bin[16])<<56
1074 r[2] = uint64(bin[15]) | uint64(bin[14])<<8 | uint64(bin[13])<<16 | uint64(bin[12])<<24 |
1075 uint64(bin[11])<<32 | uint64(bin[10])<<40 | uint64(bin[9])<<48 | uint64(bin[8])<<56
1076 r[3] = uint64(bin[7]) | uint64(bin[6])<<8 | uint64(bin[5])<<16 | uint64(bin[4])<<24 |
1077 uint64(bin[3])<<32 | uint64(bin[2])<<40 | uint64(bin[1])<<48 | uint64(bin[0])<<56
1078
1079 var tempS Scalar
1080 copy(tempS.d[:], r)
1081 if overflow != nil {
1082 *overflow = boolToInt(tempS.checkOverflow())
1083 }
1084 if tempS.checkOverflow() {
1085 tempS.reduce(1)
1086 copy(r, tempS.d[:])
1087 }
1088 }
1089
1090 // feNormalizeVar normalizes field element
1091 func feNormalizeVar(r []uint64) {
1092 if len(r) < 5 {
1093 return
1094 }
1095 var tempFE FieldElement
1096 copy(tempFE.n[:], r)
1097 fieldNormalize(&tempFE)
1098 copy(r, tempFE.n[:])
1099 }
1100
1101 // feGetB32 serializes field element to 32 bytes
1102 func feGetB32(b []byte, a []uint64) {
1103 if len(b) < 32 || len(a) < 5 {
1104 return
1105 }
1106 var tempFE FieldElement
1107 copy(tempFE.n[:], a)
1108 fieldGetB32(b, &tempFE)
1109 }
1110
// scalarNegate negates a scalar in place: r <- -r mod n, where n is the
// secp256k1 group order and r holds four little-endian 64-bit limbs.
// The zero scalar is left unchanged. Assumes r is already reduced (< n),
// in which case the result is also reduced.
//
// The previous implementation combined per-limb two's complement with
// per-limb constant additions but dropped every inter-limb carry
// (e.g. `r[0] = (^r[0]) + 1` loses the carry into r[1] whenever
// r[0] == 0 while r != 0), producing incorrect negations. Computing
// n - r with explicit borrow propagation fixes that.
func scalarNegate(r []uint64) {
	if len(r) < 4 {
		return
	}

	// -0 mod n is 0.
	if r[0] == 0 && r[1] == 0 && r[2] == 0 && r[3] == 0 {
		return
	}

	// secp256k1 group order n, little-endian 64-bit limbs.
	const (
		n0 uint64 = 0xBFD25E8CD0364141
		n1 uint64 = 0xBAAEDCE6AF48A03B
		n2 uint64 = 0xFFFFFFFFFFFFFFFE
		n3 uint64 = 0xFFFFFFFFFFFFFFFF
	)

	// sub64 returns x - y - borrowIn along with the outgoing borrow (0 or 1).
	sub64 := func(x, y, borrowIn uint64) (diff, borrowOut uint64) {
		d := x - y
		if x < y {
			borrowOut = 1
		}
		diff = d - borrowIn
		if d < borrowIn {
			borrowOut = 1
		}
		return diff, borrowOut
	}

	// r = n - r with borrows carried across limbs. For 0 < r < n the final
	// borrow is zero and the result is already reduced.
	var b uint64
	r[0], b = sub64(n0, r[0], 0)
	r[1], b = sub64(n1, r[1], b)
	r[2], b = sub64(n2, r[2], b)
	r[3], _ = sub64(n3, r[3], b)
}
1135
// gejSetGe loads the affine point (ax, ay, aInf) into jacobian coordinates
// (rjx, rjy, rjz, *rjInf). A finite point gets z = 1; the point at infinity
// gets z = 0. Undersized slices make the call a no-op.
func gejSetGe(rjx, rjy, rjz []uint64, rjInf *int, ax, ay []uint64, aInf int) {
	if len(rjx) < 5 || len(rjy) < 5 || len(rjz) < 5 || len(ax) < 5 || len(ay) < 5 {
		return
	}

	// x and y carry over unchanged in either case.
	copy(rjx, ax)
	copy(rjy, ay)

	// Start z at zero, then bump the low limb to 1 for a finite point.
	for i := range rjz[:5] {
		rjz[i] = 0
	}
	if aInf != 0 {
		*rjInf = 1
	} else {
		*rjInf = 0
		rjz[0] = 1
	}
}
1154
1155 // geSetGejVar converts jacobian to affine coordinates
1156 func geSetGejVar(rx, ry []uint64, rjx, rjy, rjz []uint64, rjInf int, rInf *int) {
1157 if len(rx) < 5 || len(ry) < 5 || len(rjx) < 5 || len(rjy) < 5 || len(rjz) < 5 {
1158 return
1159 }
1160
1161 if rjInf != 0 {
1162 *rInf = 1
1163 return
1164 }
1165
1166 *rInf = 0
1167
1168 // Compute z^-1
1169 var zinv [5]uint64
1170 fieldInvVar(zinv[:], rjz)
1171
1172 // Compute z^-2
1173 var zinv2 [5]uint64
1174 fieldSqr(zinv2[:], zinv[:])
1175
1176 // x = x * z^-2
1177 fieldMul(rx, rjx, zinv2[:])
1178
1179 // Compute z^-3 = z^-1 * z^-2
1180 var zinv3 [5]uint64
1181 fieldMul(zinv3[:], zinv[:], zinv2[:])
1182
1183 // y = y * z^-3
1184 fieldMul(ry, rjy, zinv3[:])
1185 }
1186
1187 // feIsOdd checks if field element is odd
1188 func feIsOdd(a []uint64) bool {
1189 if len(a) < 5 {
1190 return false
1191 }
1192
1193 var normalized [5]uint64
1194 copy(normalized[:], a)
1195 var tempFE FieldElement
1196 copy(tempFE.n[:], normalized[:])
1197 fieldNormalize(&tempFE)
1198 return (tempFE.n[0] & 1) == 1
1199 }
1200
1201 // ecmult computes r = na * a + ng * G using arrays
1202 func ecmult(rjx, rjy, rjz []uint64, rjInf *int, ajx, ajy, ajz []uint64, ajInf int, na, ng []uint64) {
1203 if len(rjx) < 5 || len(rjy) < 5 || len(rjz) < 5 || len(ajx) < 5 || len(ajy) < 5 || len(ajz) < 5 || len(na) < 4 || len(ng) < 4 {
1204 return
1205 }
1206
1207 // Convert arrays to structs for optimized computation
1208 var a secp256k1_gej
1209 copy(a.x.n[:], ajx)
1210 copy(a.y.n[:], ajy)
1211 copy(a.z.n[:], ajz)
1212 a.infinity = ajInf
1213
1214 var sna secp256k1_scalar
1215 copy(sna.d[:], na)
1216
1217 var sng secp256k1_scalar
1218 copy(sng.d[:], ng)
1219
1220 var r secp256k1_gej
1221 secp256k1_ecmult(&r, &a, &sna, &sng)
1222
1223 // Convert back to arrays
1224 copy(rjx, r.x.n[:])
1225 copy(rjy, r.y.n[:])
1226 copy(rjz, r.z.n[:])
1227 *rjInf = r.infinity
1228 }
1229
// secp256k1_schnorrsig_verify verifies a BIP-340 Schnorr signature.
// It returns 1 if sig64 (r || s, 64 bytes) is a valid signature over
// msg[:msglen] for the given x-only public key, and 0 on any failure
// (bad arguments, out-of-range signature components, or verification
// equation mismatch).
func secp256k1_schnorrsig_verify(ctx *secp256k1_context, sig64 []byte, msg []byte, msglen int, pubkey *secp256k1_xonly_pubkey) int {
	var s secp256k1_scalar
	var e secp256k1_scalar
	var rj secp256k1_gej
	var pk secp256k1_ge
	var pkj secp256k1_gej
	var rx secp256k1_fe
	var r secp256k1_ge
	var overflow int

	// Argument validation. A nil msg is acceptable only when msglen == 0
	// (variable-length, including empty, messages are allowed).
	if ctx == nil {
		return 0
	}
	if sig64 == nil {
		return 0
	}
	if msg == nil && msglen != 0 {
		return 0
	}
	if pubkey == nil {
		return 0
	}

	// Check signature length
	if len(sig64) < 64 {
		return 0
	}

	// r is the first 32 bytes, parsed as a field element; values >= the
	// field prime are rejected by the _limit variant.
	if !secp256k1_fe_set_b32_limit(&rx, sig64[:32]) {
		return 0
	}

	// s is the last 32 bytes, parsed as a scalar; values >= the group
	// order are rejected via the overflow flag.
	secp256k1_scalar_set_b32(&s, sig64[32:], &overflow)
	if overflow != 0 {
		return 0
	}

	if !secp256k1_xonly_pubkey_load(ctx, &pk, pubkey) {
		return 0
	}

	// Compute the challenge e = hash(r || pk.x || msg) from the normalized
	// serialization of pk.x (extracted once to avoid re-serializing).
	secp256k1_fe_normalize_var(&pk.x)
	var pkXBytes [32]byte
	secp256k1_fe_get_b32(pkXBytes[:], &pk.x)
	secp256k1_schnorrsig_challenge(&e, sig64[:32], msg, msglen, pkXBytes[:])

	// Compute R = s*G + (-e)*P; a valid signature satisfies R = r-point.
	secp256k1_scalar_negate(&e, &e)
	secp256k1_gej_set_ge(&pkj, &pk)
	secp256k1_ecmult(&rj, &pkj, &e, &s)

	secp256k1_ge_set_gej_var(&r, &rj)
	if secp256k1_ge_is_infinity(&r) {
		return 0
	}

	// BIP-340 requires R to have an even y coordinate.
	// Normalize r.y only once before the parity check.
	secp256k1_fe_normalize_var(&r.y)
	if secp256k1_fe_is_odd(&r.y) {
		return 0
	}

	// Normalize r.x and rx once so the limb-wise comparison below is valid.
	secp256k1_fe_normalize_var(&r.x)
	secp256k1_fe_normalize_var(&rx)

	// Direct comparison of normalized field elements avoids a
	// serialize-and-compare round trip; verification succeeds iff
	// R.x equals the r value carried in the signature.
	if rx.n[0] != r.x.n[0] || rx.n[1] != r.x.n[1] || rx.n[2] != r.x.n[2] ||
		rx.n[3] != r.x.n[3] || rx.n[4] != r.x.n[4] {
		return 0
	}

	return 1
}
1306