PostGIS 3.7.0dev-r@@SVN_REVISION@@

◆ hashlittle2()

void hashlittle2(const void *key, size_t length, uint32_t *pc, uint32_t *pb)

Definition at line 479 of file lookup3.c.
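Both *pc and *pb are in/out parameters: on entry they supply two 32-bit seeds, and on return they hold the two 32-bit hash words (upstream lookup3.c documents *pc as the better-mixed of the two, and suggests folding the pair into 64 bits when a wider value is needed). Below is a minimal usage sketch, not part of PostGIS itself: it repeats the prototype rather than including a header (the header name is not shown on this page) and assumes lookup3.c is compiled and linked alongside it.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Prototype as shown above; lookup3.c must be compiled and linked in. */
void hashlittle2(const void *key, size_t length, uint32_t *pc, uint32_t *pb);

int main(void)
{
    const char *key = "POINT(0 0)";

    /* *pc and *pb carry the seeds in and the two hash words out. */
    uint32_t pc = 0;   /* primary seed, becomes the primary hash     */
    uint32_t pb = 0;   /* secondary seed, becomes the secondary hash */

    hashlittle2(key, strlen(key), &pc, &pb);

    /* Either word can be used alone, or the pair folded into 64 bits. */
    uint64_t h64 = pc + (((uint64_t)pb) << 32);
    printf("pc=%08" PRIx32 " pb=%08" PRIx32 " h64=%016" PRIx64 "\n", pc, pb, h64);
    return 0;
}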

{
  uint32_t a,b,c;                                          /* internal state */
  union { const void *ptr; size_t i; } u;     /* needed for Mac Powerbook G4 */

  /* Set up the internal state */
  a = b = c = 0xdeadbeef + ((uint32_t)length) + *pc;
  c += *pb;

  u.ptr = key;
  if (HASH_LITTLE_ENDIAN && ((u.i & 0x3) == 0)) {
    const uint32_t *k = (const uint32_t *)key;         /* read 32-bit chunks */
#ifdef VALGRIND
    const uint8_t  *k8;          /* only the VALGRIND branch reads byte-wise */
#endif

    /*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */
    while (length > 12)
    {
      a += k[0];
      b += k[1];
      c += k[2];
      mix(a,b,c);
      length -= 12;
      k += 3;
    }

    /*----------------------------- handle the last (probably partial) block */
    /*
     * "k[2]&0xffffff" actually reads beyond the end of the string, but
     * then masks off the part it's not allowed to read.  Because the
     * string is aligned, the masked-off tail is in the same word as the
     * rest of the string.  Every machine with memory protection I've seen
     * does it on word boundaries, so is OK with this.  But VALGRIND will
     * still catch it and complain.  The masking trick does make the hash
     * noticeably faster for short strings (like English words).
     */
#ifndef VALGRIND

    switch(length)
    {
    case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
    case 11: c+=k[2]&0xffffff; b+=k[1]; a+=k[0]; break;
    case 10: c+=k[2]&0xffff; b+=k[1]; a+=k[0]; break;
    case 9 : c+=k[2]&0xff; b+=k[1]; a+=k[0]; break;
    case 8 : b+=k[1]; a+=k[0]; break;
    case 7 : b+=k[1]&0xffffff; a+=k[0]; break;
    case 6 : b+=k[1]&0xffff; a+=k[0]; break;
    case 5 : b+=k[1]&0xff; a+=k[0]; break;
    case 4 : a+=k[0]; break;
    case 3 : a+=k[0]&0xffffff; break;
    case 2 : a+=k[0]&0xffff; break;
    case 1 : a+=k[0]&0xff; break;
    case 0 : *pc=c; *pb=b; return;  /* zero length strings require no mixing */
    }

#else /* make valgrind happy */

    k8 = (const uint8_t *)k;
    switch(length)
    {
    case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
    case 11: c+=((uint32_t)k8[10])<<16;  /* fall through */
    case 10: c+=((uint32_t)k8[9])<<8;    /* fall through */
    case 9 : c+=k8[8];                   /* fall through */
    case 8 : b+=k[1]; a+=k[0]; break;
    case 7 : b+=((uint32_t)k8[6])<<16;   /* fall through */
    case 6 : b+=((uint32_t)k8[5])<<8;    /* fall through */
    case 5 : b+=k8[4];                   /* fall through */
    case 4 : a+=k[0]; break;
    case 3 : a+=((uint32_t)k8[2])<<16;   /* fall through */
    case 2 : a+=((uint32_t)k8[1])<<8;    /* fall through */
    case 1 : a+=k8[0]; break;
    case 0 : *pc=c; *pb=b; return;  /* zero length strings require no mixing */
    }

#endif /* !valgrind */

  } else if (HASH_LITTLE_ENDIAN && ((u.i & 0x1) == 0)) {
    const uint16_t *k = (const uint16_t *)key;         /* read 16-bit chunks */
    const uint8_t  *k8;

    /*--------------- all but last block: aligned reads and different mixing */
    while (length > 12)
    {
      a += k[0] + (((uint32_t)k[1])<<16);
      b += k[2] + (((uint32_t)k[3])<<16);
      c += k[4] + (((uint32_t)k[5])<<16);
      mix(a,b,c);
      length -= 12;
      k += 6;
    }

    /*----------------------------- handle the last (probably partial) block */
    k8 = (const uint8_t *)k;
    switch(length)
    {
    case 12: c+=k[4]+(((uint32_t)k[5])<<16);
             b+=k[2]+(((uint32_t)k[3])<<16);
             a+=k[0]+(((uint32_t)k[1])<<16);
             break;
    case 11: c+=((uint32_t)k8[10])<<16;  /* fall through */
    case 10: c+=k[4];
             b+=k[2]+(((uint32_t)k[3])<<16);
             a+=k[0]+(((uint32_t)k[1])<<16);
             break;
    case 9 : c+=k8[8];                   /* fall through */
    case 8 : b+=k[2]+(((uint32_t)k[3])<<16);
             a+=k[0]+(((uint32_t)k[1])<<16);
             break;
    case 7 : b+=((uint32_t)k8[6])<<16;   /* fall through */
    case 6 : b+=k[2];
             a+=k[0]+(((uint32_t)k[1])<<16);
             break;
    case 5 : b+=k8[4];                   /* fall through */
    case 4 : a+=k[0]+(((uint32_t)k[1])<<16);
             break;
    case 3 : a+=((uint32_t)k8[2])<<16;   /* fall through */
    case 2 : a+=k[0];
             break;
    case 1 : a+=k8[0];
             break;
    case 0 : *pc=c; *pb=b; return;  /* zero length strings require no mixing */
    }

  } else {                        /* need to read the key one byte at a time */
    const uint8_t *k = (const uint8_t *)key;

    /*--------------- all but the last block: affect some 32 bits of (a,b,c) */
    while (length > 12)
    {
      a += k[0];
      a += ((uint32_t)k[1])<<8;
      a += ((uint32_t)k[2])<<16;
      a += ((uint32_t)k[3])<<24;
      b += k[4];
      b += ((uint32_t)k[5])<<8;
      b += ((uint32_t)k[6])<<16;
      b += ((uint32_t)k[7])<<24;
      c += k[8];
      c += ((uint32_t)k[9])<<8;
      c += ((uint32_t)k[10])<<16;
      c += ((uint32_t)k[11])<<24;
      mix(a,b,c);
      length -= 12;
      k += 12;
    }

    /*-------------------------------- last block: affect all 32 bits of (c) */
    switch(length)                   /* all the case statements fall through */
    {
    case 12: c+=((uint32_t)k[11])<<24;  /* fall through */
    case 11: c+=((uint32_t)k[10])<<16;  /* fall through */
    case 10: c+=((uint32_t)k[9])<<8;    /* fall through */
    case 9 : c+=k[8];                   /* fall through */
    case 8 : b+=((uint32_t)k[7])<<24;   /* fall through */
    case 7 : b+=((uint32_t)k[6])<<16;   /* fall through */
    case 6 : b+=((uint32_t)k[5])<<8;    /* fall through */
    case 5 : b+=k[4];                   /* fall through */
    case 4 : a+=((uint32_t)k[3])<<24;   /* fall through */
    case 3 : a+=((uint32_t)k[2])<<16;   /* fall through */
    case 2 : a+=((uint32_t)k[1])<<8;    /* fall through */
    case 1 : a+=k[0];
             break;
    case 0 : *pc=c; *pb=b; return;  /* zero length strings require no mixing */
    }
  }

  final(a,b,c);
  *pc=c; *pb=b;
}
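The block comment above the first switch describes a deliberate over-read: when the key is 32-bit aligned, reading the whole final word and masking off the bytes past the end of the key stays inside the same protected word, and on a little-endian machine it yields exactly the value the byte-wise VALGRIND branch assembles. The following standalone illustration of that equivalence is not from the PostGIS sources; the buffer contents are invented, and the equality shown holds on little-endian machines only, which is precisely why the masking path is guarded by HASH_LITTLE_ENDIAN.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    /* Aligned 4-byte word holding a 3-byte tail "cat" plus one junk byte. */
    unsigned char bytes[4] = { 'c', 'a', 't', 0xFF };   /* 0xFF = over-read junk */
    uint32_t word;
    memcpy(&word, bytes, sizeof word);

    uint32_t masked   = word & 0xffffff;                 /* aligned read + mask */
    uint32_t bytewise = bytes[0]
                      + (((uint32_t)bytes[1]) << 8)
                      + (((uint32_t)bytes[2]) << 16);    /* byte-at-a-time path */

    /* On a little-endian machine the two agree; the junk byte never matters. */
    printf("masked=%08" PRIx32 " bytewise=%08" PRIx32 " equal=%d\n",
           masked, bytewise, masked == bytewise);
    return 0;
}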
Macros referenced in the listing:

HASH_LITTLE_ENDIAN
Definition at line 68 of file lookup3.c.

mix(a, b, c)
Definition at line 129 of file lookup3.c.
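HASH_LITTLE_ENDIAN selects the little-endian code paths; within them, the union u exists only to test pointer alignment without a pointer-to-integer cast: (u.i & 0x3) == 0 picks 32-bit reads, (u.i & 0x1) == 0 picks 16-bit reads, and anything else falls back to byte-wise reads. A small sketch of the same pointer-punning alignment test follows; the helper name and the example buffer are invented for illustration.

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/* Hypothetical helper mirroring the test in hashlittle2(): pun the pointer
 * through a union and inspect its low bits. boundary must be a power of two. */
static int is_aligned(const void *p, size_t boundary)
{
    union { const void *ptr; size_t i; } u;
    u.ptr = p;
    return (u.i & (boundary - 1)) == 0;
}

int main(void)
{
    uint32_t words[4] = {0};                    /* at least 4-byte aligned */
    const char *bytes = (const char *)words;

    printf("bytes+0: 4-byte aligned? %d\n", is_aligned(bytes,     4));  /* 1 */
    printf("bytes+1: 4-byte aligned? %d\n", is_aligned(bytes + 1, 4));  /* 0 */
    printf("bytes+2: 2-byte aligned? %d\n", is_aligned(bytes + 2, 2));  /* 1 */
    return 0;
}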

Referenced by gserialized2_hash().
