cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

checksum.c (4245B)


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * INET     An implementation of the TCP/IP protocol suite for the LINUX
 *          operating system.  INET is implemented using the  BSD Socket
 *          interface as the means of communication with the user level.
 *
 *          IP/TCP/UDP checksumming routines
 *
 * Authors: Jorge Cwik, <jorge@laser.satlink.net>
 *          Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *          Tom May, <ftom@netcom.com>
 *          Andreas Schwab, <schwab@issan.informatik.uni-dortmund.de>
 *          Lots of code moved from tcp.c and ip.c; see those files
 *          for more names.
 *
 * 03/02/96 Jes Sorensen, Andreas Schwab, Roman Hodek:
 *          Fixed some nasty bugs, causing some horrible crashes.
 *          A: At some points, the sum (%0) was used as
 *          length-counter instead of the length counter
 *          (%1). Thanks to Roman Hodek for pointing this out.
 *          B: GCC seems to mess up if one uses too many
 *          data-registers to hold input values and one tries to
 *          specify d0 and d1 as scratch registers. Letting gcc
 *          choose these registers itself solves the problem.
 */

/* Revised by Kenneth Albanowski for m68knommu. Basic problem: unaligned access
 kills, so most of the assembly has to go. */

#include <linux/export.h>
#include <net/checksum.h>

#include <asm/byteorder.h>

#ifndef do_csum
static inline unsigned short from32to16(unsigned int x)
{
        /* add up 16-bit and 16-bit for 16+c bit */
        x = (x & 0xffff) + (x >> 16);
        /* add up carry.. */
        x = (x & 0xffff) + (x >> 16);
        return x;
}
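
/*
 * Worked example (illustrative): from32to16(0x0001abcd)
 *   first fold:  0xabcd + 0x0001 = 0xabce
 *   second fold: 0xabce + 0x0000 = 0xabce
 * The second fold only matters when the first fold itself carries,
 * e.g. 0xffff0001 folds to 0x10000 and then to 0x0001.
 */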

static unsigned int do_csum(const unsigned char *buff, int len)
{
        int odd;
        unsigned int result = 0;

        if (len <= 0)
                goto out;
        odd = 1 & (unsigned long) buff;
        if (odd) {
#ifdef __LITTLE_ENDIAN
                result += (*buff << 8);
#else
                result = *buff;
#endif
                len--;
                buff++;
        }
        if (len >= 2) {
                if (2 & (unsigned long) buff) {
                        result += *(unsigned short *) buff;
                        len -= 2;
                        buff += 2;
                }
                if (len >= 4) {
                        const unsigned char *end = buff + ((unsigned)len & ~3);
                        unsigned int carry = 0;
                        do {
                                unsigned int w = *(unsigned int *) buff;
                                buff += 4;
                                result += carry;
                                result += w;
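                                /*
                                 * The 32-bit add just wrapped iff the new
                                 * result is smaller than the word added, so
                                 * record the carry here and feed it back in
                                 * on the next iteration (end-around carry).
                                 */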
                                carry = (w > result);
                        } while (buff < end);
                        result += carry;
                        result = (result & 0xffff) + (result >> 16);
                }
                if (len & 2) {
                        result += *(unsigned short *) buff;
                        buff += 2;
                }
        }
        if (len & 1)
#ifdef __LITTLE_ENDIAN
                result += *buff;
#else
                result += (*buff << 8);
#endif
        result = from32to16(result);
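        /*
         * If the buffer started on an odd address, every 16-bit word was
         * summed with its bytes swapped relative to the caller's view (the
         * leading byte was folded in accordingly above), so swap the folded
         * result back into the caller's byte order.
         */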
        if (odd)
                result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
out:
        return result;
}
#endif

#ifndef ip_fast_csum
/*
 *      This is a version of ip_compute_csum() optimized for IP headers,
 *      which always checksum on 4 octet boundaries.
 */
__sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
        return (__force __sum16)~do_csum(iph, ihl*4);
}
EXPORT_SYMBOL(ip_fast_csum);
#endif
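
/*
 * Illustrative sketch (not part of this file): ihl is the IPv4 header length
 * in 32-bit words, so a minimal 20-byte header is checked with ihl == 5.
 * Assuming iph points at a struct iphdr, a header whose checksum field is
 * intact sums to zero:
 *
 *      if (ip_fast_csum(iph, iph->ihl) != 0)
 *              ; // header is corrupt, drop the packet
 */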

/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
__wsum csum_partial(const void *buff, int len, __wsum wsum)
{
        unsigned int sum = (__force unsigned int)wsum;
        unsigned int result = do_csum(buff, len);

        /* add in old sum, and carry.. */
        result += sum;
        if (sum > result)
                result += 1;
        return (__force __wsum)result;
}
EXPORT_SYMBOL(csum_partial);
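
/*
 * Illustrative sketch (not part of this file): checksumming a payload that
 * arrives in two pieces and folding the running 32-bit sum into the final
 * 16-bit checksum with csum_fold(), available via <net/checksum.h>:
 *
 *      __wsum sum = csum_partial(part1, len1, 0);
 *      sum = csum_partial(part2, len2, sum);
 *      __sum16 check = csum_fold(sum);
 *
 * As noted above, only the last piece may have an odd length.
 */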

/*
 * this routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
 */
__sum16 ip_compute_csum(const void *buff, int len)
{
        return (__force __sum16)~do_csum(buff, len);
}
EXPORT_SYMBOL(ip_compute_csum);

#ifndef csum_tcpudp_nofold
static inline u32 from64to32(u64 x)
{
        /* add up 32-bit and 32-bit for 32+c bit */
        x = (x & 0xffffffff) + (x >> 32);
        /* add up carry.. */
        x = (x & 0xffffffff) + (x >> 32);
        return (u32)x;
}

__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
                          __u32 len, __u8 proto, __wsum sum)
{
        unsigned long long s = (__force u32)sum;

        s += (__force u32)saddr;
        s += (__force u32)daddr;
#ifdef __BIG_ENDIAN
        s += proto + len;
#else
        s += (proto + len) << 8;
#endif
        return (__force __wsum)from64to32(s);
}
EXPORT_SYMBOL(csum_tcpudp_nofold);
#endif
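
/*
 * Illustrative note (not part of this file): this pseudo-header sum is what
 * csum_tcpudp_magic() folds down to the final 16-bit TCP/UDP checksum,
 * roughly:
 *
 *      csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum))
 */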