Commit 1ec2a888 authored by Benjamin Beurdouche's avatar Benjamin Beurdouche

Bug 1696800 - HACL* update March 2021 - c95ab70fcb2bc21025d8845281bc4bc8987ca683 r=beurdouche

Differential Revision: https://phabricator.services.mozilla.com/D107387

--HG--
rename : lib/freebl/verified/Hacl_Curve25519_51.c => lib/freebl/verified/Hacl_Bignum25519_51.h
extra : moz-landing-system : lando
parent e1f2f555
......@@ -13,7 +13,7 @@ set -e -x -v
# HACL CI.
# When bug 1593647 is resolved, extract the code on CI again.
git clone -q "https://github.com/project-everest/hacl-star" ~/hacl-star
git -C ~/hacl-star checkout -q e4311991b1526734f99f4e3a0058895a46c63e5c
git -C ~/hacl-star checkout -q c95ab70fcb2bc21025d8845281bc4bc8987ca683
# Format the C snapshot.
cd ~/hacl-star/dist/mozilla
......
This diff is collapsed.
......@@ -95,7 +95,7 @@ rounds(uint32_t *st)
static inline void
chacha20_core(uint32_t *k, uint32_t *ctx, uint32_t ctr)
{
memcpy(k, ctx, (uint32_t)16U * sizeof(ctx[0U]));
memcpy(k, ctx, (uint32_t)16U * sizeof(uint32_t));
uint32_t ctr_u32 = ctr;
k[12U] = k[12U] + ctr_u32;
rounds(k);
......@@ -169,9 +169,9 @@ static inline void
/* Encrypt the trailing partial ChaCha20 block (len < 64 bytes):
 * stage the remainder into a zero-padded 64-byte buffer, encrypt it
 * in place, then copy only `len` bytes to the output.
 * NOTE(review): this span is stripped diff text — each memcpy appears
 * twice (the removed `sizeof(x[0U])` form and the added `sizeof(uint8_t)`
 * form, both copying the same bytes); in the real file only one of each
 * pair exists. */
chacha20_encrypt_last(uint32_t *ctx, uint32_t len, uint8_t *out, uint32_t incr, uint8_t *text)
{
uint8_t plain[64U] = { 0U };
memcpy(plain, text, len * sizeof(text[0U]));
memcpy(plain, text, len * sizeof(uint8_t));
/* Encrypt the padded block using the counter offset `incr`. */
chacha20_encrypt_block(ctx, plain, incr, plain);
memcpy(out, plain, len * sizeof(plain[0U]));
memcpy(out, plain, len * sizeof(uint8_t));
}
static inline void
......
......@@ -21,14 +21,18 @@
* SOFTWARE.
*/
#ifndef __Hacl_Chacha20_H
#define __Hacl_Chacha20_H
#if defined(__cplusplus)
extern "C" {
#endif
#include "kremlin/internal/types.h"
#include "kremlin/lowstar_endianness.h"
#include <string.h>
#include <stdbool.h>
#ifndef __Hacl_Chacha20_H
#define __Hacl_Chacha20_H
#include "Hacl_Kremlib.h"
extern const uint32_t Hacl_Impl_Chacha20_Vec_chacha20_constants[4U];
......@@ -51,5 +55,9 @@ Hacl_Chacha20_chacha20_decrypt(
uint8_t *n,
uint32_t ctr);
#if defined(__cplusplus)
}
#endif
#define __Hacl_Chacha20_H_DEFINED
#endif
......@@ -47,9 +47,9 @@ poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t
Lib_IntVector_Intrinsics_vec128 e[5U];
for (uint32_t _i = 0U; _i < (uint32_t)5U; ++_i)
e[_i] = Lib_IntVector_Intrinsics_vec128_zero;
Lib_IntVector_Intrinsics_vec128 b1 = Lib_IntVector_Intrinsics_vec128_load_le(block);
Lib_IntVector_Intrinsics_vec128 b1 = Lib_IntVector_Intrinsics_vec128_load64_le(block);
Lib_IntVector_Intrinsics_vec128
b2 = Lib_IntVector_Intrinsics_vec128_load_le(block + (uint32_t)16U);
b2 = Lib_IntVector_Intrinsics_vec128_load64_le(block + (uint32_t)16U);
Lib_IntVector_Intrinsics_vec128 lo = Lib_IntVector_Intrinsics_vec128_interleave_low64(b1, b2);
Lib_IntVector_Intrinsics_vec128
hi = Lib_IntVector_Intrinsics_vec128_interleave_high64(b1, b2);
......@@ -480,7 +480,7 @@ poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t
for (uint32_t _i = 0U; _i < (uint32_t)5U; ++_i)
e[_i] = Lib_IntVector_Intrinsics_vec128_zero;
uint8_t tmp[16U] = { 0U };
memcpy(tmp, last, rem1 * sizeof(last[0U]));
memcpy(tmp, last, rem1 * sizeof(uint8_t));
uint64_t u0 = load64_le(tmp);
uint64_t lo = u0;
uint64_t u = load64_le(tmp + (uint32_t)8U);
......@@ -685,7 +685,7 @@ poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t
acc0[4U] = o4;
}
uint8_t tmp[16U] = { 0U };
memcpy(tmp, rem, r * sizeof(rem[0U]));
memcpy(tmp, rem, r * sizeof(uint8_t));
if (r > (uint32_t)0U) {
Lib_IntVector_Intrinsics_vec128 *pre = ctx + (uint32_t)5U;
Lib_IntVector_Intrinsics_vec128 *acc = ctx;
......@@ -912,7 +912,9 @@ poly1305_do_128(
ctx[_i] = Lib_IntVector_Intrinsics_vec128_zero;
uint8_t block[16U] = { 0U };
Hacl_Poly1305_128_poly1305_init(ctx, k);
poly1305_padded_128(ctx, aadlen, aad);
if (aadlen != (uint32_t)0U) {
poly1305_padded_128(ctx, aadlen, aad);
}
poly1305_padded_128(ctx, mlen, m);
store64_le(block, (uint64_t)aadlen);
store64_le(block + (uint32_t)8U, (uint64_t)mlen);
......
......@@ -21,15 +21,19 @@
* SOFTWARE.
*/
#ifndef __Hacl_Chacha20Poly1305_128_H
#define __Hacl_Chacha20Poly1305_128_H
#if defined(__cplusplus)
extern "C" {
#endif
#include "libintvector.h"
#include "kremlin/internal/types.h"
#include "kremlin/lowstar_endianness.h"
#include <string.h>
#include <stdbool.h>
#ifndef __Hacl_Chacha20Poly1305_128_H
#define __Hacl_Chacha20Poly1305_128_H
#include "Hacl_Kremlib.h"
#include "Hacl_Chacha20_Vec128.h"
#include "Hacl_Poly1305_128.h"
......@@ -56,5 +60,9 @@ Hacl_Chacha20Poly1305_128_aead_decrypt(
uint8_t *cipher,
uint8_t *mac);
#if defined(__cplusplus)
}
#endif
#define __Hacl_Chacha20Poly1305_128_H_DEFINED
#endif
......@@ -47,9 +47,9 @@ poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t
Lib_IntVector_Intrinsics_vec256 e[5U];
for (uint32_t _i = 0U; _i < (uint32_t)5U; ++_i)
e[_i] = Lib_IntVector_Intrinsics_vec256_zero;
Lib_IntVector_Intrinsics_vec256 lo = Lib_IntVector_Intrinsics_vec256_load_le(block);
Lib_IntVector_Intrinsics_vec256 lo = Lib_IntVector_Intrinsics_vec256_load64_le(block);
Lib_IntVector_Intrinsics_vec256
hi = Lib_IntVector_Intrinsics_vec256_load_le(block + (uint32_t)32U);
hi = Lib_IntVector_Intrinsics_vec256_load64_le(block + (uint32_t)32U);
Lib_IntVector_Intrinsics_vec256
mask260 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
Lib_IntVector_Intrinsics_vec256
......@@ -482,7 +482,7 @@ poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t
for (uint32_t _i = 0U; _i < (uint32_t)5U; ++_i)
e[_i] = Lib_IntVector_Intrinsics_vec256_zero;
uint8_t tmp[16U] = { 0U };
memcpy(tmp, last, rem1 * sizeof(last[0U]));
memcpy(tmp, last, rem1 * sizeof(uint8_t));
uint64_t u0 = load64_le(tmp);
uint64_t lo = u0;
uint64_t u = load64_le(tmp + (uint32_t)8U);
......@@ -687,7 +687,7 @@ poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t
acc0[4U] = o4;
}
uint8_t tmp[16U] = { 0U };
memcpy(tmp, rem, r * sizeof(rem[0U]));
memcpy(tmp, rem, r * sizeof(uint8_t));
if (r > (uint32_t)0U) {
Lib_IntVector_Intrinsics_vec256 *pre = ctx + (uint32_t)5U;
Lib_IntVector_Intrinsics_vec256 *acc = ctx;
......@@ -914,7 +914,9 @@ poly1305_do_256(
ctx[_i] = Lib_IntVector_Intrinsics_vec256_zero;
uint8_t block[16U] = { 0U };
Hacl_Poly1305_256_poly1305_init(ctx, k);
poly1305_padded_256(ctx, aadlen, aad);
if (aadlen != (uint32_t)0U) {
poly1305_padded_256(ctx, aadlen, aad);
}
poly1305_padded_256(ctx, mlen, m);
store64_le(block, (uint64_t)aadlen);
store64_le(block + (uint32_t)8U, (uint64_t)mlen);
......
......@@ -21,15 +21,19 @@
* SOFTWARE.
*/
#ifndef __Hacl_Chacha20Poly1305_256_H
#define __Hacl_Chacha20Poly1305_256_H
#if defined(__cplusplus)
extern "C" {
#endif
#include "libintvector.h"
#include "kremlin/internal/types.h"
#include "kremlin/lowstar_endianness.h"
#include <string.h>
#include <stdbool.h>
#ifndef __Hacl_Chacha20Poly1305_256_H
#define __Hacl_Chacha20Poly1305_256_H
#include "Hacl_Kremlib.h"
#include "Hacl_Chacha20_Vec256.h"
#include "Hacl_Poly1305_256.h"
......@@ -56,5 +60,9 @@ Hacl_Chacha20Poly1305_256_aead_decrypt(
uint8_t *cipher,
uint8_t *mac);
#if defined(__cplusplus)
}
#endif
#define __Hacl_Chacha20Poly1305_256_H_DEFINED
#endif
......@@ -157,7 +157,7 @@ poly1305_padded_32(uint64_t *ctx, uint32_t len, uint8_t *text)
uint8_t *last = blocks + nb * (uint32_t)16U;
uint64_t e[5U] = { 0U };
uint8_t tmp[16U] = { 0U };
memcpy(tmp, last, rem1 * sizeof(last[0U]));
memcpy(tmp, last, rem1 * sizeof(uint8_t));
uint64_t u0 = load64_le(tmp);
uint64_t lo = u0;
uint64_t u = load64_le(tmp + (uint32_t)8U);
......@@ -275,7 +275,7 @@ poly1305_padded_32(uint64_t *ctx, uint32_t len, uint8_t *text)
acc0[4U] = o4;
}
uint8_t tmp[16U] = { 0U };
memcpy(tmp, rem, r * sizeof(rem[0U]));
memcpy(tmp, rem, r * sizeof(uint8_t));
if (r > (uint32_t)0U) {
uint64_t *pre = ctx + (uint32_t)5U;
uint64_t *acc = ctx;
......@@ -411,7 +411,9 @@ poly1305_do_32(
uint64_t ctx[25U] = { 0U };
uint8_t block[16U] = { 0U };
Hacl_Poly1305_32_poly1305_init(ctx, k);
poly1305_padded_32(ctx, aadlen, aad);
if (aadlen != (uint32_t)0U) {
poly1305_padded_32(ctx, aadlen, aad);
}
poly1305_padded_32(ctx, mlen, m);
store64_le(block, (uint64_t)aadlen);
store64_le(block + (uint32_t)8U, (uint64_t)mlen);
......
......@@ -21,14 +21,18 @@
* SOFTWARE.
*/
#ifndef __Hacl_Chacha20Poly1305_32_H
#define __Hacl_Chacha20Poly1305_32_H
#if defined(__cplusplus)
extern "C" {
#endif
#include "kremlin/internal/types.h"
#include "kremlin/lowstar_endianness.h"
#include <string.h>
#include <stdbool.h>
#ifndef __Hacl_Chacha20Poly1305_32_H
#define __Hacl_Chacha20Poly1305_32_H
#include "Hacl_Chacha20.h"
#include "Hacl_Kremlib.h"
#include "Hacl_Poly1305_32.h"
......@@ -55,5 +59,9 @@ Hacl_Chacha20Poly1305_32_aead_decrypt(
uint8_t *cipher,
uint8_t *mac);
#if defined(__cplusplus)
}
#endif
#define __Hacl_Chacha20Poly1305_32_H_DEFINED
#endif
......@@ -130,7 +130,7 @@ chacha20_core_128(
Lib_IntVector_Intrinsics_vec128 *ctx,
uint32_t ctr)
{
memcpy(k, ctx, (uint32_t)16U * sizeof(ctx[0U]));
memcpy(k, ctx, (uint32_t)16U * sizeof(Lib_IntVector_Intrinsics_vec128));
uint32_t ctr_u32 = (uint32_t)4U * ctr;
Lib_IntVector_Intrinsics_vec128 cv = Lib_IntVector_Intrinsics_vec128_load32(ctr_u32);
k[12U] = Lib_IntVector_Intrinsics_vec128_add32(k[12U], cv);
......@@ -334,16 +334,16 @@ Hacl_Chacha20_Vec128_chacha20_encrypt_128(
k[15U] = v15;
for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)16U; i0++) {
Lib_IntVector_Intrinsics_vec128
x = Lib_IntVector_Intrinsics_vec128_load_le(uu____1 + i0 * (uint32_t)16U);
x = Lib_IntVector_Intrinsics_vec128_load32_le(uu____1 + i0 * (uint32_t)16U);
Lib_IntVector_Intrinsics_vec128 y = Lib_IntVector_Intrinsics_vec128_xor(x, k[i0]);
Lib_IntVector_Intrinsics_vec128_store_le(uu____0 + i0 * (uint32_t)16U, y);
Lib_IntVector_Intrinsics_vec128_store32_le(uu____0 + i0 * (uint32_t)16U, y);
}
}
if (rem1 > (uint32_t)0U) {
uint8_t *uu____2 = out + nb * (uint32_t)256U;
uint8_t *uu____3 = text + nb * (uint32_t)256U;
uint8_t plain[256U] = { 0U };
memcpy(plain, uu____3, rem * sizeof(uu____3[0U]));
memcpy(plain, uu____3, rem * sizeof(uint8_t));
Lib_IntVector_Intrinsics_vec128 k[16U];
for (uint32_t _i = 0U; _i < (uint32_t)16U; ++_i)
k[_i] = Lib_IntVector_Intrinsics_vec128_zero;
......@@ -462,11 +462,11 @@ Hacl_Chacha20_Vec128_chacha20_encrypt_128(
k[15U] = v15;
for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i++) {
Lib_IntVector_Intrinsics_vec128
x = Lib_IntVector_Intrinsics_vec128_load_le(plain + i * (uint32_t)16U);
x = Lib_IntVector_Intrinsics_vec128_load32_le(plain + i * (uint32_t)16U);
Lib_IntVector_Intrinsics_vec128 y = Lib_IntVector_Intrinsics_vec128_xor(x, k[i]);
Lib_IntVector_Intrinsics_vec128_store_le(plain + i * (uint32_t)16U, y);
Lib_IntVector_Intrinsics_vec128_store32_le(plain + i * (uint32_t)16U, y);
}
memcpy(uu____2, plain, rem * sizeof(plain[0U]));
memcpy(uu____2, plain, rem * sizeof(uint8_t));
}
}
......@@ -607,16 +607,16 @@ Hacl_Chacha20_Vec128_chacha20_decrypt_128(
k[15U] = v15;
for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)16U; i0++) {
Lib_IntVector_Intrinsics_vec128
x = Lib_IntVector_Intrinsics_vec128_load_le(uu____1 + i0 * (uint32_t)16U);
x = Lib_IntVector_Intrinsics_vec128_load32_le(uu____1 + i0 * (uint32_t)16U);
Lib_IntVector_Intrinsics_vec128 y = Lib_IntVector_Intrinsics_vec128_xor(x, k[i0]);
Lib_IntVector_Intrinsics_vec128_store_le(uu____0 + i0 * (uint32_t)16U, y);
Lib_IntVector_Intrinsics_vec128_store32_le(uu____0 + i0 * (uint32_t)16U, y);
}
}
if (rem1 > (uint32_t)0U) {
uint8_t *uu____2 = out + nb * (uint32_t)256U;
uint8_t *uu____3 = cipher + nb * (uint32_t)256U;
uint8_t plain[256U] = { 0U };
memcpy(plain, uu____3, rem * sizeof(uu____3[0U]));
memcpy(plain, uu____3, rem * sizeof(uint8_t));
Lib_IntVector_Intrinsics_vec128 k[16U];
for (uint32_t _i = 0U; _i < (uint32_t)16U; ++_i)
k[_i] = Lib_IntVector_Intrinsics_vec128_zero;
......@@ -735,10 +735,10 @@ Hacl_Chacha20_Vec128_chacha20_decrypt_128(
k[15U] = v15;
for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i++) {
Lib_IntVector_Intrinsics_vec128
x = Lib_IntVector_Intrinsics_vec128_load_le(plain + i * (uint32_t)16U);
x = Lib_IntVector_Intrinsics_vec128_load32_le(plain + i * (uint32_t)16U);
Lib_IntVector_Intrinsics_vec128 y = Lib_IntVector_Intrinsics_vec128_xor(x, k[i]);
Lib_IntVector_Intrinsics_vec128_store_le(plain + i * (uint32_t)16U, y);
Lib_IntVector_Intrinsics_vec128_store32_le(plain + i * (uint32_t)16U, y);
}
memcpy(uu____2, plain, rem * sizeof(plain[0U]));
memcpy(uu____2, plain, rem * sizeof(uint8_t));
}
}
......@@ -21,15 +21,19 @@
* SOFTWARE.
*/
#ifndef __Hacl_Chacha20_Vec128_H
#define __Hacl_Chacha20_Vec128_H
#if defined(__cplusplus)
extern "C" {
#endif
#include "libintvector.h"
#include "kremlin/internal/types.h"
#include "kremlin/lowstar_endianness.h"
#include <string.h>
#include <stdbool.h>
#ifndef __Hacl_Chacha20_Vec128_H
#define __Hacl_Chacha20_Vec128_H
#include "Hacl_Chacha20.h"
#include "Hacl_Kremlib.h"
......@@ -51,5 +55,9 @@ Hacl_Chacha20_Vec128_chacha20_decrypt_128(
uint8_t *n,
uint32_t ctr);
#if defined(__cplusplus)
}
#endif
#define __Hacl_Chacha20_Vec128_H_DEFINED
#endif
......@@ -130,7 +130,7 @@ chacha20_core_256(
Lib_IntVector_Intrinsics_vec256 *ctx,
uint32_t ctr)
{
memcpy(k, ctx, (uint32_t)16U * sizeof(ctx[0U]));
memcpy(k, ctx, (uint32_t)16U * sizeof(Lib_IntVector_Intrinsics_vec256));
uint32_t ctr_u32 = (uint32_t)8U * ctr;
Lib_IntVector_Intrinsics_vec256 cv = Lib_IntVector_Intrinsics_vec256_load32(ctr_u32);
k[12U] = Lib_IntVector_Intrinsics_vec256_add32(k[12U], cv);
......@@ -370,16 +370,16 @@ Hacl_Chacha20_Vec256_chacha20_encrypt_256(
k[15U] = v15;
for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)16U; i0++) {
Lib_IntVector_Intrinsics_vec256
x = Lib_IntVector_Intrinsics_vec256_load_le(uu____1 + i0 * (uint32_t)32U);
x = Lib_IntVector_Intrinsics_vec256_load32_le(uu____1 + i0 * (uint32_t)32U);
Lib_IntVector_Intrinsics_vec256 y = Lib_IntVector_Intrinsics_vec256_xor(x, k[i0]);
Lib_IntVector_Intrinsics_vec256_store_le(uu____0 + i0 * (uint32_t)32U, y);
Lib_IntVector_Intrinsics_vec256_store32_le(uu____0 + i0 * (uint32_t)32U, y);
}
}
if (rem1 > (uint32_t)0U) {
uint8_t *uu____2 = out + nb * (uint32_t)512U;
uint8_t *uu____3 = text + nb * (uint32_t)512U;
uint8_t plain[512U] = { 0U };
memcpy(plain, uu____3, rem * sizeof(uu____3[0U]));
memcpy(plain, uu____3, rem * sizeof(uint8_t));
Lib_IntVector_Intrinsics_vec256 k[16U];
for (uint32_t _i = 0U; _i < (uint32_t)16U; ++_i)
k[_i] = Lib_IntVector_Intrinsics_vec256_zero;
......@@ -530,11 +530,11 @@ Hacl_Chacha20_Vec256_chacha20_encrypt_256(
k[15U] = v15;
for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i++) {
Lib_IntVector_Intrinsics_vec256
x = Lib_IntVector_Intrinsics_vec256_load_le(plain + i * (uint32_t)32U);
x = Lib_IntVector_Intrinsics_vec256_load32_le(plain + i * (uint32_t)32U);
Lib_IntVector_Intrinsics_vec256 y = Lib_IntVector_Intrinsics_vec256_xor(x, k[i]);
Lib_IntVector_Intrinsics_vec256_store_le(plain + i * (uint32_t)32U, y);
Lib_IntVector_Intrinsics_vec256_store32_le(plain + i * (uint32_t)32U, y);
}
memcpy(uu____2, plain, rem * sizeof(plain[0U]));
memcpy(uu____2, plain, rem * sizeof(uint8_t));
}
}
......@@ -707,16 +707,16 @@ Hacl_Chacha20_Vec256_chacha20_decrypt_256(
k[15U] = v15;
for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)16U; i0++) {
Lib_IntVector_Intrinsics_vec256
x = Lib_IntVector_Intrinsics_vec256_load_le(uu____1 + i0 * (uint32_t)32U);
x = Lib_IntVector_Intrinsics_vec256_load32_le(uu____1 + i0 * (uint32_t)32U);
Lib_IntVector_Intrinsics_vec256 y = Lib_IntVector_Intrinsics_vec256_xor(x, k[i0]);
Lib_IntVector_Intrinsics_vec256_store_le(uu____0 + i0 * (uint32_t)32U, y);
Lib_IntVector_Intrinsics_vec256_store32_le(uu____0 + i0 * (uint32_t)32U, y);
}
}
if (rem1 > (uint32_t)0U) {
uint8_t *uu____2 = out + nb * (uint32_t)512U;
uint8_t *uu____3 = cipher + nb * (uint32_t)512U;
uint8_t plain[512U] = { 0U };
memcpy(plain, uu____3, rem * sizeof(uu____3[0U]));
memcpy(plain, uu____3, rem * sizeof(uint8_t));
Lib_IntVector_Intrinsics_vec256 k[16U];
for (uint32_t _i = 0U; _i < (uint32_t)16U; ++_i)
k[_i] = Lib_IntVector_Intrinsics_vec256_zero;
......@@ -867,10 +867,10 @@ Hacl_Chacha20_Vec256_chacha20_decrypt_256(
k[15U] = v15;
for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i++) {
Lib_IntVector_Intrinsics_vec256
x = Lib_IntVector_Intrinsics_vec256_load_le(plain + i * (uint32_t)32U);
x = Lib_IntVector_Intrinsics_vec256_load32_le(plain + i * (uint32_t)32U);
Lib_IntVector_Intrinsics_vec256 y = Lib_IntVector_Intrinsics_vec256_xor(x, k[i]);
Lib_IntVector_Intrinsics_vec256_store_le(plain + i * (uint32_t)32U, y);
Lib_IntVector_Intrinsics_vec256_store32_le(plain + i * (uint32_t)32U, y);
}
memcpy(uu____2, plain, rem * sizeof(plain[0U]));
memcpy(uu____2, plain, rem * sizeof(uint8_t));
}
}
......@@ -21,15 +21,19 @@
* SOFTWARE.
*/
#ifndef __Hacl_Chacha20_Vec256_H
#define __Hacl_Chacha20_Vec256_H
#if defined(__cplusplus)
extern "C" {
#endif
#include "libintvector.h"
#include "kremlin/internal/types.h"
#include "kremlin/lowstar_endianness.h"
#include <string.h>
#include <stdbool.h>
#ifndef __Hacl_Chacha20_Vec256_H
#define __Hacl_Chacha20_Vec256_H
#include "Hacl_Chacha20.h"
#include "Hacl_Kremlib.h"
......@@ -51,5 +55,9 @@ Hacl_Chacha20_Vec256_chacha20_decrypt_256(
uint8_t *n,
uint32_t ctr);
#if defined(__cplusplus)
}
#endif
#define __Hacl_Chacha20_Vec256_H_DEFINED
#endif
This diff is collapsed.
......@@ -21,15 +21,20 @@
* SOFTWARE.
*/
#ifndef __Hacl_Curve25519_51_H
#define __Hacl_Curve25519_51_H
#if defined(__cplusplus)
extern "C" {
#endif
#include "kremlin/internal/types.h"
#include "kremlin/lowstar_endianness.h"
#include <string.h>
#include <stdbool.h>
#ifndef __Hacl_Curve25519_51_H
#define __Hacl_Curve25519_51_H
#include "Hacl_Kremlib.h"
#include "Hacl_Bignum25519_51.h"
void Hacl_Curve25519_51_scalarmult(uint8_t *out, uint8_t *priv, uint8_t *pub);
......@@ -37,5 +42,9 @@ void Hacl_Curve25519_51_secret_to_public(uint8_t *pub, uint8_t *priv);
bool Hacl_Curve25519_51_ecdh(uint8_t *out, uint8_t *priv, uint8_t *pub);
#if defined(__cplusplus)
}
#endif
#define __Hacl_Curve25519_51_H_DEFINED
#endif
......@@ -21,13 +21,21 @@
* SOFTWARE.
*/
#ifndef __Hacl_Kremlib_H
#define __Hacl_Kremlib_H
#if defined(__cplusplus)
extern "C" {
#endif
#include "kremlin/internal/types.h"
#include "kremlin/lowstar_endianness.h"
#include <string.h>
#include <stdbool.h>
#ifndef __Hacl_Kremlib_H
#define __Hacl_Kremlib_H
static inline uint32_t FStar_UInt32_eq_mask(uint32_t a, uint32_t b);
static inline uint32_t FStar_UInt32_gte_mask(uint32_t a, uint32_t b);
static inline uint8_t FStar_UInt8_eq_mask(uint8_t a, uint8_t b);
......@@ -47,5 +55,9 @@ static inline uint64_t FStar_UInt128_uint128_to_uint64(FStar_UInt128_uint128 a);
static inline FStar_UInt128_uint128 FStar_UInt128_mul_wide(uint64_t x, uint64_t y);
#if defined(__cplusplus)
}
#endif
#define __Hacl_Kremlib_H_DEFINED
#endif
......@@ -29,9 +29,9 @@ Hacl_Impl_Poly1305_Field32xN_128_load_acc2(Lib_IntVector_Intrinsics_vec128 *acc,
Lib_IntVector_Intrinsics_vec128 e[5U];
for (uint32_t _i = 0U; _i < (uint32_t)5U; ++_i)
e[_i] = Lib_IntVector_Intrinsics_vec128_zero;
Lib_IntVector_Intrinsics_vec128 b1 = Lib_IntVector_Intrinsics_vec128_load_le(b);
Lib_IntVector_Intrinsics_vec128 b1 = Lib_IntVector_Intrinsics_vec128_load64_le(b);
Lib_IntVector_Intrinsics_vec128
b2 = Lib_IntVector_Intrinsics_vec128_load_le(b + (uint32_t)16U);
b2 = Lib_IntVector_Intrinsics_vec128_load64_le(b + (uint32_t)16U);
Lib_IntVector_Intrinsics_vec128 lo = Lib_IntVector_Intrinsics_vec128_interleave_low64(b1, b2);
Lib_IntVector_Intrinsics_vec128 hi = Lib_IntVector_Intrinsics_vec128_interleave_high64(b1, b2);
Lib_IntVector_Intrinsics_vec128
......@@ -803,9 +803,9 @@ Hacl_Poly1305_128_poly1305_update(
Lib_IntVector_Intrinsics_vec128 e[5U];
for (uint32_t _i = 0U; _i < (uint32_t)5U; ++_i)
e[_i] = Lib_IntVector_Intrinsics_vec128_zero;
Lib_IntVector_Intrinsics_vec128 b1 = Lib_IntVector_Intrinsics_vec128_load_le(block);
Lib_IntVector_Intrinsics_vec128 b1 = Lib_IntVector_Intrinsics_vec128_load64_le(block);
Lib_IntVector_Intrinsics_vec128
b2 = Lib_IntVector_Intrinsics_vec128_load_le(block + (uint32_t)16U);
b2 = Lib_IntVector_Intrinsics_vec128_load64_le(block + (uint32_t)16U);
Lib_IntVector_Intrinsics_vec128 lo = Lib_IntVector_Intrinsics_vec128_interleave_low64(b1, b2);
Lib_IntVector_Intrinsics_vec128
hi = Lib_IntVector_Intrinsics_vec128_interleave_high64(b1, b2);
......@@ -1236,7 +1236,7 @@ Hacl_Poly1305_128_poly1305_update(
for (uint32_t _i = 0U; _i < (uint32_t)5U; ++_i)
e[_i] = Lib_IntVector_Intrinsics_vec128_zero;
uint8_t tmp[16U] = { 0U };
memcpy(tmp, last, rem * sizeof(last[0U]));
memcpy(tmp, last, rem * sizeof(uint8_t));
uint64_t u0 = load64_le(tmp);
uint64_t lo = u0;
uint64_t u = load64_le(tmp + (uint32_t)8U);
......
......@@ -21,15 +21,19 @@
* SOFTWARE.
*/
#ifndef __Hacl_Poly1305_128_H
#define __Hacl_Poly1305_128_H
#if defined(__cplusplus)
extern "C" {
#endif
#include "libintvector.h"
#include "kremlin/internal/types.h"
#include "kremlin/lowstar_endianness.h"
#include <string.h>
#include <stdbool.h>
#ifndef __Hacl_Poly1305_128_H
#define __Hacl_Poly1305_128_H
#include "Hacl_Kremlib.h"
void
......@@ -62,5 +66,9 @@ Hacl_Poly1305_128_poly1305_finish(
void Hacl_Poly1305_128_poly1305_mac(uint8_t *tag, uint32_t len, uint8_t *text, uint8_t *key);
#if defined(__cplusplus)
}
#endif
#define __Hacl_Poly1305_128_H_DEFINED
#endif
......@@ -29,9 +29,9 @@ Hacl_Impl_Poly1305_Field32xN_256_load_acc4(Lib_IntVector_Intrinsics_vec256 *acc,
Lib_IntVector_Intrinsics_vec256 e[5U];
for (uint32_t _i = 0U; _i < (uint32_t)5U; ++_i)
e[_i] = Lib_IntVector_Intrinsics_vec256_zero;
Lib_IntVector_Intrinsics_vec256 lo = Lib_IntVector_Intrinsics_vec256_load_le(b);
Lib_IntVector_Intrinsics_vec256 lo = Lib_IntVector_Intrinsics_vec256_load64_le(b);
Lib_IntVector_Intrinsics_vec256
hi = Lib_IntVector_Intrinsics_vec256_load_le(b + (uint32_t)32U);
hi = Lib_IntVector_Intrinsics_vec256_load64_le(b + (uint32_t)32U);
Lib_IntVector_Intrinsics_vec256
mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
Lib_IntVector_Intrinsics_vec256 m0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(lo, hi);
......@@ -1272,9 +1272,9 @@ Hacl_Poly1305_256_poly1305_update(
Lib_IntVector_Intrinsics_vec256 e[5U];
for (uint32_t _i = 0U; _i < (uint32_t)5U; ++_i)
e[_i] = Lib_IntVector_Intrinsics_vec256_zero;
Lib_IntVector_Intrinsics_vec256 lo = Lib_IntVector_Intrinsics_vec256_load_le(block);
Lib_IntVector_Intrinsics_vec256 lo = Lib_IntVector_Intrinsics_vec256_load64_le(block);
Lib_IntVector_Intrinsics_vec256
hi = Lib_IntVector_Intrinsics_vec256_load_le(block + (uint32_t)32U);
hi = Lib_IntVector_Intrinsics_vec256_load64_le(block + (uint32_t)32U);
Lib_IntVector_Intrinsics_vec256
mask260 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
Lib_IntVector_Intrinsics_vec256
......@@ -1707,7 +1707,7 @@ Hacl_Poly1305_256_poly1305_update(
for (uint32_t _i = 0U; _i < (uint32_t)5U; ++_i)
e[_i] = Lib_IntVector_Intrinsics_vec256_zero;
uint8_t tmp[16U] = { 0U };
memcpy(tmp, last, rem * sizeof(last[0U]));
memcpy(tmp, last, rem * sizeof(uint8_t));
uint64_t u0 = load64_le(tmp);
uint64_t lo = u0;
uint64_t u = load64_le(tmp + (uint32_t)8U);
......
......@@ -21,15 +21,19 @@
* SOFTWARE.
*/
#ifndef __Hacl_Poly1305_256_H
#define __Hacl_Poly1305_256_H
#if defined(__cplusplus)
extern "C" {
#endif
#include "libintvector.h"
#include "kremlin/internal/types.h"
#include "kremlin/lowstar_endianness.h"
#include <string.h>
#include <stdbool.h>
#ifndef __Hacl_Poly1305_256_H
#define __Hacl_Poly1305_256_H
#include "Hacl_Kremlib.h"
void
......@@ -62,5 +66,9 @@ Hacl_Poly1305_256_poly1305_finish(
void Hacl_Poly1305_256_poly1305_mac(uint8_t *tag, uint32_t len, uint8_t *text, uint8_t *key);
#if defined(__cplusplus)
}
#endif
#define __Hacl_Poly1305_256_H_DEFINED
#endif
......@@ -340,7 +340,7 @@ Hacl_Poly1305_32_poly1305_update(uint64_t *ctx, uint32_t len, uint8_t *text)
uint8_t *last = text + nb * (uint32_t)16U;
uint64_t e[5U] = { 0U };
uint8_t tmp[16U] = { 0U };
memcpy(tmp, last, rem * sizeof(last[0U]));
memcpy(tmp, last, rem * sizeof(uint8_t));
uint64_t u0 = load64_le(tmp);
uint64_t lo = u0;
uint64_t u = load64_le(tmp + (uint32_t)8U);
......
......@@ -21,14 +21,18 @@
* SOFTWARE.
*/