Diffstat (limited to 'libs/libsodium/src/crypto_pwhash/argon2/argon2-fill-block-ref.c')
-rw-r--r--  libs/libsodium/src/crypto_pwhash/argon2/argon2-fill-block-ref.c  233
1 file changed, 233 insertions, 0 deletions
diff --git a/libs/libsodium/src/crypto_pwhash/argon2/argon2-fill-block-ref.c b/libs/libsodium/src/crypto_pwhash/argon2/argon2-fill-block-ref.c
new file mode 100644
index 0000000000..75e8d8f5ea
--- /dev/null
+++ b/libs/libsodium/src/crypto_pwhash/argon2/argon2-fill-block-ref.c
@@ -0,0 +1,233 @@
+/*
+ * Argon2 source code package
+ *
+ * Written by Daniel Dinu and Dmitry Khovratovich, 2015
+ *
+ * This work is licensed under a Creative Commons CC0 1.0 License/Waiver.
+ *
+ * You should have received a copy of the CC0 Public Domain Dedication along
+ * with
+ * this software. If not, see
+ * <http://creativecommons.org/publicdomain/zero/1.0/>.
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "argon2-core.h"
+#include "argon2.h"
+#include "blamka-round-ref.h"
+#include "private/common.h"
+
+static void
+fill_block(const block *prev_block, const block *ref_block, block *next_block)
+{
+    block blockR, block_tmp;
+    unsigned i;
+
+    copy_block(&blockR, ref_block);
+    xor_block(&blockR, prev_block);
+    copy_block(&block_tmp, &blockR);
+    /* Now blockR = ref_block + prev_block and block_tmp = ref_block + prev_block
+       Apply Blake2 on columns of 64-bit words: (0,1,...,15), then
+       (16,17,..31)... finally (112,113,...127) */
+    for (i = 0; i < 8; ++i) {
+        BLAKE2_ROUND_NOMSG(
+            blockR.v[16 * i], blockR.v[16 * i + 1], blockR.v[16 * i + 2],
+            blockR.v[16 * i + 3], blockR.v[16 * i + 4], blockR.v[16 * i + 5],
+            blockR.v[16 * i + 6], blockR.v[16 * i + 7], blockR.v[16 * i + 8],
+            blockR.v[16 * i + 9], blockR.v[16 * i + 10], blockR.v[16 * i + 11],
+            blockR.v[16 * i + 12], blockR.v[16 * i + 13], blockR.v[16 * i + 14],
+            blockR.v[16 * i + 15]);
+    }
+
+    /* Apply Blake2 on rows of 64-bit words: (0,1,16,17,...112,113), then
+       (2,3,18,19,...,114,115).. finally (14,15,30,31,...,126,127) */
+    for (i = 0; i < 8; i++) {
+        BLAKE2_ROUND_NOMSG(
+            blockR.v[2 * i], blockR.v[2 * i + 1], blockR.v[2 * i + 16],
+            blockR.v[2 * i + 17], blockR.v[2 * i + 32], blockR.v[2 * i + 33],
+            blockR.v[2 * i + 48], blockR.v[2 * i + 49], blockR.v[2 * i + 64],
+            blockR.v[2 * i + 65], blockR.v[2 * i + 80], blockR.v[2 * i + 81],
+            blockR.v[2 * i + 96], blockR.v[2 * i + 97], blockR.v[2 * i + 112],
+            blockR.v[2 * i + 113]);
+    }
+
+    copy_block(next_block, &block_tmp);
+    xor_block(next_block, &blockR);
+}
+
+static void
+fill_block_with_xor(const block *prev_block, const block *ref_block,
+                    block *next_block)
+{
+    block blockR, block_tmp;
+    unsigned i;
+
+    copy_block(&blockR, ref_block);
+    xor_block(&blockR, prev_block);
+    copy_block(&block_tmp, &blockR);
+    xor_block(&block_tmp,
+              next_block); /* Saving the next block contents for XOR over */
+    /* Now blockR = ref_block + prev_block and block_tmp = ref_block + prev_block
+     * + next_block */
+    /* Apply Blake2 on columns of 64-bit words: (0,1,...,15), then
+       (16,17,..31)... finally (112,113,...127) */
+    for (i = 0; i < 8; ++i) {
+        BLAKE2_ROUND_NOMSG(
+            blockR.v[16 * i], blockR.v[16 * i + 1], blockR.v[16 * i + 2],
+            blockR.v[16 * i + 3], blockR.v[16 * i + 4], blockR.v[16 * i + 5],
+            blockR.v[16 * i + 6], blockR.v[16 * i + 7], blockR.v[16 * i + 8],
+            blockR.v[16 * i + 9], blockR.v[16 * i + 10], blockR.v[16 * i + 11],
+            blockR.v[16 * i + 12], blockR.v[16 * i + 13], blockR.v[16 * i + 14],
+            blockR.v[16 * i + 15]);
+    }
+
+    /* Apply Blake2 on rows of 64-bit words: (0,1,16,17,...112,113), then
+       (2,3,18,19,...,114,115).. finally (14,15,30,31,...,126,127) */
+    for (i = 0; i < 8; i++) {
+        BLAKE2_ROUND_NOMSG(
+            blockR.v[2 * i], blockR.v[2 * i + 1], blockR.v[2 * i + 16],
+            blockR.v[2 * i + 17], blockR.v[2 * i + 32], blockR.v[2 * i + 33],
+            blockR.v[2 * i + 48], blockR.v[2 * i + 49], blockR.v[2 * i + 64],
+            blockR.v[2 * i + 65], blockR.v[2 * i + 80], blockR.v[2 * i + 81],
+            blockR.v[2 * i + 96], blockR.v[2 * i + 97], blockR.v[2 * i + 112],
+            blockR.v[2 * i + 113]);
+    }
+
+    copy_block(next_block, &block_tmp);
+    xor_block(next_block, &blockR);
+}
+
+/*
+ * Generates pseudo-random values to reference blocks in the segment and puts
+ * them into the array
+ * @param instance Pointer to the current instance
+ * @param position Pointer to the current position
+ * @param pseudo_rands Pointer to the array of 64-bit values
+ * @pre pseudo_rands must point to @a instance->segment_length allocated values
+ */
+static void
+generate_addresses(const argon2_instance_t *instance,
+                   const argon2_position_t *position, uint64_t *pseudo_rands)
+{
+    block zero_block, input_block, address_block, tmp_block;
+    uint32_t i;
+
+    init_block_value(&zero_block, 0);
+    init_block_value(&input_block, 0);
+
+    if (instance != NULL && position != NULL) {
+        input_block.v[0] = position->pass;
+        input_block.v[1] = position->lane;
+        input_block.v[2] = position->slice;
+        input_block.v[3] = instance->memory_blocks;
+        input_block.v[4] = instance->passes;
+        input_block.v[5] = instance->type;
+
+        for (i = 0; i < instance->segment_length; ++i) {
+            if (i % ARGON2_ADDRESSES_IN_BLOCK == 0) {
+                input_block.v[6]++;
+                init_block_value(&tmp_block, 0);
+                init_block_value(&address_block, 0);
+                fill_block_with_xor(&zero_block, &input_block, &tmp_block);
+                fill_block_with_xor(&zero_block, &tmp_block, &address_block);
+            }
+
+            pseudo_rands[i] = address_block.v[i % ARGON2_ADDRESSES_IN_BLOCK];
+        }
+    }
+}
+
+void
+fill_segment_ref(const argon2_instance_t *instance, argon2_position_t position)
+{
+    block *ref_block = NULL, *curr_block = NULL;
+    /* Pseudo-random values that determine the reference block position */
+    uint64_t *pseudo_rands = NULL;
+    uint64_t pseudo_rand, ref_index, ref_lane;
+    uint32_t prev_offset, curr_offset;
+    uint32_t starting_index;
+    uint32_t i;
+    int data_independent_addressing = 1;
+
+    if (instance == NULL) {
+        return;
+    }
+
+    if (instance->type == Argon2_id &&
+        (position.pass != 0 || position.slice >= ARGON2_SYNC_POINTS / 2)) {
+        data_independent_addressing = 0;
+    }
+
+    pseudo_rands = instance->pseudo_rands;
+
+    if (data_independent_addressing) {
+        generate_addresses(instance, &position, pseudo_rands);
+    }
+
+    starting_index = 0;
+
+    if ((0 == position.pass) && (0 == position.slice)) {
+        starting_index = 2; /* we have already generated the first two blocks */
+    }
+
+    /* Offset of the current block */
+    curr_offset = position.lane * instance->lane_length +
+                  position.slice * instance->segment_length + starting_index;
+
+    if (0 == curr_offset % instance->lane_length) {
+        /* Last block in this lane */
+        prev_offset = curr_offset + instance->lane_length - 1;
+    } else {
+        /* Previous block */
+        prev_offset = curr_offset - 1;
+    }
+
+    for (i = starting_index; i < instance->segment_length;
+         ++i, ++curr_offset, ++prev_offset) {
+        /* 1.1 Rotating prev_offset if needed */
+        if (curr_offset % instance->lane_length == 1) {
+            prev_offset = curr_offset - 1;
+        }
+
+        /* 1.2 Computing the index of the reference block */
+        /* 1.2.1 Taking pseudo-random value from the previous block */
+        if (data_independent_addressing) {
+#pragma warning(push)
+#pragma warning(disable : 6385)
+            pseudo_rand = pseudo_rands[i];
+#pragma warning(pop)
+        } else {
+            pseudo_rand = instance->region->memory[prev_offset].v[0];
+        }
+
+        /* 1.2.2 Computing the lane of the reference block */
+        ref_lane = ((pseudo_rand >> 32)) % instance->lanes;
+
+        if ((position.pass == 0) && (position.slice == 0)) {
+            /* Cannot reference other lanes yet */
+            ref_lane = position.lane;
+        }
+
+        /* 1.2.3 Computing the number of possible reference blocks within the
+         * lane.
+         */
+        position.index = i;
+        ref_index = index_alpha(instance, &position, pseudo_rand & 0xFFFFFFFF,
+                                ref_lane == position.lane);
+
+        /* 2 Creating a new block */
+        ref_block = instance->region->memory +
+                    instance->lane_length * ref_lane + ref_index;
+        curr_block = instance->region->memory + curr_offset;
+        if (position.pass != 0) {
+            fill_block_with_xor(instance->region->memory + prev_offset,
+                                ref_block, curr_block);
+        } else {
+            fill_block(instance->region->memory + prev_offset, ref_block,
+                       curr_block);
+        }
+    }
+}
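For orientation, here is a minimal single-threaded driver sketch showing how fill_segment_ref() above would be invoked once per (pass, slice, lane): all lanes of a slice are filled before moving to the next slice, which is the synchronization structure Argon2 relies on. This sketch is not part of the diff; the function name fill_all_segments is a hypothetical illustration (libsodium has its own driver in argon2-core.c), and it only uses fields and constants that appear in the code above (instance->passes, instance->lanes, ARGON2_SYNC_POINTS, argon2_position_t).

#include <stdint.h>

#include "argon2-core.h" /* argon2_instance_t, argon2_position_t, fill_segment_ref() */
#include "argon2.h"      /* ARGON2_SYNC_POINTS */

/* Hypothetical driver: iterate passes, then slices (sync points), then lanes. */
static void
fill_all_segments(const argon2_instance_t *instance)
{
    argon2_position_t position;
    uint32_t pass, slice, lane;

    for (pass = 0; pass < instance->passes; ++pass) {
        for (slice = 0; slice < ARGON2_SYNC_POINTS; ++slice) {
            for (lane = 0; lane < instance->lanes; ++lane) {
                position.pass  = pass;
                position.lane  = lane;
                position.slice = (uint8_t) slice;
                position.index = 0; /* set per block inside fill_segment_ref() */
                fill_segment_ref(instance, position);
            }
        }
    }
}

Lanes within one slice have no data dependencies on each other, which is why a threaded driver may fill them concurrently, whereas slices must be processed strictly in order.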