Lines matching defs:base_crng in drivers/char/random.c
76 * crng_init is protected by base_crng->lock, and only increases
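For context, the crng_init this comment refers to is a three-state enum in the same file, which only ever moves forward. A minimal sketch, assuming a recent drivers/char/random.c (exact comments and placement vary by version):

    static enum {
            CRNG_EMPTY = 0, /* Little to no entropy collected */
            CRNG_EARLY = 1, /* At least POOL_EARLY_BITS collected */
            CRNG_READY = 2  /* Fully initialized with POOL_READY_BITS collected */
    } crng_init __read_mostly = CRNG_EMPTY;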
210 } base_crng = {
211 .lock = __SPIN_LOCK_UNLOCKED(base_crng.lock)
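The initializer matched above belongs to an anonymous struct holding the base CRNG key, a generation counter, and the lock that guards both. Roughly, in current trees (field order may differ by version):

    static struct {
            u8 key[CHACHA_KEY_SIZE] __aligned(__alignof__(long));
            unsigned long generation;
            spinlock_t lock;
    } base_crng = {
            .lock = __SPIN_LOCK_UNLOCKED(base_crng.lock)
    };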
263 * We copy the new key into the base_crng, overwriting the old one,
268 spin_lock_irqsave(&base_crng.lock, flags);
269 memcpy(base_crng.key, key, sizeof(base_crng.key));
270 next_gen = base_crng.generation + 1;
273 WRITE_ONCE(base_crng.generation, next_gen);
276 spin_unlock_irqrestore(&base_crng.lock, flags);
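Pieced together, the matches at lines 263-276 form the critical section of crng_reseed(). A sketch of how they fit, assuming a recent version (the ULONG_MAX skip, the crng_is_ready static branch, and the trailing memzero_explicit() are recalled from the surrounding code, not from the matches above):

    spin_lock_irqsave(&base_crng.lock, flags);
    memcpy(base_crng.key, key, sizeof(base_crng.key));
    next_gen = base_crng.generation + 1;
    if (next_gen == ULONG_MAX)      /* Skip ULONG_MAX: per-cpu crngs start there, so new CPUs always reseed. */
            ++next_gen;
    WRITE_ONCE(base_crng.generation, next_gen);
    if (!static_branch_likely(&crng_is_ready))
            crng_init = CRNG_READY;
    spin_unlock_irqrestore(&base_crng.lock, flags);
    memzero_explicit(key, sizeof(key));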
328 * ready, we do fast key erasure with the base_crng directly, extracting
334 spin_lock_irqsave(&base_crng.lock, flags);
338 extract_entropy(base_crng.key, sizeof(base_crng.key));
339 crng_fast_key_erasure(base_crng.key, chacha_state,
342 spin_unlock_irqrestore(&base_crng.lock, flags);
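Lines 328-342 are the not-yet-ready path of crng_make_state(): check crng_ready() unlocked, re-check under base_crng.lock, seed the key from the input pool if still CRNG_EMPTY, then do fast key erasure straight off base_crng. A sketch of the assembled block, assuming a recent version:

    if (!crng_ready()) {
            bool ready;

            spin_lock_irqsave(&base_crng.lock, flags);
            ready = crng_ready();   /* Re-check now that we hold the lock. */
            if (!ready) {
                    if (crng_init == CRNG_EMPTY)
                            extract_entropy(base_crng.key, sizeof(base_crng.key));
                    crng_fast_key_erasure(base_crng.key, chacha_state,
                                          random_data, random_data_len);
            }
            spin_unlock_irqrestore(&base_crng.lock, flags);
            if (!ready)
                    return;
    }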
351 * If our per-cpu crng is older than the base_crng, then it means
352 * somebody reseeded the base_crng. In that case, we do fast key
353 * erasure on the base_crng, and use its output as the new key
354 * for our per-cpu crng. This brings us up to date with base_crng.
356 if (unlikely(crng->generation != READ_ONCE(base_crng.generation))) {
357 spin_lock(&base_crng.lock);
358 crng_fast_key_erasure(base_crng.key, chacha_state,
360 crng->generation = base_crng.generation;
361 spin_unlock(&base_crng.lock);
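The crng_fast_key_erasure() call used in both places generates a single ChaCha20 block, immediately overwrites the key with the first 32 bytes of that block (so the old key cannot be recovered), and hands the remainder back as output. A sketch of the helper as it appears in recent kernels (argument types and helpers such as chacha_init_consts()/chacha20_block() are recalled from memory, so treat details as approximate):

    static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE],
                                      u32 chacha_state[CHACHA_STATE_WORDS],
                                      u8 *random_data, size_t random_data_len)
    {
            u8 first_block[CHACHA_BLOCK_SIZE];

            BUG_ON(random_data_len > 32);

            chacha_init_consts(chacha_state);
            memcpy(&chacha_state[4], key, CHACHA_KEY_SIZE);
            memset(&chacha_state[12], 0, sizeof(u32) * 4);
            chacha20_block(chacha_state, first_block);

            memcpy(key, first_block, CHACHA_KEY_SIZE);      /* New key: forward secrecy. */
            memcpy(random_data, first_block + CHACHA_KEY_SIZE, random_data_len);
            memzero_explicit(first_block, sizeof(first_block));
    }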
514 next_gen = READ_ONCE(base_crng.generation); \
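Line 514 appears to sit inside the DEFINE_BATCHED_ENTROPY() macro behind get_random_u8/u16/u32/u64(): the per-cpu batch is refilled when it runs out or when its recorded generation no longer matches base_crng.generation, i.e. after any reseed. A rough sketch of that check (macro line continuations dropped; field names recalled from memory):

    next_gen = READ_ONCE(base_crng.generation);
    if (batch->position >= ARRAY_SIZE(batch->entropy) ||
        next_gen != batch->generation) {
            _get_random_bytes(batch->entropy, sizeof(batch->entropy));
            batch->position = 0;
            batch->generation = next_gen;
    }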
720 crng_reseed(NULL); /* Sets crng_init to CRNG_READY under base_crng.lock. */
731 spin_lock_irqsave(&base_crng.lock, flags);
734 extract_entropy(base_crng.key, sizeof(base_crng.key));
737 spin_unlock_irqrestore(&base_crng.lock, flags);
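Lines 720-737 are from _credit_init_bits(): crossing the ready threshold triggers a full crng_reseed(), while crossing only the early threshold seeds base_crng.key directly, re-checking crng_init under the lock. A sketch of that tail, assuming recent threshold names (POOL_READY_BITS/POOL_EARLY_BITS) and eliding the wakeup/static-branch work:

    if (orig < POOL_READY_BITS && new >= POOL_READY_BITS) {
            crng_reseed(NULL); /* Sets crng_init to CRNG_READY under base_crng.lock. */
            /* ... then flip the crng_is_ready static branch and wake waiters ... */
    } else if (orig < POOL_EARLY_BITS && new >= POOL_EARLY_BITS) {
            spin_lock_irqsave(&base_crng.lock, flags);
            /* Re-check under the lock before the EMPTY -> EARLY transition. */
            if (crng_init == CRNG_EMPTY) {
                    extract_entropy(base_crng.key, sizeof(base_crng.key));
                    crng_init = CRNG_EARLY;
            }
            spin_unlock_irqrestore(&base_crng.lock, flags);
    }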