iPXE
aes.c
Go to the documentation of this file.
1/*
2 * Copyright (C) 2015 Michael Brown <mbrown@fensystems.co.uk>.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License as
6 * published by the Free Software Foundation; either version 2 of the
7 * License, or any later version.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
17 * 02110-1301, USA.
18 *
19 * You can also choose to distribute this program under the terms of
20 * the Unmodified Binary Distribution Licence (as given in the file
21 * COPYING.UBDL), provided that you have satisfied its requirements.
22 */
23
24FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
25FILE_SECBOOT ( PERMITTED );
26
27/** @file
28 *
29 * AES algorithm
30 *
31 */
32
33#include <stdint.h>
34#include <string.h>
35#include <errno.h>
36#include <assert.h>
37#include <byteswap.h>
38#include <ipxe/rotate.h>
39#include <ipxe/crypto.h>
40#include <ipxe/ecb.h>
41#include <ipxe/cbc.h>
42#include <ipxe/gcm.h>
43#include <ipxe/aes.h>
44
/** AES strides
 *
 * These are the strides (modulo 16) used to walk through the AES
 * input state bytes in order of byte position after [Inv]ShiftRows.
 */
enum aes_stride {
	/** Input stride for ShiftRows
	 *
	 *    0 4 8 c
	 *     \ \ \
	 *    1 5 9 d
	 *     \ \ \
	 *    2 6 a e
	 *     \ \ \
	 *    3 7 b f
	 */
	AES_STRIDE_SHIFTROWS = 5,
	/** Input stride for InvShiftRows
	 *
	 *    0 4 8 c
	 *     / / /
	 *    1 5 9 d
	 *     / / /
	 *    2 6 a e
	 *     / / /
	 *    3 7 b f
	 */
	AES_STRIDE_INVSHIFTROWS = 13,
};
74
/** A single AES lookup table entry
 *
 * This represents the product (in the Galois field GF(2^8)) of an
 * eight-byte vector multiplier with a single scalar multiplicand.
 *
 * The vector multipliers used for AES will be {1,1,1,3,2,1,1,3} for
 * MixColumns and {1,9,13,11,14,9,13,11} for InvMixColumns.  This
 * allows for the result of multiplying any single column of the
 * [Inv]MixColumns matrix by a scalar value to be obtained simply by
 * extracting the relevant four-byte subset from the lookup table
 * entry.
 *
 * For example, to find the result of multiplying the second column of
 * the MixColumns matrix by the scalar value 0x80:
 *
 * MixColumns column[0]:	{   2,    1,    1,    3 }
 * MixColumns column[1]:	{   3,    2,    1,    1 }
 * MixColumns column[2]:	{   1,    3,    2,    1 }
 * MixColumns column[3]:	{   1,    1,    3,    2 }
 * Vector multiplier:		{   1,    1,    1,    3,    2,    1,    1,    3 }
 * Scalar multiplicand:		0x80
 * Lookup table entry:		{ 0x80, 0x80, 0x80, 0x9b, 0x1b, 0x80, 0x80, 0x9b }
 *
 * The second column of the MixColumns matrix is {3,2,1,1}.  The
 * product of this column with the scalar value 0x80 can be obtained
 * by extracting the relevant four-byte subset of the lookup table
 * entry:
 *
 * MixColumns column[1]:	{         3,    2,    1,    1             }
 * Vector multiplier:		{   1,    1,    1,    3,    2,    1,    1,    3 }
 * Lookup table entry:		{ 0x80, 0x80, 0x80, 0x9b, 0x1b, 0x80, 0x80, 0x9b }
 * Product:			{       0x9b, 0x1b, 0x80, 0x80            }
 *
 * The column lookups require only seven bytes of the eight-byte
 * entry: the remaining (first) byte is used to hold the scalar
 * multiplicand itself (i.e. the first byte of the vector multiplier
 * is always chosen to be 1).
 */
union aes_table_entry {
	/** Viewed as an array of bytes */
	uint8_t byte[8];
} __attribute__ (( packed ));
117
118/** An AES lookup table
119 *
120 * This represents the products (in the Galois field GF(2^8)) of a
121 * constant eight-byte vector multiplier with all possible 256 scalar
122 * multiplicands.
123 *
124 * The entries are indexed by the AES [Inv]SubBytes S-box output
125 * values (denoted S(N)). This allows for the result of multiplying
126 * any single column of the [Inv]MixColumns matrix by S(N) to be
127 * obtained simply by extracting the relevant four-byte subset from
128 * the Nth table entry. For example:
129 *
130 * Input byte (N): 0x3a
131 * SubBytes output S(N): 0x80
132 * MixColumns column[1]: { 3, 2, 1, 1 }
133 * Vector multiplier: { 1, 1, 1, 3, 2, 1, 1, 3 }
134 * Table entry[0x3a]: { 0x80, 0x80, 0x80, 0x9b, 0x1b, 0x80, 0x80, 0x9b }
135 * Product: { 0x9b, 0x1b, 0x80, 0x80 }
136 *
137 * Since the first byte of the eight-byte vector multiplier is always
138 * chosen to be 1, the value of S(N) may be lookup up by extracting
139 * the first byte of the Nth table entry.
140 */
141struct aes_table {
142 /** Table entries, indexed by S(N) */
144} __attribute__ (( aligned ( 8 ) ));
145
146/** AES MixColumns lookup table */
148
149/** AES InvMixColumns lookup table */
151
152/**
153 * Multiply [Inv]MixColumns matrix column by scalar multiplicand
154 *
155 * @v entry AES lookup table entry for scalar multiplicand
156 * @v column [Inv]MixColumns matrix column index
157 * @ret product Product of matrix column with scalar multiplicand
158 */
159static inline __attribute__ (( always_inline )) uint32_t
160aes_entry_column ( const union aes_table_entry *entry, unsigned int column ) {
161 const union {
163 uint32_t column;
164 } __attribute__ (( may_alias )) *product;
165
166 /* Locate relevant four-byte subset */
167 product = container_of ( &entry->byte[ 4 - column ],
168 typeof ( *product ), byte );
169
170 /* Extract this four-byte subset */
171 return product->column;
172}
173
/**
 * Multiply [Inv]MixColumns matrix column by S-boxed input byte
 *
 * @v table		AES lookup table
 * @v stride		AES row shift stride
 * @v in		AES input state
 * @v offset		Output byte offset (after [Inv]ShiftRows)
 * @ret product		Product of matrix column with S(input byte)
 *
 * Note that the specified offset is not the offset of the input byte;
 * it is the offset of the output byte which corresponds to the input
 * byte.  This output byte offset is used both to calculate the input
 * byte offset and to select the appropriate matrix column.
 *
 * With a compile-time constant offset, this function will optimise
 * down to a single "movzbl" (to extract the input byte) and will
 * generate a single x86 memory reference expression which can then be
 * used directly within a single "xorl" instruction.
 */
static inline __attribute__ (( always_inline )) uint32_t
aes_column ( const struct aes_table *table, size_t stride,
	     const union aes_matrix *in, size_t offset ) {
	const union aes_table_entry *entry;
	unsigned int byte;

	/* Extract input byte corresponding to this output byte offset
	 * (i.e. perform [Inv]ShiftRows): walking the input in steps
	 * of "stride" (modulo 16) visits bytes in output order.
	 */
	byte = in->byte[ ( stride * offset ) & 0xf ];

	/* Locate lookup table entry for this input byte (i.e. perform
	 * [Inv]SubBytes).
	 */
	entry = &table->entry[byte];

	/* Multiply appropriate matrix column by this input byte
	 * (i.e. perform [Inv]MixColumns); the low two bits of the
	 * output offset select the matrix column.
	 */
	return aes_entry_column ( entry, ( offset & 0x3 ) );
}
214
/**
 * Calculate intermediate round output column
 *
 * @v table		AES lookup table
 * @v stride		AES row shift stride
 * @v in		AES input state
 * @v key		AES round key
 * @v column		Column index
 * @ret output		Output column value
 */
static inline __attribute__ (( always_inline )) uint32_t
aes_output ( const struct aes_table *table, size_t stride,
	     const union aes_matrix *in, const union aes_matrix *key,
	     unsigned int column ) {
	size_t offset = ( column * 4 );

	/* Perform [Inv]ShiftRows, [Inv]SubBytes, [Inv]MixColumns, and
	 * AddRoundKey for this column.  Each aes_column() term
	 * contributes one matrix-column product; XORing the four
	 * products (and the round key column) completes the GF(2^8)
	 * matrix multiplication.  The loop is unrolled to allow for
	 * the required compile-time constant optimisations.
	 */
	return ( aes_column ( table, stride, in, ( offset + 0 ) ) ^
		 aes_column ( table, stride, in, ( offset + 1 ) ) ^
		 aes_column ( table, stride, in, ( offset + 2 ) ) ^
		 aes_column ( table, stride, in, ( offset + 3 ) ) ^
		 key->column[column] );
}
241
/**
 * Perform a single intermediate round
 *
 * @v table		AES lookup table
 * @v stride		AES row shift stride
 * @v in		AES input state
 * @v out		AES output state (must not overlap input state)
 * @v key		AES round key
 */
static inline __attribute__ (( always_inline )) void
aes_round ( const struct aes_table *table, size_t stride,
	    const union aes_matrix *in, union aes_matrix *out,
	    const union aes_matrix *key ) {

	/* Perform [Inv]ShiftRows, [Inv]SubBytes, [Inv]MixColumns, and
	 * AddRoundKey for all columns.  The loop is unrolled to allow
	 * for the required compile-time constant optimisations.
	 */
	out->column[0] = aes_output ( table, stride, in, key, 0 );
	out->column[1] = aes_output ( table, stride, in, key, 1 );
	out->column[2] = aes_output ( table, stride, in, key, 2 );
	out->column[3] = aes_output ( table, stride, in, key, 3 );
}
265
266/**
267 * Perform encryption intermediate rounds
268 *
269 * @v in AES input state
270 * @v out AES output state
271 * @v key Round keys
272 * @v rounds Number of rounds (must be odd)
273 *
274 * This function is deliberately marked as non-inlinable to ensure
275 * maximal availability of registers for GCC's register allocator,
276 * which has a tendency to otherwise spill performance-critical
277 * registers to the stack.
278 */
279static __attribute__ (( noinline )) void
281 const union aes_matrix *key, unsigned int rounds ) {
282 union aes_matrix *tmp;
283
284 /* Perform intermediate rounds */
285 do {
286 /* Perform one intermediate round */
288 in, out, key++ );
289
290 /* Swap input and output states for next round */
291 tmp = in;
292 in = out;
293 out = tmp;
294
295 } while ( --rounds );
296}
297
298/**
299 * Perform decryption intermediate rounds
300 *
301 * @v in AES input state
302 * @v out AES output state
303 * @v key Round keys
304 * @v rounds Number of rounds (must be odd)
305 *
306 * As with aes_encrypt_rounds(), this function is deliberately marked
307 * as non-inlinable.
308 *
309 * This function could potentially use the same binary code as is used
310 * for encryption. To compensate for the difference between ShiftRows
311 * and InvShiftRows, half of the input byte offsets would have to be
312 * modifiable at runtime (half by an offset of +4/-4, half by an
313 * offset of -4/+4 for ShiftRows/InvShiftRows). This can be
314 * accomplished in x86 assembly within the number of available
315 * registers, but GCC's register allocator struggles to do so,
316 * resulting in a significant performance decrease due to registers
317 * being spilled to the stack. We therefore use two separate but very
318 * similar binary functions based on the same C source.
319 */
320static __attribute__ (( noinline )) void
322 const union aes_matrix *key, unsigned int rounds ) {
323 union aes_matrix *tmp;
324
325 /* Perform intermediate rounds */
326 do {
327 /* Perform one intermediate round */
329 in, out, key++ );
330
331 /* Swap input and output states for next round */
332 tmp = in;
333 in = out;
334 out = tmp;
335
336 } while ( --rounds );
337}
338
339/**
340 * Perform standalone AddRoundKey
341 *
342 * @v state AES state
343 * @v key AES round key
344 */
345static inline __attribute__ (( always_inline )) void
347
348 state->column[0] ^= key->column[0];
349 state->column[1] ^= key->column[1];
350 state->column[2] ^= key->column[2];
351 state->column[3] ^= key->column[3];
352}
353
354/**
355 * Perform final round
356 *
357 * @v table AES lookup table
358 * @v stride AES row shift stride
359 * @v in AES input state
360 * @v out AES output state
361 * @v key AES round key
362 */
363static void aes_final ( const struct aes_table *table, size_t stride,
364 const union aes_matrix *in, union aes_matrix *out,
365 const union aes_matrix *key ) {
366 const union aes_table_entry *entry;
367 unsigned int byte;
368 size_t out_offset;
369 size_t in_offset;
370
371 /* Perform [Inv]ShiftRows and [Inv]SubBytes */
372 for ( out_offset = 0, in_offset = 0 ; out_offset < 16 ;
373 out_offset++, in_offset = ( ( in_offset + stride ) & 0xf ) ) {
374
375 /* Extract input byte (i.e. perform [Inv]ShiftRows) */
376 byte = in->byte[in_offset];
377
378 /* Locate lookup table entry for this input byte
379 * (i.e. perform [Inv]SubBytes).
380 */
381 entry = &table->entry[byte];
382
383 /* Store output byte */
384 out->byte[out_offset] = entry->byte[0];
385 }
386
387 /* Perform AddRoundKey */
389}
390
391/**
392 * Encrypt data
393 *
394 * @v ctx Context
395 * @v src Data to encrypt
396 * @v dst Buffer for encrypted data
397 * @v len Length of data
398 */
399static void aes_encrypt ( void *ctx, const void *src, void *dst, size_t len ) {
400 struct aes_context *aes = ctx;
401 union aes_matrix buffer[2];
402 union aes_matrix *in = &buffer[0];
403 union aes_matrix *out = &buffer[1];
404 unsigned int rounds = aes->rounds;
405
406 /* Sanity check */
407 assert ( len == sizeof ( *in ) );
408
409 /* Initialise input state */
410 memcpy ( in, src, sizeof ( *in ) );
411
412 /* Perform initial round (AddRoundKey) */
413 aes_addroundkey ( in, &aes->encrypt.key[0] );
414
415 /* Perform intermediate rounds (ShiftRows, SubBytes,
416 * MixColumns, AddRoundKey).
417 */
418 aes_encrypt_rounds ( in, out, &aes->encrypt.key[1], ( rounds - 2 ) );
419 in = out;
420
421 /* Perform final round (ShiftRows, SubBytes, AddRoundKey) */
422 out = dst;
424 &aes->encrypt.key[ rounds - 1 ] );
425}
426
427/**
428 * Decrypt data
429 *
430 * @v ctx Context
431 * @v src Data to decrypt
432 * @v dst Buffer for decrypted data
433 * @v len Length of data
434 */
435static void aes_decrypt ( void *ctx, const void *src, void *dst, size_t len ) {
436 struct aes_context *aes = ctx;
437 union aes_matrix buffer[2];
438 union aes_matrix *in = &buffer[0];
439 union aes_matrix *out = &buffer[1];
440 unsigned int rounds = aes->rounds;
441
442 /* Sanity check */
443 assert ( len == sizeof ( *in ) );
444
445 /* Initialise input state */
446 memcpy ( in, src, sizeof ( *in ) );
447
448 /* Perform initial round (AddRoundKey) */
449 aes_addroundkey ( in, &aes->decrypt.key[0] );
450
451 /* Perform intermediate rounds (InvShiftRows, InvSubBytes,
452 * InvMixColumns, AddRoundKey).
453 */
454 aes_decrypt_rounds ( in, out, &aes->decrypt.key[1], ( rounds - 2 ) );
455 in = out;
456
457 /* Perform final round (InvShiftRows, InvSubBytes, AddRoundKey) */
458 out = dst;
460 &aes->decrypt.key[ rounds - 1 ] );
461}
462
/**
 * Multiply a polynomial by (x) modulo (x^8 + x^4 + x^3 + x + 1) in GF(2^8)
 *
 * @v poly		Polynomial to be multiplied
 * @ret result		Result
 *
 * The modulus is the standard AES (Rijndael) reduction polynomial
 * 0x11b = (x^8 + x^4 + x^3 + x + 1); its low byte is the 0x1b used
 * below.  (An earlier comment misstated the x term as x^2.)
 */
static __attribute__ (( const )) unsigned int aes_double ( unsigned int poly ) {

	/* Multiply polynomial by (x), placing the resulting x^8
	 * coefficient in the LSB (i.e. rotate byte left by one).
	 */
	poly = rol8 ( poly, 1 );

	/* If coefficient of x^8 (in LSB) is non-zero, then reduce by
	 * subtracting (x^8 + x^4 + x^3 + x + 1) in GF(2^8).
	 */
	if ( poly & 0x01 ) {
		poly ^= 0x01;		/* Subtract x^8 (currently in LSB) */
		poly ^= 0x1b;		/* Subtract (x^4 + x^3 + x + 1) */
	}

	return poly;
}
486
487/**
488 * Fill in MixColumns lookup table entry
489 *
490 * @v entry AES lookup table entry for scalar multiplicand
491 *
492 * The MixColumns lookup table vector multiplier is {1,1,1,3,2,1,1,3}.
493 */
494static void aes_mixcolumns_entry ( union aes_table_entry *entry ) {
495 unsigned int scalar_x_1;
496 unsigned int scalar_x;
497 unsigned int scalar;
498
499 /* Retrieve scalar multiplicand */
500 scalar = entry->byte[0];
501 entry->byte[1] = scalar;
502 entry->byte[2] = scalar;
503 entry->byte[5] = scalar;
504 entry->byte[6] = scalar;
505
506 /* Calculate scalar multiplied by (x) */
507 scalar_x = aes_double ( scalar );
508 entry->byte[4] = scalar_x;
509
510 /* Calculate scalar multiplied by (x + 1) */
511 scalar_x_1 = ( scalar_x ^ scalar );
512 entry->byte[3] = scalar_x_1;
513 entry->byte[7] = scalar_x_1;
514}
515
516/**
517 * Fill in InvMixColumns lookup table entry
518 *
519 * @v entry AES lookup table entry for scalar multiplicand
520 *
521 * The InvMixColumns lookup table vector multiplier is {1,9,13,11,14,9,13,11}.
522 */
523static void aes_invmixcolumns_entry ( union aes_table_entry *entry ) {
524 unsigned int scalar_x3_x2_x;
525 unsigned int scalar_x3_x2_1;
526 unsigned int scalar_x3_x2;
527 unsigned int scalar_x3_x_1;
528 unsigned int scalar_x3_1;
529 unsigned int scalar_x3;
530 unsigned int scalar_x2;
531 unsigned int scalar_x;
532 unsigned int scalar;
533
534 /* Retrieve scalar multiplicand */
535 scalar = entry->byte[0];
536
537 /* Calculate scalar multiplied by (x) */
538 scalar_x = aes_double ( scalar );
539
540 /* Calculate scalar multiplied by (x^2) */
541 scalar_x2 = aes_double ( scalar_x );
542
543 /* Calculate scalar multiplied by (x^3) */
544 scalar_x3 = aes_double ( scalar_x2 );
545
546 /* Calculate scalar multiplied by (x^3 + 1) */
547 scalar_x3_1 = ( scalar_x3 ^ scalar );
548 entry->byte[1] = scalar_x3_1;
549 entry->byte[5] = scalar_x3_1;
550
551 /* Calculate scalar multiplied by (x^3 + x + 1) */
552 scalar_x3_x_1 = ( scalar_x3_1 ^ scalar_x );
553 entry->byte[3] = scalar_x3_x_1;
554 entry->byte[7] = scalar_x3_x_1;
555
556 /* Calculate scalar multiplied by (x^3 + x^2) */
557 scalar_x3_x2 = ( scalar_x3 ^ scalar_x2 );
558
559 /* Calculate scalar multiplied by (x^3 + x^2 + 1) */
560 scalar_x3_x2_1 = ( scalar_x3_x2 ^ scalar );
561 entry->byte[2] = scalar_x3_x2_1;
562 entry->byte[6] = scalar_x3_x2_1;
563
564 /* Calculate scalar multiplied by (x^3 + x^2 + x) */
565 scalar_x3_x2_x = ( scalar_x3_x2 ^ scalar_x );
566 entry->byte[4] = scalar_x3_x2_x;
567}
568
/**
 * Generate AES lookup tables
 *
 * Populates aes_mixcolumns and aes_invmixcolumns at runtime.  The
 * S-box values live in byte[0] of each aes_mixcolumns entry, and the
 * inverse S-box values in byte[0] of each aes_invmixcolumns entry.
 */
static void aes_generate ( void ) {
	union aes_table_entry *entry;
	union aes_table_entry *inventry;
	unsigned int poly = 0x01;
	unsigned int invpoly = 0x01;
	unsigned int transformed;
	unsigned int i;

	/* Iterate over non-zero values of GF(2^8) using generator (x + 1).
	 * Since (x + 1) generates the multiplicative group, "poly"
	 * visits every non-zero field element exactly once, while
	 * "invpoly" tracks its multiplicative inverse.
	 */
	do {

		/* Multiply polynomial by (x + 1) */
		poly ^= aes_double ( poly );

		/* Divide inverse polynomial by (x + 1).  This code
		 * fragment is taken directly from the Wikipedia page
		 * on the Rijndael S-box.  An explanation of why it
		 * works would be greatly appreciated.
		 */
		invpoly ^= ( invpoly << 1 );
		invpoly ^= ( invpoly << 2 );
		invpoly ^= ( invpoly << 4 );
		if ( invpoly & 0x80 )
			invpoly ^= 0x09;
		invpoly &= 0xff;

		/* Apply affine transformation: 0x63 XORed with the
		 * inverse and four of its left-rotations (the AES
		 * SubBytes affine step).
		 */
		transformed = ( 0x63 ^ invpoly ^ rol8 ( invpoly, 1 ) ^
				rol8 ( invpoly, 2 ) ^ rol8 ( invpoly, 3 ) ^
				rol8 ( invpoly, 4 ) );

		/* Populate S-box (within MixColumns lookup table) */
		aes_mixcolumns.entry[poly].byte[0] = transformed;

	} while ( poly != 0x01 );

	/* Populate zeroth S-box entry (which has no inverse) */
	aes_mixcolumns.entry[0].byte[0] = 0x63;

	/* Fill in MixColumns and InvMixColumns lookup tables */
	for ( i = 0 ; i < 256 ; i++ ) {

		/* Fill in MixColumns lookup table entry */
		entry = &aes_mixcolumns.entry[i];
		aes_mixcolumns_entry ( entry );

		/* Populate inverse S-box (within InvMixColumns lookup table) */
		inventry = &aes_invmixcolumns.entry[ entry->byte[0] ];
		inventry->byte[0] = i;

		/* Fill in InvMixColumns lookup table entry */
		aes_invmixcolumns_entry ( inventry );
	}
}
627
628/**
629 * Rotate key column
630 *
631 * @v column Key column
632 * @ret column Updated key column
633 */
634static inline __attribute__ (( always_inline )) uint32_t
636
637 return ( ( __BYTE_ORDER == __LITTLE_ENDIAN ) ?
638 ror32 ( column, 8 ) : rol32 ( column, 8 ) );
639}
640
641/**
642 * Apply S-box to key column
643 *
644 * @v column Key column
645 * @ret column Updated key column
646 */
647static uint32_t aes_key_sbox ( uint32_t column ) {
648 unsigned int i;
650
651 for ( i = 0 ; i < 4 ; i++ ) {
652 byte = ( column & 0xff );
653 byte = aes_mixcolumns.entry[byte].byte[0];
654 column = ( ( column & ~0xff ) | byte );
655 column = rol32 ( column, 8 );
656 }
657 return column;
658}
659
/**
 * Apply schedule round constant to key column
 *
 * @v column		Key column
 * @v rcon		Round constant
 * @ret column		Updated key column
 *
 * The round constant applies to the first byte (in memory order) of
 * the column: the least significant byte on little-endian machines,
 * the most significant byte on big-endian machines.
 */
static inline __attribute__ (( always_inline )) uint32_t
aes_key_rcon ( uint32_t column, unsigned int rcon ) {
	uint32_t mask;

	if ( __BYTE_ORDER == __LITTLE_ENDIAN ) {
		mask = rcon;
	} else {
		mask = ( rcon << 24 );
	}

	return ( column ^ mask );
}
673
674/**
675 * Set key
676 *
677 * @v ctx Context
678 * @v key Key
679 * @v keylen Key length
680 * @ret rc Return status code
681 */
682static int aes_setkey ( void *ctx, const void *key, size_t keylen ) {
683 struct aes_context *aes = ctx;
684 union aes_matrix *enc;
685 union aes_matrix *dec;
686 union aes_matrix temp;
687 union aes_matrix zero;
688 unsigned int rcon = 0x01;
689 unsigned int rounds;
690 size_t offset = 0;
691 uint32_t *prev;
692 uint32_t *next;
693 uint32_t *end;
695
696 /* Generate lookup tables, if not already done */
697 if ( ! aes_mixcolumns.entry[0].byte[0] )
698 aes_generate();
699
700 /* Validate key length and calculate number of intermediate rounds */
701 switch ( keylen ) {
702 case ( 128 / 8 ) :
703 rounds = 11;
704 break;
705 case ( 192 / 8 ) :
706 rounds = 13;
707 break;
708 case ( 256 / 8 ) :
709 rounds = 15;
710 break;
711 default:
712 DBGC ( aes, "AES %p unsupported key length (%zd bits)\n",
713 aes, ( keylen * 8 ) );
714 return -EINVAL;
715 }
716 aes->rounds = rounds;
717 enc = aes->encrypt.key;
718 end = enc[rounds].column;
719
720 /* Copy raw key */
721 memcpy ( enc, key, keylen );
722 prev = enc->column;
723 next = ( ( ( void * ) prev ) + keylen );
724 tmp = next[-1];
725
726 /* Construct expanded key */
727 while ( next < end ) {
728
729 /* If this is the first column of an expanded key
730 * block, or the middle column of an AES-256 key
731 * block, then apply the S-box.
732 */
733 if ( ( offset == 0 ) || ( ( offset | keylen ) == 48 ) )
734 tmp = aes_key_sbox ( tmp );
735
736 /* If this is the first column of an expanded key
737 * block then rotate and apply the round constant.
738 */
739 if ( offset == 0 ) {
740 tmp = aes_key_rotate ( tmp );
741 tmp = aes_key_rcon ( tmp, rcon );
742 rcon = aes_double ( rcon );
743 }
744
745 /* XOR with previous key column */
746 tmp ^= *prev;
747
748 /* Store column */
749 *next = tmp;
750
751 /* Move to next column */
752 offset += sizeof ( *next );
753 if ( offset == keylen )
754 offset = 0;
755 next++;
756 prev++;
757 }
758 DBGC2 ( aes, "AES %p expanded %zd-bit key:\n", aes, ( keylen * 8 ) );
759 DBGC2_HDA ( aes, 0, &aes->encrypt, ( rounds * sizeof ( *enc ) ) );
760
761 /* Convert to decryption key */
762 memset ( &zero, 0, sizeof ( zero ) );
763 dec = &aes->decrypt.key[ rounds - 1 ];
764 memcpy ( dec--, enc++, sizeof ( *dec ) );
765 while ( dec > aes->decrypt.key ) {
766 /* Perform InvMixColumns (by reusing the encryption
767 * final-round code to perform ShiftRows+SubBytes and
768 * reusing the decryption intermediate-round code to
769 * perform InvShiftRows+InvSubBytes+InvMixColumns, all
770 * with a zero encryption key).
771 */
773 enc++, &temp, &zero );
774 aes_decrypt_rounds ( &temp, dec--, &zero, 1 );
775 }
776 memcpy ( dec--, enc++, sizeof ( *dec ) );
777 DBGC2 ( aes, "AES %p inverted %zd-bit key:\n", aes, ( keylen * 8 ) );
778 DBGC2_HDA ( aes, 0, &aes->decrypt, ( rounds * sizeof ( *dec ) ) );
779
780 return 0;
781}
782
783/** Basic AES algorithm */
785 .name = "aes",
786 .ctxsize = sizeof ( struct aes_context ),
787 .blocksize = AES_BLOCKSIZE,
788 .alignsize = 0,
789 .authsize = 0,
790 .setkey = aes_setkey,
791 .setiv = cipher_null_setiv,
792 .encrypt = aes_encrypt,
793 .decrypt = aes_decrypt,
794 .auth = cipher_null_auth,
795};
796
797/* AES in Electronic Codebook mode */
800
801/* AES in Cipher Block Chaining mode */
804
805/* AES in Galois/Counter mode */
union @162305117151260234136356364136041353210355154177 key
Sense key.
Definition scsi.h:3
struct golan_eq_context ctx
Definition CIB_PRM.h:0
__be32 in[4]
Definition CIB_PRM.h:7
__be32 out[4]
Definition CIB_PRM.h:8
typeof(acpi_finder=acpi_find)
ACPI table finder.
Definition acpi.c:48
static uint32_t aes_key_sbox(uint32_t column)
Apply S-box to key column.
Definition aes.c:647
static struct aes_table aes_mixcolumns
AES MixColumns lookup table.
Definition aes.c:147
static void aes_decrypt_rounds(union aes_matrix *in, union aes_matrix *out, const union aes_matrix *key, unsigned int rounds)
Perform decryption intermediate rounds.
Definition aes.c:321
struct cipher_algorithm aes_algorithm
Basic AES algorithm.
Definition aes.c:784
static void aes_round(const struct aes_table *table, size_t stride, const union aes_matrix *in, union aes_matrix *out, const union aes_matrix *key)
Perform a single intermediate round.
Definition aes.c:252
static void aes_generate(void)
Generate AES lookup tables.
Definition aes.c:573
aes_stride
AES strides.
Definition aes.c:50
@ AES_STRIDE_SHIFTROWS
Input stride for ShiftRows.
Definition aes.c:61
@ AES_STRIDE_INVSHIFTROWS
Input stride for InvShiftRows.
Definition aes.c:72
static void aes_final(const struct aes_table *table, size_t stride, const union aes_matrix *in, union aes_matrix *out, const union aes_matrix *key)
Perform final round.
Definition aes.c:363
static void aes_addroundkey(union aes_matrix *state, const union aes_matrix *key)
Perform standalone AddRoundKey.
Definition aes.c:346
static uint32_t aes_key_rcon(uint32_t column, unsigned int rcon)
Apply schedule round constant to key column.
Definition aes.c:668
static uint32_t aes_entry_column(const union aes_table_entry *entry, unsigned int column)
Multiply [Inv]MixColumns matrix column by scalar multiplicand.
Definition aes.c:160
static void aes_decrypt(void *ctx, const void *src, void *dst, size_t len)
Decrypt data.
Definition aes.c:435
static uint32_t aes_output(const struct aes_table *table, size_t stride, const union aes_matrix *in, const union aes_matrix *key, unsigned int column)
Calculate intermediate round output column.
Definition aes.c:226
static int aes_setkey(void *ctx, const void *key, size_t keylen)
Set key.
Definition aes.c:682
static unsigned int aes_double(unsigned int poly)
Multiply a polynomial by (x) modulo (x^8 + x^4 + x^3 + x^2 + 1) in GF(2^8)
Definition aes.c:469
static void aes_encrypt_rounds(union aes_matrix *in, union aes_matrix *out, const union aes_matrix *key, unsigned int rounds)
Perform encryption intermediate rounds.
Definition aes.c:280
static uint32_t aes_column(const struct aes_table *table, size_t stride, const union aes_matrix *in, size_t offset)
Multiply [Inv]MixColumns matrix column by S-boxed input byte.
Definition aes.c:194
static void aes_mixcolumns_entry(union aes_table_entry *entry)
Fill in MixColumns lookup table entry.
Definition aes.c:494
static void aes_encrypt(void *ctx, const void *src, void *dst, size_t len)
Encrypt data.
Definition aes.c:399
static struct aes_table aes_invmixcolumns
AES InvMixColumns lookup table.
Definition aes.c:150
static void aes_invmixcolumns_entry(union aes_table_entry *entry)
Fill in InvMixColumns lookup table entry.
Definition aes.c:523
static uint32_t aes_key_rotate(uint32_t column)
Rotate key column.
Definition aes.c:635
AES algorithm.
struct cipher_algorithm aes_ecb_algorithm
struct cipher_algorithm aes_cbc_algorithm
struct cipher_algorithm aes_gcm_algorithm
#define AES_BLOCKSIZE
AES blocksize.
Definition aes.h:16
unsigned int uint32_t
Definition stdint.h:12
unsigned char uint8_t
Definition stdint.h:10
#define __BYTE_ORDER
Definition endian.h:7
static const void * src
Definition string.h:48
Assertions.
#define assert(condition)
Assert a condition at run-time.
Definition assert.h:50
uint16_t offset
Offset to command line.
Definition bzimage.h:3
Cipher-block chaining.
#define CBC_CIPHER(_cbc_name, _cbc_cipher, _raw_cipher, _raw_context, _blocksize)
Create a cipher-block chaining mode of behaviour of an existing cipher.
Definition cbc.h:65
void cipher_null_setiv(void *ctx __unused, const void *iv __unused, size_t ivlen __unused)
Definition crypto_null.c:65
void cipher_null_auth(void *ctx __unused, void *auth __unused)
Definition crypto_null.c:80
uint32_t next
Next descriptor address.
Definition dwmac.h:11
ring len
Length.
Definition dwmac.h:226
Electronic codebook (ECB)
#define ECB_CIPHER(_ecb_name, _ecb_cipher, _raw_cipher, _raw_context, _blocksize)
Create a cipher-block chaining mode of behaviour of an existing cipher.
Definition ecb.h:29
struct ena_llq_option stride
Descriptor strides.
Definition ena.h:11
Error codes.
uint8_t state
State.
Definition eth_slow.h:36
Galois/Counter Mode (GCM)
#define GCM_CIPHER(_gcm_name, _gcm_cipher, _raw_cipher, _raw_context, _blocksize)
Create a GCM mode of behaviour of an existing cipher.
Definition gcm.h:81
#define DBGC2(...)
Definition compiler.h:522
#define DBGC2_HDA(...)
Definition compiler.h:523
#define DBGC(...)
Definition compiler.h:505
uint32_t buffer
Buffer index (or NETVSC_RNDIS_NO_BUFFER)
Definition netvsc.h:5
#define FILE_LICENCE(_licence)
Declare a particular licence as applying to a file.
Definition compiler.h:896
#define EINVAL
Invalid argument.
Definition errno.h:429
#define FILE_SECBOOT(_status)
Declare a file's UEFI Secure Boot permission status.
Definition compiler.h:926
#define __attribute__(x)
Definition compiler.h:10
#define __LITTLE_ENDIAN
Constant representing little-endian byte order.
Definition endian.h:13
Cryptographic API.
uint8_t product
Product string.
Definition smbios.h:5
String functions.
void * memcpy(void *dest, const void *src, size_t len) __nonnull
void * memset(void *dest, int character, size_t len) __nonnull
unsigned long tmp
Definition linux_pci.h:65
uint32_t end
Ending offset.
Definition netvsc.h:7
Bit operations.
unsigned char byte
Definition smc9000.h:38
#define container_of(ptr, type, field)
Get containing structure.
Definition stddef.h:36
AES context.
Definition aes.h:36
struct aes_round_keys decrypt
Decryption keys.
Definition aes.h:40
unsigned int rounds
Number of rounds.
Definition aes.h:42
struct aes_round_keys encrypt
Encryption keys.
Definition aes.h:38
union aes_matrix key[AES_MAX_ROUNDS]
Round keys.
Definition aes.h:32
An AES lookup table.
Definition aes.c:141
union aes_table_entry entry[256]
Table entries, indexed by S(N)
Definition aes.c:143
A cipher algorithm.
Definition crypto.h:51
AES matrix.
Definition aes.h:22
uint32_t column[4]
Viewed as an array of four-byte columns.
Definition aes.h:26
A single AES lookup table entry.
Definition aes.c:113
uint8_t byte[8]
Viewed as an array of bytes.
Definition aes.c:115
static u32 ror32(u32 v, int bits)
Rotate 32-bit value right.
Definition wpa_tkip.c:162
static u32 rol32(u32 v, int bits)
Rotate 32-bit value left.
Definition wpa_tkip.c:174