iPXE
aes.c
Go to the documentation of this file.
1 /*
2  * Copyright (C) 2015 Michael Brown <mbrown@fensystems.co.uk>.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License as
6  * published by the Free Software Foundation; either version 2 of the
7  * License, or any later version.
8  *
9  * This program is distributed in the hope that it will be useful, but
10  * WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12  * General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
17  * 02110-1301, USA.
18  *
19  * You can also choose to distribute this program under the terms of
20  * the Unmodified Binary Distribution Licence (as given in the file
21  * COPYING.UBDL), provided that you have satisfied its requirements.
22  */
23 
24 FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
25 
26 /** @file
27  *
28  * AES algorithm
29  *
30  */
31 
32 #include <stdint.h>
33 #include <string.h>
34 #include <errno.h>
35 #include <assert.h>
36 #include <byteswap.h>
37 #include <ipxe/rotate.h>
38 #include <ipxe/crypto.h>
39 #include <ipxe/ecb.h>
40 #include <ipxe/cbc.h>
41 #include <ipxe/gcm.h>
42 #include <ipxe/aes.h>
43 
/** AES strides
 *
 * These are the strides (modulo 16) used to walk through the AES
 * input state bytes in order of byte position after [Inv]ShiftRows.
 */
enum aes_stride {
	/** Input stride for ShiftRows
	 *
	 *    0 4 8 c
	 *     \ \ \
	 *    1 5 9 d
	 *      \ \ \
	 *    2 6 a e
	 *       \ \ \
	 *    3 7 b f
	 */
	AES_STRIDE_SHIFTROWS = +5,
	/** Input stride for InvShiftRows
	 *
	 *    0 4 8 c
	 *     / / /
	 *    1 5 9 d
	 *    / / /
	 *    2 6 a e
	 *   / / /
	 *    3 7 b f
	 */
	AES_STRIDE_INVSHIFTROWS = -3,
};
73 
/** A single AES lookup table entry
 *
 * This represents the product (in the Galois field GF(2^8)) of an
 * eight-byte vector multiplier with a single scalar multiplicand.
 *
 * The vector multipliers used for AES will be {1,1,1,3,2,1,1,3} for
 * MixColumns and {1,9,13,11,14,9,13,11} for InvMixColumns.  This
 * allows for the result of multiplying any single column of the
 * [Inv]MixColumns matrix by a scalar value to be obtained simply by
 * extracting the relevant four-byte subset from the lookup table
 * entry.
 *
 * For example, to find the result of multiplying the second column of
 * the MixColumns matrix by the scalar value 0x80:
 *
 *   MixColumns column[0]:	{ 2, 1, 1, 3 }
 *   MixColumns column[1]:	{ 3, 2, 1, 1 }
 *   MixColumns column[2]:	{ 1, 3, 2, 1 }
 *   MixColumns column[3]:	{ 1, 1, 3, 2 }
 *   Vector multiplier:		{ 1, 1, 1, 3, 2, 1, 1, 3 }
 *   Scalar multiplicand:	0x80
 *   Lookup table entry:	{ 0x80, 0x80, 0x80, 0x9b, 0x1b, 0x80, 0x80, 0x9b }
 *
 * The second column of the MixColumns matrix is {3,2,1,1}.  The
 * product of this column with the scalar value 0x80 can be obtained
 * by extracting the relevant four-byte subset of the lookup table
 * entry:
 *
 *   MixColumns column[1]:	{ 3, 2, 1, 1 }
 *   Vector multiplier:		{ 1, 1, 1, 3, 2, 1, 1, 3 }
 *   Lookup table entry:	{ 0x80, 0x80, 0x80, 0x9b, 0x1b, 0x80, 0x80, 0x9b }
 *   Product:			{ 0x9b, 0x1b, 0x80, 0x80 }
 *
 * The column lookups require only seven bytes of the eight-byte
 * entry: the remaining (first) byte is used to hold the scalar
 * multiplicand itself (i.e. the first byte of the vector multiplier
 * is always chosen to be 1).
 */
union aes_table_entry {
	/** Viewed as an array of bytes */
	uint8_t byte[8];
} __attribute__ (( packed ));
116 
/** An AES lookup table
 *
 * This represents the products (in the Galois field GF(2^8)) of a
 * constant eight-byte vector multiplier with all possible 256 scalar
 * multiplicands.
 *
 * The entries are indexed by the AES [Inv]SubBytes S-box output
 * values (denoted S(N)).  This allows for the result of multiplying
 * any single column of the [Inv]MixColumns matrix by S(N) to be
 * obtained simply by extracting the relevant four-byte subset from
 * the Nth table entry.  For example:
 *
 *   Input byte (N):		0x3a
 *   SubBytes output S(N):	0x80
 *   MixColumns column[1]:	{ 3, 2, 1, 1 }
 *   Vector multiplier:		{ 1, 1, 1, 3, 2, 1, 1, 3 }
 *   Table entry[0x3a]:		{ 0x80, 0x80, 0x80, 0x9b, 0x1b, 0x80, 0x80, 0x9b }
 *   Product:			{ 0x9b, 0x1b, 0x80, 0x80 }
 *
 * Since the first byte of the eight-byte vector multiplier is always
 * chosen to be 1, the value of S(N) may be looked up by extracting
 * the first byte of the Nth table entry.
 */
struct aes_table {
	/** Table entries, indexed by S(N) */
	union aes_table_entry entry[256];
} __attribute__ (( aligned ( 8 ) ));
144 
/** AES MixColumns lookup table
 *
 * Note that byte[0] of each entry also serves as the SubBytes S-box
 * (it is populated with the S-box output by aes_generate()).
 */
static struct aes_table aes_mixcolumns;
147 
148 /** AES InvMixColumns lookup table */
150 
151 /**
152  * Multiply [Inv]MixColumns matrix column by scalar multiplicand
153  *
154  * @v entry AES lookup table entry for scalar multiplicand
155  * @v column [Inv]MixColumns matrix column index
156  * @ret product Product of matrix column with scalar multiplicand
157  */
158 static inline __attribute__ (( always_inline )) uint32_t
159 aes_entry_column ( const union aes_table_entry *entry, unsigned int column ) {
160  const union {
161  uint8_t byte;
163  } __attribute__ (( may_alias )) *product;
164 
165  /* Locate relevant four-byte subset */
166  product = container_of ( &entry->byte[ 4 - column ],
167  typeof ( *product ), byte );
168 
169  /* Extract this four-byte subset */
170  return product->column;
171 }
172 
/**
 * Multiply [Inv]MixColumns matrix column by S-boxed input byte
 *
 * @v table		AES lookup table
 * @v stride		AES row shift stride
 * @v in		AES input state
 * @v offset		Output byte offset (after [Inv]ShiftRows)
 * @ret product		Product of matrix column with S(input byte)
 *
 * Note that the specified offset is not the offset of the input byte;
 * it is the offset of the output byte which corresponds to the input
 * byte.  This output byte offset is used to calculate both the input
 * byte offset and to select the appropriate matrix column.
 *
 * With a compile-time constant offset, this function will optimise
 * down to a single "movzbl" (to extract the input byte) and will
 * generate a single x86 memory reference expression which can then be
 * used directly within a single "xorl" instruction.
 */
static inline __attribute__ (( always_inline )) uint32_t
aes_column ( const struct aes_table *table, size_t stride,
	     const union aes_matrix *in, size_t offset ) {
	const union aes_table_entry *entry;
	unsigned int byte;

	/* Extract input byte corresponding to this output byte offset
	 * (i.e. perform [Inv]ShiftRows).  The "& 0xf" mask is what
	 * makes the strides effective modulo 16.
	 */
	byte = in->byte[ ( stride * offset ) & 0xf ];

	/* Locate lookup table entry for this input byte (i.e. perform
	 * [Inv]SubBytes).
	 */
	entry = &table->entry[byte];

	/* Multiply appropriate matrix column by this input byte
	 * (i.e. perform [Inv]MixColumns).  The column index cycles
	 * 0-3 with the output byte offset.
	 */
	return aes_entry_column ( entry, ( offset & 0x3 ) );
}
213 
214 /**
215  * Calculate intermediate round output column
216  *
217  * @v table AES lookup table
218  * @v stride AES row shift stride
219  * @v in AES input state
220  * @v key AES round key
221  * @v column Column index
222  * @ret output Output column value
223  */
224 static inline __attribute__ (( always_inline )) uint32_t
225 aes_output ( const struct aes_table *table, size_t stride,
226  const union aes_matrix *in, const union aes_matrix *key,
227  unsigned int column ) {
228  size_t offset = ( column * 4 );
229 
230  /* Perform [Inv]ShiftRows, [Inv]SubBytes, [Inv]MixColumns, and
231  * AddRoundKey for this column. The loop is unrolled to allow
232  * for the required compile-time constant optimisations.
233  */
234  return ( aes_column ( table, stride, in, ( offset + 0 ) ) ^
235  aes_column ( table, stride, in, ( offset + 1 ) ) ^
236  aes_column ( table, stride, in, ( offset + 2 ) ) ^
237  aes_column ( table, stride, in, ( offset + 3 ) ) ^
238  key->column[column] );
239 }
240 
241 /**
242  * Perform a single intermediate round
243  *
244  * @v table AES lookup table
245  * @v stride AES row shift stride
246  * @v in AES input state
247  * @v out AES output state
248  * @v key AES round key
249  */
250 static inline __attribute__ (( always_inline )) void
251 aes_round ( const struct aes_table *table, size_t stride,
252  const union aes_matrix *in, union aes_matrix *out,
253  const union aes_matrix *key ) {
254 
255  /* Perform [Inv]ShiftRows, [Inv]SubBytes, [Inv]MixColumns, and
256  * AddRoundKey for all columns. The loop is unrolled to allow
257  * for the required compile-time constant optimisations.
258  */
259  out->column[0] = aes_output ( table, stride, in, key, 0 );
260  out->column[1] = aes_output ( table, stride, in, key, 1 );
261  out->column[2] = aes_output ( table, stride, in, key, 2 );
262  out->column[3] = aes_output ( table, stride, in, key, 3 );
263 }
264 
265 /**
266  * Perform encryption intermediate rounds
267  *
268  * @v in AES input state
269  * @v out AES output state
270  * @v key Round keys
271  * @v rounds Number of rounds (must be odd)
272  *
273  * This function is deliberately marked as non-inlinable to ensure
274  * maximal availability of registers for GCC's register allocator,
275  * which has a tendency to otherwise spill performance-critical
276  * registers to the stack.
277  */
278 static __attribute__ (( noinline )) void
279 aes_encrypt_rounds ( union aes_matrix *in, union aes_matrix *out,
280  const union aes_matrix *key, unsigned int rounds ) {
281  union aes_matrix *tmp;
282 
283  /* Perform intermediate rounds */
284  do {
285  /* Perform one intermediate round */
286  aes_round ( &aes_mixcolumns, AES_STRIDE_SHIFTROWS,
287  in, out, key++ );
288 
289  /* Swap input and output states for next round */
290  tmp = in;
291  in = out;
292  out = tmp;
293 
294  } while ( --rounds );
295 }
296 
297 /**
298  * Perform decryption intermediate rounds
299  *
300  * @v in AES input state
301  * @v out AES output state
302  * @v key Round keys
303  * @v rounds Number of rounds (must be odd)
304  *
305  * As with aes_encrypt_rounds(), this function is deliberately marked
306  * as non-inlinable.
307  *
308  * This function could potentially use the same binary code as is used
309  * for encryption. To compensate for the difference between ShiftRows
310  * and InvShiftRows, half of the input byte offsets would have to be
311  * modifiable at runtime (half by an offset of +4/-4, half by an
312  * offset of -4/+4 for ShiftRows/InvShiftRows). This can be
313  * accomplished in x86 assembly within the number of available
314  * registers, but GCC's register allocator struggles to do so,
315  * resulting in a significant performance decrease due to registers
316  * being spilled to the stack. We therefore use two separate but very
317  * similar binary functions based on the same C source.
318  */
319 static __attribute__ (( noinline )) void
320 aes_decrypt_rounds ( union aes_matrix *in, union aes_matrix *out,
321  const union aes_matrix *key, unsigned int rounds ) {
322  union aes_matrix *tmp;
323 
324  /* Perform intermediate rounds */
325  do {
326  /* Perform one intermediate round */
328  in, out, key++ );
329 
330  /* Swap input and output states for next round */
331  tmp = in;
332  in = out;
333  out = tmp;
334 
335  } while ( --rounds );
336 }
337 
338 /**
339  * Perform standalone AddRoundKey
340  *
341  * @v state AES state
342  * @v key AES round key
343  */
344 static inline __attribute__ (( always_inline )) void
345 aes_addroundkey ( union aes_matrix *state, const union aes_matrix *key ) {
346 
347  state->column[0] ^= key->column[0];
348  state->column[1] ^= key->column[1];
349  state->column[2] ^= key->column[2];
350  state->column[3] ^= key->column[3];
351 }
352 
/**
 * Perform final round
 *
 * @v table		AES lookup table
 * @v stride		AES row shift stride
 * @v in		AES input state
 * @v out		AES output state
 * @v key		AES round key
 *
 * The final round omits [Inv]MixColumns: only [Inv]ShiftRows,
 * [Inv]SubBytes, and AddRoundKey are applied.
 */
static void aes_final ( const struct aes_table *table, size_t stride,
			const union aes_matrix *in, union aes_matrix *out,
			const union aes_matrix *key ) {
	const union aes_table_entry *entry;
	unsigned int byte;
	size_t out_offset;
	size_t in_offset;

	/* Perform [Inv]ShiftRows and [Inv]SubBytes.  The input
	 * offset advances by the row shift stride (modulo 16) for
	 * each successive output byte.
	 */
	for ( out_offset = 0, in_offset = 0 ; out_offset < 16 ;
	      out_offset++, in_offset = ( ( in_offset + stride ) & 0xf ) ) {

		/* Extract input byte (i.e. perform [Inv]ShiftRows) */
		byte = in->byte[in_offset];

		/* Locate lookup table entry for this input byte
		 * (i.e. perform [Inv]SubBytes).  byte[0] of each
		 * entry holds the plain S-box output.
		 */
		entry = &table->entry[byte];

		/* Store output byte */
		out->byte[out_offset] = entry->byte[0];
	}

	/* Perform AddRoundKey */
	aes_addroundkey ( out, key );
}
389 
390 /**
391  * Encrypt data
392  *
393  * @v ctx Context
394  * @v src Data to encrypt
395  * @v dst Buffer for encrypted data
396  * @v len Length of data
397  */
398 static void aes_encrypt ( void *ctx, const void *src, void *dst, size_t len ) {
399  struct aes_context *aes = ctx;
400  union aes_matrix buffer[2];
401  union aes_matrix *in = &buffer[0];
402  union aes_matrix *out = &buffer[1];
403  unsigned int rounds = aes->rounds;
404 
405  /* Sanity check */
406  assert ( len == sizeof ( *in ) );
407 
408  /* Initialise input state */
409  memcpy ( in, src, sizeof ( *in ) );
410 
411  /* Perform initial round (AddRoundKey) */
412  aes_addroundkey ( in, &aes->encrypt.key[0] );
413 
414  /* Perform intermediate rounds (ShiftRows, SubBytes,
415  * MixColumns, AddRoundKey).
416  */
417  aes_encrypt_rounds ( in, out, &aes->encrypt.key[1], ( rounds - 2 ) );
418  in = out;
419 
420  /* Perform final round (ShiftRows, SubBytes, AddRoundKey) */
421  out = dst;
423  &aes->encrypt.key[ rounds - 1 ] );
424 }
425 
426 /**
427  * Decrypt data
428  *
429  * @v ctx Context
430  * @v src Data to decrypt
431  * @v dst Buffer for decrypted data
432  * @v len Length of data
433  */
434 static void aes_decrypt ( void *ctx, const void *src, void *dst, size_t len ) {
435  struct aes_context *aes = ctx;
436  union aes_matrix buffer[2];
437  union aes_matrix *in = &buffer[0];
438  union aes_matrix *out = &buffer[1];
439  unsigned int rounds = aes->rounds;
440 
441  /* Sanity check */
442  assert ( len == sizeof ( *in ) );
443 
444  /* Initialise input state */
445  memcpy ( in, src, sizeof ( *in ) );
446 
447  /* Perform initial round (AddRoundKey) */
448  aes_addroundkey ( in, &aes->decrypt.key[0] );
449 
450  /* Perform intermediate rounds (InvShiftRows, InvSubBytes,
451  * InvMixColumns, AddRoundKey).
452  */
453  aes_decrypt_rounds ( in, out, &aes->decrypt.key[1], ( rounds - 2 ) );
454  in = out;
455 
456  /* Perform final round (InvShiftRows, InvSubBytes, AddRoundKey) */
457  out = dst;
459  &aes->decrypt.key[ rounds - 1 ] );
460 }
461 
/**
 * Multiply a polynomial by (x) modulo (x^8 + x^4 + x^3 + x^2 + 1) in GF(2^8)
 *
 * @v poly		Polynomial to be multiplied
 * @ret result		Result
 */
static __attribute__ (( const )) unsigned int aes_double ( unsigned int poly ) {

	/* Rotate byte left by one, which multiplies by (x) while
	 * parking the resulting x^8 coefficient in the LSB.
	 */
	poly = rol8 ( poly, 1 );

	/* If the x^8 coefficient (now in the LSB) is set, reduce by
	 * subtracting (x^8 + x^4 + x^3 + x^2 + 1): clearing the LSB
	 * removes x^8 and XORing 0x1b removes the remaining terms,
	 * combined here into a single XOR with 0x1a.
	 */
	if ( poly & 0x01 )
		poly ^= ( 0x01 ^ 0x1b );

	return poly;
}
485 
486 /**
487  * Fill in MixColumns lookup table entry
488  *
489  * @v entry AES lookup table entry for scalar multiplicand
490  *
491  * The MixColumns lookup table vector multiplier is {1,1,1,3,2,1,1,3}.
492  */
494  unsigned int scalar_x_1;
495  unsigned int scalar_x;
496  unsigned int scalar;
497 
498  /* Retrieve scalar multiplicand */
499  scalar = entry->byte[0];
500  entry->byte[1] = scalar;
501  entry->byte[2] = scalar;
502  entry->byte[5] = scalar;
503  entry->byte[6] = scalar;
504 
505  /* Calculate scalar multiplied by (x) */
506  scalar_x = aes_double ( scalar );
507  entry->byte[4] = scalar_x;
508 
509  /* Calculate scalar multiplied by (x + 1) */
510  scalar_x_1 = ( scalar_x ^ scalar );
511  entry->byte[3] = scalar_x_1;
512  entry->byte[7] = scalar_x_1;
513 }
514 
515 /**
516  * Fill in InvMixColumns lookup table entry
517  *
518  * @v entry AES lookup table entry for scalar multiplicand
519  *
520  * The InvMixColumns lookup table vector multiplier is {1,9,13,11,14,9,13,11}.
521  */
523  unsigned int scalar_x3_x2_x;
524  unsigned int scalar_x3_x2_1;
525  unsigned int scalar_x3_x2;
526  unsigned int scalar_x3_x_1;
527  unsigned int scalar_x3_1;
528  unsigned int scalar_x3;
529  unsigned int scalar_x2;
530  unsigned int scalar_x;
531  unsigned int scalar;
532 
533  /* Retrieve scalar multiplicand */
534  scalar = entry->byte[0];
535 
536  /* Calculate scalar multiplied by (x) */
537  scalar_x = aes_double ( scalar );
538 
539  /* Calculate scalar multiplied by (x^2) */
540  scalar_x2 = aes_double ( scalar_x );
541 
542  /* Calculate scalar multiplied by (x^3) */
543  scalar_x3 = aes_double ( scalar_x2 );
544 
545  /* Calculate scalar multiplied by (x^3 + 1) */
546  scalar_x3_1 = ( scalar_x3 ^ scalar );
547  entry->byte[1] = scalar_x3_1;
548  entry->byte[5] = scalar_x3_1;
549 
550  /* Calculate scalar multiplied by (x^3 + x + 1) */
551  scalar_x3_x_1 = ( scalar_x3_1 ^ scalar_x );
552  entry->byte[3] = scalar_x3_x_1;
553  entry->byte[7] = scalar_x3_x_1;
554 
555  /* Calculate scalar multiplied by (x^3 + x^2) */
556  scalar_x3_x2 = ( scalar_x3 ^ scalar_x2 );
557 
558  /* Calculate scalar multiplied by (x^3 + x^2 + 1) */
559  scalar_x3_x2_1 = ( scalar_x3_x2 ^ scalar );
560  entry->byte[2] = scalar_x3_x2_1;
561  entry->byte[6] = scalar_x3_x2_1;
562 
563  /* Calculate scalar multiplied by (x^3 + x^2 + x) */
564  scalar_x3_x2_x = ( scalar_x3_x2 ^ scalar_x );
565  entry->byte[4] = scalar_x3_x2_x;
566 }
567 
568 /**
569  * Generate AES lookup tables
570  *
571  */
572 static void aes_generate ( void ) {
573  union aes_table_entry *entry;
574  union aes_table_entry *inventry;
575  unsigned int poly = 0x01;
576  unsigned int invpoly = 0x01;
577  unsigned int transformed;
578  unsigned int i;
579 
580  /* Iterate over non-zero values of GF(2^8) using generator (x + 1) */
581  do {
582 
583  /* Multiply polynomial by (x + 1) */
584  poly ^= aes_double ( poly );
585 
586  /* Divide inverse polynomial by (x + 1). This code
587  * fragment is taken directly from the Wikipedia page
588  * on the Rijndael S-box. An explanation of why it
589  * works would be greatly appreciated.
590  */
591  invpoly ^= ( invpoly << 1 );
592  invpoly ^= ( invpoly << 2 );
593  invpoly ^= ( invpoly << 4 );
594  if ( invpoly & 0x80 )
595  invpoly ^= 0x09;
596  invpoly &= 0xff;
597 
598  /* Apply affine transformation */
599  transformed = ( 0x63 ^ invpoly ^ rol8 ( invpoly, 1 ) ^
600  rol8 ( invpoly, 2 ) ^ rol8 ( invpoly, 3 ) ^
601  rol8 ( invpoly, 4 ) );
602 
603  /* Populate S-box (within MixColumns lookup table) */
604  aes_mixcolumns.entry[poly].byte[0] = transformed;
605 
606  } while ( poly != 0x01 );
607 
608  /* Populate zeroth S-box entry (which has no inverse) */
609  aes_mixcolumns.entry[0].byte[0] = 0x63;
610 
611  /* Fill in MixColumns and InvMixColumns lookup tables */
612  for ( i = 0 ; i < 256 ; i++ ) {
613 
614  /* Fill in MixColumns lookup table entry */
615  entry = &aes_mixcolumns.entry[i];
617 
618  /* Populate inverse S-box (within InvMixColumns lookup table) */
619  inventry = &aes_invmixcolumns.entry[ entry->byte[0] ];
620  inventry->byte[0] = i;
621 
622  /* Fill in InvMixColumns lookup table entry */
623  aes_invmixcolumns_entry ( inventry );
624  }
625 }
626 
627 /**
628  * Rotate key column
629  *
630  * @v column Key column
631  * @ret column Updated key column
632  */
633 static inline __attribute__ (( always_inline )) uint32_t
634 aes_key_rotate ( uint32_t column ) {
635 
636  return ( ( __BYTE_ORDER == __LITTLE_ENDIAN ) ?
637  ror32 ( column, 8 ) : rol32 ( column, 8 ) );
638 }
639 
640 /**
641  * Apply S-box to key column
642  *
643  * @v column Key column
644  * @ret column Updated key column
645  */
647  unsigned int i;
648  uint8_t byte;
649 
650  for ( i = 0 ; i < 4 ; i++ ) {
651  byte = ( column & 0xff );
652  byte = aes_mixcolumns.entry[byte].byte[0];
653  column = ( ( column & ~0xff ) | byte );
654  column = rol32 ( column, 8 );
655  }
656  return column;
657 }
658 
659 /**
660  * Apply schedule round constant to key column
661  *
662  * @v column Key column
663  * @v rcon Round constant
664  * @ret column Updated key column
665  */
666 static inline __attribute__ (( always_inline )) uint32_t
667 aes_key_rcon ( uint32_t column, unsigned int rcon ) {
668 
669  return ( ( __BYTE_ORDER == __LITTLE_ENDIAN ) ?
670  ( column ^ rcon ) : ( column ^ ( rcon << 24 ) ) );
671 }
672 
673 /**
674  * Set key
675  *
676  * @v ctx Context
677  * @v key Key
678  * @v keylen Key length
679  * @ret rc Return status code
680  */
681 static int aes_setkey ( void *ctx, const void *key, size_t keylen ) {
682  struct aes_context *aes = ctx;
683  union aes_matrix *enc;
684  union aes_matrix *dec;
685  union aes_matrix temp;
686  union aes_matrix zero;
687  unsigned int rcon = 0x01;
688  unsigned int rounds;
689  size_t offset = 0;
690  uint32_t *prev;
691  uint32_t *next;
692  uint32_t *end;
693  uint32_t tmp;
694 
695  /* Generate lookup tables, if not already done */
696  if ( ! aes_mixcolumns.entry[0].byte[0] )
697  aes_generate();
698 
699  /* Validate key length and calculate number of intermediate rounds */
700  switch ( keylen ) {
701  case ( 128 / 8 ) :
702  rounds = 11;
703  break;
704  case ( 192 / 8 ) :
705  rounds = 13;
706  break;
707  case ( 256 / 8 ) :
708  rounds = 15;
709  break;
710  default:
711  DBGC ( aes, "AES %p unsupported key length (%zd bits)\n",
712  aes, ( keylen * 8 ) );
713  return -EINVAL;
714  }
715  aes->rounds = rounds;
716  enc = aes->encrypt.key;
717  end = enc[rounds].column;
718 
719  /* Copy raw key */
720  memcpy ( enc, key, keylen );
721  prev = enc->column;
722  next = ( ( ( void * ) prev ) + keylen );
723  tmp = next[-1];
724 
725  /* Construct expanded key */
726  while ( next < end ) {
727 
728  /* If this is the first column of an expanded key
729  * block, or the middle column of an AES-256 key
730  * block, then apply the S-box.
731  */
732  if ( ( offset == 0 ) || ( ( offset | keylen ) == 48 ) )
733  tmp = aes_key_sbox ( tmp );
734 
735  /* If this is the first column of an expanded key
736  * block then rotate and apply the round constant.
737  */
738  if ( offset == 0 ) {
739  tmp = aes_key_rotate ( tmp );
740  tmp = aes_key_rcon ( tmp, rcon );
741  rcon = aes_double ( rcon );
742  }
743 
744  /* XOR with previous key column */
745  tmp ^= *prev;
746 
747  /* Store column */
748  *next = tmp;
749 
750  /* Move to next column */
751  offset += sizeof ( *next );
752  if ( offset == keylen )
753  offset = 0;
754  next++;
755  prev++;
756  }
757  DBGC2 ( aes, "AES %p expanded %zd-bit key:\n", aes, ( keylen * 8 ) );
758  DBGC2_HDA ( aes, 0, &aes->encrypt, ( rounds * sizeof ( *enc ) ) );
759 
760  /* Convert to decryption key */
761  memset ( &zero, 0, sizeof ( zero ) );
762  dec = &aes->decrypt.key[ rounds - 1 ];
763  memcpy ( dec--, enc++, sizeof ( *dec ) );
764  while ( dec > aes->decrypt.key ) {
765  /* Perform InvMixColumns (by reusing the encryption
766  * final-round code to perform ShiftRows+SubBytes and
767  * reusing the decryption intermediate-round code to
768  * perform InvShiftRows+InvSubBytes+InvMixColumns, all
769  * with a zero encryption key).
770  */
772  enc++, &temp, &zero );
773  aes_decrypt_rounds ( &temp, dec--, &zero, 1 );
774  }
775  memcpy ( dec--, enc++, sizeof ( *dec ) );
776  DBGC2 ( aes, "AES %p inverted %zd-bit key:\n", aes, ( keylen * 8 ) );
777  DBGC2_HDA ( aes, 0, &aes->decrypt, ( rounds * sizeof ( *dec ) ) );
778 
779  return 0;
780 }
781 
782 /** Basic AES algorithm */
784  .name = "aes",
785  .ctxsize = sizeof ( struct aes_context ),
786  .blocksize = AES_BLOCKSIZE,
787  .alignsize = 0,
788  .authsize = 0,
789  .setkey = aes_setkey,
790  .setiv = cipher_null_setiv,
791  .encrypt = aes_encrypt,
792  .decrypt = aes_decrypt,
793  .auth = cipher_null_auth,
794 };
795 
796 /* AES in Electronic Codebook mode */
797 ECB_CIPHER ( aes_ecb, aes_ecb_algorithm,
799 
800 /* AES in Cipher Block Chaining mode */
801 CBC_CIPHER ( aes_cbc, aes_cbc_algorithm,
803 
804 /* AES in Galois/Counter mode */
805 GCM_CIPHER ( aes_gcm, aes_gcm_algorithm,
unsigned int rounds
Number of rounds.
Definition: aes.h:41
#define EINVAL
Invalid argument.
Definition: errno.h:428
A single AES lookup table entry.
Definition: aes.c:112
union aes_table_entry __attribute__((packed))
struct cipher_algorithm aes_gcm_algorithm
__be32 in[4]
Definition: CIB_PRM.h:35
uint8_t state
State.
Definition: eth_slow.h:47
static u32 rol32(u32 v, int bits)
Rotate 32-bit value left.
Definition: wpa_tkip.c:173
uint32_t next
Next descriptor address.
Definition: myson.h:18
Error codes.
#define __LITTLE_ENDIAN
Constant representing little-endian byte order.
Definition: endian.h:12
static void const void void * dst
Definition: crypto.h:244
#define __BYTE_ORDER
Definition: endian.h:6
static const void const void * scalar
Definition: crypto.h:335
static void const void * src
Definition: crypto.h:244
void cipher_null_setiv(void *ctx __unused, const void *iv __unused, size_t ivlen __unused)
Definition: crypto_null.c:64
#define DBGC(...)
Definition: compiler.h:505
uint8_t byte[8]
Viewed as an array of bytes.
Definition: aes.c:26
Cryptographic API.
AES context.
Definition: aes.h:35
uint32_t buffer
Buffer index (or NETVSC_RNDIS_NO_BUFFER)
Definition: netvsc.h:16
uint32_t zero
Must be zero.
Definition: ntlm.h:24
struct cipher_algorithm aes_algorithm
Basic AES algorithm.
Definition: aes.c:783
struct aes_round_keys encrypt
Encryption keys.
Definition: aes.h:37
static u32 ror32(u32 v, int bits)
Rotate 32-bit value right.
Definition: wpa_tkip.c:161
__be32 out[4]
Definition: CIB_PRM.h:36
Electronic codebook (ECB)
unsigned long tmp
Definition: linux_pci.h:53
static struct aes_table aes_invmixcolumns
AES InvMixColumns lookup table.
Definition: aes.c:149
struct aes_round_keys decrypt
Decryption keys.
Definition: aes.h:39
void * memcpy(void *dest, const void *src, size_t len) __nonnull
static uint32_t aes_key_sbox(uint32_t column)
Apply S-box to key column.
Definition: aes.c:646
static void const void size_t keylen
Definition: crypto.h:233
GCM_CIPHER(aes_gcm, aes_gcm_algorithm, aes_algorithm, struct aes_context, AES_BLOCKSIZE)
FILE_LICENCE(GPL2_OR_LATER_OR_UBDL)
Assertions.
static void aes_invmixcolumns_entry(union aes_table_entry *entry)
Fill in InvMixColumns lookup table entry.
Definition: aes.c:522
assert((readw(&hdr->flags) &(GTF_reading|GTF_writing))==0)
#define container_of(ptr, type, field)
Get containing structure.
Definition: stddef.h:35
static void aes_final(const struct aes_table *table, size_t stride, const union aes_matrix *in, union aes_matrix *out, const union aes_matrix *key)
Perform final round.
Definition: aes.c:362
An AES lookup table.
Definition: aes.c:140
static userptr_t size_t offset
Offset of the first segment within the content.
Definition: deflate.h:259
AES algorithm.
#define DBGC2_HDA(...)
Definition: compiler.h:523
uint8_t byte[8]
Viewed as an array of bytes.
Definition: aes.c:114
static struct aes_table aes_mixcolumns
AES MixColumns lookup table.
Definition: aes.c:146
union aes_table_entry entry[256]
Table entries, indexed by S(N)
Definition: aes.c:26
aes_stride
AES strides.
Definition: aes.c:49
union aes_table_entry entry[256]
Table entries, indexed by S(N)
Definition: aes.c:142
static void aes_generate(void)
Generate AES lookup tables.
Definition: aes.c:572
struct golan_eq_context ctx
Definition: CIB_PRM.h:28
uint32_t column[4]
Viewed as an array of four-byte columns.
Definition: aes.h:14
unsigned char uint8_t
Definition: stdint.h:10
AES matrix.
Definition: aes.h:21
union aes_matrix key[AES_MAX_ROUNDS]
Round keys.
Definition: aes.h:31
CBC_CIPHER(aes_cbc, aes_cbc_algorithm, aes_algorithm, struct aes_context, AES_BLOCKSIZE)
Cipher-block chaining.
unsigned int uint32_t
Definition: stdint.h:12
struct cipher_algorithm aes_cbc_algorithm
uint32_t len
Length.
Definition: ena.h:14
#define DBGC2(...)
Definition: compiler.h:522
uint32_t column[4]
Viewed as an array of four-byte columns.
Definition: aes.h:25
static void aes_decrypt(void *ctx, const void *src, void *dst, size_t len)
Decrypt data.
Definition: aes.c:434
Galois/Counter Mode (GCM)
static void aes_encrypt(void *ctx, const void *src, void *dst, size_t len)
Encrypt data.
Definition: aes.c:398
uint32_t end
Ending offset.
Definition: netvsc.h:18
A cipher algorithm.
Definition: crypto.h:49
uint8_t product
Product string.
Definition: smbios.h:16
Input stride for ShiftRows.
Definition: aes.c:60
#define AES_BLOCKSIZE
AES blocksize.
Definition: aes.h:15
typeof(acpi_finder=acpi_find)
ACPI table finder.
Definition: acpi.c:45
struct cipher_algorithm aes_ecb_algorithm
ECB_CIPHER(aes_ecb, aes_ecb_algorithm, aes_algorithm, struct aes_context, AES_BLOCKSIZE)
const char * name
Algorithm name.
Definition: crypto.h:51
void cipher_null_auth(void *ctx __unused, void *auth __unused)
Definition: crypto_null.c:79
static void aes_mixcolumns_entry(union aes_table_entry *entry)
Fill in MixColumns lookup table entry.
Definition: aes.c:493
Input stride for InvShiftRows.
Definition: aes.c:71
static int aes_setkey(void *ctx, const void *key, size_t keylen)
Set key.
Definition: aes.c:681
String functions.
union @382 key
Sense key.
Definition: crypto.h:284
Bit operations.
void * memset(void *dest, int character, size_t len) __nonnull