Annotation of sys/arch/i386/i386/via.c, Revision 1.1.1.1
1.1 nbrk 1: /* $OpenBSD: via.c,v 1.12 2007/08/14 20:10:05 henric Exp $ */
2: /* $NetBSD: machdep.c,v 1.214 1996/11/10 03:16:17 thorpej Exp $ */
3:
4: /*-
5: * Copyright (c) 2003 Jason Wright
6: * Copyright (c) 2003, 2004 Theo de Raadt
7: * All rights reserved.
8: *
9: * Permission to use, copy, modify, and distribute this software for any
10: * purpose with or without fee is hereby granted, provided that the above
11: * copyright notice and this permission notice appear in all copies.
12: *
13: * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14: * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15: * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16: * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17: * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18: * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19: * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20: */
21:
22: #include <sys/param.h>
23: #include <sys/systm.h>
24: #include <sys/signalvar.h>
25: #include <sys/kernel.h>
26: #include <sys/proc.h>
27: #include <sys/user.h>
28: #include <sys/exec.h>
29: #include <sys/buf.h>
30: #include <sys/reboot.h>
31: #include <sys/conf.h>
32: #include <sys/file.h>
33: #include <sys/timeout.h>
34: #include <sys/malloc.h>
35: #include <sys/mbuf.h>
36: #include <sys/extent.h>
37: #include <sys/sysctl.h>
38:
39: #ifdef CRYPTO
40: #include <crypto/cryptodev.h>
41: #include <crypto/rijndael.h>
42: #include <crypto/xform.h>
43: #include <crypto/cryptosoft.h>
44: #endif
45:
46: #include <uvm/uvm_extern.h>
47:
48: #include <machine/cpu.h>
49: #include <machine/cpufunc.h>
50: #include <machine/gdt.h>
51: #include <machine/pio.h>
52: #include <machine/bus.h>
53: #include <machine/psl.h>
54: #include <machine/reg.h>
55: #include <machine/specialreg.h>
56: #include <machine/biosvar.h>
57:
58: #include <dev/rndvar.h>
59:
60: void viac3_rnd(void *);
61:
62:
63: #ifdef CRYPTO
64:
/*
 * Per-session state for the xcrypt AES engine plus optional software HMAC.
 * The expanded key schedules and the IV are operands of the xcrypt
 * instructions; the existing comments say they must be 128-bit aligned.
 * The extra "+ 4" words presumably leave room to realign the arrays —
 * TODO confirm against the PadLock programming guide.
 */
struct viac3_session {
	u_int32_t	ses_ekey[4 * (AES_MAXROUNDS + 1) + 4];	/* 128 bit aligned */
	u_int32_t	ses_dkey[4 * (AES_MAXROUNDS + 1) + 4];	/* 128 bit aligned */
	u_int8_t	ses_iv[16];				/* 128 bit aligned */
	u_int32_t	ses_cw0;	/* precomputed control word (alg/keylen) */
	struct swcr_data *swd;		/* software HMAC state, NULL if none */
	int		ses_klen;	/* AES key length in bits */
	int		ses_used;	/* slot in use (0 = free for reuse) */
};
74:
/*
 * Driver softc.  The leading members are operands handed directly to the
 * xcrypt instruction, which is why they sit at the front (marked 128-bit
 * aligned); op_buf is a bounce buffer for the flat payload copy.
 */
struct viac3_softc {
	u_int32_t	op_cw[4];	/* 128 bit aligned */
	u_int8_t	op_iv[16];	/* 128 bit aligned */
	void		*op_buf;	/* contiguous copy of the payload */

	/* normal softc stuff */
	int32_t		sc_cid;		/* opencrypto driver id */
	int		sc_nsessions;	/* size of sc_sessions array */
	struct viac3_session *sc_sessions;
};
85:
/*
 * A session id packs a driver number in the top 4 bits above a 28-bit
 * session index.
 */
#define VIAC3_SESSION(sid)	((sid) & 0x0fffffff)
#define VIAC3_SID(crd,ses)	(((crd) << 28) | ((ses) & 0x0fffffff))

static struct viac3_softc *vc3_sc;	/* NULL until viac3_crypto_setup() */
extern int i386_has_xcrypt;		/* cpu feature flag, set at attach */

/* ipad/opad constant blocks from the softcrypto code, used for HMAC. */
extern u_int8_t hmac_ipad_buffer[64];
extern u_int8_t hmac_opad_buffer[64];

void viac3_crypto_setup(void);
int viac3_crypto_newsession(u_int32_t *, struct cryptoini *);
int viac3_crypto_process(struct cryptop *);
int viac3_crypto_swauth(struct cryptop *, struct cryptodesc *,
    struct swcr_data *, caddr_t);
int viac3_crypto_encdec(struct cryptop *, struct cryptodesc *,
    struct viac3_session *, struct viac3_softc *, caddr_t);
int viac3_crypto_freesession(u_int64_t);
static __inline void viac3_cbc(void *, void *, void *, void *, int, void *);
104:
105: void
106: viac3_crypto_setup(void)
107: {
108: int algs[CRYPTO_ALGORITHM_MAX + 1];
109:
110: if ((vc3_sc = malloc(sizeof(*vc3_sc), M_DEVBUF, M_NOWAIT)) == NULL)
111: return; /* YYY bitch? */
112: bzero(vc3_sc, sizeof(*vc3_sc));
113:
114: bzero(algs, sizeof(algs));
115: algs[CRYPTO_AES_CBC] = CRYPTO_ALG_FLAG_SUPPORTED;
116: algs[CRYPTO_MD5_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
117: algs[CRYPTO_SHA1_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
118: algs[CRYPTO_RIPEMD160_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
119: algs[CRYPTO_SHA2_256_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
120: algs[CRYPTO_SHA2_384_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
121: algs[CRYPTO_SHA2_512_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
122:
123: vc3_sc->sc_cid = crypto_get_driverid(0);
124: if (vc3_sc->sc_cid < 0)
125: return; /* YYY bitch? */
126:
127: crypto_register(vc3_sc->sc_cid, algs, viac3_crypto_newsession,
128: viac3_crypto_freesession, viac3_crypto_process);
129: }
130:
/*
 * Create a new session for the descriptor chain in 'cri'.  For AES-CBC
 * the key schedules are expanded in software (C3_CRYPT_CWLO_KEYGEN_SW);
 * for the HMAC algorithms the inner/outer hash contexts are precomputed
 * and stored in a swcr_data so softcrypto can finish the job at process
 * time.  On success the session id is written through 'sidp'.
 * Returns 0 or an errno (EINVAL, ENOMEM).
 */
int
viac3_crypto_newsession(u_int32_t *sidp, struct cryptoini *cri)
{
	struct cryptoini *c;
	struct viac3_softc *sc = vc3_sc;
	struct viac3_session *ses = NULL;
	struct auth_hash *axf;
	struct swcr_data *swd;
	int sesn, i, cw0;

	if (sc == NULL || sidp == NULL || cri == NULL)
		return (EINVAL);

	/* Find a free session slot, growing the array if all are in use. */
	if (sc->sc_sessions == NULL) {
		ses = sc->sc_sessions = malloc(sizeof(*ses), M_DEVBUF,
		    M_NOWAIT);
		if (ses == NULL)
			return (ENOMEM);
		sesn = 0;
		sc->sc_nsessions = 1;
	} else {
		for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
			if (sc->sc_sessions[sesn].ses_used == 0) {
				ses = &sc->sc_sessions[sesn];
				break;
			}
		}

		if (ses == NULL) {
			sesn = sc->sc_nsessions;
			ses = malloc((sesn + 1) * sizeof(*ses), M_DEVBUF,
			    M_NOWAIT);
			if (ses == NULL)
				return (ENOMEM);
			bcopy(sc->sc_sessions, ses, sesn * sizeof(*ses));
			/* Scrub the old array before freeing: it holds keys. */
			bzero(sc->sc_sessions, sesn * sizeof(*ses));
			free(sc->sc_sessions, M_DEVBUF);
			sc->sc_sessions = ses;
			ses = &sc->sc_sessions[sesn];
			sc->sc_nsessions++;
		}
	}

	bzero(ses, sizeof(*ses));
	ses->ses_used = 1;

	for (c = cri; c != NULL; c = c->cri_next) {
		switch (c->cri_alg) {
		case CRYPTO_AES_CBC:
			switch (c->cri_klen) {
			case 128:
				cw0 = C3_CRYPT_CWLO_KEY128;
				break;
			case 192:
				cw0 = C3_CRYPT_CWLO_KEY192;
				break;
			case 256:
				cw0 = C3_CRYPT_CWLO_KEY256;
				break;
			default:
				viac3_crypto_freesession(sesn);
				return (EINVAL);
			}
			cw0 |= C3_CRYPT_CWLO_ALG_AES | C3_CRYPT_CWLO_KEYGEN_SW |
			    C3_CRYPT_CWLO_NORMAL;

			/* Random IV to seed CBC chaining for this session. */
			get_random_bytes(ses->ses_iv, sizeof(ses->ses_iv));
			ses->ses_klen = c->cri_klen;
			ses->ses_cw0 = cw0;

			/* Build expanded keys for both directions */
			rijndaelKeySetupEnc(ses->ses_ekey, c->cri_key,
			    c->cri_klen);
			rijndaelKeySetupDec(ses->ses_dkey, c->cri_key,
			    c->cri_klen);
			/*
			 * Byte-swap the round keys; presumably the xcrypt
			 * unit wants them in host (little-endian) order
			 * rather than rijndael's big-endian words — the
			 * hardware doc should confirm.
			 */
			for (i = 0; i < 4 * (AES_MAXROUNDS + 1); i++) {
				ses->ses_ekey[i] = ntohl(ses->ses_ekey[i]);
				ses->ses_dkey[i] = ntohl(ses->ses_dkey[i]);
			}

			break;

		case CRYPTO_MD5_HMAC:
			axf = &auth_hash_hmac_md5_96;
			goto authcommon;
		case CRYPTO_SHA1_HMAC:
			axf = &auth_hash_hmac_sha1_96;
			goto authcommon;
		case CRYPTO_RIPEMD160_HMAC:
			axf = &auth_hash_hmac_ripemd_160_96;
			goto authcommon;
		case CRYPTO_SHA2_256_HMAC:
			axf = &auth_hash_hmac_sha2_256_96;
			goto authcommon;
		case CRYPTO_SHA2_384_HMAC:
			axf = &auth_hash_hmac_sha2_384_96;
			goto authcommon;
		case CRYPTO_SHA2_512_HMAC:
			axf = &auth_hash_hmac_sha2_512_96;
		authcommon:
			MALLOC(swd, struct swcr_data *,
			    sizeof(struct swcr_data), M_CRYPTO_DATA,
			    M_NOWAIT);
			if (swd == NULL) {
				viac3_crypto_freesession(sesn);
				return (ENOMEM);
			}
			bzero(swd, sizeof(struct swcr_data));
			ses->swd = swd;

			swd->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if (swd->sw_ictx == NULL) {
				viac3_crypto_freesession(sesn);
				return (ENOMEM);
			}

			swd->sw_octx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if (swd->sw_octx == NULL) {
				viac3_crypto_freesession(sesn);
				return (ENOMEM);
			}

			/*
			 * Precompute the inner hash context: hash of
			 * (key ^ ipad) padded out to a full block.
			 * NOTE(review): HMAC_BLOCK_LEN is used for every
			 * hash here, including SHA2-384/512 — confirm it
			 * matches their block size.
			 */
			for (i = 0; i < c->cri_klen / 8; i++)
				c->cri_key[i] ^= HMAC_IPAD_VAL;

			axf->Init(swd->sw_ictx);
			axf->Update(swd->sw_ictx, c->cri_key, c->cri_klen / 8);
			axf->Update(swd->sw_ictx, hmac_ipad_buffer,
			    HMAC_BLOCK_LEN - (c->cri_klen / 8));

			/* Flip the key from ipad to opad masking. */
			for (i = 0; i < c->cri_klen / 8; i++)
				c->cri_key[i] ^= (HMAC_IPAD_VAL ^
				    HMAC_OPAD_VAL);

			/* Outer hash context: hash of (key ^ opad). */
			axf->Init(swd->sw_octx);
			axf->Update(swd->sw_octx, c->cri_key, c->cri_klen / 8);
			axf->Update(swd->sw_octx, hmac_opad_buffer,
			    HMAC_BLOCK_LEN - (c->cri_klen / 8));

			/* Restore the caller's key to its original value. */
			for (i = 0; i < c->cri_klen / 8; i++)
				c->cri_key[i] ^= HMAC_OPAD_VAL;

			swd->sw_axf = axf;
			swd->sw_alg = c->cri_alg;

			break;
		default:
			viac3_crypto_freesession(sesn);
			return (EINVAL);
		}
	}

	*sidp = VIAC3_SID(0, sesn);
	return (0);
}
288:
289: int
290: viac3_crypto_freesession(u_int64_t tid)
291: {
292: struct viac3_softc *sc = vc3_sc;
293: struct swcr_data *swd;
294: struct auth_hash *axf;
295: int sesn;
296: u_int32_t sid = ((u_int32_t)tid) & 0xffffffff;
297:
298: if (sc == NULL)
299: return (EINVAL);
300: sesn = VIAC3_SESSION(sid);
301: if (sesn >= sc->sc_nsessions)
302: return (EINVAL);
303:
304: if (sc->sc_sessions[sesn].swd) {
305: swd = sc->sc_sessions[sesn].swd;
306: axf = swd->sw_axf;
307:
308: if (swd->sw_ictx) {
309: bzero(swd->sw_ictx, axf->ctxsize);
310: free(swd->sw_ictx, M_CRYPTO_DATA);
311: }
312: if (swd->sw_octx) {
313: bzero(swd->sw_octx, axf->ctxsize);
314: free(swd->sw_octx, M_CRYPTO_DATA);
315: }
316: FREE(swd, M_CRYPTO_DATA);
317: }
318:
319: bzero(&sc->sc_sessions[sesn], sizeof(sc->sc_sessions[sesn]));
320: return (0);
321: }
322:
/*
 * Run 'rep' 16-byte blocks through the xcrypt-cbc instruction.
 * Register contract (from the asm constraints): %eax = IV, %ebx = key
 * schedule, %ecx = block count, %edx = control word, %esi = source,
 * %edi = destination.  Source and destination may be the same buffer
 * (the caller passes op_buf for both).
 */
static __inline void
viac3_cbc(void *cw, void *src, void *dst, void *key, int rep,
    void *iv)
{
	unsigned int creg0;

	creg0 = rcr0();		/* Permit access to SIMD/FPU path */
	lcr0(creg0 & ~(CR0_EM|CR0_TS));

	/*
	 * Do the deed.  The pushfl/popfl pair precedes the xcrypt op —
	 * presumably required by the PadLock unit before issuing the
	 * instruction; confirm against VIA's programming guide.
	 */
	__asm __volatile("pushfl; popfl");
	__asm __volatile("rep xcrypt-cbc" :
	    : "a" (iv), "b" (key), "c" (rep), "d" (cw), "S" (src), "D" (dst)
	    : "memory", "cc");

	lcr0(creg0);
}
340:
341: int
342: viac3_crypto_swauth(struct cryptop *crp, struct cryptodesc *crd,
343: struct swcr_data *sw, caddr_t buf)
344: {
345: int type;
346:
347: if (crp->crp_flags & CRYPTO_F_IMBUF)
348: type = CRYPTO_BUF_MBUF;
349: else
350: type= CRYPTO_BUF_IOV;
351:
352: return (swcr_authcompute(crp, crd, sw, buf, type));
353: }
354:
355: int
356: viac3_crypto_encdec(struct cryptop *crp, struct cryptodesc *crd,
357: struct viac3_session *ses, struct viac3_softc *sc, caddr_t buf)
358: {
359: u_int32_t *key;
360: int err = 0;
361:
362: if ((crd->crd_len % 16) != 0) {
363: err = EINVAL;
364: return (err);
365: }
366:
367: sc->op_buf = malloc(crd->crd_len, M_DEVBUF, M_NOWAIT);
368: if (sc->op_buf == NULL) {
369: err = ENOMEM;
370: return (err);
371: }
372:
373: if (crd->crd_flags & CRD_F_ENCRYPT) {
374: sc->op_cw[0] = ses->ses_cw0 | C3_CRYPT_CWLO_ENCRYPT;
375: key = ses->ses_ekey;
376: if (crd->crd_flags & CRD_F_IV_EXPLICIT)
377: bcopy(crd->crd_iv, sc->op_iv, 16);
378: else
379: bcopy(ses->ses_iv, sc->op_iv, 16);
380:
381: if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) {
382: if (crp->crp_flags & CRYPTO_F_IMBUF)
383: m_copyback((struct mbuf *)crp->crp_buf,
384: crd->crd_inject, 16, sc->op_iv);
385: else if (crp->crp_flags & CRYPTO_F_IOV)
386: cuio_copyback((struct uio *)crp->crp_buf,
387: crd->crd_inject, 16, sc->op_iv);
388: else
389: bcopy(sc->op_iv,
390: crp->crp_buf + crd->crd_inject, 16);
391: }
392: } else {
393: sc->op_cw[0] = ses->ses_cw0 | C3_CRYPT_CWLO_DECRYPT;
394: key = ses->ses_dkey;
395: if (crd->crd_flags & CRD_F_IV_EXPLICIT)
396: bcopy(crd->crd_iv, sc->op_iv, 16);
397: else {
398: if (crp->crp_flags & CRYPTO_F_IMBUF)
399: m_copydata((struct mbuf *)crp->crp_buf,
400: crd->crd_inject, 16, sc->op_iv);
401: else if (crp->crp_flags & CRYPTO_F_IOV)
402: cuio_copydata((struct uio *)crp->crp_buf,
403: crd->crd_inject, 16, sc->op_iv);
404: else
405: bcopy(crp->crp_buf + crd->crd_inject,
406: sc->op_iv, 16);
407: }
408: }
409:
410: if (crp->crp_flags & CRYPTO_F_IMBUF)
411: m_copydata((struct mbuf *)crp->crp_buf,
412: crd->crd_skip, crd->crd_len, sc->op_buf);
413: else if (crp->crp_flags & CRYPTO_F_IOV)
414: cuio_copydata((struct uio *)crp->crp_buf,
415: crd->crd_skip, crd->crd_len, sc->op_buf);
416: else
417: bcopy(crp->crp_buf + crd->crd_skip, sc->op_buf, crd->crd_len);
418:
419: sc->op_cw[1] = sc->op_cw[2] = sc->op_cw[3] = 0;
420: viac3_cbc(&sc->op_cw, sc->op_buf, sc->op_buf, key,
421: crd->crd_len / 16, sc->op_iv);
422:
423: if (crp->crp_flags & CRYPTO_F_IMBUF)
424: m_copyback((struct mbuf *)crp->crp_buf,
425: crd->crd_skip, crd->crd_len, sc->op_buf);
426: else if (crp->crp_flags & CRYPTO_F_IOV)
427: cuio_copyback((struct uio *)crp->crp_buf,
428: crd->crd_skip, crd->crd_len, sc->op_buf);
429: else
430: bcopy(sc->op_buf, crp->crp_buf + crd->crd_skip,
431: crd->crd_len);
432:
433: /* copy out last block for use as next session IV */
434: if (crd->crd_flags & CRD_F_ENCRYPT) {
435: if (crp->crp_flags & CRYPTO_F_IMBUF)
436: m_copydata((struct mbuf *)crp->crp_buf,
437: crd->crd_skip + crd->crd_len - 16, 16,
438: ses->ses_iv);
439: else if (crp->crp_flags & CRYPTO_F_IOV)
440: cuio_copydata((struct uio *)crp->crp_buf,
441: crd->crd_skip + crd->crd_len - 16, 16,
442: ses->ses_iv);
443: else
444: bcopy(crp->crp_buf + crd->crd_skip +
445: crd->crd_len - 16, ses->ses_iv, 16);
446: }
447:
448: if (sc->op_buf != NULL) {
449: bzero(sc->op_buf, crd->crd_len);
450: free(sc->op_buf, M_DEVBUF);
451: sc->op_buf = NULL;
452: }
453:
454: return (err);
455: }
456:
457: int
458: viac3_crypto_process(struct cryptop *crp)
459: {
460: struct viac3_softc *sc = vc3_sc;
461: struct viac3_session *ses;
462: struct cryptodesc *crd;
463: int sesn, err = 0;
464:
465: if (crp == NULL || crp->crp_callback == NULL) {
466: err = EINVAL;
467: goto out;
468: }
469:
470: sesn = VIAC3_SESSION(crp->crp_sid);
471: if (sesn >= sc->sc_nsessions) {
472: err = EINVAL;
473: goto out;
474: }
475: ses = &sc->sc_sessions[sesn];
476:
477: for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
478: switch (crd->crd_alg) {
479: case CRYPTO_AES_CBC:
480: if ((err = viac3_crypto_encdec(crp, crd, ses, sc,
481: crp->crp_buf)) != 0)
482: goto out;
483: break;
484:
485: case CRYPTO_MD5_HMAC:
486: case CRYPTO_SHA1_HMAC:
487: case CRYPTO_RIPEMD160_HMAC:
488: case CRYPTO_SHA2_256_HMAC:
489: case CRYPTO_SHA2_384_HMAC:
490: case CRYPTO_SHA2_512_HMAC:
491: if ((err = viac3_crypto_swauth(crp, crd, ses->swd,
492: crp->crp_buf)) != 0)
493: goto out;
494: break;
495:
496: default:
497: err = EINVAL;
498: goto out;
499: }
500: }
501: out:
502: crp->crp_etype = err;
503: crypto_done(crp);
504: return (err);
505: }
506:
507: #endif /* CRYPTO */
508:
509: #if defined(I686_CPU)
510: /*
511: * Note, the VIA C3 Nehemiah provides 4 internal 8-byte buffers, which
512: * store random data, and can be accessed a lot quicker than waiting
513: * for new data to be generated. As we are using every 8th bit only
514: * due to whitening. Since the RNG generates in excess of 21KB/s at
515: * its worst, collecting 64 bytes worth of entropy should not affect
516: * things significantly.
517: *
518: * Note, due to some weirdness in the RNG, we need at least 7 bytes
519: * extra on the end of our buffer. Also, there is an outside chance
520: * that the VIA RNG can "wedge", as the generated bit-rate is variable.
521: * We could do all sorts of startup testing and things, but
522: * frankly, I don't really see the point. If the RNG wedges, then the
523: * chances of you having a defective CPU are very high. Let it wedge.
524: *
525: * Adding to the whole confusion, in order to access the RNG, we need
526: * to have FXSR support enabled, and the correct FPU enable bits must
527: * be there to enable the FPU in kernel. It would be nice if all this
528: * mumbo-jumbo was not needed in order to use the RNG. Oh well, life
529: * does go on...
530: */
#define VIAC3_RNG_BUFSIZ	16	/* 32bit words */
struct timeout viac3_rnd_tmo;	/* self-rearming harvest timeout (see viac3_rnd) */
int viac3_rnd_present;		/* nonzero when the C3 RNG exists; set elsewhere
				 * (cpu attach, presumably) — not in this file */
534:
/*
 * Timeout handler: harvest VIAC3_RNG_BUFSIZ words from the C3 RNG via
 * the xstore-rng instruction and feed them to the entropy pool, then
 * re-arm the timeout at roughly 100 Hz.  'v' is the struct timeout
 * itself so the handler can re-add it.
 */
void
viac3_rnd(void *v)
{
	struct timeout *tmo = v;
	unsigned int *p, i, rv, creg0, len = VIAC3_RNG_BUFSIZ;
	/* +2 slack words; the block comment above notes the RNG needs
	 * extra room past the requested length ("at least 7 bytes"). */
	static int buffer[VIAC3_RNG_BUFSIZ + 2];	/* XXX why + 2? */
#ifdef MULTIPROCESSOR
	/* presumably blocks IPIs while the FPU path is enabled — confirm */
	int s = splipi();
#endif

	creg0 = rcr0();		/* Permit access to SIMD/FPU path */
	lcr0(creg0 & ~(CR0_EM|CR0_TS));

	/*
	 * Here we collect the random data from the VIA C3 RNG.  We make
	 * sure that we turn on maximum whitening (%edx[0,1] == "11"), so
	 * that we get the best random data possible.
	 */
	__asm __volatile("rep xstore-rng"
	    : "=a" (rv) : "d" (3), "D" (buffer), "c" (len*sizeof(int))
	    : "memory", "cc");

	lcr0(creg0);

#ifdef MULTIPROCESSOR
	splx(s);
#endif

	/* Feed each harvested word into the kernel entropy pool. */
	for (i = 0, p = buffer; i < VIAC3_RNG_BUFSIZ; i++, p++)
		add_true_randomness(*p);

	/* Re-arm: every hz/100 ticks, but at least one tick. */
	timeout_add(tmo, (hz > 100) ? (hz / 100) : 1);
}
568:
569: #endif /* defined(I686_CPU) */
CVSweb