Annotation of sys/arch/sparc64/sparc64/machdep.c, Revision 1.1.1.1
1.1 nbrk 1: /* $OpenBSD: machdep.c,v 1.93 2007/08/04 16:44:15 kettenis Exp $ */
2: /* $NetBSD: machdep.c,v 1.108 2001/07/24 19:30:14 eeh Exp $ */
3:
4: /*-
5: * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
6: * All rights reserved.
7: *
8: * This code is derived from software contributed to The NetBSD Foundation
9: * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
10: * NASA Ames Research Center.
11: *
12: * Redistribution and use in source and binary forms, with or without
13: * modification, are permitted provided that the following conditions
14: * are met:
15: * 1. Redistributions of source code must retain the above copyright
16: * notice, this list of conditions and the following disclaimer.
17: * 2. Redistributions in binary form must reproduce the above copyright
18: * notice, this list of conditions and the following disclaimer in the
19: * documentation and/or other materials provided with the distribution.
20: * 3. All advertising materials mentioning features or use of this software
21: * must display the following acknowledgement:
22: * This product includes software developed by the NetBSD
23: * Foundation, Inc. and its contributors.
24: * 4. Neither the name of The NetBSD Foundation nor the names of its
25: * contributors may be used to endorse or promote products derived
26: * from this software without specific prior written permission.
27: *
28: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
29: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
30: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
31: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
32: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
33: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
34: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
35: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
36: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
37: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
38: * POSSIBILITY OF SUCH DAMAGE.
39: */
40:
41: /*
42: * Copyright (c) 1992, 1993
43: * The Regents of the University of California. All rights reserved.
44: *
45: * This software was developed by the Computer Systems Engineering group
46: * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
47: * contributed to Berkeley.
48: *
49: * All advertising materials mentioning features or use of this software
50: * must display the following acknowledgement:
51: * This product includes software developed by the University of
52: * California, Lawrence Berkeley Laboratory.
53: *
54: * Redistribution and use in source and binary forms, with or without
55: * modification, are permitted provided that the following conditions
56: * are met:
57: * 1. Redistributions of source code must retain the above copyright
58: * notice, this list of conditions and the following disclaimer.
59: * 2. Redistributions in binary form must reproduce the above copyright
60: * notice, this list of conditions and the following disclaimer in the
61: * documentation and/or other materials provided with the distribution.
62: * 3. Neither the name of the University nor the names of its contributors
63: * may be used to endorse or promote products derived from this software
64: * without specific prior written permission.
65: *
66: * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
67: * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
68: * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
69: * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
70: * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
71: * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
72: * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
73: * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
74: * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
75: * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
76: * SUCH DAMAGE.
77: *
78: * @(#)machdep.c 8.6 (Berkeley) 1/14/94
79: */
80:
81: #include <sys/param.h>
82: #include <sys/extent.h>
83: #include <sys/signal.h>
84: #include <sys/signalvar.h>
85: #include <sys/proc.h>
86: #include <sys/user.h>
87: #include <sys/buf.h>
88: #include <sys/device.h>
89: #include <sys/reboot.h>
90: #include <sys/systm.h>
91: #include <sys/kernel.h>
92: #include <sys/conf.h>
93: #include <sys/file.h>
94: #include <sys/malloc.h>
95: #include <sys/mbuf.h>
96: #include <sys/mount.h>
97: #include <sys/msgbuf.h>
98: #include <sys/syscallargs.h>
99: #include <sys/exec.h>
100:
101: #include <uvm/uvm.h>
102:
103: #include <sys/sysctl.h>
104: #include <sys/exec_elf.h>
105: #include <dev/rndvar.h>
106:
107: #ifdef SYSVMSG
108: #include <sys/msg.h>
109: #endif
110:
111: #define _SPARC_BUS_DMA_PRIVATE
112: #include <machine/autoconf.h>
113: #include <machine/bus.h>
114: #include <machine/frame.h>
115: #include <machine/cpu.h>
116: #include <machine/pmap.h>
117: #include <machine/openfirm.h>
118: #include <machine/sparc64.h>
119:
120: #include <sparc64/sparc64/cache.h>
121:
122: #include "pckbc.h"
123: #include "pckbd.h"
124: #if (NPCKBC > 0) && (NPCKBD == 0)
125: #include <dev/ic/pckbcvar.h>
126: #endif
127:
128: int _bus_dmamap_create(bus_dma_tag_t, bus_dma_tag_t, bus_size_t, int,
129: bus_size_t, bus_size_t, int, bus_dmamap_t *);
130: void _bus_dmamap_destroy(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t);
131: int _bus_dmamap_load(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t, void *,
132: bus_size_t, struct proc *, int);
133: int _bus_dmamap_load_mbuf(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
134: struct mbuf *, int);
135: int _bus_dmamap_load_uio(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
136: struct uio *, int);
137: int _bus_dmamap_load_raw(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
138: bus_dma_segment_t *, int, bus_size_t, int);
139: void _bus_dmamap_unload(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t);
140: void _bus_dmamap_sync(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
141: bus_addr_t, bus_size_t, int);
142:
143: int _bus_dmamem_alloc(bus_dma_tag_t, bus_dma_tag_t tag, bus_size_t size,
144: bus_size_t alignment, bus_size_t boundary,
145: bus_dma_segment_t *segs, int nsegs, int *rsegs, int flags);
146:
147: void _bus_dmamem_free(bus_dma_tag_t tag, bus_dma_tag_t,
148: bus_dma_segment_t *segs, int nsegs);
149: int _bus_dmamem_map(bus_dma_tag_t tag, bus_dma_tag_t,
150: bus_dma_segment_t *segs, int nsegs, size_t size, caddr_t *kvap,
151: int flags);
152: void _bus_dmamem_unmap(bus_dma_tag_t tag, bus_dma_tag_t, caddr_t kva,
153: size_t size);
154: paddr_t _bus_dmamem_mmap(bus_dma_tag_t tag, bus_dma_tag_t,
155: bus_dma_segment_t *segs, int nsegs, off_t off, int prot, int flags);
156:
157: int _bus_dmamem_alloc_range(bus_dma_tag_t tag, bus_dma_tag_t,
158: bus_size_t size, bus_size_t alignment, bus_size_t boundary,
159: bus_dma_segment_t *segs, int nsegs, int *rsegs, int flags,
160: vaddr_t low, vaddr_t high);
161:
/*
 * The "bus_space_debug" flags used by macros elsewhere.
 * A good set of flags to use when first debugging something is:
 * int bus_space_debug = BSDB_ACCESS | BSDB_ASSERT | BSDB_MAP;
 */
int bus_space_debug = 0;

/* Submap of kernel_map for exec arguments; created in cpu_startup(). */
struct vm_map *exec_map = NULL;
extern vaddr_t avail_end;

/*
 * Declare these as initialized data so we can patch them.
 */
#ifndef	BUFCACHEPERCENT
#define BUFCACHEPERCENT	10
#endif

#ifdef	BUFPAGES
int	bufpages = BUFPAGES;
#else
int	bufpages = 0;	/* 0: sized from bufcachepercent in cpu_startup() */
#endif
int	bufcachepercent = BUFCACHEPERCENT;

int physmem;		/* physical memory in pages (see dumpconf()) */
u_long _randseed;
extern	caddr_t msgbufaddr;

int sparc_led_blink;	/* sysctl CPU_LED_BLINK: blink the LED when set */
int kbd_reset;		/* sysctl CPU_KBDRESET: allow keyboard-driven reset */

#ifdef APERTURE
#ifdef INSECURE
int allowaperture = 1;
#else
int allowaperture = 0;
#endif
#endif

/* Correctable-ECC-error accounting, exported via cpu_sysctl(). */
extern int ceccerrs;
extern int64_t cecclast;

/*
 * Maximum number of DMA segments we'll allow in dmamem_load()
 * routines.  Can be overridden in config files, etc.
 */
#ifndef MAX_DMA_SEGS
#define MAX_DMA_SEGS	20
#endif

/*
 * safepri is a safe priority for sleep to set for a spin-wait
 * during autoconfiguration or after a panic.
 */
int   safepri = 0;

void	blink_led_timeout(void *);
caddr_t	allocsys(caddr_t);
void	dumpsys(void);
void	stackdump(void);
/*
 * Machine-dependent startup code
 *
 * Runs once from main() after pmap bootstrap: prints the kernel version
 * and physical memory, sizes and allocates the kernel tables via the
 * two-pass allocsys() protocol, sets bufpages, creates the exec
 * argument submap, and initializes the buffer cache.
 */
void
cpu_startup()
{
	caddr_t v;
	long sz;
#ifdef DEBUG
	extern int pmapdebug;
	int opmapdebug = pmapdebug;
#endif
	vaddr_t minaddr, maxaddr;
	extern struct user *proc0paddr;

#ifdef DEBUG
	/* Silence pmap tracing while we churn mappings; restored below. */
	pmapdebug = 0;
#endif

	proc0.p_addr = proc0paddr;

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf(version);
	/*identifycpu();*/
	printf("real mem = %lu (%luMB)\n", ctob(physmem),
	    ctob(physmem)/1024/1024);
	/*
	 * Find out how much space we need, allocate it,
	 * and then give everything true virtual addresses.
	 */
	sz = (long)allocsys(NULL);	/* pass 1: only computes the size */
	if ((v = (caddr_t)uvm_km_alloc(kernel_map, round_page(sz))) == 0)
		panic("startup: no room for %lx bytes of tables", sz);
	if (allocsys(v) - v != sz)	/* pass 2 must hand out exactly sz */
		panic("startup: table size inconsistency");

	/*
	 * Determine how many buffers to allocate.
	 * We allocate bufcachepercent% of memory for buffer space.
	 */
	if (bufpages == 0)
		bufpages = physmem * bufcachepercent / 100;

	/*
	 * Allocate a submap for exec arguments.  This map effectively
	 * limits the number of processes exec'ing at any time.
	 */
	minaddr = vm_map_min(kernel_map);
	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    16*NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);

#ifdef DEBUG
	pmapdebug = opmapdebug;
#endif
	printf("avail mem = %lu (%luMB)\n", ptoa(uvmexp.free),
	    ptoa(uvmexp.free)/1024/1024);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();

#if 0
	pmap_redzone();
#endif
}
291:
292: caddr_t
293: allocsys(caddr_t v)
294: {
295: #define valloc(name, type, num) \
296: v = (caddr_t)(((name) = (type *)v) + (num))
297: #ifdef SYSVMSG
298: valloc(msgpool, char, msginfo.msgmax);
299: valloc(msgmaps, struct msgmap, msginfo.msgseg);
300: valloc(msghdrs, struct msg, msginfo.msgtql);
301: valloc(msqids, struct msqid_ds, msginfo.msgmni);
302: #endif
303:
304: return (v);
305: }
306:
307: /*
308: * Set up registers on exec.
309: */
310:
311: #define STACK_OFFSET BIAS
312: #define CPOUTREG(l,v) copyout(&(v), (l), sizeof(v))
313: #undef CCFSZ
314: #define CCFSZ CC64FSZ
315:
316: /* ARGSUSED */
317: void
318: setregs(p, pack, stack, retval)
319: struct proc *p;
320: struct exec_package *pack;
321: vaddr_t stack;
322: register_t *retval;
323: {
324: struct trapframe64 *tf = p->p_md.md_tf;
325: struct fpstate64 *fs;
326: int64_t tstate;
327: int pstate = PSTATE_USER;
328: Elf_Ehdr *eh = pack->ep_hdr;
329:
330: /*
331: * Setup the process StackGhost cookie which will be XORed into
332: * the return pointer as register windows are over/underflowed.
333: */
334: p->p_addr->u_pcb.pcb_wcookie = ((u_int64_t)arc4random() << 32) |
335: arc4random();
336:
337: /* The cookie needs to guarantee invalid alignment after the XOR. */
338: switch (p->p_addr->u_pcb.pcb_wcookie % 3) {
339: case 0: /* Two lsb's already both set except if the cookie is 0. */
340: p->p_addr->u_pcb.pcb_wcookie |= 0x3;
341: break;
342: case 1: /* Set the lsb. */
343: p->p_addr->u_pcb.pcb_wcookie = 1 |
344: (p->p_addr->u_pcb.pcb_wcookie & ~0x3);
345: break;
346: case 2: /* Set the second most lsb. */
347: p->p_addr->u_pcb.pcb_wcookie = 2 |
348: (p->p_addr->u_pcb.pcb_wcookie & ~0x3);
349: break;
350: }
351:
352: /* Don't allow misaligned code by default */
353: p->p_md.md_flags &= ~MDP_FIXALIGN;
354:
355: /*
356: * Set the registers to 0 except for:
357: * %o6: stack pointer, built in exec())
358: * %tstate: (retain icc and xcc and cwp bits)
359: * %g1: address of PS_STRINGS (used by crt0)
360: * %tpc,%tnpc: entry point of program
361: */
362: /* Check what memory model is requested */
363: switch ((eh->e_flags & EF_SPARCV9_MM)) {
364: default:
365: printf("Unknown memory model %d\n",
366: (eh->e_flags & EF_SPARCV9_MM));
367: /* FALLTHROUGH */
368: case EF_SPARCV9_TSO:
369: pstate = PSTATE_MM_TSO|PSTATE_IE;
370: break;
371: case EF_SPARCV9_PSO:
372: pstate = PSTATE_MM_PSO|PSTATE_IE;
373: break;
374: case EF_SPARCV9_RMO:
375: pstate = PSTATE_MM_RMO|PSTATE_IE;
376: break;
377: }
378:
379: tstate = (ASI_PRIMARY_NO_FAULT<<TSTATE_ASI_SHIFT) |
380: ((pstate)<<TSTATE_PSTATE_SHIFT) |
381: (tf->tf_tstate & TSTATE_CWP);
382: if ((fs = p->p_md.md_fpstate) != NULL) {
383: /*
384: * We hold an FPU state. If we own *the* FPU chip state
385: * we must get rid of it, and the only way to do that is
386: * to save it. In any case, get rid of our FPU state.
387: */
388: if (p == fpproc) {
389: savefpstate(fs);
390: fpproc = NULL;
391: }
392: free((void *)fs, M_SUBPROC);
393: p->p_md.md_fpstate = NULL;
394: }
395: bzero((caddr_t)tf, sizeof *tf);
396: tf->tf_tstate = tstate;
397: tf->tf_global[1] = (u_long)PS_STRINGS;
398: /* %g4 needs to point to the start of the data segment */
399: tf->tf_global[4] = 0;
400: tf->tf_pc = pack->ep_entry & ~3;
401: tf->tf_npc = tf->tf_pc + 4;
402: tf->tf_global[2] = tf->tf_global[7] = tf->tf_pc;
403: stack -= sizeof(struct rwindow);
404: tf->tf_out[6] = stack - STACK_OFFSET;
405: tf->tf_out[7] = NULL;
406: #ifdef NOTDEF_DEBUG
407: printf("setregs: setting tf %p sp %p pc %p\n", (long)tf,
408: (long)tf->tf_out[6], (long)tf->tf_pc);
409: #endif
410: retval[1] = 0;
411: }
412:
#ifdef DEBUG
/* See sigdebug.h */
#include <sparc64/sparc64/sigdebug.h>
int sigdebug = 0x0;	/* SDB_* trace flags for signal delivery */
int sigpid = 0;		/* if nonzero, trace only this pid */
#endif

/*
 * Layout of the frame sendsig() copies onto the user stack; sigreturn
 * reads sf_sc back.  sf_si is present but only filled in when the
 * handler requested SA_SIGINFO (sf_sip then points at it).
 */
struct sigframe {
	int	sf_signo;		/* signal number */
	int	sf_code;		/* signal code (unused) */
	siginfo_t *sf_sip;		/* points to siginfo_t */
	struct	sigcontext sf_sc;	/* actual sigcontext */
	siginfo_t sf_si;
};
427:
/*
 * machine dependent system variables.
 *
 * Dispatch for the machdep.* sysctl tree; `name' has already been
 * stripped down to the CPU_* leaf identifier by the generic code.
 */
int
cpu_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
	int *name;
	u_int namelen;
	void *oldp;
	size_t *oldlenp;
	void *newp;
	size_t newlen;
	struct proc *p;
{
	int oldval, ret;

	/* all sysctl names at this level are terminal */
	if (namelen != 1)
		return (ENOTDIR);		/* overloaded */

	switch (name[0]) {
	case CPU_LED_BLINK:
		oldval = sparc_led_blink;
		ret = sysctl_int(oldp, oldlenp, newp, newlen,
		    &sparc_led_blink);
		/*
		 * If the flag was just turned on, start the blink timer.
		 */
		if (!oldval && sparc_led_blink > oldval)
			blink_led_timeout(NULL);
		return (ret);
	case CPU_ALLOWAPERTURE:
#ifdef APERTURE
		/* With securelevel raised the value may only be lowered. */
		if (securelevel > 0)
			return (sysctl_int_lower(oldp, oldlenp, newp, newlen,
			    &allowaperture));
		else
			return (sysctl_int(oldp, oldlenp, newp, newlen,
			    &allowaperture));
#else
		return (sysctl_rdint(oldp, oldlenp, newp, 0));
#endif
	case CPU_CPUTYPE:
		return (sysctl_rdint(oldp, oldlenp, newp, CPU_SUN4U));
	case CPU_CECCERRORS:
		/* read-only count of correctable ECC errors */
		return (sysctl_rdint(oldp, oldlenp, newp, ceccerrs));
	case CPU_CECCLAST:
		/* read-only datum recorded for the last correctable error */
		return (sysctl_rdquad(oldp, oldlenp, newp, cecclast));
	case CPU_KBDRESET:
		/* read-only once securelevel has been raised */
		if (securelevel > 0)
			return (sysctl_rdint(oldp, oldlenp, newp, kbd_reset));
		return (sysctl_int(oldp, oldlenp, newp, newlen, &kbd_reset));
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}
484:
/*
 * Send an interrupt to process.
 *
 * Build a signal frame (sigcontext plus optional siginfo) on the
 * appropriate user stack, flush the register windows out to user
 * memory, and redirect the trapframe so the process resumes in the
 * signal trampoline with `catcher' in %g1.  If any copyout fails the
 * process has trashed its stack and is killed with SIGILL.
 */
void
sendsig(catcher, sig, mask, code, type, val)
	sig_t catcher;
	int sig, mask;
	u_long code;
	int type;
	union sigval val;
{
	struct proc *p = curproc;
	struct sigacts *psp = p->p_sigacts;
	struct sigframe *fp;		/* user address the frame lands at */
	struct trapframe64 *tf;
	vaddr_t addr;
	struct rwindow *oldsp, *newsp;
	struct sigframe sf;		/* kernel copy, built then copied out */
	int onstack;

	tf = p->p_md.md_tf;
	/* %sp is kept biased by STACK_OFFSET on v9; unbias for our use. */
	oldsp = (struct rwindow *)(u_long)(tf->tf_out[6] + STACK_OFFSET);

	/*
	 * Compute new user stack addresses, subtract off
	 * one signal frame, and align.
	 */
	onstack = psp->ps_sigstk.ss_flags & SS_ONSTACK;

	/* Switch to the alternate stack if configured and not yet active. */
	if ((psp->ps_flags & SAS_ALTSTACK) && !onstack &&
	    (psp->ps_sigonstack & sigmask(sig))) {
		fp = (struct sigframe *)((caddr_t)psp->ps_sigstk.ss_sp +
		    psp->ps_sigstk.ss_size);
		psp->ps_sigstk.ss_flags |= SS_ONSTACK;
	} else
		fp = (struct sigframe *)oldsp;
	/* Allocate an aligned sigframe */
	fp = (struct sigframe *)((long)(fp - 1) & ~0x0f);

	/*
	 * Now set up the signal frame.  We build it in kernel space
	 * and then copy it out.  We probably ought to just build it
	 * directly in user space....
	 */
	sf.sf_signo = sig;
	sf.sf_sip = NULL;

	/*
	 * Build the signal context to be used by sigreturn.
	 */
	sf.sf_sc.sc_onstack = onstack;
	sf.sf_sc.sc_mask = mask;
	/* Save register context. */
	sf.sf_sc.sc_sp = (long)tf->tf_out[6];
	sf.sf_sc.sc_pc = tf->tf_pc;
	sf.sf_sc.sc_npc = tf->tf_npc;
	sf.sf_sc.sc_tstate = tf->tf_tstate; /* XXX */
	sf.sf_sc.sc_g1 = tf->tf_global[1];
	sf.sf_sc.sc_o0 = tf->tf_out[0];

	/* Fill in siginfo only if this handler asked for it. */
	if (psp->ps_siginfo & sigmask(sig)) {
		sf.sf_sip = &fp->sf_si;
		initsiginfo(&sf.sf_si, sig, code, type, val);
	}

	/*
	 * Put the stack in a consistent state before we whack away
	 * at it.  Note that write_user_windows may just dump the
	 * registers into the pcb; we need them in the process's memory.
	 * We also need to make sure that when we start the signal handler,
	 * its %i6 (%fp), which is loaded from the newly allocated stack area,
	 * joins seamlessly with the frame it was in when the signal occurred,
	 * so that the debugger and _longjmp code can back up through it.
	 */
	newsp = (struct rwindow *)((vaddr_t)fp - sizeof(struct rwindow));
	write_user_windows();

	/* XXX do not copyout siginfo if not needed */
	if (rwindow_save(p) || copyout((caddr_t)&sf, (caddr_t)fp, sizeof sf) ||
	    CPOUTREG(&(((struct rwindow *)newsp)->rw_in[6]), tf->tf_out[6])) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
#ifdef DEBUG
		printf("sendsig: stack was trashed trying to send sig %d, "
		    "sending SIGILL\n", sig);
#endif
		sigexit(p, SIGILL);
		/* NOTREACHED */
	}

#ifdef DEBUG
	if (sigdebug & SDB_FOLLOW) {
		printf("sendsig: %s[%d] sig %d scp %p\n",
		    p->p_comm, p->p_pid, sig, &fp->sf_sc);
	}
#endif

	/*
	 * Arrange to continue execution at the code copied out in exec().
	 * It needs the function to call in %g1, and a new stack pointer.
	 */
	addr = p->p_sigcode;
	tf->tf_global[1] = (vaddr_t)catcher;
	tf->tf_pc = addr;
	tf->tf_npc = addr + 4;
	tf->tf_out[6] = (vaddr_t)newsp - STACK_OFFSET;
}
594:
/*
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above),
 * and return to the given trap frame (if there is one).
 * Check carefully to make sure that the user has not
 * modified the state to gain improper privileges or to cause
 * a machine fault.
 */
/* ARGSUSED */
int
sys_sigreturn(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_sigreturn_args /* {
		syscallarg(struct sigcontext *) sigcntxp;
	} */ *uap = v;
	struct sigcontext sc, *scp;
	struct trapframe64 *tf;
	int error = EINVAL;

	/* First ensure consistent stack state (see sendsig). */
	write_user_windows();

	/* Flush any user windows still cached in the pcb to the stack. */
	if (rwindow_save(p)) {
#ifdef DEBUG
		printf("sigreturn: rwindow_save(%p) failed, sending SIGILL\n",
		    p);
#endif
		sigexit(p, SIGILL);
	}
	/* The user's context pointer must be word-aligned and readable. */
	scp = SCARG(uap, sigcntxp);
	if ((vaddr_t)scp & 3 ||
	    (error = copyin((caddr_t)scp, &sc, sizeof sc)) != 0) {
#ifdef DEBUG
		printf("sigreturn: copyin failed: scp=%p\n", scp);
#endif
		return (error);
	}
	/* From here on, work only from the kernel copy. */
	scp = &sc;

	tf = p->p_md.md_tf;
	/*
	 * Only the icc bits in the psr are used, so it need not be
	 * verified.  pc and npc must be multiples of 4.  This is all
	 * that is required; if it holds, just do it.
	 */
	if (((sc.sc_pc | sc.sc_npc) & 3) != 0 ||
	    (sc.sc_pc == 0) || (sc.sc_npc == 0)) {
#ifdef DEBUG
		printf("sigreturn: pc %p or npc %p invalid\n",
		    (void *)(unsigned long)sc.sc_pc,
		    (void *)(unsigned long)sc.sc_npc);
#endif
		return (EINVAL);
	}

	/* take only psr ICC field */
	tf->tf_tstate = (u_int64_t)(tf->tf_tstate & ~TSTATE_CCR) | (scp->sc_tstate & TSTATE_CCR);
	tf->tf_pc = (u_int64_t)scp->sc_pc;
	tf->tf_npc = (u_int64_t)scp->sc_npc;
	tf->tf_global[1] = (u_int64_t)scp->sc_g1;
	tf->tf_out[0] = (u_int64_t)scp->sc_o0;
	tf->tf_out[6] = (u_int64_t)scp->sc_sp;

	/* Restore signal stack. */
	if (sc.sc_onstack & SS_ONSTACK)
		p->p_sigacts->ps_sigstk.ss_flags |= SS_ONSTACK;
	else
		p->p_sigacts->ps_sigstk.ss_flags &= ~SS_ONSTACK;

	/* Restore signal mask. */
	p->p_sigmask = scp->sc_mask & ~sigcantmask;

	return (EJUSTRETURN);
}
673:
int waittime = -1;	/* >= 0 once the filesystem sync has started */
struct pcb dumppcb;	/* register snapshot taken before a crash dump */

/*
 * Halt, reboot or power down the machine.
 *
 * `howto' is a mask of RB_* flags.  Syncs filesystems and updates the
 * TOD clock (unless RB_NOSYNC/RB_TIMEBAD), optionally dumps core
 * (RB_DUMP), then hands control to the OpenBoot PROM to power off,
 * halt, or reboot with an argument string built from the flags.
 */
void
boot(howto)
	int howto;
{
	int i;
	static char str[128];

	/* If system is cold, just halt. */
	if (cold) {
		/* (Unless the user explicitly asked for reboot.) */
		if ((howto & RB_USERREQ) == 0)
			howto |= RB_HALT;
		goto haltsys;
	}

	fb_unblank();
	boothowto = howto;
	if ((howto & RB_NOSYNC) == 0 && waittime < 0) {
		extern struct proc proc0;
		extern int sparc_clock_time_is_ok;

		/* XXX protect against curproc->p_stats.foo refs in sync() */
		if (curproc == NULL)
			curproc = &proc0;
		waittime = 0;
		vfs_shutdown();

		/*
		 * If we've been adjusting the clock, the todr
		 * will be out of synch; adjust it now.
		 * Do this only if the TOD clock has already been read out
		 * successfully by inittodr() or set by an explicit call
		 * to resettodr() (e.g. from settimeofday()).
		 */
		if ((howto & RB_TIMEBAD) == 0 && sparc_clock_time_is_ok) {
			resettodr();
		} else {
			printf("WARNING: not updating battery clock\n");
		}
	}
	(void) splhigh();		/* ??? */

	/* If rebooting and a dump is requested, do it. */
	if (howto & RB_DUMP)
		dumpsys();

haltsys:
	/* Run any shutdown hooks. */
	doshutdownhooks();

	/* If powerdown was requested, do it. */
	if ((howto & RB_POWERDOWN) == RB_POWERDOWN) {
		/* Let the OBP do the work. */
		OF_poweroff();
		printf("WARNING: powerdown failed!\n");
		/*
		 * RB_POWERDOWN implies RB_HALT... fall into it...
		 */
	}

	if (howto & RB_HALT) {
		printf("halted\n\n");
		OF_exit();
		panic("PROM exit failed");
	}

	printf("rebooting\n\n");
	/* Build the boot-argument string ("-sd") handed to the PROM. */
#if 0
	if (user_boot_string && *user_boot_string) {
		i = strlen(user_boot_string);
		if (i > sizeof(str))
			OF_boot(user_boot_string);	/* XXX */
		bcopy(user_boot_string, str, i);
	} else
#endif
	{
		i = 1;
		str[0] = '\0';
	}

	if (howto & RB_SINGLE)
		str[i++] = 's';
	if (howto & RB_KDB)
		str[i++] = 'd';
	if (i > 1) {
		if (str[0] == '\0')
			str[0] = '-';
		str[i] = 0;
	} else
		str[0] = 0;
	OF_boot(str);
	panic("cpu_reboot -- failed");
	/*NOTREACHED*/
}
771:
772: u_long dumpmag = 0x8fca0101; /* magic number for savecore */
773: int dumpsize = 0; /* also for savecore */
774: long dumplo = 0;
775:
776: void
777: dumpconf(void)
778: {
779: int nblks, dumpblks;
780:
781: if (dumpdev == NODEV ||
782: (nblks = (bdevsw[major(dumpdev)].d_psize)(dumpdev)) == 0)
783: return;
784: if (nblks <= ctod(1))
785: return;
786:
787: dumpblks = ctod(physmem) + pmap_dumpsize();
788: if (dumpblks > (nblks - ctod(1)))
789: /*
790: * dump size is too big for the partition.
791: * Note, we safeguard a click at the front for a
792: * possible disk label.
793: */
794: return;
795:
796: /* Put the dump at the end of the partition */
797: dumplo = nblks - dumpblks;
798:
799: /*
800: * savecore(8) expects dumpsize to be the number of pages
801: * of actual core dumped (i.e. excluding the MMU stuff).
802: */
803: dumpsize = physmem;
804: }
805:
806: #define BYTES_PER_DUMP (NBPG) /* must be a multiple of pagesize */
807: static vaddr_t dumpspace;
808:
809: caddr_t
810: reserve_dumppages(p)
811: caddr_t p;
812: {
813:
814: dumpspace = (vaddr_t)p;
815: return (p + BYTES_PER_DUMP);
816: }
817:
818: /*
819: * Write a crash dump.
820: */
821: void
822: dumpsys()
823: {
824: int psize;
825: daddr64_t blkno;
826: int (*dump)(dev_t, daddr64_t, caddr_t, size_t);
827: int error = 0;
828: struct mem_region *mp;
829: extern struct mem_region *mem;
830:
831: /* copy registers to memory */
832: snapshot(&dumppcb);
833: stackdump();
834:
835: if (dumpdev == NODEV)
836: return;
837:
838: /*
839: * For dumps during autoconfiguration,
840: * if dump device has already configured...
841: */
842: if (dumpsize == 0)
843: dumpconf();
844: if (!dumpspace) {
845: printf("\nno address space available, dump not possible\n");
846: return;
847: }
848: if (dumplo <= 0) {
849: printf("\ndump to dev %u,%u not possible\n", major(dumpdev),
850: minor(dumpdev));
851: return;
852: }
853: printf("\ndumping to dev %u,%u offset %ld\n", major(dumpdev),
854: minor(dumpdev), dumplo);
855:
856: psize = (*bdevsw[major(dumpdev)].d_psize)(dumpdev);
857: printf("dump ");
858: if (psize == -1) {
859: printf("area unavailable\n");
860: return;
861: }
862: blkno = dumplo;
863: dump = bdevsw[major(dumpdev)].d_dump;
864:
865: error = pmap_dumpmmu(dump, blkno);
866: blkno += pmap_dumpsize();
867: printf("starting dump, blkno %lld\n", blkno);
868: for (mp = mem; mp->size; mp++) {
869: u_int64_t i = 0, n;
870: paddr_t maddr = mp->start;
871:
872: #if 0
873: /* Remind me: why don't we dump page 0 ? */
874: if (maddr == 0) {
875: /* Skip first page at physical address 0 */
876: maddr += NBPG;
877: i += NBPG;
878: blkno += btodb(NBPG);
879: }
880: #endif
881: for (; i < mp->size; i += n) {
882: n = mp->size - i;
883: if (n > BYTES_PER_DUMP)
884: n = BYTES_PER_DUMP;
885:
886: /* print out how many MBs we have dumped */
887: if (i && (i % (1024*1024)) == 0)
888: printf("%d ", i / (1024*1024));
889: (void) pmap_enter(pmap_kernel(), dumpspace, maddr,
890: VM_PROT_READ, VM_PROT_READ|PMAP_WIRED);
891: pmap_update(pmap_kernel());
892: error = (*dump)(dumpdev, blkno,
893: (caddr_t)dumpspace, (int)n);
894: pmap_remove(pmap_kernel(), dumpspace, dumpspace + n);
895: pmap_update(pmap_kernel());
896: if (error)
897: break;
898: maddr += n;
899: blkno += btodb(n);
900: }
901: }
902:
903: switch (error) {
904:
905: case ENXIO:
906: printf("device bad\n");
907: break;
908:
909: case EFAULT:
910: printf("device not ready\n");
911: break;
912:
913: case EINVAL:
914: printf("area improper\n");
915: break;
916:
917: case EIO:
918: printf("i/o error\n");
919: break;
920:
921: case 0:
922: printf("succeeded\n");
923: break;
924:
925: default:
926: printf("error %d\n", error);
927: break;
928: }
929: }
930:
void trapdump(struct trapframe64*);
/*
 * dump out a trapframe.
 *
 * Debug helper: prints tstate/pc/npc/y followed by the global and
 * output registers of `tf'.
 */
void
trapdump(tf)
	struct trapframe64* tf;
{
	printf("TRAPFRAME: tstate=%llx pc=%llx npc=%llx y=%x\n",
	    (unsigned long long)tf->tf_tstate, (unsigned long long)tf->tf_pc,
	    (unsigned long long)tf->tf_npc, (unsigned)tf->tf_y);
	printf("%%g1-7: %llx %llx %llx %llx %llx %llx %llx\n",
	    (unsigned long long)tf->tf_global[1],
	    (unsigned long long)tf->tf_global[2],
	    (unsigned long long)tf->tf_global[3],
	    (unsigned long long)tf->tf_global[4],
	    (unsigned long long)tf->tf_global[5],
	    (unsigned long long)tf->tf_global[6],
	    (unsigned long long)tf->tf_global[7]);
	printf("%%o0-7: %llx %llx %llx %llx\n %llx %llx %llx %llx\n",
	    (unsigned long long)tf->tf_out[0],
	    (unsigned long long)tf->tf_out[1],
	    (unsigned long long)tf->tf_out[2],
	    (unsigned long long)tf->tf_out[3],
	    (unsigned long long)tf->tf_out[4],
	    (unsigned long long)tf->tf_out[5],
	    (unsigned long long)tf->tf_out[6],
	    (unsigned long long)tf->tf_out[7]);
}
960: /*
961: * get the fp and dump the stack as best we can. don't leave the
962: * current stack page
963: */
964: void
965: stackdump()
966: {
967: struct frame32 *fp = (struct frame32 *)getfp(), *sfp;
968: struct frame64 *fp64;
969:
970: sfp = fp;
971: printf("Frame pointer is at %p\n", fp);
972: printf("Call traceback:\n");
973: while (fp && ((u_long)fp >> PGSHIFT) == ((u_long)sfp >> PGSHIFT)) {
974: if( ((long)fp) & 1 ) {
975: fp64 = (struct frame64*)(((char *)fp)+BIAS);
976: /* 64-bit frame */
977: printf("%llx(%llx, %llx, %llx, %llx, %llx, %llx, %llx) "
978: "fp = %llx\n",
979: (unsigned long long)fp64->fr_pc,
980: (unsigned long long)fp64->fr_arg[0],
981: (unsigned long long)fp64->fr_arg[1],
982: (unsigned long long)fp64->fr_arg[2],
983: (unsigned long long)fp64->fr_arg[3],
984: (unsigned long long)fp64->fr_arg[4],
985: (unsigned long long)fp64->fr_arg[5],
986: (unsigned long long)fp64->fr_arg[6],
987: (unsigned long long)fp64->fr_fp);
988: fp = (struct frame32 *)(u_long)fp64->fr_fp;
989: } else {
990: /* 32-bit frame */
991: printf(" pc = %x args = (%x, %x, %x, %x, %x, %x, %x) "
992: "fp = %x\n", fp->fr_pc, fp->fr_arg[0],
993: fp->fr_arg[1], fp->fr_arg[2], fp->fr_arg[3],
994: fp->fr_arg[4], fp->fr_arg[5], fp->fr_arg[6],
995: fp->fr_fp);
996: fp = (struct frame32*)(u_long)(u_short)fp->fr_fp;
997: }
998: }
999: }
1000:
1001:
1002: /*
1003: * Common function for DMA map creation. May be called by bus-specific
1004: * DMA map creation functions.
1005: */
1006: int
1007: _bus_dmamap_create(t, t0, size, nsegments, maxsegsz, boundary, flags, dmamp)
1008: bus_dma_tag_t t, t0;
1009: bus_size_t size;
1010: int nsegments;
1011: bus_size_t maxsegsz;
1012: bus_size_t boundary;
1013: int flags;
1014: bus_dmamap_t *dmamp;
1015: {
1016: struct sparc_bus_dmamap *map;
1017: void *mapstore;
1018: size_t mapsize;
1019:
1020: /*
1021: * Allocate and initialize the DMA map. The end of the map
1022: * is a variable-sized array of segments, so we allocate enough
1023: * room for them in one shot.
1024: *
1025: * Note we don't preserve the WAITOK or NOWAIT flags. Preservation
1026: * of ALLOCNOW notifies others that we've reserved these resources,
1027: * and they are not to be freed.
1028: *
1029: * The bus_dmamap_t includes one bus_dma_segment_t, hence
1030: * the (nsegments - 1).
1031: */
1032: mapsize = sizeof(struct sparc_bus_dmamap) +
1033: (sizeof(bus_dma_segment_t) * (nsegments - 1));
1034: if ((mapstore = malloc(mapsize, M_DEVBUF,
1035: (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
1036: return (ENOMEM);
1037:
1038: bzero(mapstore, mapsize);
1039: map = (struct sparc_bus_dmamap *)mapstore;
1040: map->_dm_size = size;
1041: map->_dm_segcnt = nsegments;
1042: map->_dm_maxsegsz = maxsegsz;
1043: map->_dm_boundary = boundary;
1044: map->_dm_flags = flags & ~(BUS_DMA_WAITOK | BUS_DMA_NOWAIT |
1045: BUS_DMA_COHERENT | BUS_DMA_NOWRITE | BUS_DMA_NOCACHE);
1046: map->dm_mapsize = 0; /* no valid mappings */
1047: map->dm_nsegs = 0;
1048:
1049: *dmamp = map;
1050: return (0);
1051: }
1052:
1053: /*
1054: * Common function for DMA map destruction. May be called by bus-specific
1055: * DMA map destruction functions.
1056: */
1057: void
1058: _bus_dmamap_destroy(t, t0, map)
1059: bus_dma_tag_t t, t0;
1060: bus_dmamap_t map;
1061:
1062: {
1063: /*
1064: * Unload the map if it is still loaded. This is required
1065: * by the specification (well, the manpage). Higher level
1066: * drivers, if any, should do this too. By the time the
1067: * system gets here, the higher level "destroy" functions
1068: * would probably already have clobbered the data needed
1069: * to do a proper unload.
1070: */
1071: if (map->dm_nsegs)
1072: bus_dmamap_unload(t0, map);
1073:
1074: free(map, M_DEVBUF);
1075: }
1076:
1077: /*
1078: * Common function for loading a DMA map with a linear buffer. May
1079: * be called by bus-specific DMA map load functions.
1080: *
1081: * Most SPARCs have IOMMUs in the bus controllers. In those cases
1082: * they only need one segment and will use virtual addresses for DVMA.
1083: * Those bus controllers should intercept these vectors and should
1084: * *NEVER* call _bus_dmamap_load() which is used only by devices that
1085: * bypass DVMA.
1086: */
int
_bus_dmamap_load(t, t0, map, buf, buflen, p, flags)
	bus_dma_tag_t t, t0;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
{
	bus_size_t sgsize;
	vaddr_t vaddr = (vaddr_t)buf;
	int i;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;

	if (buflen > map->_dm_size)
		return (EFBIG);

	/* Total page span, including the sub-page offset of `buf'. */
	sgsize = round_page(buflen + ((int)vaddr & PGOFSET));

	/*
	 * We always use just one segment.
	 */
	map->dm_mapsize = buflen;
	i = 0;
	map->dm_segs[i].ds_addr = NULL;
	map->dm_segs[i].ds_len = 0;
	while (sgsize > 0 && i < map->_dm_segcnt) {
		paddr_t pa;

		/*
		 * NOTE(review): the return value is discarded; if `vaddr'
		 * is unmapped, `pa' is used uninitialized.  Compare
		 * _bus_dmamap_load_mbuf(), which checks for failure.
		 */
		(void) pmap_extract(pmap_kernel(), vaddr, &pa);
		sgsize -= NBPG;
		vaddr += NBPG;
		if (map->dm_segs[i].ds_len == 0)
			map->dm_segs[i].ds_addr = pa;
		if (pa == (map->dm_segs[i].ds_addr + map->dm_segs[i].ds_len)
		    && ((map->dm_segs[i].ds_len + NBPG) < map->_dm_maxsegsz)) {
			/* Hey, waddyaknow, they're contiguous */
			map->dm_segs[i].ds_len += NBPG;
			continue;
		}
		/*
		 * NOTE(review): when i == _dm_segcnt - 1 this writes one
		 * element past the end of the segment array allocated in
		 * _bus_dmamap_create() — TODO confirm.
		 */
		map->dm_segs[++i].ds_addr = pa;
		map->dm_segs[i].ds_len = NBPG;
	}
	/* Is this what the above comment calls "one segment"? */
	/*
	 * NOTE(review): `i' is the index of the last segment written, not
	 * the count — a fully contiguous buffer leaves dm_nsegs == 0,
	 * which looks off by one.  Per the comment above the function,
	 * IOMMU bus controllers intercept this vector, so this path may
	 * be effectively dead; verify before relying on it.
	 */
	map->dm_nsegs = i;

	/* Mapping is bus dependent */
	return (0);
}
1140:
1141: /*
1142: * Like _bus_dmamap_load(), but for mbufs.
1143: */
1144: int
1145: _bus_dmamap_load_mbuf(t, t0, map, m, flags)
1146: bus_dma_tag_t t, t0;
1147: bus_dmamap_t map;
1148: struct mbuf *m;
1149: int flags;
1150: {
1151: bus_dma_segment_t segs[MAX_DMA_SEGS];
1152: int i;
1153: size_t len;
1154:
1155: /*
1156: * Make sure that on error condition we return "no valid mappings".
1157: */
1158: map->dm_mapsize = 0;
1159: map->dm_nsegs = 0;
1160:
1161: if (m->m_pkthdr.len > map->_dm_size)
1162: return (EINVAL);
1163:
1164: /* Record mbuf for *_unload */
1165: map->_dm_type = _DM_TYPE_MBUF;
1166: map->_dm_source = m;
1167:
1168: i = 0;
1169: len = 0;
1170: while (m) {
1171: vaddr_t vaddr = mtod(m, vaddr_t);
1172: long buflen = (long)m->m_len;
1173:
1174: len += buflen;
1175: while (buflen > 0 && i < MAX_DMA_SEGS) {
1176: paddr_t pa;
1177: long incr;
1178:
1179: incr = min(buflen, NBPG);
1180:
1181: if (pmap_extract(pmap_kernel(), vaddr, &pa) == FALSE) {
1182: #ifdef DIAGNOSTIC
1183: printf("_bus_dmamap_load_mbuf: pmap_extract failed %lx\n",
1184: vaddr);
1185: map->_dm_type = 0;
1186: map->_dm_source = NULL;
1187: #endif
1188: return EINVAL;
1189: }
1190:
1191: buflen -= incr;
1192: vaddr += incr;
1193:
1194: if (i > 0 && pa == (segs[i - 1].ds_addr +
1195: segs[i - 1].ds_len) && ((segs[i - 1].ds_len + incr)
1196: < map->_dm_maxsegsz)) {
1197: /* Hey, waddyaknow, they're contiguous */
1198: segs[i - 1].ds_len += incr;
1199: continue;
1200: }
1201: segs[i].ds_addr = pa;
1202: segs[i].ds_len = incr;
1203: segs[i]._ds_boundary = 0;
1204: segs[i]._ds_align = 0;
1205: segs[i]._ds_mlist = NULL;
1206: i++;
1207: }
1208: m = m->m_next;
1209: if (m && i >= MAX_DMA_SEGS) {
1210: /* Exceeded the size of our dmamap */
1211: map->_dm_type = 0;
1212: map->_dm_source = NULL;
1213: return (EFBIG);
1214: }
1215: }
1216:
1217: return (bus_dmamap_load_raw(t0, map, segs, i,
1218: (bus_size_t)len, flags));
1219: }
1220:
1221: /*
1222: * Like _bus_dmamap_load(), but for uios.
1223: */
1224: int
1225: _bus_dmamap_load_uio(t, t0, map, uio, flags)
1226: bus_dma_tag_t t, t0;
1227: bus_dmamap_t map;
1228: struct uio *uio;
1229: int flags;
1230: {
1231: /*
1232: * XXXXXXX The problem with this routine is that it needs to
1233: * lock the user address space that is being loaded, but there
1234: * is no real way for us to unlock it during the unload process.
1235: * As a result, only UIO_SYSSPACE uio's are allowed for now.
1236: */
1237: bus_dma_segment_t segs[MAX_DMA_SEGS];
1238: int i, j;
1239: size_t len;
1240:
1241: /*
1242: * Make sure that on error condition we return "no valid mappings".
1243: */
1244: map->dm_mapsize = 0;
1245: map->dm_nsegs = 0;
1246:
1247: if (uio->uio_resid > map->_dm_size)
1248: return (EINVAL);
1249:
1250: if (uio->uio_segflg != UIO_SYSSPACE)
1251: return (EOPNOTSUPP);
1252:
1253: /* Record for *_unload */
1254: map->_dm_type = _DM_TYPE_UIO;
1255: map->_dm_source = (void *)uio;
1256:
1257: i = j = 0;
1258: len = 0;
1259: while (j < uio->uio_iovcnt) {
1260: vaddr_t vaddr = (vaddr_t)uio->uio_iov[j].iov_base;
1261: long buflen = (long)uio->uio_iov[j].iov_len;
1262:
1263: len += buflen;
1264: while (buflen > 0 && i < MAX_DMA_SEGS) {
1265: paddr_t pa;
1266: long incr;
1267:
1268: incr = min(buflen, NBPG);
1269: (void) pmap_extract(pmap_kernel(), vaddr, &pa);
1270: buflen -= incr;
1271: vaddr += incr;
1272:
1273: if (i > 0 && pa == (segs[i - 1].ds_addr +
1274: segs[i - 1].ds_len) && ((segs[i - 1].ds_len + incr)
1275: < map->_dm_maxsegsz)) {
1276: /* Hey, waddyaknow, they're contiguous */
1277: segs[i - 1].ds_len += incr;
1278: continue;
1279: }
1280: segs[i].ds_addr = pa;
1281: segs[i].ds_len = incr;
1282: segs[i]._ds_boundary = 0;
1283: segs[i]._ds_align = 0;
1284: segs[i]._ds_mlist = NULL;
1285: i++;
1286: }
1287: j++;
1288: if ((uio->uio_iovcnt - j) && i >= MAX_DMA_SEGS) {
1289: /* Exceeded the size of our dmamap */
1290: map->_dm_type = 0;
1291: map->_dm_source = NULL;
1292: return (EFBIG);
1293: }
1294: }
1295:
1296: return (bus_dmamap_load_raw(t0, map, segs, i, (bus_size_t)len, flags));
1297: }
1298:
1299: /*
1300: * Like _bus_dmamap_load(), but for raw memory allocated with
1301: * bus_dmamem_alloc().
1302: */
1303: int
1304: _bus_dmamap_load_raw(t, t0, map, segs, nsegs, size, flags)
1305: bus_dma_tag_t t, t0;
1306: bus_dmamap_t map;
1307: bus_dma_segment_t *segs;
1308: int nsegs;
1309: bus_size_t size;
1310: int flags;
1311: {
1312:
1313: panic("_bus_dmamap_load_raw: not implemented");
1314: }
1315:
1316: /*
1317: * Common function for unloading a DMA map. May be called by
1318: * bus-specific DMA map unload functions.
1319: */
1320: void
1321: _bus_dmamap_unload(t, t0, map)
1322: bus_dma_tag_t t, t0;
1323: bus_dmamap_t map;
1324: {
1325: /* Mark the mappings as invalid. */
1326: map->dm_mapsize = 0;
1327: map->dm_nsegs = 0;
1328:
1329: }
1330:
1331: /*
1332: * Common function for DMA map synchronization. May be called
1333: * by bus-specific DMA map synchronization functions.
1334: */
1335: void
1336: _bus_dmamap_sync(t, t0, map, offset, len, ops)
1337: bus_dma_tag_t t, t0;
1338: bus_dmamap_t map;
1339: bus_addr_t offset;
1340: bus_size_t len;
1341: int ops;
1342: {
1343: if (ops & (BUS_DMASYNC_PREWRITE | BUS_DMASYNC_POSTREAD))
1344: membar(MemIssue);
1345: }
1346:
1347: extern paddr_t vm_first_phys, vm_num_phys;
1348: /*
1349: * Common function for DMA-safe memory allocation. May be called
1350: * by bus-specific DMA memory allocation functions.
1351: */
1352: int
1353: _bus_dmamem_alloc(t, t0, size, alignment, boundary, segs, nsegs, rsegs, flags)
1354: bus_dma_tag_t t, t0;
1355: bus_size_t size, alignment, boundary;
1356: bus_dma_segment_t *segs;
1357: int nsegs;
1358: int *rsegs;
1359: int flags;
1360: {
1361: vaddr_t low, high;
1362: struct pglist *mlist;
1363: int error;
1364:
1365: /* Always round the size. */
1366: size = round_page(size);
1367: low = vm_first_phys;
1368: high = vm_first_phys + vm_num_phys - PAGE_SIZE;
1369:
1370: if ((mlist = malloc(sizeof(*mlist), M_DEVBUF,
1371: (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
1372: return (ENOMEM);
1373:
1374: /*
1375: * If the bus uses DVMA then ignore boundary and alignment.
1376: */
1377: segs[0]._ds_boundary = boundary;
1378: segs[0]._ds_align = alignment;
1379: if (flags & BUS_DMA_DVMA) {
1380: boundary = 0;
1381: alignment = 0;
1382: }
1383:
1384: /*
1385: * Allocate pages from the VM system.
1386: */
1387: TAILQ_INIT(mlist);
1388: error = uvm_pglistalloc(size, low, high,
1389: alignment, boundary, mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
1390: if (error)
1391: return (error);
1392:
1393: /*
1394: * Compute the location, size, and number of segments actually
1395: * returned by the VM code.
1396: */
1397: segs[0].ds_addr = NULL; /* UPA does not map things */
1398: segs[0].ds_len = size;
1399: *rsegs = 1;
1400:
1401: /*
1402: * Simply keep a pointer around to the linked list, so
1403: * bus_dmamap_free() can return it.
1404: *
1405: * NOBODY SHOULD TOUCH THE pageq FIELDS WHILE THESE PAGES
1406: * ARE IN OUR CUSTODY.
1407: */
1408: segs[0]._ds_mlist = mlist;
1409:
1410: /* The bus driver should do the actual mapping */
1411: return (0);
1412: }
1413:
1414: /*
1415: * Common function for freeing DMA-safe memory. May be called by
1416: * bus-specific DMA memory free functions.
1417: */
1418: void
1419: _bus_dmamem_free(t, t0, segs, nsegs)
1420: bus_dma_tag_t t, t0;
1421: bus_dma_segment_t *segs;
1422: int nsegs;
1423: {
1424:
1425: if (nsegs != 1)
1426: panic("bus_dmamem_free: nsegs = %d", nsegs);
1427:
1428: /*
1429: * Return the list of pages back to the VM system.
1430: */
1431: uvm_pglistfree(segs[0]._ds_mlist);
1432: free(segs[0]._ds_mlist, M_DEVBUF);
1433: }
1434:
1435: /*
1436: * Common function for mapping DMA-safe memory. May be called by
1437: * bus-specific DMA memory map functions.
1438: */
1439: int
1440: _bus_dmamem_map(t, t0, segs, nsegs, size, kvap, flags)
1441: bus_dma_tag_t t, t0;
1442: bus_dma_segment_t *segs;
1443: int nsegs;
1444: size_t size;
1445: caddr_t *kvap;
1446: int flags;
1447: {
1448: vaddr_t va, sva;
1449: int r, cbit;
1450: size_t oversize;
1451: u_long align;
1452:
1453: if (nsegs != 1)
1454: panic("_bus_dmamem_map: nsegs = %d", nsegs);
1455:
1456: cbit = PMAP_NC;
1457: align = PAGE_SIZE;
1458:
1459: size = round_page(size);
1460:
1461: /*
1462: * Find a region of kernel virtual addresses that can accommodate
1463: * our aligment requirements.
1464: */
1465: oversize = size + align - PAGE_SIZE;
1466: r = uvm_map(kernel_map, &sva, oversize, NULL, UVM_UNKNOWN_OFFSET, 0,
1467: UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
1468: UVM_ADV_NORMAL, 0));
1469: if (r != 0)
1470: return (ENOMEM);
1471:
1472: /* Compute start of aligned region */
1473: va = sva;
1474: va += ((segs[0].ds_addr & (align - 1)) + align - va) & (align - 1);
1475:
1476: /* Return excess virtual addresses */
1477: if (va != sva)
1478: uvm_unmap(kernel_map, sva, va);
1479: if (va + size != sva + oversize)
1480: uvm_unmap(kernel_map, va + size, sva + oversize);
1481:
1482:
1483: *kvap = (caddr_t)va;
1484:
1485: return (0);
1486: }
1487:
1488: /*
1489: * Common function for unmapping DMA-safe memory. May be called by
1490: * bus-specific DMA memory unmapping functions.
1491: */
1492: void
1493: _bus_dmamem_unmap(t, t0, kva, size)
1494: bus_dma_tag_t t, t0;
1495: caddr_t kva;
1496: size_t size;
1497: {
1498:
1499: #ifdef DIAGNOSTIC
1500: if ((u_long)kva & PAGE_MASK)
1501: panic("_bus_dmamem_unmap");
1502: #endif
1503:
1504: size = round_page(size);
1505: uvm_unmap(kernel_map, (vaddr_t)kva, (vaddr_t)kva + size);
1506: }
1507:
1508: /*
1509: * Common functin for mmap(2)'ing DMA-safe memory. May be called by
1510: * bus-specific DMA mmap(2)'ing functions.
1511: */
1512: paddr_t
1513: _bus_dmamem_mmap(t, t0, segs, nsegs, off, prot, flags)
1514: bus_dma_tag_t t, t0;
1515: bus_dma_segment_t *segs;
1516: int nsegs;
1517: off_t off;
1518: int prot, flags;
1519: {
1520: int i;
1521:
1522: for (i = 0; i < nsegs; i++) {
1523: #ifdef DIAGNOSTIC
1524: if (off & PGOFSET)
1525: panic("_bus_dmamem_mmap: offset unaligned");
1526: if (segs[i].ds_addr & PGOFSET)
1527: panic("_bus_dmamem_mmap: segment unaligned");
1528: if (segs[i].ds_len & PGOFSET)
1529: panic("_bus_dmamem_mmap: segment size not multiple"
1530: " of page size");
1531: #endif
1532: if (off >= segs[i].ds_len) {
1533: off -= segs[i].ds_len;
1534: continue;
1535: }
1536:
1537: return (atop(segs[i].ds_addr + off));
1538: }
1539:
1540: /* Page not found. */
1541: return (-1);
1542: }
1543:
/*
 * DMA tag for devices hanging directly off mainbus: the generic
 * no-IOMMU implementations above.  The leading NULLs are presumably
 * cookie and parent slots (cf. _mainbus_space_tag below) — verify
 * against struct sparc_bus_dma_tag.
 */
struct sparc_bus_dma_tag mainbus_dma_tag = {
	NULL,
	NULL,
	_bus_dmamap_create,		/* map creation/destruction */
	_bus_dmamap_destroy,
	_bus_dmamap_load,		/* map load/unload/sync */
	_bus_dmamap_load_mbuf,
	_bus_dmamap_load_uio,
	_bus_dmamap_load_raw,
	_bus_dmamap_unload,
	_bus_dmamap_sync,

	_bus_dmamem_alloc,		/* DMA-safe memory management */
	_bus_dmamem_free,
	_bus_dmamem_map,
	_bus_dmamem_unmap,
	_bus_dmamem_mmap
};
1562:
1563:
1564: /*
1565: * Base bus space handlers.
1566: */
/* Forward declarations; installed in _mainbus_space_tag below. */
int sparc_bus_map(bus_space_tag_t, bus_space_tag_t, bus_addr_t, bus_size_t,
    int, bus_space_handle_t *);
int sparc_bus_protect(bus_space_tag_t, bus_space_tag_t, bus_space_handle_t,
    bus_size_t, int);
int sparc_bus_unmap(bus_space_tag_t, bus_space_tag_t, bus_space_handle_t,
    bus_size_t);
bus_addr_t sparc_bus_addr(bus_space_tag_t, bus_space_tag_t,
    bus_space_handle_t);
int sparc_bus_subregion(bus_space_tag_t, bus_space_tag_t, bus_space_handle_t,
    bus_size_t, bus_size_t, bus_space_handle_t *);
paddr_t sparc_bus_mmap(bus_space_tag_t, bus_space_tag_t, bus_addr_t, off_t,
    int, int);
void *sparc_mainbus_intr_establish(bus_space_tag_t, bus_space_tag_t, int, int,
    int, int (*)(void *), void *, const char *);
void sparc_bus_barrier(bus_space_tag_t, bus_space_tag_t, bus_space_handle_t,
    bus_size_t, bus_size_t, int);
int sparc_bus_alloc(bus_space_tag_t, bus_space_tag_t, bus_addr_t, bus_addr_t,
    bus_size_t, bus_size_t, bus_size_t, int, bus_addr_t *,
    bus_space_handle_t *);
void sparc_bus_free(bus_space_tag_t, bus_space_tag_t, bus_space_handle_t,
    bus_size_t);
1588:
1589: int
1590: sparc_bus_map(bus_space_tag_t t, bus_space_tag_t t0, bus_addr_t addr,
1591: bus_size_t size, int flags, bus_space_handle_t *hp)
1592: {
1593: vaddr_t va;
1594: u_int64_t pa;
1595: paddr_t pm_flags = 0;
1596: vm_prot_t pm_prot = VM_PROT_READ;
1597:
1598: if (flags & BUS_SPACE_MAP_PROMADDRESS) {
1599: hp->bh_ptr = addr;
1600: return (0);
1601: }
1602:
1603: if (size == 0) {
1604: char buf[80];
1605: bus_space_render_tag(t0, buf, sizeof buf);
1606: printf("\nsparc_bus_map: zero size on %s", buf);
1607: return (EINVAL);
1608: }
1609:
1610: if ( (LITTLE_ASI(t0->asi) && LITTLE_ASI(t0->sasi)) ||
1611: (PHYS_ASI(t0->asi) != PHYS_ASI(t0->sasi)) ) {
1612: char buf[80];
1613: bus_space_render_tag(t0, buf, sizeof buf);
1614: printf("\nsparc_bus_map: mismatched ASIs on %s: asi=%x sasi=%x",
1615: buf, t0->asi, t0->sasi);
1616: }
1617:
1618: if (PHYS_ASI(t0->asi)) {
1619: #ifdef BUS_SPACE_DEBUG
1620: char buf[80];
1621: bus_space_render_tag(t0, buf, sizeof buf);
1622: BUS_SPACE_PRINTF(BSDB_MAP,
1623: ("\nsparc_bus_map: physical tag %s asi %x sasi %x flags %x "
1624: "paddr %016llx size %016llx",
1625: buf,
1626: (int)t0->asi, (int)t0->sasi, (int)flags,
1627: (unsigned long long)addr, (unsigned long long)size));
1628: #endif /* BUS_SPACE_DEBUG */
1629: if (flags & BUS_SPACE_MAP_LINEAR) {
1630: char buf[80];
1631: bus_space_render_tag(t0, buf, sizeof buf);
1632: printf("\nsparc_bus_map: linear mapping requested on physical bus %s", buf);
1633: return (EINVAL);
1634: }
1635:
1636: hp->bh_ptr = addr;
1637: return (0);
1638: }
1639:
1640: size = round_page(size);
1641:
1642: if (LITTLE_ASI(t0->sasi) && !LITTLE_ASI(t0->asi))
1643: pm_flags |= PMAP_LITTLE;
1644:
1645: if ((flags & BUS_SPACE_MAP_CACHEABLE) == 0)
1646: pm_flags |= PMAP_NC;
1647:
1648: va = uvm_km_valloc(kernel_map, size);
1649: if (va == 0)
1650: return (ENOMEM);
1651:
1652: /* note: preserve page offset */
1653: hp->bh_ptr = va | (addr & PGOFSET);
1654:
1655: pa = trunc_page(addr);
1656: if ((flags & BUS_SPACE_MAP_READONLY) == 0)
1657: pm_prot |= VM_PROT_WRITE;
1658:
1659: #ifdef BUS_SPACE_DEBUG
1660: { /* scope */
1661: char buf[80];
1662: bus_space_render_tag(t0, buf, sizeof buf);
1663: BUS_SPACE_PRINTF(BSDB_MAP, ("\nsparc_bus_map: tag %s type %x "
1664: "flags %x addr %016llx size %016llx virt %llx paddr "
1665: "%016llx", buf, (int)t->default_type, (int) flags,
1666: (unsigned long long)addr, (unsigned long long)size,
1667: (unsigned long long)hp->bh_ptr, (unsigned long long)pa));
1668: }
1669: #endif /* BUS_SPACE_DEBUG */
1670:
1671: do {
1672: BUS_SPACE_PRINTF(BSDB_MAPDETAIL, ("\nsparc_bus_map: phys %llx "
1673: "virt %p hp->bh_ptr %llx", (unsigned long long)pa,
1674: (char *)v, (unsigned long long)hp->bh_ptr));
1675: pmap_enter(pmap_kernel(), va, pa | pm_flags, pm_prot,
1676: pm_prot|PMAP_WIRED);
1677: va += PAGE_SIZE;
1678: pa += PAGE_SIZE;
1679: } while ((size -= PAGE_SIZE) > 0);
1680: pmap_update(pmap_kernel());
1681: return (0);
1682: }
1683:
1684: int
1685: sparc_bus_subregion(bus_space_tag_t tag, bus_space_tag_t tag0,
1686: bus_space_handle_t handle, bus_size_t offset, bus_size_t size,
1687: bus_space_handle_t *nhandlep)
1688: {
1689: *nhandlep = handle;
1690: nhandlep->bh_ptr += offset;
1691: return (0);
1692: }
1693:
1694: /* stolen from uvm_chgkprot() */
1695: /*
1696: * Change protections on kernel pages from addr to addr+len
1697: * (presumably so debugger can plant a breakpoint).
1698: *
1699: * We force the protection change at the pmap level. If we were
1700: * to use vm_map_protect a change to allow writing would be lazily-
1701: * applied meaning we would still take a protection fault, something
1702: * we really don't want to do. It would also fragment the kernel
1703: * map unnecessarily. We cannot use pmap_protect since it also won't
1704: * enforce a write-enable request. Using pmap_enter is the only way
1705: * we can ensure the change takes place properly.
1706: */
1707: int
1708: sparc_bus_protect(bus_space_tag_t t, bus_space_tag_t t0, bus_space_handle_t h,
1709: bus_size_t size, int flags)
1710: {
1711: vm_prot_t prot;
1712: paddr_t pm_flags = 0;
1713: paddr_t pa;
1714: vaddr_t sva, eva;
1715: void* addr = bus_space_vaddr(t0, h);
1716:
1717: if (addr == 0) {
1718: printf("\nsparc_bus_protect: null address");
1719: return (EINVAL);
1720: }
1721:
1722: if (PHYS_ASI(t0->asi)) {
1723: printf("\nsparc_bus_protect: physical ASI");
1724: return (EINVAL);
1725: }
1726:
1727: prot = (flags & BUS_SPACE_MAP_READONLY) ?
1728: VM_PROT_READ : VM_PROT_READ | VM_PROT_WRITE;
1729: if ((flags & BUS_SPACE_MAP_CACHEABLE) == 0)
1730: pm_flags |= PMAP_NC;
1731:
1732: eva = round_page((vaddr_t)addr + size);
1733: for (sva = trunc_page((vaddr_t)addr); sva < eva; sva += PAGE_SIZE) {
1734: /*
1735: * Extract physical address for the page.
1736: * We use a cheezy hack to differentiate physical
1737: * page 0 from an invalid mapping, not that it
1738: * really matters...
1739: */
1740: if (pmap_extract(pmap_kernel(), sva, &pa) == FALSE)
1741: panic("bus_space_protect(): invalid page");
1742: pmap_enter(pmap_kernel(), sva, pa | pm_flags, prot, prot | PMAP_WIRED);
1743: }
1744: pmap_update(pmap_kernel());
1745:
1746: return (0);
1747: }
1748:
1749: int
1750: sparc_bus_unmap(bus_space_tag_t t, bus_space_tag_t t0, bus_space_handle_t bh,
1751: bus_size_t size)
1752: {
1753: vaddr_t va = trunc_page((vaddr_t)bh.bh_ptr);
1754: vaddr_t endva = va + round_page(size);
1755:
1756: if (PHYS_ASI(t0->asi))
1757: return (0);
1758:
1759: pmap_remove(pmap_kernel(), va, endva);
1760: pmap_update(pmap_kernel());
1761: uvm_km_free(kernel_map, va, endva - va);
1762:
1763: return (0);
1764: }
1765:
1766: paddr_t
1767: sparc_bus_mmap(bus_space_tag_t t, bus_space_tag_t t0, bus_addr_t paddr,
1768: off_t off, int prot, int flags)
1769: {
1770: if (PHYS_ASI(t0->asi)) {
1771: printf("\nsparc_bus_mmap: physical ASI");
1772: return (NULL);
1773: }
1774:
1775: /* Devices are un-cached... although the driver should do that */
1776: return ((paddr + off) | PMAP_NC);
1777: }
1778:
1779: bus_addr_t
1780: sparc_bus_addr(bus_space_tag_t t, bus_space_tag_t t0, bus_space_handle_t h)
1781: {
1782: paddr_t addr;
1783:
1784: if (PHYS_ASI(t0->asi))
1785: return h.bh_ptr;
1786:
1787: if (!pmap_extract(pmap_kernel(), h.bh_ptr, &addr))
1788: return (-1);
1789: return addr;
1790: }
1791:
1792: void *
1793: bus_intr_allocate(bus_space_tag_t t, int (*handler)(void *), void *arg,
1794: int number, int pil,
1795: volatile u_int64_t *mapper, volatile u_int64_t *clearer,
1796: const char *what)
1797: {
1798: struct intrhand *ih;
1799:
1800: ih = (struct intrhand *)malloc(sizeof(struct intrhand), M_DEVBUF, M_NOWAIT);
1801: if (ih == NULL)
1802: return (NULL);
1803:
1804: memset(ih, 0, sizeof(struct intrhand));
1805:
1806: ih->ih_fun = handler;
1807: ih->ih_arg = arg;
1808: ih->ih_number = number;
1809: ih->ih_pil = pil;
1810: ih->ih_map = mapper;
1811: ih->ih_clr = clearer;
1812: ih->ih_bus = t;
1813: strlcpy(ih->ih_name, what, sizeof(ih->ih_name));
1814:
1815: return (ih);
1816: }
1817:
1818: void
1819: bus_intr_free(void *arg)
1820: {
1821: free(arg, M_DEVBUF);
1822: }
1823:
1824: void *
1825: sparc_mainbus_intr_establish(bus_space_tag_t t, bus_space_tag_t t0, int number,
1826: int pil, int flags, int (*handler)(void *), void *arg, const char *what)
1827: {
1828: struct intrhand *ih;
1829:
1830: ih = bus_intr_allocate(t0, handler, arg, number, pil, NULL, NULL, what);
1831: if (ih == NULL)
1832: return (NULL);
1833:
1834: intr_establish(ih->ih_pil, ih);
1835:
1836: return (ih);
1837: }
1838:
1839: void
1840: sparc_bus_barrier(bus_space_tag_t t, bus_space_tag_t t0, bus_space_handle_t h,
1841: bus_size_t offset, bus_size_t size, int flags)
1842: {
1843: /*
1844: * We have lots of alternatives depending on whether we're
1845: * synchronizing loads with loads, loads with stores, stores
1846: * with loads, or stores with stores. The only ones that seem
1847: * generic are #Sync and #MemIssue. I'll use #Sync for safety.
1848: */
1849: if (flags == (BUS_SPACE_BARRIER_READ|BUS_SPACE_BARRIER_WRITE))
1850: membar(Sync);
1851: else if (flags == BUS_SPACE_BARRIER_READ)
1852: membar(Sync);
1853: else if (flags == BUS_SPACE_BARRIER_WRITE)
1854: membar(Sync);
1855: else
1856: printf("sparc_bus_barrier: unknown flags\n");
1857: return;
1858: }
1859:
1860: int
1861: sparc_bus_alloc(bus_space_tag_t t, bus_space_tag_t t0, bus_addr_t rs,
1862: bus_addr_t re, bus_size_t s, bus_size_t a, bus_size_t b, int f,
1863: bus_addr_t *ap, bus_space_handle_t *hp)
1864: {
1865: return (ENOTTY);
1866: }
1867:
1868: void
1869: sparc_bus_free(bus_space_tag_t t, bus_space_tag_t t0, bus_space_handle_t h,
1870: bus_size_t s)
1871: {
1872: return;
1873: }
1874:
/* Root bus_space tag; children chain to it via their parent pointer. */
static const struct sparc_bus_space_tag _mainbus_space_tag = {
	NULL,				/* cookie */
	NULL,				/* parent bus tag */
	UPA_BUS_SPACE,			/* type */
	ASI_PRIMARY,			/* asi */
	ASI_PRIMARY,			/* sasi */
	"mainbus",			/* name (see bus_space_render_tag) */
	sparc_bus_alloc,		/* bus_space_alloc */
	sparc_bus_free,			/* bus_space_free */
	sparc_bus_map,			/* bus_space_map */
	sparc_bus_protect,		/* bus_space_protect */
	sparc_bus_unmap,		/* bus_space_unmap */
	sparc_bus_subregion,		/* bus_space_subregion */
	sparc_bus_barrier,		/* bus_space_barrier */
	sparc_bus_mmap,			/* bus_space_mmap */
	sparc_mainbus_intr_establish,	/* bus_intr_establish */
	sparc_bus_addr			/* bus_space_addr */
};
const bus_space_tag_t mainbus_space_tag = &_mainbus_space_tag;
1894:
/* Autoconf driver glue for mainbus (DV_DULL: generic device class). */
struct cfdriver mainbus_cd = {
	NULL, "mainbus", DV_DULL
};
1898:
/*
 * Dispatch helpers for the bus_space_*() wrappers below: walk up the
 * tag hierarchy to the nearest ancestor that implements method `f',
 * then call through it.  The originating tag is always passed along
 * separately as `t0'.
 */
#define _BS_PRECALL(t,f) \
	while (t->f == NULL) \
		t = t->parent;
#define _BS_POSTCALL

#define _BS_CALL(t,f) \
	(*(t)->f)
1906:
1907: int
1908: bus_space_alloc(bus_space_tag_t t, bus_addr_t rs, bus_addr_t re, bus_size_t s,
1909: bus_size_t a, bus_size_t b, int f, bus_addr_t *ap, bus_space_handle_t *hp)
1910: {
1911: const bus_space_tag_t t0 = t;
1912: int ret;
1913:
1914: _BS_PRECALL(t, sparc_bus_alloc);
1915: ret = _BS_CALL(t, sparc_bus_alloc)(t, t0, rs, re, s, a, b, f, ap, hp);
1916: _BS_POSTCALL;
1917: return ret;
1918: }
1919:
1920: void
1921: bus_space_free(bus_space_tag_t t, bus_space_handle_t h, bus_size_t s)
1922: {
1923: const bus_space_tag_t t0 = t;
1924:
1925: _BS_PRECALL(t, sparc_bus_free);
1926: _BS_CALL(t, sparc_bus_free)(t, t0, h, s);
1927: _BS_POSTCALL;
1928: }
1929:
int
bus_space_map(bus_space_tag_t t, bus_addr_t a, bus_size_t s, int f,
    bus_space_handle_t *hp)
{
	const bus_space_tag_t t0 = t;
	int ret;

	/* Dispatch to the nearest ancestor implementing sparc_bus_map. */
	_BS_PRECALL(t, sparc_bus_map);
	ret = _BS_CALL(t, sparc_bus_map)(t, t0, a, s, f, hp);
	_BS_POSTCALL;
#ifdef BUS_SPACE_DEBUG
	if(s == 0) {
		char buf[128];
		bus_space_render_tag(t, buf, sizeof buf);
		printf("\n********** bus_space_map: requesting "
		    "zero-length mapping on bus %p:%s",
		    t, buf);
	}
	/* Record size/tag in the handle so bus_space_assert() can check. */
	hp->bh_flags = 0;
	if (ret == 0) {
		hp->bh_size = s;
		hp->bh_tag = t0;
	} else {
		hp->bh_size = 0;
		hp->bh_tag = NULL;
	}
#endif /* BUS_SPACE_DEBUG */
	return (ret);
}
1959:
1960: int
1961: bus_space_protect(bus_space_tag_t t, bus_space_handle_t h, bus_size_t s, int f)
1962: {
1963: const bus_space_tag_t t0 = t;
1964: int ret;
1965:
1966: _BS_PRECALL(t, sparc_bus_protect);
1967: ret = _BS_CALL(t, sparc_bus_protect)(t, t0, h, s, f);
1968: _BS_POSTCALL;
1969:
1970: return (ret);
1971: }
1972:
int
bus_space_unmap(bus_space_tag_t t, bus_space_handle_t h, bus_size_t s)
{
	const bus_space_tag_t t0 = t;
	int ret;

	/* Dispatch to the nearest ancestor implementing sparc_bus_unmap. */
	_BS_PRECALL(t, sparc_bus_unmap);
	BUS_SPACE_ASSERT(t0, h, 0, 1);
#ifdef BUS_SPACE_DEBUG
	/* Warn if the unmap size disagrees with what bus_space_map saved. */
	if(h.bh_size != s) {
		char buf[128];
		bus_space_render_tag(t0, buf, sizeof buf);
		printf("\n********* bus_space_unmap: %p:%s, map/unmap "
		    "size mismatch (%llx != %llx)",
		    t, buf, h.bh_size, s);
	}
#endif /* BUS_SPACE_DEBUG */
	ret = _BS_CALL(t, sparc_bus_unmap)(t, t0, h, s);
	_BS_POSTCALL;
	return (ret);
}
1994:
int
bus_space_subregion(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
    bus_size_t s, bus_space_handle_t *hp)
{
	const bus_space_tag_t t0 = t;
	int ret;

	/* Dispatch to the nearest ancestor implementing the method. */
	_BS_PRECALL(t, sparc_bus_subregion);
	BUS_SPACE_ASSERT(t0, h, o, 1);
#ifdef BUS_SPACE_DEBUG
	/* Reject subregions that extend past the parent mapping. */
	if(h.bh_size < o + s) {
		char buf[128];
		bus_space_render_tag(t0, buf, sizeof buf);
		printf("\n********** bus_space_subregion: "
		    "%p:%s, %llx < %llx + %llx",
		    t0, buf, h.bh_size, o, s);
		hp->bh_size = 0;
		hp->bh_tag = NULL;
		return (EINVAL);
	}
#endif /* BUS_SPACE_DEBUG */
	ret = _BS_CALL(t, sparc_bus_subregion)(t, t0, h, o, s, hp);
	_BS_POSTCALL;
#ifdef BUS_SPACE_DEBUG
	/* Record size/tag in the new handle for later assertions. */
	if (ret == 0) {
		hp->bh_size = s;
		hp->bh_tag = t0;
	} else {
		hp->bh_size = 0;
		hp->bh_tag = NULL;
	}
#endif /* BUS_SPACE_DEBUG */
	return (ret);
}
2029:
2030: paddr_t
2031: bus_space_mmap(bus_space_tag_t t, bus_addr_t a, off_t o, int p, int f)
2032: {
2033: const bus_space_tag_t t0 = t;
2034: paddr_t ret;
2035:
2036: _BS_PRECALL(t, sparc_bus_mmap);
2037: ret = _BS_CALL(t, sparc_bus_mmap)(t, t0, a, o, p, f);
2038: _BS_POSTCALL;
2039: return (ret);
2040: }
2041:
2042: void *
2043: bus_intr_establish(bus_space_tag_t t, int p, int l, int f, int (*h)(void *),
2044: void *a, const char *w)
2045: {
2046: const bus_space_tag_t t0 = t;
2047: void *ret;
2048:
2049: _BS_PRECALL(t, sparc_intr_establish);
2050: ret = _BS_CALL(t, sparc_intr_establish)(t, t0, p, l, f, h, a, w);
2051: _BS_POSTCALL;
2052: return (ret);
2053: }
2054:
2055: /* XXXX Things get complicated if we use unmapped register accesses. */
2056: void *
2057: bus_space_vaddr(bus_space_tag_t t, bus_space_handle_t h)
2058: {
2059: BUS_SPACE_ASSERT(t, h, 0, 1);
2060: if(t->asi == ASI_PRIMARY || t->asi == ASI_PRIMARY_LITTLE)
2061: return ((void *)(vaddr_t)(h.bh_ptr));
2062:
2063: #ifdef BUS_SPACE_DEBUG
2064: { /* Scope */
2065: char buf[64];
2066: bus_space_render_tag(t, buf, sizeof buf);
2067: printf("\nbus_space_vaddr: no vaddr for %p:%s (asi=%x)",
2068: t, buf, t->asi);
2069: }
2070: #endif
2071:
2072: return (NULL);
2073: }
2074:
2075: void
2076: bus_space_render_tag(bus_space_tag_t t, char* buf, size_t len)
2077: {
2078: if (t == NULL) {
2079: strlcat(buf, "<NULL>", len);
2080: return;
2081: }
2082: buf[0] = '\0';
2083: if (t->parent)
2084: bus_space_render_tag(t->parent, buf, len);
2085:
2086: strlcat(buf, "/", len);
2087: strlcat(buf, t->name, len);
2088: }
2089:
2090: #ifdef BUS_SPACE_DEBUG
2091:
void
bus_space_assert(bus_space_tag_t t, const bus_space_handle_t *h, bus_size_t o,
    int n)
{
	/* Handle must have been created through tag `t'. */
	if (h->bh_tag != t) {
		char buf1[128];
		char buf2[128];
		bus_space_render_tag(t, buf1, sizeof buf1);
		bus_space_render_tag(h->bh_tag, buf2, sizeof buf2);
		printf("\n********** bus_space_assert: wrong tag (%p:%s, "
		    "expecting %p:%s) ", t, buf1, h->bh_tag, buf2);
	}

	/* Offset must lie inside the mapped region recorded at map time. */
	if (o >= h->bh_size) {
		char buf[128];
		bus_space_render_tag(t, buf, sizeof buf);
		printf("\n********** bus_space_assert: bus %p:%s, offset "
		    "(%llx) out of mapping range (%llx) ", t, buf, o,
		    h->bh_size);
	}

	/* Offset must be aligned to the access size `n' (power of two). */
	if (o & (n - 1)) {
		char buf[128];
		bus_space_render_tag(t, buf, sizeof buf);
		printf("\n********** bus_space_assert: bus %p:%s, offset "
		    "(%llx) incorrect alignment (%d) ", t, buf, o, n);
	}
}
2120:
2121: #endif /* BUS_SPACE_DEBUG */
2122:
/* Shared state for the front-panel LED blinker (single global instance). */
struct blink_led_softc {
	SLIST_HEAD(, blink_led) bls_head;	/* registered LEDs */
	int bls_on;				/* current on/off phase */
	struct timeout bls_to;			/* periodic blink timeout */
} blink_sc = { SLIST_HEAD_INITIALIZER(bls_head), 0 };
2128:
2129: void
2130: blink_led_register(struct blink_led *l)
2131: {
2132: if (SLIST_EMPTY(&blink_sc.bls_head)) {
2133: timeout_set(&blink_sc.bls_to, blink_led_timeout, &blink_sc);
2134: blink_sc.bls_on = 0;
2135: if (sparc_led_blink)
2136: timeout_add(&blink_sc.bls_to, 1);
2137: }
2138: SLIST_INSERT_HEAD(&blink_sc.bls_head, l, bl_next);
2139: }
2140:
2141: void
2142: blink_led_timeout(void *vsc)
2143: {
2144: struct blink_led_softc *sc = &blink_sc;
2145: struct blink_led *l;
2146: int t;
2147:
2148: if (SLIST_EMPTY(&sc->bls_head))
2149: return;
2150:
2151: SLIST_FOREACH(l, &sc->bls_head, bl_next) {
2152: (*l->bl_func)(l->bl_arg, sc->bls_on);
2153: }
2154: sc->bls_on = !sc->bls_on;
2155:
2156: if (!sparc_led_blink)
2157: return;
2158:
2159: /*
2160: * Blink rate is:
2161: * full cycle every second if completely idle (loadav = 0)
2162: * full cycle every 2 seconds if loadav = 1
2163: * full cycle every 3 seconds if loadav = 2
2164: * etc.
2165: */
2166: t = (((averunnable.ldavg[0] + FSCALE) * hz) >> (FSHIFT + 1));
2167: timeout_add(&sc->bls_to, t);
2168: }
CVSweb