Index: intio.c
===================================================================
RCS file: /cvsroot/src/sys/arch/x68k/dev/intio.c,v
retrieving revision 1.43
diff -u -p -r1.43 intio.c
--- intio.c	27 Jan 2012 18:53:06 -0000	1.43
+++ intio.c	4 Oct 2012 13:07:53 -0000
@@ -30,6 +30,8 @@
  * NetBSD/x68k internal I/O virtual bus.
  */
 
+#include "opt_m68k_arch.h"
+
 #include <sys/cdefs.h>
 __KERNEL_RCSID(0, "$NetBSD: intio.c,v 1.43 2012/01/27 18:53:06 para Exp $");
 
@@ -42,6 +44,7 @@ __KERNEL_RCSID(0, "$NetBSD: intio.c,v 1.
 #include
 #include
+#include <m68k/cacheops.h>
 #include
 #include
 
@@ -673,6 +676,12 @@ _intio_bus_dmamap_sync(bus_dma_tag_t t,
     bus_size_t len, int ops)
 {
 	struct intio_dma_cookie *cookie = map->x68k_dm_cookie;
+#if defined(M68040) || defined(M68060)
+	bus_addr_t p, e, ps, pe;
+	bus_size_t seglen;
+	bus_dma_segment_t *seg;
+	int i;
+#endif
 
 	/*
 	 * Mixing PRE and POST operations is not allowed.
@@ -691,10 +700,10 @@ _intio_bus_dmamap_sync(bus_dma_tag_t t,
 #endif
 
 	/*
-	 * If we're not bouncing, just return; nothing to do.
+	 * Handle bouncing ops.
 	 */
-	if ((cookie->id_flags & ID_IS_BOUNCING) == 0)
-		return;
+	if ((cookie->id_flags & ID_IS_BOUNCING) != 0)
+	{
 
 	switch (cookie->id_buftype) {
 	case ID_BUFTYPE_LINEAR:
@@ -792,6 +801,206 @@ _intio_bus_dmamap_sync(bus_dma_tag_t t,
 		printf("unknown buffer type %d\n", cookie->id_buftype);
 		panic("_intio_bus_dmamap_sync");
 	}
+	}
+
+	/*
+	 * Handle cache ops.
+	 */
+#if defined(M68020) || defined(M68030)
+#if defined(M68040) || defined(M68060)
+	if (cputype == CPU_68020 || cputype == CPU_68030)
+#endif
+		/* assume no L2 physical cache */
+		return;
+#endif
+
+#if defined(M68040) || defined(M68060)
+#if 0	/* no COHERENT flag on x68k */
+	/* If the whole DMA map is uncached, do nothing. */
+	if ((map->_dm_flags & BUS_DMA_COHERENT) != 0)
+		return;
+#endif
+
+	/* Short-circuit for unsupported `ops' */
+	if ((ops & (BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE)) == 0)
+		return;
+
+	/*
+	 * flush/purge the cache.
+	 */
+	for (i = 0; i < map->dm_nsegs && len != 0; i++) {
+		seg = &map->dm_segs[i];
+		if (seg->ds_len <= offset) {
+			/* Segment irrelevant - before requested offset */
+			offset -= seg->ds_len;
+			continue;
+		}
+
+		/*
+		 * Now at the first segment to sync; nail
+		 * each segment until we have exhausted the
+		 * length.
+		 */
+		seglen = seg->ds_len - offset;
+		if (seglen > len)
+			seglen = len;
+
+#if 0
+		/* Ignore cache-inhibited segments */
+		if ((seg->_ds_flags & BUS_DMA_COHERENT) != 0)
+			continue;
+#endif
+
+		ps = seg->ds_addr + offset;
+		pe = ps + seglen;
+
+		if (ops & BUS_DMASYNC_PREWRITE) {
+			p = ps & ~CACHELINE_MASK;
+			e = (pe + CACHELINE_MASK) & ~CACHELINE_MASK;
+
+			/* flush cacheline */
+			while ((p < e) && (p & (CACHELINE_SIZE * 8 - 1)) != 0) {
+				DCFL(p);
+				p += CACHELINE_SIZE;
+			}
+
+			/* flush cachelines per 128 bytes */
+			while ((p & PAGE_MASK) != 0 && p + CACHELINE_SIZE * 8 <= e) {
+				DCFL(p);
+				p += CACHELINE_SIZE;
+				DCFL(p);
+				p += CACHELINE_SIZE;
+				DCFL(p);
+				p += CACHELINE_SIZE;
+				DCFL(p);
+				p += CACHELINE_SIZE;
+				DCFL(p);
+				p += CACHELINE_SIZE;
+				DCFL(p);
+				p += CACHELINE_SIZE;
+				DCFL(p);
+				p += CACHELINE_SIZE;
+				DCFL(p);
+				p += CACHELINE_SIZE;
+			}
+
+			/* flush page */
+			while (p + PAGE_SIZE <= e) {
+				DCFP(p);
+				p += PAGE_SIZE;
+			}
+
+			/* flush cachelines per 128 bytes */
+			while (p + CACHELINE_SIZE * 8 <= e) {
+				DCFL(p);
+				p += CACHELINE_SIZE;
+				DCFL(p);
+				p += CACHELINE_SIZE;
+				DCFL(p);
+				p += CACHELINE_SIZE;
+				DCFL(p);
+				p += CACHELINE_SIZE;
+				DCFL(p);
+				p += CACHELINE_SIZE;
+				DCFL(p);
+				p += CACHELINE_SIZE;
+				DCFL(p);
+				p += CACHELINE_SIZE;
+				DCFL(p);
+				p += CACHELINE_SIZE;
+			}
+
+			/* flush cacheline */
+			while (p < e) {
+				DCFL(p);
+				p += CACHELINE_SIZE;
+			}
+		}
+
+		/*
+		 * Normally, the `PREREAD' flag instructs us to purge the
+		 * cache for the specified offset and length. However, if
+		 * the offset/length is not aligned to a cacheline boundary,
+		 * we may end up purging some legitimate data from the
+		 * start/end of the cache. In such a case, *flush* the
+		 * cachelines at the start and end of the required region.
+		 */
+		else if (ops & BUS_DMASYNC_PREREAD) {
+			/* flush cacheline on start boundary */
+			if (ps & CACHELINE_MASK) {
+				DCFL(ps & ~CACHELINE_MASK);
+			}
+
+			p = (ps + CACHELINE_MASK) & ~CACHELINE_MASK;
+			e = pe & ~CACHELINE_MASK;
+
+			/* purge cacheline */
+			while ((p < e) && (p & (CACHELINE_SIZE * 8 - 1)) != 0) {
+				DCPL(p);
+				p += CACHELINE_SIZE;
+			}
+
+			/* purge cachelines per 128 bytes */
+			while ((p & PAGE_MASK) != 0 && p + CACHELINE_SIZE * 8 <= e) {
+				DCPL(p);
+				p += CACHELINE_SIZE;
+				DCPL(p);
+				p += CACHELINE_SIZE;
+				DCPL(p);
+				p += CACHELINE_SIZE;
+				DCPL(p);
+				p += CACHELINE_SIZE;
+				DCPL(p);
+				p += CACHELINE_SIZE;
+				DCPL(p);
+				p += CACHELINE_SIZE;
+				DCPL(p);
+				p += CACHELINE_SIZE;
+				DCPL(p);
+				p += CACHELINE_SIZE;
+			}
+
+			/* purge page */
+			while (p + PAGE_SIZE <= e) {
+				DCPP(p);
+				p += PAGE_SIZE;
+			}
+
+			/* purge cachelines per 128 bytes */
+			while (p + CACHELINE_SIZE * 8 <= e) {
+				DCPL(p);
+				p += CACHELINE_SIZE;
+				DCPL(p);
+				p += CACHELINE_SIZE;
+				DCPL(p);
+				p += CACHELINE_SIZE;
+				DCPL(p);
+				p += CACHELINE_SIZE;
+				DCPL(p);
+				p += CACHELINE_SIZE;
+				DCPL(p);
+				p += CACHELINE_SIZE;
+				DCPL(p);
+				p += CACHELINE_SIZE;
+				DCPL(p);
+				p += CACHELINE_SIZE;
+			}
+
+			/* purge cacheline */
+			while (p < e) {
+				DCPL(p);
+				p += CACHELINE_SIZE;
+			}
+
+			/* flush cacheline on end boundary */
+			if (p < pe) {
+				DCFL(p);
+			}
+		}
+		offset = 0;
+		len -= seglen;
+	}
+#endif /* defined(M68040) || defined(M68060) */
 }
 
 /*
Index: vs.c
===================================================================
RCS file: /cvsroot/src/sys/arch/x68k/dev/vs.c,v
retrieving revision 1.35
diff -u -p -r1.35 vs.c
--- vs.c	23 Nov 2011 23:07:30 -0000	1.35
+++ vs.c	4 Oct 2012 13:07:53 -0000
@@ -257,29 +257,43 @@ static int
 vs_dmaintr(void *hdl)
 {
 	struct vs_softc *sc;
+	struct vs_dma *vd;
 
 	DPRINTF(2, ("vs_dmaintr\n"));
 	sc = hdl;
+	vd = sc->sc_dmas;
 	mutex_spin_enter(&sc->sc_intr_lock);
 	if (sc->sc_pintr) {
+		bus_dmamap_sync(vd->vd_dmat, vd->vd_map,
+		    sc->sc_current.dmap, sc->sc_current.blksize,
+		    BUS_DMASYNC_POSTWRITE);
 		/* start next transfer */
 		sc->sc_current.dmap += sc->sc_current.blksize;
 		if (sc->sc_current.dmap + sc->sc_current.blksize >
 		    sc->sc_current.bufsize)
 			sc->sc_current.dmap -= sc->sc_current.bufsize;
+		bus_dmamap_sync(vd->vd_dmat, vd->vd_map,
+		    sc->sc_current.dmap, sc->sc_current.blksize,
+		    BUS_DMASYNC_PREWRITE);
 		dmac_start_xfer_offset(sc->sc_dma_ch->ch_softc,
 		    sc->sc_current.xfer,
 		    sc->sc_current.dmap,
 		    sc->sc_current.blksize);
 		sc->sc_pintr(sc->sc_parg);
 	} else if (sc->sc_rintr) {
+		bus_dmamap_sync(vd->vd_dmat, vd->vd_map,
+		    sc->sc_current.dmap, sc->sc_current.blksize,
+		    BUS_DMASYNC_POSTREAD);
 		/* start next transfer */
 		sc->sc_current.dmap += sc->sc_current.blksize;
 		if (sc->sc_current.dmap + sc->sc_current.blksize >
 		    sc->sc_current.bufsize)
 			sc->sc_current.dmap -= sc->sc_current.bufsize;
+		bus_dmamap_sync(vd->vd_dmat, vd->vd_map,
+		    sc->sc_current.dmap, sc->sc_current.blksize,
+		    BUS_DMASYNC_PREREAD);
 		dmac_start_xfer_offset(sc->sc_dma_ch->ch_softc,
 		    sc->sc_current.xfer,
 		    sc->sc_current.dmap,
@@ -544,6 +558,8 @@ vs_trigger_output(void *hdl, void *start
 	xf->dx_device = sc->sc_addr + MSM6258_DATA*2 + 1;
 	dmac_load_xfer(chan->ch_softc, xf);
 
+	bus_dmamap_sync(vd->vd_dmat, vd->vd_map, 0, sc->sc_current.blksize,
+	    BUS_DMASYNC_PREWRITE);
 	dmac_start_xfer_offset(chan->ch_softc, xf, 0, sc->sc_current.blksize);
 
 	bus_space_write_1(sc->sc_iot, sc->sc_ioh, MSM6258_STAT, 2);
@@ -591,6 +607,8 @@ vs_trigger_input(void *hdl, void *start,
 	xf->dx_device = sc->sc_addr + MSM6258_DATA*2 + 1;
 	dmac_load_xfer(chan->ch_softc, xf);
 
+	bus_dmamap_sync(vd->vd_dmat, vd->vd_map, 0, sc->sc_current.blksize,
+	    BUS_DMASYNC_PREREAD);
 	dmac_start_xfer_offset(chan->ch_softc, xf, 0, sc->sc_current.blksize);
 
 	bus_space_write_1(sc->sc_iot, sc->sc_ioh, MSM6258_STAT, 4);
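
Two notes on the cache handling in _intio_bus_dmamap_sync() may help review.
First, the PREREAD path purges only cache lines that lie wholly inside the
requested region and *flushes* the partial lines at either boundary: purging a
line that is shared with adjacent data would discard legitimate dirty bytes.
The following standalone sketch models just that boundary handling; it is not
part of the patch. It assumes the 68040's 16-byte cache lines, uses printf()
in place of the real DCFL/DCPL primitives, and model_sync_preread(), CLSIZE
and CLMASK are names invented for the illustration.

#include <stdio.h>
#include <stdint.h>

#define CLSIZE	16u			/* assumed 68040 cache line size */
#define CLMASK	(CLSIZE - 1)

static void
model_sync_preread(uint32_t ps, uint32_t len)
{
	uint32_t pe = ps + len;
	uint32_t p, e;

	/*
	 * A partial line at the start is flushed, not purged, so that
	 * bystander data sharing the line is written back, not lost.
	 */
	if (ps & CLMASK)
		printf("flush 0x%08x\n", (unsigned)(ps & ~CLMASK));

	p = (ps + CLMASK) & ~CLMASK;	/* round the start up */
	e = pe & ~CLMASK;		/* round the end down */

	/* Lines wholly inside [p, e) are safe to purge outright. */
	while (p < e) {
		printf("purge 0x%08x\n", (unsigned)p);
		p += CLSIZE;
	}

	/* Same treatment for a partial line at the end. */
	if (p < pe)
		printf("flush 0x%08x\n", (unsigned)p);
}

int
main(void)
{
	/* 0x1008..0x1034: expect flush, purge, purge, flush. */
	model_sync_preread(0x1008, 44);
	return 0;
}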
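
Second, the flush/purge loops step through the region in three granularities:
single lines up to a 128-byte boundary, 128-byte groups up to a page boundary,
whole pages, and then 128-byte groups and single lines again for the tail,
with every group bounded by the end of the region so no line beyond it is
touched. A minimal model of the PREWRITE stepping, again standalone and with
invented names (step_flush, CLSIZE, PGSIZE) and assumed 68040 sizes (16-byte
lines, 4 KiB pages), counting operations instead of issuing DCFL/DCFP:

#include <stdio.h>
#include <stdint.h>

#define CLSIZE	16u			/* assumed cache line size */
#define CLMASK	(CLSIZE - 1)
#define PGSIZE	4096u			/* assumed page size */
#define PGMASK	(PGSIZE - 1)

static void
step_flush(uint32_t ps, uint32_t pe)
{
	uint32_t p = ps & ~CLMASK;		/* round start down */
	uint32_t e = (pe + CLMASK) & ~CLMASK;	/* round end up */
	unsigned lines = 0, pages = 0;

	/* single lines up to the next 128-byte boundary */
	while (p < e && (p & (CLSIZE * 8 - 1)) != 0) {
		lines++;
		p += CLSIZE;
	}
	/*
	 * 128-byte groups up to the next page boundary; each group
	 * must fit below e so nothing past the region is touched.
	 */
	while ((p & PGMASK) != 0 && p + CLSIZE * 8 <= e) {
		lines += 8;
		p += CLSIZE * 8;
	}
	/* whole pages */
	while (p + PGSIZE <= e) {
		pages++;
		p += PGSIZE;
	}
	/* 128-byte groups, then single lines, for the tail */
	while (p + CLSIZE * 8 <= e) {
		lines += 8;
		p += CLSIZE * 8;
	}
	while (p < e) {
		lines++;
		p += CLSIZE;
	}
	printf("[0x%08x, 0x%08x): %u line ops, %u page ops\n",
	    (unsigned)ps, (unsigned)pe, lines, pages);
}

int
main(void)
{
	step_flush(0x10f0, 0x3008);	/* crosses two page boundaries */
	return 0;
}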
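
On the vs.c side, vs_dmaintr() pairs a POSTREAD/POSTWRITE sync on the block
that just completed with a PREREAD/PREWRITE sync on the next block before
handing it back to the DMAC; the block offset walks the play/record buffer as
a ring. A small standalone model of that advance, with struct ring and
ring_advance() invented for the sketch:

#include <stdio.h>

struct ring {
	unsigned dmap;		/* current block offset */
	unsigned blksize;	/* bytes per DMA block */
	unsigned bufsize;	/* total buffer size */
};

static unsigned
ring_advance(struct ring *r)
{
	r->dmap += r->blksize;
	if (r->dmap + r->blksize > r->bufsize)
		r->dmap -= r->bufsize;	/* wrap to the start */
	return r->dmap;
}

int
main(void)
{
	struct ring r = { 0, 1024, 3 * 1024 };
	int i;

	/* Offsets cycle 1024, 2048, 0, 1024, 2048. */
	for (i = 0; i < 5; i++)
		printf("%u\n", ring_advance(&r));
	return 0;
}

Note that the wrap fires before a block would run past bufsize, so a trailing
partial block is never used.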