x86/dma_bounce: revert r289834 and r289836
The new load_ma implementation can cause invalid pointer dereferences when
used with certain drivers; back it out until the cause is found:

Fatal trap 12: page fault while in kernel mode
cpuid = 11; apic id = 03
fault virtual address   = 0x30
fault code              = supervisor read data, page not present
instruction pointer     = 0x20:0xffffffff808a2d22
stack pointer           = 0x28:0xfffffe07cc737710
frame pointer           = 0x28:0xfffffe07cc737790
code segment            = base 0x0, limit 0xfffff, type 0x1b
                        = DPL 0, pres 1, long 1, def32 0, gran 1
processor eflags        = interrupt enabled, resume, IOPL = 0
current process         = 13 (g_down)
trap number             = 12
panic: page fault
cpuid = 11
KDB: stack backtrace:
#0 0xffffffff80641647 at kdb_backtrace+0x67
#1 0xffffffff80606762 at vpanic+0x182
#2 0xffffffff806067e3 at panic+0x43
#3 0xffffffff8084eef1 at trap_fatal+0x351
#4 0xffffffff8084f0e4 at trap_pfault+0x1e4
#5 0xffffffff8084e82f at trap+0x4bf
#6 0xffffffff80830d57 at calltrap+0x8
#7 0xffffffff8063beab at _bus_dmamap_load_ccb+0x1fb
#8 0xffffffff8063bc51 at bus_dmamap_load_ccb+0x91
#9 0xffffffff8042dcad at ata_dmaload+0x11d
#10 0xffffffff8042df7e at ata_begin_transaction+0x7e
#11 0xffffffff8042c18e at ataaction+0x9ce
#12 0xffffffff802a220f at xpt_run_devq+0x5bf
#13 0xffffffff802a17ad at xpt_action_default+0x94d
#14 0xffffffff802c0024 at adastart+0x8b4
#15 0xffffffff802a2e93 at xpt_run_allocq+0x193
#16 0xffffffff802c0735 at adastrategy+0xf5
#17 0xffffffff80554206 at g_disk_start+0x426
Uptime: 2m29s
royger authored and committed Oct 26, 2015
1 parent 7da4fec commit f25e305
Showing 2 changed files with 21 additions and 193 deletions.
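For context on the fallback this revert installs: the bounce implementation's load_ma hook now points at bus_dmamap_load_ma_triv, which decomposes a vm_page_t array into per-page loads through the physical-address loader. Below is a minimal sketch of that page-by-page decomposition; the helper name is illustrative, the real implementation lives in sys/kern/subr_bus_dma.c, and error handling and segment bookkeeping are simplified here.

/*
 * Illustrative sketch only: the shape of a "trivial" load_ma fallback.
 * Each iteration loads at most the remainder of one page through the
 * physical-address loader, which handles bouncing and segment setup.
 */
static int
load_ma_triv_sketch(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct vm_page **ma, bus_size_t buflen, int ma_offs, int flags,
    bus_dma_segment_t *segs, int *segp)
{
	vm_paddr_t paddr;
	bus_size_t len;
	int error, i;

	error = 0;
	for (i = 0; buflen > 0; i++) {
		/* Clamp each load to the remainder of the current page. */
		len = MIN(PAGE_SIZE - ma_offs, buflen);
		paddr = VM_PAGE_TO_PHYS(ma[i]) + ma_offs;
		error = _bus_dmamap_load_phys(dmat, map, paddr, len,
		    flags, segs, segp);
		if (error != 0)
			break;
		buflen -= len;
		ma_offs = 0;	/* only the first page can start mid-page */
	}
	return (error);
}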
sys/dev/xen/blkfront/blkfront.c (12 changes: 5 additions & 7 deletions)

@@ -293,12 +293,8 @@ xbd_queue_request(struct xbd_softc *sc, struct xbd_command *cm)
 {
 	int error;
 
-	if (cm->cm_bp != NULL)
-		error = bus_dmamap_load_bio(sc->xbd_io_dmat, cm->cm_map,
-		    cm->cm_bp, xbd_queue_cb, cm, 0);
-	else
-		error = bus_dmamap_load(sc->xbd_io_dmat, cm->cm_map,
-		    cm->cm_data, cm->cm_datalen, xbd_queue_cb, cm, 0);
+	error = bus_dmamap_load(sc->xbd_io_dmat, cm->cm_map, cm->cm_data,
+	    cm->cm_datalen, xbd_queue_cb, cm, 0);
 	if (error == EINPROGRESS) {
 		/*
 		 * Maintain queuing order by freezing the queue. The next
@@ -358,6 +354,8 @@ xbd_bio_command(struct xbd_softc *sc)
 	}
 
 	cm->cm_bp = bp;
+	cm->cm_data = bp->bio_data;
+	cm->cm_datalen = bp->bio_bcount;
 	cm->cm_sector_number = (blkif_sector_t)bp->bio_pblkno;
 
 	switch (bp->bio_cmd) {
@@ -1011,7 +1009,7 @@ xbd_instance_create(struct xbd_softc *sc, blkif_sector_t sectors,
 
 	sc->xbd_disk->d_mediasize = sectors * sector_size;
 	sc->xbd_disk->d_maxsize = sc->xbd_max_request_size;
-	sc->xbd_disk->d_flags = DISKFLAG_UNMAPPED_BIO;
+	sc->xbd_disk->d_flags = 0;
 	if ((sc->xbd_flags & (XBDF_FLUSH|XBDF_BARRIER)) != 0) {
 		sc->xbd_disk->d_flags |= DISKFLAG_CANFLUSHCACHE;
 		device_printf(sc->xbd_dev,
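The blkfront hunks above undo the driver-side half of the original change: the disk no longer advertises DISKFLAG_UNMAPPED_BIO, so GEOM only hands the driver bios whose data is mapped into kernel virtual memory, and command setup returns to a plain bus_dmamap_load() of cm_data/cm_datalen. For reference, the deleted branch followed the usual pattern for drivers that accept unmapped bios; this is condensed from the removed lines above, with explanatory comments added:

/* Pattern removed by this revert (condensed from the deleted lines). */
if (cm->cm_bp != NULL)
	/* Unmapped-capable path: let busdma walk the bio's page list. */
	error = bus_dmamap_load_bio(sc->xbd_io_dmat, cm->cm_map,
	    cm->cm_bp, xbd_queue_cb, cm, 0);
else
	/* Mapped path: a plain kernel virtual buffer. */
	error = bus_dmamap_load(sc->xbd_io_dmat, cm->cm_map,
	    cm->cm_data, cm->cm_datalen, xbd_queue_cb, cm, 0);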
sys/x86/x86/busdma_bounce.c (202 changes: 16 additions & 186 deletions)

@@ -79,8 +79,8 @@ struct bounce_page {
 	vm_offset_t	vaddr;		/* kva of bounce buffer */
 	bus_addr_t	busaddr;	/* Physical address */
 	vm_offset_t	datavaddr;	/* kva of client data */
+	vm_page_t	datapage;	/* physical page of client data */
 	vm_offset_t	dataoffs;	/* page offset of client data */
-	vm_page_t	datapage[2];	/* physical page(s) of client data */
 	bus_size_t	datacount;	/* client data count */
 	STAILQ_ENTRY(bounce_page) links;
 };
@@ -135,8 +135,8 @@ static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
 static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
 				int commit);
 static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
-				  vm_offset_t vaddr, bus_addr_t addr1,
-				  bus_addr_t addr2, bus_size_t size);
+				  vm_offset_t vaddr, bus_addr_t addr,
+				  bus_size_t size);
 static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
 int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);
 static void _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
@@ -527,51 +527,6 @@ _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
 	}
 }
 
-static void
-_bus_dmamap_count_ma(bus_dma_tag_t dmat, bus_dmamap_t map, struct vm_page **ma,
-    int ma_offs, bus_size_t buflen, int flags)
-{
-	bus_size_t sg_len, max_sgsize;
-	int page_index;
-	vm_paddr_t paddr;
-
-	if ((map != &nobounce_dmamap && map->pagesneeded == 0)) {
-		CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
-		    "alignment= %d", dmat->common.lowaddr,
-		    ptoa((vm_paddr_t)Maxmem),
-		    dmat->common.boundary, dmat->common.alignment);
-		CTR3(KTR_BUSDMA, "map= %p, nobouncemap= %p, pagesneeded= %d",
-		    map, &nobounce_dmamap, map->pagesneeded);
-
-		/*
-		 * Count the number of bounce pages
-		 * needed in order to complete this transfer
-		 */
-		page_index = 0;
-		while (buflen > 0) {
-			paddr = ma[page_index]->phys_addr + ma_offs;
-			sg_len = PAGE_SIZE - ma_offs;
-			max_sgsize = MIN(buflen, dmat->common.maxsegsz);
-			sg_len = MIN(sg_len, max_sgsize);
-			if (bus_dma_run_filter(&dmat->common, paddr) != 0) {
-				sg_len = roundup2(sg_len,
-				    dmat->common.alignment);
-				sg_len = MIN(sg_len, max_sgsize);
-				KASSERT((sg_len & (dmat->common.alignment - 1))
-				    == 0, ("Segment size is not aligned"));
-				map->pagesneeded++;
-			}
-			if (((ma_offs + sg_len) & ~PAGE_MASK) != 0)
-				page_index++;
-			ma_offs = (ma_offs + sg_len) & PAGE_MASK;
-			KASSERT(buflen >= sg_len,
-			    ("Segment length overruns original buffer"));
-			buflen -= sg_len;
-		}
-		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
-	}
-}
-
 static int
 _bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags)
 {
@@ -677,7 +632,7 @@ bounce_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
 		    map->pagesneeded != 0 &&
 		    bus_dma_run_filter(&dmat->common, curaddr)) {
 			sgsize = MIN(sgsize, PAGE_SIZE - (curaddr & PAGE_MASK));
-			curaddr = add_bounce_page(dmat, map, 0, curaddr, 0,
+			curaddr = add_bounce_page(dmat, map, 0, curaddr,
 			    sgsize);
 		}
 		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
@@ -746,7 +701,7 @@ bounce_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
 		    bus_dma_run_filter(&dmat->common, curaddr)) {
 			sgsize = roundup2(sgsize, dmat->common.alignment);
 			sgsize = MIN(sgsize, max_sgsize);
-			curaddr = add_bounce_page(dmat, map, kvaddr, curaddr, 0,
+			curaddr = add_bounce_page(dmat, map, kvaddr, curaddr,
 			    sgsize);
 		} else {
 			sgsize = MIN(sgsize, max_sgsize);
@@ -765,90 +720,6 @@ bounce_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
 	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
 }
 
-static int
-bounce_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map,
-    struct vm_page **ma, bus_size_t buflen, int ma_offs, int flags,
-    bus_dma_segment_t *segs, int *segp)
-{
-	vm_paddr_t paddr, next_paddr;
-	int error, page_index;
-	struct vm_page *page;
-	bus_size_t sgsize, max_sgsize;
-
-	if (dmat->common.flags & BUS_DMA_KEEP_PG_OFFSET) {
-		/*
-		 * If we have to keep the offset of each page this function
-		 * is not suitable, switch back to bus_dmamap_load_ma_triv
-		 * which is going to do the right thing in this case.
-		 */
-		error = bus_dmamap_load_ma_triv(dmat, map, ma, buflen, ma_offs,
-		    flags, segs, segp);
-		return (error);
-	}
-
-	if (map == NULL)
-		map = &nobounce_dmamap;
-
-	if (segs == NULL)
-		segs = dmat->segments;
-
-	if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) {
-		_bus_dmamap_count_ma(dmat, map, ma, ma_offs, buflen, flags);
-		if (map->pagesneeded != 0) {
-			error = _bus_dmamap_reserve_pages(dmat, map, flags);
-			if (error)
-				return (error);
-		}
-	}
-
-	page_index = 0;
-	page = ma[0];
-	while (buflen > 0) {
-		/*
-		 * Compute the segment size, and adjust counts.
-		 */
-		page = ma[page_index];
-		paddr = page->phys_addr + ma_offs;
-		max_sgsize = MIN(buflen, dmat->common.maxsegsz);
-		sgsize = PAGE_SIZE - ma_offs;
-		if (((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) &&
-		    map->pagesneeded != 0 &&
-		    bus_dma_run_filter(&dmat->common, paddr)) {
-			sgsize = roundup2(sgsize, dmat->common.alignment);
-			sgsize = MIN(sgsize, max_sgsize);
-			KASSERT((sgsize & (dmat->common.alignment - 1)) == 0,
-			    ("Segment size is not aligned"));
-			/*
-			 * Check if two pages of the user provided buffer
-			 * are used.
-			 */
-			if (((ma_offs + sgsize) & ~PAGE_MASK) != 0)
-				next_paddr = ma[page_index + 1]->phys_addr;
-			else
-				next_paddr = 0;
-			paddr = add_bounce_page(dmat, map, 0, paddr,
-			    next_paddr, sgsize);
-		} else {
-			sgsize = MIN(sgsize, max_sgsize);
-		}
-		sgsize = _bus_dmamap_addseg(dmat, map, paddr, sgsize, segs,
-		    segp);
-		if (sgsize == 0)
-			break;
-		KASSERT(buflen >= sgsize,
-		    ("Segment length overruns original buffer"));
-		buflen -= sgsize;
-		if (((ma_offs + sgsize) & ~PAGE_MASK) != 0)
-			page_index++;
-		ma_offs = (ma_offs + sgsize) & PAGE_MASK;
-	}
-
-	/*
-	 * Did we fit?
-	 */
-	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
-}
-
 static void
 bounce_bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
     struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg)
@@ -892,7 +763,6 @@ bounce_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map,
 {
 	struct bounce_page *bpage;
 	vm_offset_t datavaddr, tempvaddr;
-	bus_size_t datacount1, datacount2;
 
 	if ((bpage = STAILQ_FIRST(&map->bpages)) == NULL)
 		return;
@@ -908,35 +778,17 @@ bounce_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map,
 	while (bpage != NULL) {
 		tempvaddr = 0;
 		datavaddr = bpage->datavaddr;
-		datacount1 = bpage->datacount;
 		if (datavaddr == 0) {
 			tempvaddr =
-			    pmap_quick_enter_page(bpage->datapage[0]);
+			    pmap_quick_enter_page(bpage->datapage);
 			datavaddr = tempvaddr | bpage->dataoffs;
-			datacount1 = min(PAGE_SIZE - bpage->dataoffs,
-			    datacount1);
 		}
 
 		bcopy((void *)datavaddr,
-		    (void *)bpage->vaddr, datacount1);
+		    (void *)bpage->vaddr, bpage->datacount);
 
 		if (tempvaddr != 0)
 			pmap_quick_remove_page(tempvaddr);
-
-		if (bpage->datapage[1] == 0)
-			goto next_w;
-
-		/*
-		 * We are dealing with an unmapped buffer that expands
-		 * over two pages.
-		 */
-		datavaddr = pmap_quick_enter_page(bpage->datapage[1]);
-		datacount2 = bpage->datacount - datacount1;
-		bcopy((void *)datavaddr,
-		    (void *)(bpage->vaddr + datacount1), datacount2);
-		pmap_quick_remove_page(datavaddr);
-
-next_w:
 		bpage = STAILQ_NEXT(bpage, links);
 	}
 	dmat->bounce_zone->total_bounced++;
@@ -946,35 +798,17 @@ bounce_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map,
 	while (bpage != NULL) {
 		tempvaddr = 0;
 		datavaddr = bpage->datavaddr;
-		datacount1 = bpage->datacount;
 		if (datavaddr == 0) {
 			tempvaddr =
-			    pmap_quick_enter_page(bpage->datapage[0]);
+			    pmap_quick_enter_page(bpage->datapage);
 			datavaddr = tempvaddr | bpage->dataoffs;
-			datacount1 = min(PAGE_SIZE - bpage->dataoffs,
-			    datacount1);
 		}
 
-		bcopy((void *)bpage->vaddr, (void *)datavaddr,
-		    datacount1);
+		bcopy((void *)bpage->vaddr,
+		    (void *)datavaddr, bpage->datacount);
 
 		if (tempvaddr != 0)
 			pmap_quick_remove_page(tempvaddr);
-
-		if (bpage->datapage[1] == 0)
-			goto next_r;
-
-		/*
-		 * We are dealing with an unmapped buffer that expands
-		 * over two pages.
-		 */
-		datavaddr = pmap_quick_enter_page(bpage->datapage[1]);
-		datacount2 = bpage->datacount - datacount1;
-		bcopy((void *)(bpage->vaddr + datacount1),
-		    (void *)datavaddr, datacount2);
-		pmap_quick_remove_page(datavaddr);
-
-next_r:
 		bpage = STAILQ_NEXT(bpage, links);
 	}
 	dmat->bounce_zone->total_bounced++;
@@ -1138,7 +972,7 @@ reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
 
 static bus_addr_t
 add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
-		bus_addr_t addr1, bus_addr_t addr2, bus_size_t size)
+		bus_addr_t addr, bus_size_t size)
 {
 	struct bounce_zone *bz;
 	struct bounce_page *bpage;
@@ -1168,16 +1002,12 @@ add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
 
 	if (dmat->common.flags & BUS_DMA_KEEP_PG_OFFSET) {
 		/* Page offset needs to be preserved. */
-		bpage->vaddr |= addr1 & PAGE_MASK;
-		bpage->busaddr |= addr1 & PAGE_MASK;
-		KASSERT(addr2 == 0,
-		    ("Trying to bounce multiple pages with BUS_DMA_KEEP_PG_OFFSET"));
+		bpage->vaddr |= addr & PAGE_MASK;
+		bpage->busaddr |= addr & PAGE_MASK;
 	}
 	bpage->datavaddr = vaddr;
-	bpage->datapage[0] = PHYS_TO_VM_PAGE(addr1);
-	KASSERT((addr2 & PAGE_MASK) == 0, ("Second page is not aligned"));
-	bpage->datapage[1] = PHYS_TO_VM_PAGE(addr2);
-	bpage->dataoffs = addr1 & PAGE_MASK;
+	bpage->datapage = PHYS_TO_VM_PAGE(addr);
+	bpage->dataoffs = addr & PAGE_MASK;
 	bpage->datacount = size;
 	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
 	return (bpage->busaddr);
@@ -1249,7 +1079,7 @@ struct bus_dma_impl bus_dma_bounce_impl = {
 	.mem_free = bounce_bus_dmamem_free,
 	.load_phys = bounce_bus_dmamap_load_phys,
 	.load_buffer = bounce_bus_dmamap_load_buffer,
-	.load_ma = bounce_bus_dmamap_load_ma,
+	.load_ma = bus_dmamap_load_ma_triv,
 	.map_waitok = bounce_bus_dmamap_waitok,
 	.map_complete = bounce_bus_dmamap_complete,
 	.map_unload = bounce_bus_dmamap_unload,
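Taken together, the busdma hunks restore the invariant that one bounce page shadows at most one client data page, which is why a single pmap_quick_enter_page() per bounce page is again enough in bounce_bus_dmamap_sync(). A minimal sketch of the restored PREWRITE copy, assembled from the post-revert code above (the standalone helper framing is illustrative, not how the kernel factors it):

/*
 * Sketch of the restored single-page bounce copy (BUS_DMASYNC_PREWRITE
 * direction): copy client data into the bounce buffer before the device
 * reads it. After the revert, datacount never extends past the end of
 * datapage, so one temporary KVA mapping suffices; the removed code
 * needed a second mapping whenever datapage[1] was set.
 */
static void
bounce_copyin_sketch(struct bounce_page *bpage)
{
	vm_offset_t datavaddr, tempvaddr;

	tempvaddr = 0;
	datavaddr = bpage->datavaddr;
	if (datavaddr == 0) {
		/* Unmapped client buffer: borrow a temporary KVA mapping. */
		tempvaddr = pmap_quick_enter_page(bpage->datapage);
		datavaddr = tempvaddr | bpage->dataoffs;
	}
	bcopy((void *)datavaddr, (void *)bpage->vaddr, bpage->datacount);
	if (tempvaddr != 0)
		pmap_quick_remove_page(tempvaddr);
}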
