if (io_32bit) {
#if SUPPORT_VLB_SYNC
if (io_32bit & 2) {
unsigned long flags;
__save_flags(flags); /* local CPU only */
__cli(); /* local CPU only */
do_vlb_sync(IDE_NSECTOR_REG);
outsl(IDE_DATA_REG, buffer, wcount);
__restore_flags(flags); /* local CPU only */
} else
#endif /* SUPPORT_VLB_SYNC */
outsl(IDE_DATA_REG, buffer, wcount);
} else {
#if SUPPORT_SLOW_DATA_PORTS
if (drive->;slow) {
unsigned short *ptr = (unsigned short *) buffer;
while (wcount--) {
outw_p(*ptr++, IDE_DATA_REG);
outw_p(*ptr++, IDE_DATA_REG);
}
} else
#endif /* SUPPORT_SLOW_DATA_PORTS */
outsw(IDE_DATA_REG, buffer, wcount<<1);
}
}
/*
 * ide_multwrite() - feed up to @mcount sectors of write data to @drive.
 *
 * Returns 0 on success, 1 if the buffer-head list ran out before the
 * request's sector count did (list corruption); in that case the request
 * is failed via ide_end_request(0, ...).
 */
int ide_multwrite (ide_drive_t *drive, unsigned int mcount)
{
	ide_hwgroup_t *hwgroup = HWGROUP(drive);
	/*
	 * This may look a bit odd, but remember wrq is a copy of the
	 * request not the original. The pointers are real however so the
	 * bh's are not copies. Remember that or bad stuff will happen
	 *
	 * At the point we are called the drive has asked us for the
	 * data, and its our job to feed it, walking across bh boundaries
	 * if need be.
	 */
	struct request *rq = &hwgroup->wrq;

	do {
		unsigned long flags;
		unsigned int nsect = rq->current_nr_sectors;

		/* Clamp to what the caller asked for in this burst. */
		if (nsect > mcount)
			nsect = mcount;
		mcount -= nsect;	/* mcount: sectors still to be transferred */

		/* nsect<<7: sectors -> 32-bit words (512 bytes / 4 = 128 words). */
		idedisk_output_data(drive, rq->buffer, nsect<<7);
		spin_lock_irqsave(&io_request_lock, flags);	/* Is this really necessary? */
#ifdef CONFIG_BLK_DEV_PDC4030
		rq->sector += nsect;
#endif
		if (((long)(rq->nr_sectors -= nsect)) <= 0) {
			spin_unlock_irqrestore(&io_request_lock, flags);
			break;
		}
		if ((rq->current_nr_sectors -= nsect) == 0) {
			/* Current buffer head consumed: step to the next bh. */
			if ((rq->bh = rq->bh->b_reqnext) != NULL) {
				rq->current_nr_sectors = rq->bh->b_size>>9;
				rq->buffer = rq->bh->b_data;
			} else {
				/* Sectors remain but no bh left: corrupted list. */
				spin_unlock_irqrestore(&io_request_lock, flags);
				printk("%s: buffer list corrupted (%ld, %ld, %d)\n",
					drive->name, rq->current_nr_sectors,
					rq->nr_sectors, nsect);
				ide_end_request(0, hwgroup);
				return 1;
			}
		} else {
			/* Fix the pointer.. we ate data */
			rq->buffer += nsect << 9;
		}
		spin_unlock_irqrestore(&io_request_lock, flags);
	} while (mcount);
	return 0;
}
if (!ide_ack_intr(hwif)) {
spin_unlock_irqrestore(&io_request_lock, flags);
return;
}
if ((handler = hwgroup->;handler) == NULL || hwgroup->;poll_timeout != 0) {
/*
* Not expecting an interrupt from this drive.
* That means this could be:
* (1) an interrupt from another PCI device
* sharing the same PCI INT# as us.
* or (2) a drive just entered sleep or standby mode,
* and is interrupting to let us know.
* or (3) a spurious interrupt of unknown origin.
*
* For PCI, we cannot tell the difference,
* so in that case we just ignore it and hope it goes away.
*/
#ifdef CONFIG_BLK_DEV_IDEPCI
if (IDE_PCI_DEVID_EQ(hwif->;pci_devid, IDE_PCI_DEVID_NULL))
#endif /* CONFIG_BLK_DEV_IDEPCI */
{
/*
* Probably not a shared PCI interrupt,
* so we can safely try to do something about it:
*/
unexpected_intr(irq, hwgroup);
#ifdef CONFIG_BLK_DEV_IDEPCI
} else {
/*
* Whack the status register, just in case we have a leftover pending IRQ.
*/
(void) IN_BYTE(hwif->;io_ports[IDE_STATUS_OFFSET]);
#endif /* CONFIG_BLK_DEV_IDEPCI */
}
spin_unlock_irqrestore(&io_request_lock, flags);
return;
}
drive = hwgroup->;drive;
if (!drive) {
/*
* This should NEVER happen, and there isn't much we could do about it here.
*/
spin_unlock_irqrestore(&io_request_lock, flags);
return;
}
if (!drive_is_ready(drive)) {
/*
* This happens regularly when we share a PCI IRQ with another device.
* Unfortunately, it can also happen with some buggy drives that trigger
* the IRQ before their status register is up to date. Hopefully we have
* enough advance overhead that the latter isn't a problem.
*/
spin_unlock_irqrestore(&io_request_lock, flags);
return;
}
if (!hwgroup->;busy) {
hwgroup->;busy = 1; /* paranoia */
printk("%s: ide_intr: hwgroup->;busy was 0 ??\n", drive->;name);
}
hwgroup->;handler = NULL;
del_timer(&hwgroup->;timer);
spin_unlock(&io_request_lock);
if (drive->;unmask)
ide__sti(); /* local CPU only */
startstop = handler(drive); /* service this interrupt, may set handler for next interrupt */
spin_lock_irq(&io_request_lock);
/*
* Note that handler() may have set things up for another
* interrupt to occur soon, but it cannot happen until
* we exit from this routine, because it will be the
* same irq as is currently being serviced here, and Linux
* won't allow another of the same (on any CPU) until we return.
*/
set_recovery_timer(HWIF(drive));
drive->;service_time = jiffies - drive->;service_start;
if (startstop == ide_stopped) {
if (hwgroup->;handler == NULL) { /* paranoia */
hwgroup->;busy = 0;
ide_do_request(hwgroup, hwif->;irq);
} else {
printk("%s: ide_intr: huh? expected NULL handler on exit\n", drive->;name);
}
}
spin_unlock_irqrestore(&io_request_lock, flags);
}
/* Interrupt handling after a single-sector write */
/*
 * write_intr() - interrupt handler for single-sector (non-multiple) writes.
 *
 * Retires the sector just accepted by the drive; if more sectors remain,
 * pushes the next one and re-arms itself as the handler. Returns
 * ide_started while a transfer is in flight, ide_stopped when done, or
 * the result of ide_error() on a bad status.
 */
static ide_startstop_t write_intr (ide_drive_t *drive)
{
	byte stat;
	int i;
	ide_hwgroup_t *hwgroup = HWGROUP(drive);
	struct request *rq = hwgroup->rq;

	if (!OK_STAT(stat=GET_STAT(),DRIVE_READY,drive->bad_wstat)) {
		printk("%s: write_intr error1: nr_sectors=%ld, stat=0x%02x\n",
			drive->name, rq->nr_sectors, stat);
	} else {
		/*
		 * Sanity check: with exactly one sector left, DRQ must be
		 * clear; with more than one left, the drive must be asking
		 * for data (DRQ set). The XOR expresses both at once.
		 */
		if ((rq->nr_sectors == 1) ^ ((stat & DRQ_STAT) != 0)) {
			rq->sector++;
			rq->buffer += 512;
			rq->errors = 0;
			i = --rq->nr_sectors;
			--rq->current_nr_sectors;
			if (((long)rq->current_nr_sectors) <= 0)
				ide_end_request(1, hwgroup);
			if (i > 0) {
				/* More sectors: feed the next one and wait again. */
				idedisk_output_data (drive, rq->buffer, SECTOR_WORDS);
				ide_set_handler (drive, &write_intr, WAIT_CMD, NULL);
				return ide_started;
			}
			return ide_stopped;
		}
		return ide_stopped;	/* the original code did this here (?) */
	}
	return ide_error(drive, "write_intr", stat);
}
/* Interrupt handling after a multiple-sector write */
/*
 * multwrite_intr() - interrupt handler for multiple-sector writes.
 *
 * Operates on hwgroup->wrq, the private copy of the request. While the
 * drive keeps raising DRQ and sectors remain, hands the next burst to
 * ide_multwrite() and re-arms itself. Once the copy is fully consumed,
 * retires the original request (hwgroup->rq) one bh at a time.
 */
static ide_startstop_t multwrite_intr (ide_drive_t *drive)
{
	byte stat;
	int i;
	ide_hwgroup_t *hwgroup = HWGROUP(drive);
	struct request *rq = &hwgroup->wrq;

	if (OK_STAT(stat=GET_STAT(),DRIVE_READY,drive->bad_wstat)) {
		if (stat & DRQ_STAT) {
			/*
			 * The drive wants data. Remember rq is the copy
			 * of the request
			 */
			if (rq->nr_sectors) {
				if (ide_multwrite(drive, drive->mult_count))
					return ide_stopped;
				ide_set_handler (drive, &multwrite_intr, WAIT_CMD, NULL);
				return ide_started;
			}
		} else {
			/*
			 * If the copy has all the blocks completed then
			 * we can end the original request.
			 */
			if (!rq->nr_sectors) {	/* all done? */
				rq = hwgroup->rq;
				/* Retire the original request bh by bh. */
				for (i = rq->nr_sectors; i > 0;) {
					i -= rq->current_nr_sectors;
					ide_end_request(1, hwgroup);
				}
				return ide_stopped;
			}
		}
		return ide_stopped;	/* the original code did this here (?) */
	}
	return ide_error(drive, "multwrite_intr", stat);
}
/* Interrupt handling for sector reads */
/*
 * read_intr() - interrupt handler for sector reads.
 *
 * On a bad status with ERR/DRQ set, reports the error; on a premature
 * interrupt (no data ready yet, e.g. a shared PCI IRQ), simply re-arms
 * itself and waits for the next interrupt.
 */
static ide_startstop_t read_intr (ide_drive_t *drive)
{
	byte stat;
	int i;
	unsigned int msect, nsect;
	struct request *rq;

	/* new way for dealing with premature shared PCI interrupts */
	if (!OK_STAT(stat=GET_STAT(),DATA_READY,BAD_R_STAT)) {
		if (stat & (ERR_STAT|DRQ_STAT)) {
			return ide_error(drive, "read_intr", stat);
		}
		/* no data yet, so wait for another interrupt */
		ide_set_handler(drive, &read_intr, WAIT_CMD, NULL);
		return ide_started;
	}
	msect = drive->mult_count;
	/*
	 * NOTE(review): everything below references `buffer`, `wcount` and
	 * `io_32bit`, none of which are declared in this function. It looks
	 * like the PIO transfer loop of idedisk_input_data() was pasted in
	 * place of read_intr()'s real sector-retiring logic. TODO: restore
	 * the original body from the upstream source.
	 */
	if (io_32bit) {
#if SUPPORT_VLB_SYNC
		if (io_32bit & 2) {
			unsigned long flags;
			__save_flags(flags);	/* local CPU only */
			__cli();		/* local CPU only */
			do_vlb_sync(IDE_NSECTOR_REG);
			insl(IDE_DATA_REG, buffer, wcount);
			__restore_flags(flags);	/* local CPU only */
		} else
#endif /* SUPPORT_VLB_SYNC */
			insl(IDE_DATA_REG, buffer, wcount);
	} else {
#if SUPPORT_SLOW_DATA_PORTS
		if (drive->slow) {
			/* One 16-bit word at a time through the slow data port. */
			unsigned short *ptr = (unsigned short *) buffer;
			while (wcount--) {
				*ptr++ = inw_p(IDE_DATA_REG);
				*ptr++ = inw_p(IDE_DATA_REG);
			}
		} else
#endif /* SUPPORT_SLOW_DATA_PORTS */
			insw(IDE_DATA_REG, buffer, wcount<<1);
	}
}
/*
* Request may not have originated from ll_rw_blk. If not,
* assume it has free buffers and check waiters
*/
if (q) {
/*
* we've released enough buffers to start I/O again
*/
if (waitqueue_active(&blk_buffers_wait)
&& atomic_read(&queued_sectors) < low_queued_sectors)
wake_up(&blk_buffers_wait);
/*
* Add to pending free list and batch wakeups
*/
list_add(&req->;table, &q->;pending_freelist[rw]);