author     2008-04-10 16:55:11 +0000
committer  2008-04-10 16:55:11 +0000
commit     8bbab3f8b4ad7c7c80a756d43ff0b6d5299785b5
tree       15a597212d1304a3ebcbb7520dff62c849830509 /sys-kernel/mactel-linux-sources/files/2.6.24-mactel-patches-r155/disk-protect.patch
parent     latest svn
latest svn patchset
svn path=/; revision=200
Diffstat (limited to 'sys-kernel/mactel-linux-sources/files/2.6.24-mactel-patches-r155/disk-protect.patch')
-rw-r--r--   sys-kernel/mactel-linux-sources/files/2.6.24-mactel-patches-r155/disk-protect.patch   826
1 files changed, 826 insertions, 0 deletions
diff --git a/sys-kernel/mactel-linux-sources/files/2.6.24-mactel-patches-r155/disk-protect.patch b/sys-kernel/mactel-linux-sources/files/2.6.24-mactel-patches-r155/disk-protect.patch new file mode 100644 index 0000000..c7c9ae7 --- /dev/null +++ b/sys-kernel/mactel-linux-sources/files/2.6.24-mactel-patches-r155/disk-protect.patch @@ -0,0 +1,826 @@ +From: Elias Oltmanns <eo@...> + +From: Nicolas Boichat <nicolas@boichat.ch> + +Subject: disk-protect patch for kernel 2.6.24-rc3 +Newsgroups: gmane.linux.drivers.hdaps.devel +Date: 2007-11-18 16:19:24 GMT (16 weeks, 20 hours and 52 minutes ago) + +For those who can't wait. Mind you, it's compile tested only. But then +you all know about rc versions and the usual disclaimers with regard to +your precious data. +--- + + block/ll_rw_blk.c | 236 ++++++++++++++++++++++++++++++++++++++++++++- + drivers/ata/libata-scsi.c | 36 +++++++ + drivers/ide/ide-disk.c | 142 +++++++++++++++++++++++++++ + drivers/ide/ide-io.c | 14 +++ + drivers/scsi/scsi_lib.c | 169 ++++++++++++++++++++++++++++++++ + include/linux/ata.h | 12 ++ + include/linux/blkdev.h | 14 +++ + include/linux/ide.h | 1 + 8 files changed, 621 insertions(+), 3 deletions(-) + + +diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c +index 8b91994..31f0c22 100644 +--- a/block/ll_rw_blk.c ++++ b/block/ll_rw_blk.c +@@ -39,6 +39,10 @@ + + static void blk_unplug_work(struct work_struct *work); + static void blk_unplug_timeout(unsigned long data); ++static void blk_unfreeze_work(struct work_struct *work); ++static void blk_unfreeze_timeout(unsigned long data); ++static int blk_protect_register(struct request_queue *q); ++static void blk_protect_unregister(struct request_queue *q); + static void drive_stat_acct(struct request *rq, int new_io); + static void init_request_from_bio(struct request *req, struct bio *bio); + static int __make_request(struct request_queue *q, struct bio *bio); +@@ -231,6 +235,16 @@ void blk_queue_make_request(struct request_queue * q, make_request_fn * mfn) + q->unplug_timer.function = blk_unplug_timeout; + q->unplug_timer.data = (unsigned long)q; + ++ q->max_unfreeze = 30; ++ ++ INIT_WORK(&q->unfreeze_work, blk_unfreeze_work); ++ ++ q->unfreeze_timer.function = blk_unfreeze_timeout; ++ q->unfreeze_timer.data = (unsigned long)q; ++ ++ /* Set protect_method to auto detection initially */ ++ q->protect_method = 2; ++ + /* + * by default assume old behaviour and bounce for any highmem page + */ +@@ -263,6 +277,18 @@ static void rq_init(struct request_queue *q, struct request *rq) + rq->next_rq = NULL; + } + ++void blk_queue_issue_protect_fn(struct request_queue *q, issue_protect_fn *ipf) ++{ ++ q->issue_protect_fn = ipf; ++} ++EXPORT_SYMBOL(blk_queue_issue_protect_fn); ++ ++void blk_queue_issue_unprotect_fn(struct request_queue *q, issue_unprotect_fn *iuf) ++{ ++ q->issue_unprotect_fn = iuf; ++} ++EXPORT_SYMBOL(blk_queue_issue_unprotect_fn); ++ + /** + * blk_queue_ordered - does this queue support ordered writes + * @q: the request queue +@@ -1861,6 +1887,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) + } + + init_timer(&q->unplug_timer); ++ init_timer(&q->unfreeze_timer); + + kobject_set_name(&q->kobj, "%s", "queue"); + q->kobj.ktype = &queue_ktype; +@@ -4191,13 +4218,21 @@ int blk_register_queue(struct gendisk *disk) + kobject_uevent(&q->kobj, KOBJ_ADD); + + ret = elv_register_queue(q); ++ if (ret) ++ goto err; ++ ++ ret = blk_protect_register(q); + if (ret) { +- kobject_uevent(&q->kobj, KOBJ_REMOVE); +- kobject_del(&q->kobj); +- return ret; ++ 
elv_unregister_queue(q); ++ goto err; + } + + return 0; ++ ++err: ++ kobject_uevent(&q->kobj, KOBJ_REMOVE); ++ kobject_del(&q->kobj); ++ return ret; + } + + void blk_unregister_queue(struct gendisk *disk) +@@ -4205,6 +4240,7 @@ void blk_unregister_queue(struct gendisk *disk) + struct request_queue *q = disk->queue; + + if (q && q->request_fn) { ++ blk_protect_unregister(q); + elv_unregister_queue(q); + + kobject_uevent(&q->kobj, KOBJ_REMOVE); +@@ -4212,3 +4248,197 @@ void blk_unregister_queue(struct gendisk *disk) + kobject_put(&disk->kobj); + } + } ++ ++/* ++ * Issue lower level unprotect function if no timers are pending. ++ */ ++static void blk_unfreeze_work(struct work_struct *work) ++{ ++ struct request_queue *q = container_of(work, struct request_queue, unfreeze_work); ++ int pending; ++ unsigned long flags; ++ ++ spin_lock_irqsave(q->queue_lock, flags); ++ pending = timer_pending(&q->unfreeze_timer); ++ spin_unlock_irqrestore(q->queue_lock, flags); ++ if (!pending) ++ q->issue_unprotect_fn(q); ++} ++ ++/* ++ * Called when the queue freeze timeout expires... ++ */ ++static void blk_unfreeze_timeout(unsigned long data) ++{ ++ struct request_queue *q = (struct request_queue *) data; ++ ++ kblockd_schedule_work(&q->unfreeze_work); ++} ++ ++/* ++ * The lower level driver parks and freezes the queue, and this block layer ++ * function sets up the freeze timeout timer on return. If the queue is ++ * already frozen then this is called to extend the timer... ++ */ ++void blk_freeze_queue(struct request_queue *q, int seconds) ++{ ++ /* Don't accept arbitrarily long freezes */ ++ if (seconds >= q->max_unfreeze) ++ seconds = q->max_unfreeze; ++ /* set/reset the timer */ ++ mod_timer(&q->unfreeze_timer, msecs_to_jiffies(seconds*1000) + jiffies); ++} ++ ++/* ++ * When reading the 'protect' attribute, we return seconds remaining ++ * before unfreeze timeout expires ++ */ ++static ssize_t queue_protect_show(struct request_queue *q, char *page) ++{ ++ unsigned int seconds = 0; ++ ++ spin_lock_irq(q->queue_lock); ++ if (blk_queue_stopped(q) && timer_pending(&q->unfreeze_timer)) ++ /* ++ * Adding 1 in order to guarantee nonzero value until timer ++ * has actually expired. ++ */ ++ seconds = jiffies_to_msecs(q->unfreeze_timer.expires ++ - jiffies) / 1000 + 1; ++ spin_unlock_irq(q->queue_lock); ++ return queue_var_show(seconds, (page)); ++} ++ ++/* ++ * When writing the 'protect' attribute, input is the number of seconds ++ * to freeze the queue for. We call a lower level helper function to ++ * park the heads and freeze/block the queue, then we make a block layer ++ * call to setup the thaw timeout. If input is 0, then we thaw the queue. 
++ */ ++static ssize_t queue_protect_store(struct request_queue *q, ++ const char *page, size_t count) ++{ ++ unsigned long freeze = 0; ++ ++ queue_var_store(&freeze, page, count); ++ ++ if (freeze>0) { ++ /* Park and freeze */ ++ if (!blk_queue_stopped(q)) ++ q->issue_protect_fn(q); ++ /* set / reset the thaw timer */ ++ spin_lock_irq(q->queue_lock); ++ blk_freeze_queue(q, freeze); ++ spin_unlock_irq(q->queue_lock); ++ } else { ++ spin_lock_irq(q->queue_lock); ++ freeze = del_timer(&q->unfreeze_timer); ++ spin_unlock_irq(q->queue_lock); ++ if (freeze) ++ q->issue_unprotect_fn(q); ++ } ++ ++ return count; ++} ++ ++static ssize_t ++queue_str_show(char *page, char *str, int status) ++{ ++ ssize_t len; ++ ++ if (status & 1) ++ len = sprintf(page, "[%s]", str); ++ else ++ len = sprintf(page, "%s", str); ++ if (status & 2) ++ len += sprintf(page+len, "\n"); ++ else ++ len += sprintf(page+len, " "); ++ return len; ++} ++ ++/* ++ * Returns current protect_method. ++ */ ++static ssize_t queue_protect_method_show(struct request_queue *q, char *page) ++{ ++ int len = 0; ++ int unload = q->protect_method; ++ ++ len += queue_str_show(page+len, "auto", (unload & 2) >> 1); ++ len += queue_str_show(page+len, "unload", unload & 1); ++ len += queue_str_show(page+len, "standby", !unload ? 3 : 2); ++ return len; ++} ++ ++/* ++ * Stores the device protect method. ++ */ ++static ssize_t queue_protect_method_store(struct request_queue *q, ++ const char *page, size_t count) ++{ ++ spin_lock_irq(q->queue_lock); ++ if (!strcmp(page, "auto") || !strcmp(page, "auto\n")) ++ q->protect_method = 2; ++ else if (!strcmp(page, "unload") || !strcmp(page, "unload\n")) ++ q->protect_method = 1; ++ else if (!strcmp(page, "standby") || !strcmp(page, "standby\n")) ++ q->protect_method = 0; ++ else { ++ spin_unlock_irq(q->queue_lock); ++ return -EINVAL; ++ } ++ spin_unlock_irq(q->queue_lock); ++ return count; ++} ++ ++static struct queue_sysfs_entry queue_protect_entry = { ++ .attr = { .name = "protect", .mode = S_IRUGO | S_IWUSR }, ++ .show = queue_protect_show, ++ .store = queue_protect_store, ++}; ++static struct queue_sysfs_entry queue_protect_method_entry = { ++ .attr = { .name = "protect_method", .mode = S_IRUGO | S_IWUSR }, ++ .show = queue_protect_method_show, ++ .store = queue_protect_method_store, ++}; ++ ++static int blk_protect_register(struct request_queue *q) ++{ ++ int error = 0; ++ ++ /* check that the lower level driver has a protect handler */ ++ if (!q->issue_protect_fn) ++ return 0; ++ ++ /* create the attributes */ ++ error = sysfs_create_file(&q->kobj, &queue_protect_entry.attr); ++ if (error) { ++ printk(KERN_ERR ++ "blk_protect_register(): failed to create protect queue attribute!\n"); ++ return error; ++ } ++ ++ error = sysfs_create_file(&q->kobj, &queue_protect_method_entry.attr); ++ if (error) { ++ sysfs_remove_file(&q->kobj, &queue_protect_entry.attr); ++ printk(KERN_ERR ++ "blk_protect_register(): failed to create protect_method attribute!\n"); ++ return error; ++ } ++ kobject_get(&q->kobj); ++ ++ return 0; ++} ++ ++static void blk_protect_unregister(struct request_queue *q) ++{ ++ /* check that the lower level driver has a protect handler */ ++ if (!q->issue_protect_fn) ++ return; ++ ++ /* remove the attributes */ ++ sysfs_remove_file(&q->kobj, &queue_protect_method_entry.attr); ++ sysfs_remove_file(&q->kobj, &queue_protect_entry.attr); ++ kobject_put(&q->kobj); ++} +diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c +index 14daf48..6b02998 100644 +--- a/drivers/ata/libata-scsi.c 
++++ b/drivers/ata/libata-scsi.c +@@ -856,6 +856,38 @@ static void ata_scsi_dev_config(struct scsi_device *sdev, + } + } + ++extern int scsi_protect_queue(struct request_queue *q, int unload); ++extern int scsi_unprotect_queue(struct request_queue *q); ++ ++static int ata_scsi_issue_protect_fn(struct request_queue *q) ++{ ++ struct scsi_device *sdev = q->queuedata; ++ struct ata_port *ap = ata_shost_to_port(sdev->host); ++ struct ata_device *dev = ata_scsi_find_dev(ap, sdev); ++ int unload = q->protect_method; ++ unsigned long flags; ++ ++ if (!dev) { ++ printk(KERN_DEBUG "ata_scsi_issue_protect_fn(): Couldn't find ATA device to be parked.\n"); ++ return -ENXIO; ++ } ++ ++ if (unload == 2) { ++ unload = ata_id_has_unload(dev->id) ? 1 : 0; ++ spin_lock_irqsave(q->queue_lock, flags); ++ q->protect_method = unload; ++ spin_unlock_irqrestore(q->queue_lock, flags); ++ } ++ ++ /* call scsi_protect_queue, requesting either unload or standby */ ++ return scsi_protect_queue(q, unload); ++} ++ ++static int ata_scsi_issue_unprotect_fn(struct request_queue *q) ++{ ++ return scsi_unprotect_queue(q); ++} ++ + /** + * ata_scsi_slave_config - Set SCSI device attributes + * @sdev: SCSI device to examine +@@ -877,6 +909,10 @@ int ata_scsi_slave_config(struct scsi_device *sdev) + + if (dev) + ata_scsi_dev_config(sdev, dev); ++ blk_queue_issue_protect_fn(sdev->request_queue, ++ ata_scsi_issue_protect_fn); ++ blk_queue_issue_unprotect_fn(sdev->request_queue, ++ ata_scsi_issue_unprotect_fn); + + return 0; /* scsi layer doesn't check return value, sigh */ + } +diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c +index b178190..1ef536f 100644 +--- a/drivers/ide/ide-disk.c ++++ b/drivers/ide/ide-disk.c +@@ -675,6 +675,145 @@ static void idedisk_prepare_flush(struct request_queue *q, struct request *rq) + } + + /* ++ * todo: ++ * - we freeze the queue regardless of success and rely on the ++ * ide_protect_queue function to thaw immediately if the command ++ * failed (to be consistent with the libata handler)... should ++ * we also inspect here? ++ */ ++void ide_end_protect_rq(struct request *rq, int error) ++{ ++ struct completion *waiting = rq->end_io_data; ++ ++ rq->end_io_data = NULL; ++ /* spin lock already accquired */ ++ if (!blk_queue_stopped(rq->q)) ++ blk_stop_queue(rq->q); ++ ++ complete(waiting); ++} ++ ++int ide_unprotect_queue(struct request_queue *q) ++{ ++ struct request rq; ++ unsigned long flags; ++ int pending = 0, rc = 0; ++ ide_drive_t *drive = q->queuedata; ++ u8 args[7], *argbuf = args; ++ ++ if (!blk_queue_stopped(q)) ++ return -EIO; ++ ++ /* Are there any pending jobs on the queue? */ ++ pending = ((q->rq.count[READ] > 0) || (q->rq.count[WRITE] > 0)) ? 1 : 0; ++ ++ spin_lock_irqsave(q->queue_lock, flags); ++ blk_start_queue(q); ++ spin_unlock_irqrestore(q->queue_lock, flags); ++ ++ /* The unload feature of the IDLE_IMMEDIATE command ++ temporarily disables HD power management from spinning down ++ the disk. Any other command will reenable HD pm, so, if ++ there are no pending jobs on the queue, another ++ CHECK_POWER_MODE1 command without the unload feature should do ++ just fine. 
*/ ++ if (!pending) { ++ printk(KERN_DEBUG "ide_unprotect_queue(): No pending I/O, re-enabling power management..\n"); ++ memset(args, 0, sizeof(args)); ++ argbuf[0] = 0xe5; /* CHECK_POWER_MODE1 */ ++ ide_init_drive_cmd(&rq); ++ rq.cmd_type = REQ_TYPE_ATA_TASK; ++ rq.buffer = argbuf; ++ rc = ide_do_drive_cmd(drive, &rq, ide_head_wait); ++ } ++ ++ return rc; ++} ++ ++int ide_protect_queue(struct request_queue *q, int unload) ++{ ++ ide_drive_t *drive = q->queuedata; ++ struct request rq; ++ u8 args[7], *argbuf = args; ++ int ret = 0; ++ DECLARE_COMPLETION(wait); ++ ++ memset(&rq, 0, sizeof(rq)); ++ memset(args, 0, sizeof(args)); ++ ++ if (blk_queue_stopped(q)) ++ return -EIO; ++ ++ if (unload) { ++ argbuf[0] = 0xe1; ++ argbuf[1] = 0x44; ++ argbuf[3] = 0x4c; ++ argbuf[4] = 0x4e; ++ argbuf[5] = 0x55; ++ } else ++ argbuf[0] = 0xe0; ++ ++ /* Issue the park command & freeze */ ++ ide_init_drive_cmd(&rq); ++ ++ rq.cmd_type = REQ_TYPE_ATA_TASK; ++ rq.buffer = argbuf; ++ rq.end_io_data = &wait; ++ rq.end_io = ide_end_protect_rq; ++ ++ ret = ide_do_drive_cmd(drive, &rq, ide_next); ++ wait_for_completion(&wait); ++ ++ if (ret) ++ { ++ printk(KERN_DEBUG "ide_protect_queue(): Warning: head NOT parked!..\n"); ++ ide_unprotect_queue(q); ++ return ret; ++ } ++ ++ if (unload) { ++ if (args[3] == 0xc4) ++ printk(KERN_DEBUG "ide_protect_queue(): head parked..\n"); ++ else { ++ /* error parking the head */ ++ printk(KERN_DEBUG "ide_protect_queue(): head NOT parked!..\n"); ++ ret = -EIO; ++ ide_unprotect_queue(q); ++ } ++ } else ++ printk(KERN_DEBUG "ide_protect_queue(): head park not requested, used standby!..\n"); ++ ++ return ret; ++} ++ ++int idedisk_issue_protect_fn(struct request_queue *q) ++{ ++ ide_drive_t *drive = q->queuedata; ++ int unload = q->protect_method; ++ unsigned long flags; ++ ++ /* ++ * Check capability of the device - ++ * - if "idle immediate with unload" is supported we use that, else ++ * we use "standby immediate" and live with spinning down the drive.. ++ * (Word 84, bit 13 of IDENTIFY DEVICE data) ++ */ ++ if (unload == 2) { ++ unload = drive->id->cfsse & (1 << 13) ? 1 : 0; ++ spin_lock_irqsave(q->queue_lock, flags); ++ q->protect_method = unload; ++ spin_unlock_irqrestore(q->queue_lock, flags); ++ } ++ ++ return ide_protect_queue(q, unload); ++} ++ ++int idedisk_issue_unprotect_fn(struct request_queue *q) ++{ ++ return ide_unprotect_queue(q); ++} ++ ++/* + * This is tightly woven into the driver->do_special can not touch. + * DON'T do it again until a total personality rewrite is committed. + */ +@@ -943,6 +1082,9 @@ static void idedisk_setup (ide_drive_t *drive) + drive->wcache = 1; + + write_cache(drive, 1); ++ ++ blk_queue_issue_protect_fn(drive->queue, idedisk_issue_protect_fn); ++ blk_queue_issue_unprotect_fn(drive->queue, idedisk_issue_unprotect_fn); + } + + static void ide_cacheflush_p(ide_drive_t *drive) +diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c +index bef781f..873764e 100644 +--- a/drivers/ide/ide-io.c ++++ b/drivers/ide/ide-io.c +@@ -1271,6 +1271,17 @@ static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq) + } + + /* ++ * Don't accept a request when the queue is stopped (unless we ++ * are resuming from suspend). Prevents existing queue entries ++ * being processed after queue is stopped by the hard disk ++ * protection mechanism... 
++ */ ++ if (test_bit(QUEUE_FLAG_STOPPED, &drive->queue->queue_flags) && !blk_pm_resume_request(rq)) { ++ hwgroup->busy = 0; ++ break; ++ } ++ ++ /* + * Sanity: don't accept a request that isn't a PM request + * if we are currently power managed. This is very important as + * blk_stop_queue() doesn't prevent the elv_next_request() +@@ -1768,6 +1779,9 @@ int ide_do_drive_cmd (ide_drive_t *drive, struct request *rq, ide_action_t actio + where = ELEVATOR_INSERT_FRONT; + rq->cmd_flags |= REQ_PREEMPT; + } ++ if (action == ide_next) ++ where = ELEVATOR_INSERT_FRONT; ++ + __elv_add_request(drive->queue, rq, where, 0); + ide_do_request(hwgroup, IDE_NO_IRQ); + spin_unlock_irqrestore(&ide_lock, flags); +diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c +index a9ac5b1..58049e7 100644 +--- a/drivers/scsi/scsi_lib.c ++++ b/drivers/scsi/scsi_lib.c +@@ -2268,7 +2268,13 @@ EXPORT_SYMBOL_GPL(sdev_evt_send_simple); + int + scsi_device_quiesce(struct scsi_device *sdev) + { ++ int i; + int err = scsi_device_set_state(sdev, SDEV_QUIESCE); ++ for (i = 0; err && (sdev->sdev_state == SDEV_BLOCK) && (i < 100); ++ i++) { ++ msleep_interruptible(200); ++ err = scsi_device_set_state(sdev, SDEV_QUIESCE); ++ } + if (err) + return err; + +@@ -2518,3 +2524,166 @@ void scsi_kunmap_atomic_sg(void *virt) + kunmap_atomic(virt, KM_BIO_SRC_IRQ); + } + EXPORT_SYMBOL(scsi_kunmap_atomic_sg); ++ ++/* ++ * Structure required for synchronous io completion after queue freezing ++ */ ++struct scsi_protect_io_context_sync { ++ struct scsi_device *sdev; ++ int result; ++ char *sense; ++ struct completion *waiting; ++}; ++ ++/* ++ * scsi_protect_wait_done() ++ * Command completion handler for scsi_protect_queue(). ++ * ++ * Unable to call scsi_internal_device_block() as ++ * scsi_end_request() already has the spinlock. So, ++ * we put the necessary functionality inline. ++ * ++ * todo: ++ * - we block the queue regardless of success and rely on the ++ * scsi_protect_queue function to unblock if the command ++ * failed... should we also inspect here? ++ */ ++static void scsi_protect_wait_done(void *data, char *sense, int result, int resid) ++{ ++ struct scsi_protect_io_context_sync *siocs = data; ++ struct completion *waiting = siocs->waiting; ++ struct request_queue *q = siocs->sdev->request_queue; ++ ++ siocs->waiting = NULL; ++ siocs->result = result; ++ memcpy(siocs->sense, sense, SCSI_SENSE_BUFFERSIZE); ++ ++ if (!scsi_device_set_state(siocs->sdev, SDEV_BLOCK)) ++ blk_stop_queue(q); ++ ++ complete(waiting); ++} ++ ++/* ++ * scsi_unprotect_queue() ++ * - release the queue that was previously blocked ++ */ ++int scsi_unprotect_queue(struct request_queue *q) ++{ ++ struct scsi_device *sdev = q->queuedata; ++ int rc = 0, pending = 0; ++ u8 scsi_cmd[MAX_COMMAND_SIZE]; ++ struct scsi_sense_hdr sshdr; ++ ++ if (sdev->sdev_state != SDEV_BLOCK) ++ return -ENXIO; ++ ++ /* Are there any pending jobs on the queue? */ ++ pending = ((q->rq.count[READ] > 0) || (q->rq.count[WRITE] > 0)) ? 1 : 0; ++ ++ rc = scsi_internal_device_unblock(sdev); ++ if (rc) ++ return rc; ++ ++ if (!pending) { ++ printk(KERN_DEBUG "scsi_unprotect_queue(): No pending I/O, re-enabling power management..\n"); ++ ++ memset(scsi_cmd, 0, sizeof(scsi_cmd)); ++ scsi_cmd[0] = ATA_16; ++ scsi_cmd[1] = (3 << 1); /* Non-data */ ++ /* scsi_cmd[2] is already 0 -- no off.line, cc, or data xfer */ ++ scsi_cmd[14] = 0xe5; /* CHECK_POWER_MODE1 */ ++ ++ /* Good values for timeout and retries? Values below ++ from scsi_ioctl_send_command() for default case... 
*/ ++ if (scsi_execute_req(sdev, scsi_cmd, DMA_NONE, NULL, 0, &sshdr, ++ (10*HZ), 5)) ++ rc = -EIO; ++ } ++ return rc; ++} ++EXPORT_SYMBOL_GPL(scsi_unprotect_queue); ++ ++/* ++ * scsi_protect_queue() ++ * - build and issue the park/standby command.. ++ * - queue is blocked during command completion handler ++ */ ++int scsi_protect_queue(struct request_queue *q, int unload) ++{ ++ struct scsi_protect_io_context_sync siocs; ++ struct scsi_device *sdev = q->queuedata; ++ int rc = 0; ++ u8 args[7]; ++ u8 scsi_cmd[MAX_COMMAND_SIZE]; ++ unsigned char sense[SCSI_SENSE_BUFFERSIZE]; ++ unsigned char *desc; ++ DECLARE_COMPLETION_ONSTACK(wait); ++ ++ if (sdev->sdev_state != SDEV_RUNNING) ++ return -ENXIO; ++ ++ memset(args, 0, sizeof(args)); ++ memset(sense, 0, sizeof(sense)); ++ ++ if (unload) { ++ args[0] = 0xe1; ++ args[1] = 0x44; ++ args[3] = 0x4c; ++ args[4] = 0x4e; ++ args[5] = 0x55; ++ } else ++ args[0] = 0xe0; ++ ++ memset(scsi_cmd, 0, sizeof(scsi_cmd)); ++ scsi_cmd[0] = ATA_16; ++ scsi_cmd[1] = (3 << 1); /* Non-data */ ++ scsi_cmd[2] = 0x20; /* no off.line, or data xfer, request cc */ ++ scsi_cmd[4] = args[1]; ++ scsi_cmd[6] = args[2]; ++ scsi_cmd[8] = args[3]; ++ scsi_cmd[10] = args[4]; ++ scsi_cmd[12] = args[5]; ++ scsi_cmd[14] = args[0]; ++ siocs.sdev = sdev; ++ siocs.sense = sense; ++ siocs.waiting = &wait; ++ ++ scsi_execute_async(sdev, scsi_cmd, COMMAND_SIZE(scsi_cmd[0]), ++ DMA_NONE, NULL, 0, 0, (10*HZ), 5, ++ &siocs, &scsi_protect_wait_done, GFP_NOWAIT); ++ wait_for_completion(&wait); ++ ++ if (siocs.result != ((DRIVER_SENSE << 24) + SAM_STAT_CHECK_CONDITION)) { ++ printk(KERN_DEBUG "scsi_protect_queue(): head NOT parked!..\n"); ++ scsi_unprotect_queue(q); /* just in case we still managed to block */ ++ rc = -EIO; ++ goto out; ++ } ++ ++ desc = sense + 8; ++ ++ /* Retrieve data from check condition */ ++ args[1] = desc[3]; ++ args[2] = desc[5]; ++ args[3] = desc[7]; ++ args[4] = desc[9]; ++ args[5] = desc[11]; ++ args[0] = desc[13]; ++ ++ if (unload) { ++ if (args[3] == 0xc4) ++ printk(KERN_DEBUG "scsi_protect_queue(): head parked..\n"); ++ else { ++ /* error parking the head */ ++ printk(KERN_DEBUG "scsi_protect_queue(): head NOT parked!..\n"); ++ rc = -EIO; ++ scsi_unprotect_queue(q); ++ } ++ } else ++ printk(KERN_DEBUG "scsi_protect_queue(): head park not requested, used standby!..\n"); ++ ++out: ++ return rc; ++} ++EXPORT_SYMBOL_GPL(scsi_protect_queue); +diff --git a/include/linux/ata.h b/include/linux/ata.h +index e672e80..aa16cbb 100644 +--- a/include/linux/ata.h ++++ b/include/linux/ata.h +@@ -395,6 +395,18 @@ struct ata_taskfile { + + #define ata_id_cdb_intr(id) (((id)[0] & 0x60) == 0x20) + ++static inline int ata_id_has_unload(const u16 *id) ++{ ++ /* ATA-7 specifies two places to indicate unload feature support. ++ * Since I don't really understand the difference, I'll just check ++ * both and only return zero if none of them indicates otherwise. 
*/ ++ if ((id[84] & 0xC000) == 0x4000 && id[84] & (1 << 13)) ++ return id[84] & (1 << 13); ++ if ((id[87] & 0xC000) == 0x4000) ++ return id[87] & (1 << 13); ++ return 0; ++} ++ + static inline bool ata_id_has_hipm(const u16 *id) + { + u16 val = id[76]; +diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h +index d18ee67..e10f40b 100644 +--- a/include/linux/blkdev.h ++++ b/include/linux/blkdev.h +@@ -332,6 +332,8 @@ struct bio_vec; + typedef int (merge_bvec_fn) (struct request_queue *, struct bio *, struct bio_vec *); + typedef void (prepare_flush_fn) (struct request_queue *, struct request *); + typedef void (softirq_done_fn)(struct request *); ++typedef int (issue_protect_fn) (struct request_queue *); ++typedef int (issue_unprotect_fn) (struct request_queue *); + + enum blk_queue_state { + Queue_down, +@@ -368,6 +370,8 @@ struct request_queue + merge_bvec_fn *merge_bvec_fn; + prepare_flush_fn *prepare_flush_fn; + softirq_done_fn *softirq_done_fn; ++ issue_protect_fn *issue_protect_fn; ++ issue_unprotect_fn *issue_unprotect_fn; + + /* + * Dispatch queue sorting +@@ -383,6 +387,14 @@ struct request_queue + unsigned long unplug_delay; /* After this many jiffies */ + struct work_struct unplug_work; + ++ /* ++ * Auto-unfreeze state ++ */ ++ struct timer_list unfreeze_timer; ++ int max_unfreeze; /* At most this many seconds */ ++ struct work_struct unfreeze_work; ++ int protect_method; ++ + struct backing_dev_info backing_dev_info; + + /* +@@ -773,6 +785,8 @@ extern int blk_do_ordered(struct request_queue *, struct request **); + extern unsigned blk_ordered_cur_seq(struct request_queue *); + extern unsigned blk_ordered_req_seq(struct request *); + extern void blk_ordered_complete_seq(struct request_queue *, unsigned, int); ++extern void blk_queue_issue_protect_fn(struct request_queue *, issue_protect_fn *); ++extern void blk_queue_issue_unprotect_fn(struct request_queue *, issue_unprotect_fn *); + + extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *); + extern void blk_dump_rq_flags(struct request *, char *); +diff --git a/include/linux/ide.h b/include/linux/ide.h +index 9a6a41e..a8f12f5 100644 +--- a/include/linux/ide.h ++++ b/include/linux/ide.h +@@ -1045,6 +1045,7 @@ extern void ide_init_drive_cmd (struct request *rq); + */ + typedef enum { + ide_wait, /* insert rq at end of list, and wait for it */ ++ ide_next, /* insert rq immediately after current request */ + ide_preempt, /* insert rq in front of current request */ + ide_head_wait, /* insert rq in front of current request and wait for it */ + ide_end /* insert rq at end of list, but don't wait for it */ |
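As a rough orientation for how the interface added by this patch is meant to be driven from userspace: blk_protect_register() creates two sysfs attributes on the request queue, "protect" (write a number of seconds to park the heads and freeze the queue, write 0 to thaw early, read it to see the seconds left on the unfreeze timer) and "protect_method" (accepting "auto", "unload" or "standby"). The sketch below is only an illustration, assuming the queue directory is reachable as /sys/block/<disk>/queue/ and using "sda" as a placeholder device name; a daemon such as hdapsd would issue roughly these writes when an accelerometer reports a fall.

/*
 * Minimal userspace sketch, assuming the patched kernel exposes
 * /sys/block/<disk>/queue/protect and .../protect_method as created by
 * blk_protect_register() above. "sda" is just an example device.
 */
#include <stdio.h>

/* Park the heads and freeze the queue for 'seconds' (0 thaws immediately). */
static int protect_disk(const char *disk, int seconds)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/block/%s/queue/protect", disk);
	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return -1;
	}
	fprintf(f, "%d\n", seconds);
	return fclose(f);
}

int main(void)
{
	/* Prefer head unload over spinning the drive down, if supported. */
	FILE *f = fopen("/sys/block/sda/queue/protect_method", "w");
	if (f) {
		fputs("unload\n", f);
		fclose(f);
	}

	/* Fall detected: freeze I/O and park the heads for 8 seconds. */
	protect_disk("sda", 8);

	/* Shock is over: writing 0 cancels the timer and thaws the queue. */
	protect_disk("sda", 0);
	return 0;
}

Note that blk_freeze_queue() clamps the requested freeze to q->max_unfreeze (30 seconds as initialised in blk_queue_make_request()), so repeated short writes, not one long one, are the intended way to keep the queue frozen during an extended shock.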
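The choice between "unload" and "standby" in the auto case hinges on whether IDENTIFY DEVICE words 84/87 advertise IDLE IMMEDIATE with UNLOAD FEATURE in bit 13, as tested by ata_id_has_unload() above (and, on the IDE path, by the check on drive->id->cfsse). A self-contained restatement of that test, using made-up identify words rather than data read from a real drive:

/*
 * Standalone illustration of the capability check used by the patch:
 * bit 13 of IDENTIFY DEVICE words 84 and 87 advertises IDLE IMMEDIATE
 * with UNLOAD FEATURE. The sample values in main() are invented.
 */
#include <stdio.h>
#include <stdint.h>

static int id_has_unload(const uint16_t *id)
{
	/* Words 84 and 87 are only valid when their bits 15:14 read 01b. */
	if ((id[84] & 0xC000) == 0x4000 && (id[84] & (1 << 13)))
		return 1;
	if ((id[87] & 0xC000) == 0x4000 && (id[87] & (1 << 13)))
		return 1;
	return 0;
}

int main(void)
{
	uint16_t id[256] = { 0 };

	id[84] = 0x4000 | (1 << 13);	/* pretend the drive supports unload */

	printf("unload %ssupported; auto protect_method would resolve to \"%s\"\n",
	       id_has_unload(id) ? "" : "not ",
	       id_has_unload(id) ? "unload" : "standby");
	return 0;
}

This mirrors how ata_scsi_issue_protect_fn() and idedisk_issue_protect_fn() rewrite q->protect_method from 2 (auto) to 1 (unload) or 0 (standby) on first use.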