author    | Dave Airlie <airlied@redhat.com> | 2010-12-22 00:48:54 +0100
committer | Dave Airlie <airlied@redhat.com> | 2010-12-22 00:48:54 +0100
commit    | ae09f09e94d755ed45c58b695675636c0ec53f9e (patch)
tree      | 77cb9bac7d81f5b1250b8638a007e10c17b600af /drivers
parent    | Merge remote branch 'nouveau/drm-nouveau-next' of /ssd/git/drm-nouveau-next i... (diff)
parent    | drm/i915: Undo "Uncouple render/power ctx before suspending" (diff)
download  | linux-ae09f09e94d755ed45c58b695675636c0ec53f9e.tar.xz
          | linux-ae09f09e94d755ed45c58b695675636c0ec53f9e.zip
Merge remote branch 'intel/drm-intel-next' of /ssd/git/drm-next into drm-core-next
* 'intel/drm-intel-next' of /ssd/git/drm-next: (771 commits)
drm/i915: Undo "Uncouple render/power ctx before suspending"
drm/i915: Allow the application to choose the constant addressing mode
drm/i915: dynamic render p-state support for Sandy Bridge
drm/i915: Enable EI mode for RCx decision making on Sandybridge
drm/i915/sdvo: Border and stall select became test bits in gen5
drm/i915: Add Guess-o-matic for pageflip timestamping.
drm/i915: Add support for precise vblank timestamping (v2)
drm/i915: Add frame buffer compression on Sandybridge
drm/i915: Add self-refresh support on Sandybridge
drm/i915: Wait for vblank before unpinning old fb
Revert "drm/i915: Avoid using PIPE_CONTROL on Ironlake"
drm/i915: Pass clock limits down to PLL matcher
drm/i915: Poll for seqno completion if IRQ is disabled
drm/i915/ringbuffer: Make IRQ refcnting atomic
agp/intel: Fix missed cached memory flags setting in i965_write_entry()
drm/i915/sdvo: Only use the SDVO pin if it is in the valid range
drm/i915: Enable RC6 autodownclocking on Sandybridge
drm/i915: Terminate the FORCE WAKE after we have finished reading
drm/i915/gtt: Clear the cachelines upon resume
drm/i915: Restore GTT mapping first upon resume
...
Diffstat (limited to 'drivers')
280 files changed, 12861 insertions, 7973 deletions
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index a1725e6488d3..7888501ad9ee 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -1341,7 +1341,7 @@ static struct request *set_next_request(void)
 {
 	struct request_queue *q;
 	int cnt = FD_MAX_UNITS;
-	struct request *rq;
+	struct request *rq = NULL;
 
 	/* Find next queue we can dispatch from */
 	fdc_queue = fdc_queue + 1;
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index 4e4cc6c828cb..605a67e40bbf 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -1399,7 +1399,7 @@ static struct request *set_next_request(void)
 {
 	struct request_queue *q;
 	int old_pos = fdc_queue;
-	struct request *rq;
+	struct request *rq = NULL;
 
 	do {
 		q = unit[fdc_queue].disk->queue;
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index a67d0a611a8a..f291587d753e 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -66,6 +66,7 @@ MODULE_VERSION("3.6.26");
 MODULE_LICENSE("GPL");
 
 static DEFINE_MUTEX(cciss_mutex);
+static struct proc_dir_entry *proc_cciss;
 
 #include "cciss_cmd.h"
 #include "cciss.h"
@@ -363,8 +364,6 @@ static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
 #define ENG_GIG_FACTOR (ENG_GIG/512)
 #define ENGAGE_SCSI "engage scsi"
 
-static struct proc_dir_entry *proc_cciss;
-
 static void cciss_seq_show_header(struct seq_file *seq)
 {
 	ctlr_info_t *h = seq->private;
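The amiflop/ataflop hunks above fix an uninitialized-variable bug: when every per-drive queue is empty, set_next_request() used to fall out of its scan loop and return a garbage rq pointer. Below is a minimal standalone sketch of the corrected pattern; the queue and request types are simplified stand-ins, not the kernel's.

#include <stddef.h>

#define FD_MAX_UNITS 4

struct request { int id; };

/* one pending request per unit, or NULL - a stand-in for a request queue */
static struct request *unit_queue[FD_MAX_UNITS];
static int fdc_queue;

static struct request *set_next_request(void)
{
	struct request *rq = NULL;	/* the fix: start out as "no work" */
	int cnt = FD_MAX_UNITS;

	/* round-robin scan of all units, starting after the last one used */
	do {
		fdc_queue = (fdc_queue + 1) % FD_MAX_UNITS;
		rq = unit_queue[fdc_queue];
		if (rq) {
			unit_queue[fdc_queue] = NULL;	/* dequeue it */
			break;
		}
	} while (--cnt > 0);

	return rq;	/* NULL when every queue was empty */
}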
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 6ec9d53806c5..008d4a00b50d 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -21,80 +21,9 @@
 
-   Instructions for use
-   --------------------
+   For usage instructions, please refer to:
 
-   1) Map a Linux block device to an existing rbd image.
-
-   Usage: <mon ip addr> <options> <pool name> <rbd image name> [snap name]
-
-   $ echo "192.168.0.1 name=admin rbd foo" > /sys/class/rbd/add
-
-   The snapshot name can be "-" or omitted to map the image read/write.
-
-   2) List all active blkdev<->object mappings.
-
-   In this example, we have performed step #1 twice, creating two blkdevs,
-   mapped to two separate rados objects in the rados rbd pool
-
-   $ cat /sys/class/rbd/list
-   #id	major	client_name	pool	name	snap	KB
-   0	254	client4143	rbd	foo	-	1024000
-
-   The columns, in order, are:
-   - blkdev unique id
-   - blkdev assigned major
-   - rados client id
-   - rados pool name
-   - rados block device name
-   - mapped snapshot ("-" if none)
-   - device size in KB
-
-   3) Create a snapshot.
-
-   Usage: <blkdev id> <snapname>
-
-   $ echo "0 mysnap" > /sys/class/rbd/snap_create
-
-   4) Listing a snapshot.
-
-   $ cat /sys/class/rbd/snaps_list
-   #id	snap	KB
-   0	-	1024000 (*)
-   0	foo	1024000
-
-   The columns, in order, are:
-   - blkdev unique id
-   - snapshot name, '-' means none (active read/write version)
-   - size of device at time of snapshot
-   - the (*) indicates this is the active version
-
-   5) Rollback to snapshot.
-
-   Usage: <blkdev id> <snapname>
-
-   $ echo "0 mysnap" > /sys/class/rbd/snap_rollback
-
-   6) Mapping an image using snapshot.
-
-   A snapshot mapping is read-only. This is being done by passing
-   snap=<snapname> to the options when adding a device.
-
-   $ echo "192.168.0.1 name=admin,snap=mysnap rbd foo" > /sys/class/rbd/add
-
-   7) Remove an active blkdev<->rbd image mapping.
-
-   In this example, we remove the mapping with blkdev unique id 1.
-
-   $ echo 1 > /sys/class/rbd/remove
-
-   NOTE: The actual creation and deletion of rados objects is outside the scope
-   of this driver.
+   Documentation/ABI/testing/sysfs-bus-rbd
 
  */
@@ -163,6 +92,14 @@ struct rbd_request {
 	u64			len;
 };
 
+struct rbd_snap {
+	struct device		dev;
+	const char		*name;
+	size_t			size;
+	struct list_head	node;
+	u64			id;
+};
+
 /*
  * a single device
  */
@@ -193,21 +130,60 @@ struct rbd_device {
 	int			read_only;
 
 	struct list_head	node;
+
+	/* list of snapshots */
+	struct list_head	snaps;
+
+	/* sysfs related */
+	struct device		dev;
+};
+
+static struct bus_type rbd_bus_type = {
+	.name		= "rbd",
 };
 
 static spinlock_t node_lock;	/* protects client get/put */
 
-static struct class *class_rbd;	/* /sys/class/rbd */
 static DEFINE_MUTEX(ctl_mutex);	/* Serialize open/close/setup/teardown */
 static LIST_HEAD(rbd_dev_list);	/* devices */
 static LIST_HEAD(rbd_client_list);	/* clients */
 
+static int __rbd_init_snaps_header(struct rbd_device *rbd_dev);
+static void rbd_dev_release(struct device *dev);
+static ssize_t rbd_snap_rollback(struct device *dev,
+				 struct device_attribute *attr,
+				 const char *buf,
+				 size_t size);
+static ssize_t rbd_snap_add(struct device *dev,
+			    struct device_attribute *attr,
+			    const char *buf,
+			    size_t count);
+static void __rbd_remove_snap_dev(struct rbd_device *rbd_dev,
+				  struct rbd_snap *snap);;
+
+
+static struct rbd_device *dev_to_rbd(struct device *dev)
+{
+	return container_of(dev, struct rbd_device, dev);
+}
+
+static struct device *rbd_get_dev(struct rbd_device *rbd_dev)
+{
+	return get_device(&rbd_dev->dev);
+}
+
+static void rbd_put_dev(struct rbd_device *rbd_dev)
+{
+	put_device(&rbd_dev->dev);
+}
 
 static int rbd_open(struct block_device *bdev, fmode_t mode)
 {
 	struct gendisk *disk = bdev->bd_disk;
 	struct rbd_device *rbd_dev = disk->private_data;
 
+	rbd_get_dev(rbd_dev);
+
 	set_device_ro(bdev, rbd_dev->read_only);
 
 	if ((mode & FMODE_WRITE) && rbd_dev->read_only)
@@ -216,9 +192,19 @@ static int rbd_open(struct block_device *bdev, fmode_t mode)
 	return 0;
 }
 
+static int rbd_release(struct gendisk *disk, fmode_t mode)
+{
+	struct rbd_device *rbd_dev = disk->private_data;
+
+	rbd_put_dev(rbd_dev);
+
+	return 0;
+}
+
 static const struct block_device_operations rbd_bd_ops = {
 	.owner			= THIS_MODULE,
 	.open			= rbd_open,
+	.release		= rbd_release,
 };
 
 /*
@@ -361,7 +347,6 @@ static int rbd_header_from_disk(struct rbd_image_header *header,
 	int ret = -ENOMEM;
 
 	init_rwsem(&header->snap_rwsem);
-	header->snap_names_len = le64_to_cpu(ondisk->snap_names_len);
 	header->snapc = kmalloc(sizeof(struct ceph_snap_context) +
 				snap_count *
@@ -1256,10 +1241,20 @@ bad:
 	return -ERANGE;
 }
 
+static void __rbd_remove_all_snaps(struct rbd_device *rbd_dev)
+{
+	struct rbd_snap *snap;
+
+	while (!list_empty(&rbd_dev->snaps)) {
+		snap = list_first_entry(&rbd_dev->snaps, struct rbd_snap, node);
+		__rbd_remove_snap_dev(rbd_dev, snap);
+	}
+}
+
 /*
  * only read the first part of the ondisk header, without the snaps info
  */
-static int rbd_update_snaps(struct rbd_device *rbd_dev)
+static int __rbd_update_snaps(struct rbd_device *rbd_dev)
 {
 	int ret;
 	struct rbd_image_header h;
@@ -1280,12 +1275,15 @@
 	rbd_dev->header.total_snaps = h.total_snaps;
 	rbd_dev->header.snapc = h.snapc;
 	rbd_dev->header.snap_names = h.snap_names;
+	rbd_dev->header.snap_names_len = h.snap_names_len;
 	rbd_dev->header.snap_sizes = h.snap_sizes;
 	rbd_dev->header.snapc->seq = snap_seq;
 
+	ret = __rbd_init_snaps_header(rbd_dev);
+
 	up_write(&rbd_dev->header.snap_rwsem);
 
-	return 0;
+	return ret;
 }
 
 static int rbd_init_disk(struct rbd_device *rbd_dev)
@@ -1300,6 +1298,11 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
 	if (rc)
 		return rc;
 
+	/* no need to lock here, as rbd_dev is not registered yet */
+	rc = __rbd_init_snaps_header(rbd_dev);
+	if (rc)
+		return rc;
+
 	rc = rbd_header_set_snap(rbd_dev, rbd_dev->snap_name, &total_size);
 	if (rc)
 		return rc;
@@ -1343,54 +1346,360 @@ out:
 	return rc;
 }
 
-/********************************************************************
- * /sys/class/rbd/
- *                   add	map rados objects to blkdev
- *                   remove	unmap rados objects
- *                   list	show mappings
- *******************************************************************/
+/*
+  sysfs
+*/
+
+static ssize_t rbd_size_show(struct device *dev,
+			     struct device_attribute *attr, char *buf)
+{
+	struct rbd_device *rbd_dev = dev_to_rbd(dev);
+
+	return sprintf(buf, "%llu\n", (unsigned long long)rbd_dev->header.image_size);
+}
+
+static ssize_t rbd_major_show(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	struct rbd_device *rbd_dev = dev_to_rbd(dev);
 
-static void class_rbd_release(struct class *cls)
+	return sprintf(buf, "%d\n", rbd_dev->major);
+}
+
+static ssize_t rbd_client_id_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
 {
-	kfree(cls);
+	struct rbd_device *rbd_dev = dev_to_rbd(dev);
+
+	return sprintf(buf, "client%lld\n", ceph_client_id(rbd_dev->client));
 }
 
-static ssize_t class_rbd_list(struct class *c,
-			      struct class_attribute *attr,
-			      char *data)
+static ssize_t rbd_pool_show(struct device *dev,
+			     struct device_attribute *attr, char *buf)
 {
-	int n = 0;
-	struct list_head *tmp;
-	int max = PAGE_SIZE;
+	struct rbd_device *rbd_dev = dev_to_rbd(dev);
+
+	return sprintf(buf, "%s\n", rbd_dev->pool_name);
+}
+
+static ssize_t rbd_name_show(struct device *dev,
+			     struct device_attribute *attr, char *buf)
+{
+	struct rbd_device *rbd_dev = dev_to_rbd(dev);
+
+	return sprintf(buf, "%s\n", rbd_dev->obj);
+}
+
+static ssize_t rbd_snap_show(struct device *dev,
+			     struct device_attribute *attr,
+			     char *buf)
+{
+	struct rbd_device *rbd_dev = dev_to_rbd(dev);
+
+	return sprintf(buf, "%s\n", rbd_dev->snap_name);
+}
+
+static ssize_t rbd_image_refresh(struct device *dev,
+				 struct device_attribute *attr,
+				 const char *buf,
+				 size_t size)
+{
+	struct rbd_device *rbd_dev = dev_to_rbd(dev);
+	int rc;
+	int ret = size;
 
 	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
 
-	n += snprintf(data, max,
-		      "#id\tmajor\tclient_name\tpool\tname\tsnap\tKB\n");
+	rc = __rbd_update_snaps(rbd_dev);
+	if (rc < 0)
+		ret = rc;
 
-	list_for_each(tmp, &rbd_dev_list) {
-		struct rbd_device *rbd_dev;
+	mutex_unlock(&ctl_mutex);
+	return ret;
+}
 
-		rbd_dev = list_entry(tmp, struct rbd_device, node);
-		n += snprintf(data+n, max-n,
-			      "%d\t%d\tclient%lld\t%s\t%s\t%s\t%lld\n",
-			      rbd_dev->id,
-			      rbd_dev->major,
-			      ceph_client_id(rbd_dev->client),
-			      rbd_dev->pool_name,
-			      rbd_dev->obj, rbd_dev->snap_name,
-			      rbd_dev->header.image_size >> 10);
-		if (n == max)
+static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
+static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
+static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
+static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
+static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
+static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
+static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
+static DEVICE_ATTR(create_snap, S_IWUSR, NULL, rbd_snap_add);
+static DEVICE_ATTR(rollback_snap, S_IWUSR, NULL, rbd_snap_rollback);
+
+static struct attribute *rbd_attrs[] = {
+	&dev_attr_size.attr,
+	&dev_attr_major.attr,
+	&dev_attr_client_id.attr,
+	&dev_attr_pool.attr,
+	&dev_attr_name.attr,
+	&dev_attr_current_snap.attr,
+	&dev_attr_refresh.attr,
+	&dev_attr_create_snap.attr,
+	&dev_attr_rollback_snap.attr,
+	NULL
+};
+
+static struct attribute_group rbd_attr_group = {
+	.attrs = rbd_attrs,
+};
+
+static const struct attribute_group *rbd_attr_groups[] = {
+	&rbd_attr_group,
+	NULL
+};
+
+static void rbd_sysfs_dev_release(struct device *dev)
+{
+}
+
+static struct device_type rbd_device_type = {
+	.name		= "rbd",
+	.groups		= rbd_attr_groups,
+	.release	= rbd_sysfs_dev_release,
+};
+
+
+/*
+  sysfs - snapshots
+*/
+
+static ssize_t rbd_snap_size_show(struct device *dev,
+				  struct device_attribute *attr,
+				  char *buf)
+{
+	struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
+
+	return sprintf(buf, "%lld\n", (long long)snap->size);
+}
+
+static ssize_t rbd_snap_id_show(struct device *dev,
+				struct device_attribute *attr,
+				char *buf)
+{
+	struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
+
+	return sprintf(buf, "%lld\n", (long long)snap->id);
+}
+
+static DEVICE_ATTR(snap_size, S_IRUGO, rbd_snap_size_show, NULL);
+static DEVICE_ATTR(snap_id, S_IRUGO, rbd_snap_id_show, NULL);
+
+static struct attribute *rbd_snap_attrs[] = {
+	&dev_attr_snap_size.attr,
+	&dev_attr_snap_id.attr,
+	NULL,
+};
+
+static struct attribute_group rbd_snap_attr_group = {
+	.attrs = rbd_snap_attrs,
+};
+
+static void rbd_snap_dev_release(struct device *dev)
+{
+	struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
+	kfree(snap->name);
+	kfree(snap);
+}
+
+static const struct attribute_group *rbd_snap_attr_groups[] = {
+	&rbd_snap_attr_group,
+	NULL
+};
+
+static struct device_type rbd_snap_device_type = {
+	.groups		= rbd_snap_attr_groups,
+	.release	= rbd_snap_dev_release,
+};
+
+static void __rbd_remove_snap_dev(struct rbd_device *rbd_dev,
+				  struct rbd_snap *snap)
+{
+	list_del(&snap->node);
+	device_unregister(&snap->dev);
+}
+
+static int rbd_register_snap_dev(struct rbd_device *rbd_dev,
+				 struct rbd_snap *snap,
+				 struct device *parent)
+{
+	struct device *dev = &snap->dev;
+	int ret;
+
+	dev->type = &rbd_snap_device_type;
+	dev->parent = parent;
+	dev->release = rbd_snap_dev_release;
+	dev_set_name(dev, "snap_%s", snap->name);
+	ret = device_register(dev);
+
+	return ret;
+}
+
+static int __rbd_add_snap_dev(struct rbd_device *rbd_dev,
+			      int i, const char *name,
+			      struct rbd_snap **snapp)
+{
+	int ret;
+	struct rbd_snap *snap = kzalloc(sizeof(*snap), GFP_KERNEL);
+	if (!snap)
+		return -ENOMEM;
+	snap->name = kstrdup(name, GFP_KERNEL);
+	snap->size = rbd_dev->header.snap_sizes[i];
+	snap->id = rbd_dev->header.snapc->snaps[i];
+	if (device_is_registered(&rbd_dev->dev)) {
+		ret = rbd_register_snap_dev(rbd_dev, snap,
+					     &rbd_dev->dev);
+		if (ret < 0)
+			goto err;
+	}
+	*snapp = snap;
+	return 0;
+err:
+	kfree(snap->name);
+	kfree(snap);
+	return ret;
+}
+
+/*
+ * search for the previous snap in a null delimited string list
+ */
+const char *rbd_prev_snap_name(const char *name, const char *start)
+{
+	if (name < start + 2)
+		return NULL;
+
+	name -= 2;
+	while (*name) {
+		if (name == start)
+			return start;
+		name--;
+	}
+	return name + 1;
+}
+
+/*
+ * compare the old list of snapshots that we have to what's in the header
+ * and update it accordingly. Note that the header holds the snapshots
+ * in a reverse order (from newest to oldest) and we need to go from
+ * older to new so that we don't get a duplicate snap name when
+ * doing the process (e.g., removed snapshot and recreated a new
+ * one with the same name.
+ */
+static int __rbd_init_snaps_header(struct rbd_device *rbd_dev)
+{
+	const char *name, *first_name;
+	int i = rbd_dev->header.total_snaps;
+	struct rbd_snap *snap, *old_snap = NULL;
+	int ret;
+	struct list_head *p, *n;
+
+	first_name = rbd_dev->header.snap_names;
+	name = first_name + rbd_dev->header.snap_names_len;
+
+	list_for_each_prev_safe(p, n, &rbd_dev->snaps) {
+		u64 cur_id;
+
+		old_snap = list_entry(p, struct rbd_snap, node);
+
+		if (i)
+			cur_id = rbd_dev->header.snapc->snaps[i - 1];
+
+		if (!i || old_snap->id < cur_id) {
+			/* old_snap->id was skipped, thus was removed */
+			__rbd_remove_snap_dev(rbd_dev, old_snap);
+			continue;
+		}
+		if (old_snap->id == cur_id) {
+			/* we have this snapshot already */
+			i--;
+			name = rbd_prev_snap_name(name, first_name);
+			continue;
+		}
+		for (; i > 0;
+		     i--, name = rbd_prev_snap_name(name, first_name)) {
+			if (!name) {
+				WARN_ON(1);
+				return -EINVAL;
+			}
+			cur_id = rbd_dev->header.snapc->snaps[i];
+			/* snapshot removal? handle it above */
+			if (cur_id >= old_snap->id)
+				break;
+			/* a new snapshot */
+			ret = __rbd_add_snap_dev(rbd_dev, i - 1, name, &snap);
+			if (ret < 0)
+				return ret;
+
+			/* note that we add it backward so using n and not p */
+			list_add(&snap->node, n);
+			p = &snap->node;
+		}
+	}
+	/* we're done going over the old snap list, just add what's left */
+	for (; i > 0; i--) {
+		name = rbd_prev_snap_name(name, first_name);
+		if (!name) {
+			WARN_ON(1);
+			return -EINVAL;
+		}
+		ret = __rbd_add_snap_dev(rbd_dev, i - 1, name, &snap);
+		if (ret < 0)
+			return ret;
+		list_add(&snap->node, &rbd_dev->snaps);
+	}
+
+	return 0;
+}
+
+
+static void rbd_root_dev_release(struct device *dev)
+{
+}
+
+static struct device rbd_root_dev = {
+	.init_name =    "rbd",
+	.release =      rbd_root_dev_release,
+};
+
+static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
+{
+	int ret = -ENOMEM;
+	struct device *dev;
+	struct rbd_snap *snap;
+
+	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
+	dev = &rbd_dev->dev;
+
+	dev->bus = &rbd_bus_type;
+	dev->type = &rbd_device_type;
+	dev->parent = &rbd_root_dev;
+	dev->release = rbd_dev_release;
+	dev_set_name(dev, "%d", rbd_dev->id);
+	ret = device_register(dev);
+	if (ret < 0)
+		goto done_free;
+
+	list_for_each_entry(snap, &rbd_dev->snaps, node) {
+		ret = rbd_register_snap_dev(rbd_dev, snap,
+					     &rbd_dev->dev);
+		if (ret < 0)
 			break;
 	}
 
 	mutex_unlock(&ctl_mutex);
-	return n;
+	return 0;
+done_free:
+	mutex_unlock(&ctl_mutex);
+	return ret;
 }
 
-static ssize_t class_rbd_add(struct class *c,
-			     struct class_attribute *attr,
-			     const char *buf, size_t count)
+static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
+{
+	device_unregister(&rbd_dev->dev);
+}
+
+static ssize_t rbd_add(struct bus_type *bus, const char *buf, size_t count)
 {
 	struct ceph_osd_client *osdc;
 	struct rbd_device *rbd_dev;
@@ -1419,6 +1728,7 @@ static ssize_t class_rbd_add(struct class *c,
 	/* static rbd_device initialization */
 	spin_lock_init(&rbd_dev->lock);
 	INIT_LIST_HEAD(&rbd_dev->node);
+	INIT_LIST_HEAD(&rbd_dev->snaps);
 
 	/* generate unique id: find highest unique id, add one */
 	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
@@ -1478,6 +1788,9 @@ static ssize_t class_rbd_add(struct class *c,
 	}
 	rbd_dev->major = irc;
 
+	rc = rbd_bus_add_dev(rbd_dev);
+	if (rc)
+		goto err_out_disk;
+
 	/* set up and announce blkdev mapping */
 	rc = rbd_init_disk(rbd_dev);
 	if (rc)
@@ -1487,6 +1800,8 @@ static ssize_t class_rbd_add(struct class *c,
 
 err_out_blkdev:
 	unregister_blkdev(rbd_dev->major, rbd_dev->name);
+err_out_disk:
+	rbd_free_disk(rbd_dev);
 err_out_client:
 	rbd_put_client(rbd_dev);
 	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
@@ -1518,35 +1833,10 @@ static struct rbd_device *__rbd_get_dev(unsigned long id)
 	return NULL;
 }
 
-static ssize_t class_rbd_remove(struct class *c,
-				struct class_attribute *attr,
-				const char *buf,
-				size_t count)
+static void rbd_dev_release(struct device *dev)
 {
-	struct rbd_device *rbd_dev = NULL;
-	int target_id, rc;
-	unsigned long ul;
-
-	rc = strict_strtoul(buf, 10, &ul);
-	if (rc)
-		return rc;
-
-	/* convert to int; abort if we lost anything in the conversion */
-	target_id = (int) ul;
-	if (target_id != ul)
-		return -EINVAL;
-
-	/* remove object from list immediately */
-	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
-
-	rbd_dev = __rbd_get_dev(target_id);
-	if (rbd_dev)
-		list_del_init(&rbd_dev->node);
-
-	mutex_unlock(&ctl_mutex);
-
-	if (!rbd_dev)
-		return -ENOENT;
+	struct rbd_device *rbd_dev =
+			container_of(dev, struct rbd_device, dev);
 
 	rbd_put_client(rbd_dev);
 
@@ -1557,67 +1847,11 @@ static ssize_t class_rbd_remove(struct class *c,
 
 	/* release module ref */
 	module_put(THIS_MODULE);
-
-	return count;
 }
 
-static ssize_t class_rbd_snaps_list(struct class *c,
-				    struct class_attribute *attr,
-				    char *data)
-{
-	struct rbd_device *rbd_dev = NULL;
-	struct list_head *tmp;
-	struct rbd_image_header *header;
-	int i, n = 0, max = PAGE_SIZE;
-	int ret;
-
-	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
-
-	n += snprintf(data, max, "#id\tsnap\tKB\n");
-
-	list_for_each(tmp, &rbd_dev_list) {
-		char *names, *p;
-		struct ceph_snap_context *snapc;
-
-		rbd_dev = list_entry(tmp, struct rbd_device, node);
-		header = &rbd_dev->header;
-
-		down_read(&header->snap_rwsem);
-
-		names = header->snap_names;
-		snapc = header->snapc;
-
-		n += snprintf(data + n, max - n, "%d\t%s\t%lld%s\n",
-			      rbd_dev->id, RBD_SNAP_HEAD_NAME,
-			      header->image_size >> 10,
-			      (!rbd_dev->cur_snap ? " (*)" : ""));
-		if (n == max)
-			break;
-
-		p = names;
-		for (i = 0; i < header->total_snaps; i++, p += strlen(p) + 1) {
-			n += snprintf(data + n, max - n, "%d\t%s\t%lld%s\n",
-			      rbd_dev->id, p, header->snap_sizes[i] >> 10,
-			      (rbd_dev->cur_snap &&
-			       (snap_index(header, i) == rbd_dev->cur_snap) ?
-			       " (*)" : ""));
-			if (n == max)
-				break;
-		}
-
-		up_read(&header->snap_rwsem);
-	}
-
-
-	ret = n;
-	mutex_unlock(&ctl_mutex);
-	return ret;
-}
-
-static ssize_t class_rbd_snaps_refresh(struct class *c,
-				       struct class_attribute *attr,
-				       const char *buf,
-				       size_t count)
+static ssize_t rbd_remove(struct bus_type *bus,
+			  const char *buf,
+			  size_t count)
 {
 	struct rbd_device *rbd_dev = NULL;
 	int target_id, rc;
@@ -1641,95 +1875,70 @@ static ssize_t class_rbd_snaps_refresh(struct class *c,
 		goto done;
 	}
 
-	rc = rbd_update_snaps(rbd_dev);
-	if (rc < 0)
-		ret = rc;
+	list_del_init(&rbd_dev->node);
+
+	__rbd_remove_all_snaps(rbd_dev);
+	rbd_bus_del_dev(rbd_dev);
 
 done:
 	mutex_unlock(&ctl_mutex);
 	return ret;
 }
 
-static ssize_t class_rbd_snap_create(struct class *c,
-				     struct class_attribute *attr,
-				     const char *buf,
-				     size_t count)
+static ssize_t rbd_snap_add(struct device *dev,
+			    struct device_attribute *attr,
+			    const char *buf,
+			    size_t count)
 {
-	struct rbd_device *rbd_dev = NULL;
-	int target_id, ret;
-	char *name;
-
-	name = kmalloc(RBD_MAX_SNAP_NAME_LEN + 1, GFP_KERNEL);
+	struct rbd_device *rbd_dev = dev_to_rbd(dev);
+	int ret;
+	char *name = kmalloc(count + 1, GFP_KERNEL);
 	if (!name)
 		return -ENOMEM;
 
-	/* parse snaps add command */
-	if (sscanf(buf, "%d "
-		   "%" __stringify(RBD_MAX_SNAP_NAME_LEN) "s",
-		   &target_id,
-		   name) != 2) {
-		ret = -EINVAL;
-		goto done;
-	}
+	snprintf(name, count, "%s", buf);
 
 	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
 
-	rbd_dev = __rbd_get_dev(target_id);
-	if (!rbd_dev) {
-		ret = -ENOENT;
-		goto done_unlock;
-	}
-
 	ret = rbd_header_add_snap(rbd_dev, name, GFP_KERNEL);
 	if (ret < 0)
 		goto done_unlock;
 
-	ret = rbd_update_snaps(rbd_dev);
+	ret = __rbd_update_snaps(rbd_dev);
 	if (ret < 0)
 		goto done_unlock;
 
 	ret = count;
 done_unlock:
 	mutex_unlock(&ctl_mutex);
-done:
 	kfree(name);
 	return ret;
 }
 
-static ssize_t class_rbd_rollback(struct class *c,
-				  struct class_attribute *attr,
-				  const char *buf,
-				  size_t count)
+static ssize_t rbd_snap_rollback(struct device *dev,
+				 struct device_attribute *attr,
+				 const char *buf,
+				 size_t count)
 {
-	struct rbd_device *rbd_dev = NULL;
-	int target_id, ret;
+	struct rbd_device *rbd_dev = dev_to_rbd(dev);
+	int ret;
 	u64 snapid;
-	char snap_name[RBD_MAX_SNAP_NAME_LEN];
 	u64 cur_ofs;
-	char *seg_name;
+	char *seg_name = NULL;
+	char *snap_name = kmalloc(count + 1, GFP_KERNEL);
+
+	ret = -ENOMEM;
+	if (!snap_name)
+		return ret;
 
 	/* parse snaps add command */
-	if (sscanf(buf, "%d "
-		   "%" __stringify(RBD_MAX_SNAP_NAME_LEN) "s",
-		   &target_id,
-		   snap_name) != 2) {
-		return -EINVAL;
-	}
-
-	ret = -ENOMEM;
+	snprintf(snap_name, count, "%s", buf);
 	seg_name = kmalloc(RBD_MAX_SEG_NAME_LEN + 1, GFP_NOIO);
 	if (!seg_name)
-		return ret;
+		goto done;
 
 	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
 
-	rbd_dev = __rbd_get_dev(target_id);
-	if (!rbd_dev) {
-		ret = -ENOENT;
-		goto done_unlock;
-	}
-
 	ret = snap_by_name(&rbd_dev->header, snap_name, &snapid, NULL);
 	if (ret < 0)
 		goto done_unlock;
@@ -1750,7 +1959,7 @@ static ssize_t class_rbd_rollback(struct class *c,
 			  seg_name, ret);
 	}
 
-	ret = rbd_update_snaps(rbd_dev);
+	ret = __rbd_update_snaps(rbd_dev);
 	if (ret < 0)
 		goto done_unlock;
 
@@ -1758,57 +1967,42 @@ static ssize_t class_rbd_rollback(struct class *c,
 
 done_unlock:
 	mutex_unlock(&ctl_mutex);
+done:
 	kfree(seg_name);
+	kfree(snap_name);
 
 	return ret;
 }
 
-static struct class_attribute class_rbd_attrs[] = {
-	__ATTR(add,		0200, NULL, class_rbd_add),
-	__ATTR(remove,		0200, NULL, class_rbd_remove),
-	__ATTR(list,		0444, class_rbd_list, NULL),
-	__ATTR(snaps_refresh,	0200, NULL, class_rbd_snaps_refresh),
-	__ATTR(snap_create,	0200, NULL, class_rbd_snap_create),
-	__ATTR(snaps_list,	0444, class_rbd_snaps_list, NULL),
-	__ATTR(snap_rollback,	0200, NULL, class_rbd_rollback),
+static struct bus_attribute rbd_bus_attrs[] = {
+	__ATTR(add, S_IWUSR, NULL, rbd_add),
+	__ATTR(remove, S_IWUSR, NULL, rbd_remove),
 	__ATTR_NULL
 };
 
 /*
  * create control files in sysfs
- * /sys/class/rbd/...
+ * /sys/bus/rbd/...
  */
 static int rbd_sysfs_init(void)
 {
-	int ret = -ENOMEM;
+	int ret;
 
-	class_rbd = kzalloc(sizeof(*class_rbd), GFP_KERNEL);
-	if (!class_rbd)
-		goto out;
+	rbd_bus_type.bus_attrs = rbd_bus_attrs;
 
-	class_rbd->name = DRV_NAME;
-	class_rbd->owner = THIS_MODULE;
-	class_rbd->class_release = class_rbd_release;
-	class_rbd->class_attrs = class_rbd_attrs;
+	ret = bus_register(&rbd_bus_type);
+	if (ret < 0)
+		return ret;
 
-	ret = class_register(class_rbd);
-	if (ret)
-		goto out_class;
-	return 0;
+	ret = device_register(&rbd_root_dev);
 
-out_class:
-	kfree(class_rbd);
-	class_rbd = NULL;
-	pr_err(DRV_NAME ": failed to create class rbd\n");
-out:
 	return ret;
 }
 
 static void rbd_sysfs_cleanup(void)
 {
-	if (class_rbd)
-		class_destroy(class_rbd);
-	class_rbd = NULL;
+	device_unregister(&rbd_root_dev);
+	bus_unregister(&rbd_bus_type);
 }
 
 int __init rbd_init(void)
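The rbd hunks above replace the old /sys/class/rbd control files with a device bus, so add/remove now live under /sys/bus/rbd/ and per-image attributes (size, major, pool, name, current_snap, create_snap, rollback_snap, refresh) hang off registered devices. A hypothetical userspace sketch of mapping an image through the new interface; the monitor address, credentials, pool, and image name are placeholders, and it assumes the add string keeps the format the removed class documentation described:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* placeholder monitor address, credentials, pool and image name */
	const char *spec = "192.168.0.1 name=admin rbd foo";
	int fd = open("/sys/bus/rbd/add", O_WRONLY);

	if (fd < 0) {
		perror("open /sys/bus/rbd/add");
		return 1;
	}
	if (write(fd, spec, strlen(spec)) < 0)
		perror("write");
	close(fd);

	/* the per-device attribute files should then appear under
	 * /sys/bus/rbd/devices/<id>/, per normal sysfs bus layout */
	return 0;
}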
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 255035cfc88a..4f9e22f29138 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -65,7 +65,7 @@ enum blkif_state {
 
 struct blk_shadow {
 	struct blkif_request req;
-	unsigned long request;
+	struct request *request;
 	unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 };
 
@@ -136,7 +136,7 @@ static void add_id_to_freelist(struct blkfront_info *info,
 			       unsigned long id)
 {
 	info->shadow[id].req.id  = info->shadow_free;
-	info->shadow[id].request = 0;
+	info->shadow[id].request = NULL;
 	info->shadow_free = id;
 }
 
@@ -245,14 +245,11 @@ static int blkif_ioctl(struct block_device *bdev, fmode_t mode,
 }
 
 /*
- * blkif_queue_request
+ * Generate a Xen blkfront IO request from a blk layer request.  Reads
+ * and writes are handled as expected.  Since we lack a loose flush
+ * request, we map flushes into a full ordered barrier.
  *
- * request block io
- *
- * id: for guest use only.
- * operation: BLKIF_OP_{READ,WRITE,PROBE}
- * buffer: buffer to read/write into. this should be a
- *   virtual address in the guest os.
+ * @req: a request struct
  */
 static int blkif_queue_request(struct request *req)
 {
@@ -281,7 +278,7 @@ static int blkif_queue_request(struct request *req)
 	/* Fill out a communications ring structure. */
 	ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
 	id = get_id_from_freelist(info);
-	info->shadow[id].request = (unsigned long)req;
+	info->shadow[id].request = req;
 
 	ring_req->id = id;
 	ring_req->sector_number = (blkif_sector_t)blk_rq_pos(req);
@@ -290,6 +287,18 @@ static int blkif_queue_request(struct request *req)
 	ring_req->operation = rq_data_dir(req) ?
 		BLKIF_OP_WRITE : BLKIF_OP_READ;
 
+	if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
+		/*
+		 * Ideally we could just do an unordered
+		 * flush-to-disk, but all we have is a full write
+		 * barrier at the moment.  However, a barrier write is
+		 * a superset of FUA, so we can implement it the same
+		 * way.  (It's also a FLUSH+FUA, since it is
+		 * guaranteed ordered WRT previous writes.)
+		 */
+		ring_req->operation = BLKIF_OP_WRITE_BARRIER;
+	}
+
 	ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg);
 	BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
 
@@ -634,7 +643,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 
 		bret = RING_GET_RESPONSE(&info->ring, i);
 		id   = bret->id;
-		req  = (struct request *)info->shadow[id].request;
+		req  = info->shadow[id].request;
 
 		blkif_completion(&info->shadow[id]);
 
@@ -647,6 +656,16 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 				printk(KERN_WARNING "blkfront: %s: write barrier op failed\n",
 				       info->gd->disk_name);
 				error = -EOPNOTSUPP;
+			}
+			if (unlikely(bret->status == BLKIF_RSP_ERROR &&
+				     info->shadow[id].req.nr_segments == 0)) {
+				printk(KERN_WARNING "blkfront: %s: empty write barrier op failed\n",
+				       info->gd->disk_name);
+				error = -EOPNOTSUPP;
+			}
+			if (unlikely(error)) {
+				if (error == -EOPNOTSUPP)
+					error = 0;
 				info->feature_flush = 0;
 				xlvbd_flush(info);
 			}
@@ -899,7 +918,7 @@ static int blkif_recover(struct blkfront_info *info)
 	/* Stage 3: Find pending requests and requeue them. */
 	for (i = 0; i < BLK_RING_SIZE; i++) {
 		/* Not in use? */
-		if (copy[i].request == 0)
+		if (!copy[i].request)
 			continue;
 
 		/* Grab a request slot and copy shadow state into it. */
@@ -916,9 +935,7 @@ static int blkif_recover(struct blkfront_info *info)
 				req->seg[j].gref,
 				info->xbdev->otherend_id,
 				pfn_to_mfn(info->shadow[req->id].frame[j]),
-				rq_data_dir(
-					(struct request *)
-					info->shadow[req->id].request));
+				rq_data_dir(info->shadow[req->id].request));
 		info->shadow[req->id].req = *req;
 
 		info->ring.req_prod_pvt++;
@@ -1067,14 +1084,8 @@ static void blkfront_connect(struct blkfront_info *info)
 	 */
 	info->feature_flush = 0;
 
-	/*
-	 * The driver doesn't properly handled empty flushes, so
-	 * lets disable barrier support for now.
-	 */
-#if 0
 	if (!err && barrier)
-		info->feature_flush = REQ_FLUSH;
-#endif
+		info->feature_flush = REQ_FLUSH | REQ_FUA;
 
 	err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size);
 	if (err) {
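A toy model of the flag mapping the xen-blkfront hunks implement: any request carrying a flush or FUA hint is issued as a full write barrier, since (as the patch's own comment notes) a barrier write is ordered with respect to earlier writes and implies FUA. The flag values below are illustrative stand-ins, not the kernel's:

#include <stdio.h>

/* illustrative stand-ins for the block layer's cmd_flags bits */
#define REQ_WRITE (1u << 0)
#define REQ_FLUSH (1u << 1)
#define REQ_FUA   (1u << 2)

enum blkif_op { BLKIF_OP_READ, BLKIF_OP_WRITE, BLKIF_OP_WRITE_BARRIER };

static enum blkif_op map_request(unsigned int cmd_flags)
{
	enum blkif_op op = (cmd_flags & REQ_WRITE) ? BLKIF_OP_WRITE
						   : BLKIF_OP_READ;

	/* no standalone flush in the ring protocol: upgrade to a
	 * barrier, which is a superset of both FLUSH and FUA */
	if (cmd_flags & (REQ_FLUSH | REQ_FUA))
		op = BLKIF_OP_WRITE_BARRIER;

	return op;
}

int main(void)
{
	printf("%d\n", map_request(REQ_WRITE | REQ_FUA)); /* prints 2 */
	return 0;
}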
diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h
index 5259065f3c79..3e67ddde9e16 100644
--- a/drivers/char/agp/agp.h
+++ b/drivers/char/agp/agp.h
@@ -120,7 +120,6 @@ struct agp_bridge_driver {
 	void (*agp_destroy_page)(struct page *, int flags);
 	void (*agp_destroy_pages)(struct agp_memory *);
 	int (*agp_type_to_mask_type) (struct agp_bridge_data *, int);
-	void (*chipset_flush)(struct agp_bridge_data *);
 };
 
 struct agp_bridge_data {
diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c
index 9d2c97a69cdd..a48e05b31593 100644
--- a/drivers/char/agp/compat_ioctl.c
+++ b/drivers/char/agp/compat_ioctl.c
@@ -276,7 +276,6 @@ long compat_agp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 		break;
 
 	case AGPIOC_CHIPSET_FLUSH32:
-		ret_val = agpioc_chipset_flush_wrap(curr_priv);
 		break;
 	}
 
diff --git a/drivers/char/agp/compat_ioctl.h b/drivers/char/agp/compat_ioctl.h
index 0c9678ac0371..f30e0fd97963 100644
--- a/drivers/char/agp/compat_ioctl.h
+++ b/drivers/char/agp/compat_ioctl.h
@@ -102,6 +102,5 @@ void agp_free_memory_wrap(struct agp_memory *memory);
 struct agp_memory *agp_allocate_memory_wrap(size_t pg_count, u32 type);
 struct agp_memory *agp_find_mem_by_key(int key);
 struct agp_client *agp_find_client_by_pid(pid_t id);
-int agpioc_chipset_flush_wrap(struct agp_file_private *priv);
 
 #endif /* _AGP_COMPAT_H */
diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
index 3cb4539a98b2..2e044338753c 100644
--- a/drivers/char/agp/frontend.c
+++ b/drivers/char/agp/frontend.c
@@ -957,13 +957,6 @@ static int agpioc_unbind_wrap(struct agp_file_private *priv, void __user *arg)
 	return agp_unbind_memory(memory);
 }
 
-int agpioc_chipset_flush_wrap(struct agp_file_private *priv)
-{
-	DBG("");
-	agp_flush_chipset(agp_bridge);
-	return 0;
-}
-
 static long agp_ioctl(struct file *file,
 		     unsigned int cmd, unsigned long arg)
 {
@@ -1039,7 +1032,6 @@ static long agp_ioctl(struct file *file,
 		break;
 
 	case AGPIOC_CHIPSET_FLUSH:
-		ret_val = agpioc_chipset_flush_wrap(curr_priv);
 		break;
 	}
 
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index 4956f1c8f9d5..012cba0d6d96 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -81,13 +81,6 @@ static int agp_get_key(void)
 	return -1;
 }
 
-void agp_flush_chipset(struct agp_bridge_data *bridge)
-{
-	if (bridge->driver->chipset_flush)
-		bridge->driver->chipset_flush(bridge);
-}
-EXPORT_SYMBOL(agp_flush_chipset);
-
 /*
  * Use kmalloc if possible for the page list. Otherwise fall back to
  * vmalloc. This speeds things up and also saves memory for small AGP
@@ -487,26 +480,6 @@ int agp_unbind_memory(struct agp_memory *curr)
 }
 EXPORT_SYMBOL(agp_unbind_memory);
 
-/**
- *	agp_rebind_emmory  -  Rewrite the entire GATT, useful on resume
- */
-int agp_rebind_memory(void)
-{
-	struct agp_memory *curr;
-	int ret_val = 0;
-
-	spin_lock(&agp_bridge->mapped_lock);
-	list_for_each_entry(curr, &agp_bridge->mapped_list, mapped_list) {
-		ret_val = curr->bridge->driver->insert_memory(curr,
-							      curr->pg_start,
-							      curr->type);
-		if (ret_val != 0)
-			break;
-	}
-	spin_unlock(&agp_bridge->mapped_lock);
-	return ret_val;
-}
-EXPORT_SYMBOL(agp_rebind_memory);
 
 /* End - Routines for handling swapping of agp_memory into the GATT */
 
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index e72f49d52202..07e9796fead7 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -828,14 +828,9 @@ static void __devexit agp_intel_remove(struct pci_dev *pdev)
 static int agp_intel_resume(struct pci_dev *pdev)
 {
 	struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
-	int ret_val;
 
 	bridge->driver->configure();
 
-	ret_val = agp_rebind_memory();
-	if (ret_val != 0)
-		return ret_val;
-
 	return 0;
 }
 #endif
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index 90539df02504..010e3defd6c3 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -75,6 +75,8 @@
 #define I810_GMS_DISABLE	0x00000000
 #define I810_PGETBL_CTL		0x2020
 #define I810_PGETBL_ENABLED	0x00000001
+/* Note: PGETBL_CTL2 has a different offset on G33. */
+#define I965_PGETBL_CTL2	0x20c4
 #define I965_PGETBL_SIZE_MASK	0x0000000e
 #define I965_PGETBL_SIZE_512KB	(0 << 1)
 #define I965_PGETBL_SIZE_256KB	(1 << 1)
@@ -82,9 +84,15 @@
 #define I965_PGETBL_SIZE_1MB	(3 << 1)
 #define I965_PGETBL_SIZE_2MB	(4 << 1)
 #define I965_PGETBL_SIZE_1_5MB	(5 << 1)
-#define G33_PGETBL_SIZE_MASK	(3 << 8)
-#define G33_PGETBL_SIZE_1M	(1 << 8)
-#define G33_PGETBL_SIZE_2M	(2 << 8)
+#define G33_GMCH_SIZE_MASK	(3 << 8)
+#define G33_GMCH_SIZE_1M	(1 << 8)
+#define G33_GMCH_SIZE_2M	(2 << 8)
+#define G4x_GMCH_SIZE_MASK	(0xf << 8)
+#define G4x_GMCH_SIZE_1M	(0x1 << 8)
+#define G4x_GMCH_SIZE_2M	(0x3 << 8)
+#define G4x_GMCH_SIZE_VT_1M	(0x9 << 8)
+#define G4x_GMCH_SIZE_VT_1_5M	(0xa << 8)
+#define G4x_GMCH_SIZE_VT_2M	(0xc << 8)
 
 #define I810_DRAM_CTL		0x3000
 #define I810_DRAM_ROW_0		0x00000001
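To make the new intel-agp.h fields above concrete, here is a small sketch that decodes the G4x GMCH size bits the same way i965_gtt_total_entries() in the intel-gtt.c diff below selects a GTT size. The defines are copied from the header; the sample register value is made up:

#include <stdint.h>
#include <stdio.h>

#define G4x_GMCH_SIZE_MASK	(0xf << 8)
#define G4x_GMCH_SIZE_1M	(0x1 << 8)
#define G4x_GMCH_SIZE_2M	(0x3 << 8)
#define G4x_GMCH_SIZE_VT_1M	(0x9 << 8)
#define G4x_GMCH_SIZE_VT_1_5M	(0xa << 8)
#define G4x_GMCH_SIZE_VT_2M	(0xc << 8)

/* map the GMCH control field onto the GTT size it selects */
static const char *g4x_gtt_size(uint16_t gmch_ctl)
{
	switch (gmch_ctl & G4x_GMCH_SIZE_MASK) {
	case G4x_GMCH_SIZE_1M:
	case G4x_GMCH_SIZE_VT_1M:
		return "1MB";
	case G4x_GMCH_SIZE_VT_1_5M:
		return "1.5MB";
	case G4x_GMCH_SIZE_2M:
	case G4x_GMCH_SIZE_VT_2M:
		return "2MB";
	default:
		return "unknown";
	}
}

int main(void)
{
	printf("%s\n", g4x_gtt_size(0x0a00));	/* VT 1.5M sample -> "1.5MB" */
	return 0;
}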
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 9272c38dd3c6..356f73e0d17e 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -24,7 +24,6 @@
 #include <asm/smp.h>
 #include "agp.h"
 #include "intel-agp.h"
-#include <linux/intel-gtt.h>
 #include <drm/intel-gtt.h>
 
 /*
@@ -39,40 +38,12 @@
 #define USE_PCI_DMA_API 0
 #endif
 
-/* Max amount of stolen space, anything above will be returned to Linux */
-int intel_max_stolen = 32 * 1024 * 1024;
-
-static const struct aper_size_info_fixed intel_i810_sizes[] =
-{
-	{64, 16384, 4},
-	/* The 32M mode still requires a 64k gatt */
-	{32, 8192, 4}
-};
-
-#define AGP_DCACHE_MEMORY	1
-#define AGP_PHYS_MEMORY		2
-#define INTEL_AGP_CACHED_MEMORY 3
-
-static struct gatt_mask intel_i810_masks[] =
-{
-	{.mask = I810_PTE_VALID, .type = 0},
-	{.mask = (I810_PTE_VALID | I810_PTE_LOCAL), .type = AGP_DCACHE_MEMORY},
-	{.mask = I810_PTE_VALID, .type = 0},
-	{.mask = I810_PTE_VALID | I830_PTE_SYSTEM_CACHED,
-	 .type = INTEL_AGP_CACHED_MEMORY}
-};
-
-#define INTEL_AGP_UNCACHED_MEMORY              0
-#define INTEL_AGP_CACHED_MEMORY_LLC            1
-#define INTEL_AGP_CACHED_MEMORY_LLC_GFDT       2
-#define INTEL_AGP_CACHED_MEMORY_LLC_MLC        3
-#define INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT   4
-
 struct intel_gtt_driver {
 	unsigned int gen : 8;
 	unsigned int is_g33 : 1;
 	unsigned int is_pineview : 1;
 	unsigned int is_ironlake : 1;
+	unsigned int has_pgtbl_enable : 1;
 	unsigned int dma_mask_size : 8;
 	/* Chipset specific GTT setup */
 	int (*setup)(void);
@@ -95,13 +66,14 @@ static struct _intel_private {
 	u8 __iomem *registers;
 	phys_addr_t gtt_bus_addr;
 	phys_addr_t gma_bus_addr;
-	phys_addr_t pte_bus_addr;
+	u32 PGETBL_save;
 	u32 __iomem *gtt;		/* I915G */
 	int num_dcache_entries;
 	union {
 		void __iomem *i9xx_flush_page;
 		void *i8xx_flush_page;
 	};
+	char *i81x_gtt_table;
 	struct page *i8xx_page;
 	struct resource ifp_resource;
 	int resource_valid;
@@ -113,42 +85,31 @@ static struct _intel_private {
 #define IS_G33		intel_private.driver->is_g33
 #define IS_PINEVIEW	intel_private.driver->is_pineview
 #define IS_IRONLAKE	intel_private.driver->is_ironlake
+#define HAS_PGTBL_EN	intel_private.driver->has_pgtbl_enable
 
-static void intel_agp_free_sglist(struct agp_memory *mem)
-{
-	struct sg_table st;
-
-	st.sgl = mem->sg_list;
-	st.orig_nents = st.nents = mem->page_count;
-
-	sg_free_table(&st);
-
-	mem->sg_list = NULL;
-	mem->num_sg = 0;
-}
-
-static int intel_agp_map_memory(struct agp_memory *mem)
+int intel_gtt_map_memory(struct page **pages, unsigned int num_entries,
+			 struct scatterlist **sg_list, int *num_sg)
 {
 	struct sg_table st;
 	struct scatterlist *sg;
 	int i;
 
-	if (mem->sg_list)
+	if (*sg_list)
 		return 0; /* already mapped (for e.g. resume */
 
-	DBG("try mapping %lu pages\n", (unsigned long)mem->page_count);
+	DBG("try mapping %lu pages\n", (unsigned long)num_entries);
 
-	if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL))
+	if (sg_alloc_table(&st, num_entries, GFP_KERNEL))
 		goto err;
 
-	mem->sg_list = sg = st.sgl;
+	*sg_list = sg = st.sgl;
 
-	for (i = 0 ; i < mem->page_count; i++, sg = sg_next(sg))
-		sg_set_page(sg, mem->pages[i], PAGE_SIZE, 0);
+	for (i = 0 ; i < num_entries; i++, sg = sg_next(sg))
+		sg_set_page(sg, pages[i], PAGE_SIZE, 0);
 
-	mem->num_sg = pci_map_sg(intel_private.pcidev, mem->sg_list,
-				 mem->page_count, PCI_DMA_BIDIRECTIONAL);
-	if (unlikely(!mem->num_sg))
+	*num_sg = pci_map_sg(intel_private.pcidev, *sg_list,
+			     num_entries, PCI_DMA_BIDIRECTIONAL);
+	if (unlikely(!*num_sg))
 		goto err;
 
 	return 0;
@@ -157,90 +118,22 @@ err:
 	sg_free_table(&st);
 	return -ENOMEM;
 }
+EXPORT_SYMBOL(intel_gtt_map_memory);
 
-static void intel_agp_unmap_memory(struct agp_memory *mem)
+void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg)
 {
+	struct sg_table st;
 	DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);
 
-	pci_unmap_sg(intel_private.pcidev, mem->sg_list,
-		     mem->page_count, PCI_DMA_BIDIRECTIONAL);
-	intel_agp_free_sglist(mem);
-}
-
-static int intel_i810_fetch_size(void)
-{
-	u32 smram_miscc;
-	struct aper_size_info_fixed *values;
-
-	pci_read_config_dword(intel_private.bridge_dev,
-			      I810_SMRAM_MISCC, &smram_miscc);
-	values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
-
-	if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
-		dev_warn(&intel_private.bridge_dev->dev, "i810 is disabled\n");
-		return 0;
-	}
-	if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
-		agp_bridge->current_size = (void *) (values + 1);
-		agp_bridge->aperture_size_idx = 1;
-		return values[1].size;
-	} else {
-		agp_bridge->current_size = (void *) (values);
-		agp_bridge->aperture_size_idx = 0;
-		return values[0].size;
-	}
-
-	return 0;
-}
-
-static int intel_i810_configure(void)
-{
-	struct aper_size_info_fixed *current_size;
-	u32 temp;
-	int i;
-
-	current_size = A_SIZE_FIX(agp_bridge->current_size);
-
-	if (!intel_private.registers) {
-		pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
-		temp &= 0xfff80000;
+	pci_unmap_sg(intel_private.pcidev, sg_list,
+		     num_sg, PCI_DMA_BIDIRECTIONAL);
 
-		intel_private.registers = ioremap(temp, 128 * 4096);
-		if (!intel_private.registers) {
-			dev_err(&intel_private.pcidev->dev,
-				"can't remap memory\n");
-			return -ENOMEM;
-		}
-	}
+	st.sgl = sg_list;
+	st.orig_nents = st.nents = num_sg;
 
-	if ((readl(intel_private.registers+I810_DRAM_CTL)
-		& I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
-		/* This will need to be dynamically assigned */
-		dev_info(&intel_private.pcidev->dev,
-			 "detected 4MB dedicated video ram\n");
-		intel_private.num_dcache_entries = 1024;
-	}
-	pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
-	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
-	writel(agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
-	readl(intel_private.registers+I810_PGETBL_CTL);	/* PCI Posting. */
-
-	if (agp_bridge->driver->needs_scratch_page) {
-		for (i = 0; i < current_size->num_entries; i++) {
-			writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
-		}
-		readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));	/* PCI posting. */
-	}
-	global_cache_flush();
-	return 0;
-}
-
-static void intel_i810_cleanup(void)
-{
-	writel(0, intel_private.registers+I810_PGETBL_CTL);
-	readl(intel_private.registers);	/* PCI Posting. */
-	iounmap(intel_private.registers);
+	sg_free_table(&st);
 }
+EXPORT_SYMBOL(intel_gtt_unmap_memory);
 
 static void intel_fake_agp_enable(struct agp_bridge_data *bridge, u32 mode)
 {
@@ -277,80 +170,64 @@ static void i8xx_destroy_pages(struct page *page)
 	atomic_dec(&agp_bridge->current_memory_agp);
 }
 
-static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
-				     int type)
+#define I810_GTT_ORDER 4
+static int i810_setup(void)
 {
-	int i, j, num_entries;
-	void *temp;
-	int ret = -EINVAL;
-	int mask_type;
-
-	if (mem->page_count == 0)
-		goto out;
-
-	temp = agp_bridge->current_size;
-	num_entries = A_SIZE_FIX(temp)->num_entries;
+	u32 reg_addr;
+	char *gtt_table;
 
-	if ((pg_start + mem->page_count) > num_entries)
-		goto out_err;
+	/* i81x does not preallocate the gtt. It's always 64kb in size. */
+	gtt_table = alloc_gatt_pages(I810_GTT_ORDER);
+	if (gtt_table == NULL)
+		return -ENOMEM;
+	intel_private.i81x_gtt_table = gtt_table;
+
+	pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &reg_addr);
+	reg_addr &= 0xfff80000;
 
-	for (j = pg_start; j < (pg_start + mem->page_count); j++) {
-		if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j))) {
-			ret = -EBUSY;
-			goto out_err;
-		}
-	}
+	intel_private.registers = ioremap(reg_addr, KB(64));
+	if (!intel_private.registers)
+		return -ENOMEM;
 
-	if (type != mem->type)
-		goto out_err;
+	writel(virt_to_phys(gtt_table) | I810_PGETBL_ENABLED,
+	       intel_private.registers+I810_PGETBL_CTL);
 
-	mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
+	intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE;
 
-	switch (mask_type) {
-	case AGP_DCACHE_MEMORY:
-		if (!mem->is_flushed)
-			global_cache_flush();
-		for (i = pg_start; i < (pg_start + mem->page_count); i++) {
-			writel((i*4096)|I810_PTE_LOCAL|I810_PTE_VALID,
-			       intel_private.registers+I810_PTE_BASE+(i*4));
-		}
-		readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
-		break;
-	case AGP_PHYS_MEMORY:
-	case AGP_NORMAL_MEMORY:
-		if (!mem->is_flushed)
-			global_cache_flush();
-		for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
-			writel(agp_bridge->driver->mask_memory(agp_bridge,
-					page_to_phys(mem->pages[i]), mask_type),
-			       intel_private.registers+I810_PTE_BASE+(j*4));
-		}
-		readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
-		break;
-	default:
-		goto out_err;
+	if ((readl(intel_private.registers+I810_DRAM_CTL)
+	    & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
+		dev_info(&intel_private.pcidev->dev,
+			 "detected 4MB dedicated video ram\n");
+		intel_private.num_dcache_entries = 1024;
 	}
 
-out:
-	ret = 0;
-out_err:
-	mem->is_flushed = true;
-	return ret;
+	return 0;
+}
+
+static void i810_cleanup(void)
+{
+	writel(0, intel_private.registers+I810_PGETBL_CTL);
+	free_gatt_pages(intel_private.i81x_gtt_table, I810_GTT_ORDER);
 }
 
-static int intel_i810_remove_entries(struct agp_memory *mem, off_t pg_start,
-				     int type)
+static int i810_insert_dcache_entries(struct agp_memory *mem, off_t pg_start,
+				      int type)
 {
 	int i;
 
-	if (mem->page_count == 0)
-		return 0;
+	if ((pg_start + mem->page_count)
+			> intel_private.num_dcache_entries)
+		return -EINVAL;
+
+	if (!mem->is_flushed)
+		global_cache_flush();
 
-	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
-		writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
+	for (i = pg_start; i < (pg_start + mem->page_count); i++) {
+		dma_addr_t addr = i << PAGE_SHIFT;
+		intel_private.driver->write_entry(addr,
+						  i, type);
 	}
-	readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
+	readl(intel_private.gtt+i-1);
 
 	return 0;
 }
 
@@ -397,29 +274,6 @@ static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
 	return new;
 }
 
-static struct agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type)
-{
-	struct agp_memory *new;
-
-	if (type == AGP_DCACHE_MEMORY) {
-		if (pg_count != intel_private.num_dcache_entries)
-			return NULL;
-
-		new = agp_create_memory(1);
-		if (new == NULL)
-			return NULL;
-
-		new->type = AGP_DCACHE_MEMORY;
-		new->page_count = pg_count;
-		new->num_scratch_pages = 0;
-		agp_free_page_array(new);
-		return new;
-	}
-	if (type == AGP_PHYS_MEMORY)
-		return alloc_agpphysmem_i8xx(pg_count, type);
-	return NULL;
-}
-
 static void intel_i810_free_by_type(struct agp_memory *curr)
 {
 	agp_free_key(curr->key);
@@ -437,13 +291,6 @@ static void intel_i810_free_by_type(struct agp_memory *curr)
 	kfree(curr);
 }
 
-static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge,
-					    dma_addr_t addr, int type)
-{
-	/* Type checking must be done elsewhere */
-	return addr | bridge->driver->masks[type].mask;
-}
-
 static int intel_gtt_setup_scratch_page(void)
 {
 	struct page *page;
@@ -455,7 +302,7 @@ static int intel_gtt_setup_scratch_page(void)
 	get_page(page);
 	set_pages_uc(page, 1);
 
-	if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2) {
+	if (intel_private.base.needs_dmar) {
 		dma_addr = pci_map_page(intel_private.pcidev, page, 0,
 				    PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 		if (pci_dma_mapping_error(intel_private.pcidev, dma_addr))
@@ -470,34 +317,45 @@ static int intel_gtt_setup_scratch_page(void)
 	return 0;
 }
 
-static const struct aper_size_info_fixed const intel_fake_agp_sizes[] = {
+static void i810_write_entry(dma_addr_t addr, unsigned int entry,
+			     unsigned int flags)
+{
+	u32 pte_flags = I810_PTE_VALID;
+
+	switch (flags) {
+	case AGP_DCACHE_MEMORY:
+		pte_flags |= I810_PTE_LOCAL;
+		break;
+	case AGP_USER_CACHED_MEMORY:
+		pte_flags |= I830_PTE_SYSTEM_CACHED;
+		break;
+	}
+
+	writel(addr | pte_flags, intel_private.gtt + entry);
+}
+
+static const struct aper_size_info_fixed intel_fake_agp_sizes[] = {
+	{32, 8192, 3},
+	{64, 16384, 4},
 	{128, 32768, 5},
-	/* The 64M mode still requires a 128k gatt */
-	{64, 16384, 5},
 	{256, 65536, 6},
 	{512, 131072, 7},
 };
 
-static unsigned int intel_gtt_stolen_entries(void)
+static unsigned int intel_gtt_stolen_size(void)
 {
 	u16 gmch_ctrl;
 	u8 rdct;
 	int local = 0;
 	static const int ddt[4] = { 0, 16, 32, 64 };
-	unsigned int overhead_entries, stolen_entries;
 	unsigned int stolen_size = 0;
 
+	if (INTEL_GTT_GEN == 1)
+		return 0; /* no stolen mem on i81x */
+
 	pci_read_config_word(intel_private.bridge_dev,
			     I830_GMCH_CTRL, &gmch_ctrl);
 
-	if (INTEL_GTT_GEN > 4 || IS_PINEVIEW)
-		overhead_entries = 0;
-	else
-		overhead_entries = intel_private.base.gtt_mappable_entries
-			/ 1024;
-
-	overhead_entries += 1; /* BIOS popup */
-
 	if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
 	    intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
 		switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
@@ -623,12 +481,7 @@ static unsigned int intel_gtt_stolen_size(void)
 		}
 	}
 
-	if (!local && stolen_size > intel_max_stolen) {
-		dev_info(&intel_private.bridge_dev->dev,
-			 "detected %dK stolen memory, trimming to %dK\n",
-			 stolen_size / KB(1), intel_max_stolen / KB(1));
-		stolen_size = intel_max_stolen;
-	} else if (stolen_size > 0) {
+	if (stolen_size > 0) {
 		dev_info(&intel_private.bridge_dev->dev, "detected %dK %s memory\n",
 		       stolen_size / KB(1), local ? "local" : "stolen");
 	} else {
@@ -637,46 +490,88 @@ static unsigned int intel_gtt_stolen_size(void)
 		stolen_size = 0;
 	}
 
-	stolen_entries = stolen_size/KB(4) - overhead_entries;
+	return stolen_size;
+}
 
-	return stolen_entries;
+static void i965_adjust_pgetbl_size(unsigned int size_flag)
+{
+	u32 pgetbl_ctl, pgetbl_ctl2;
+
+	/* ensure that ppgtt is disabled */
+	pgetbl_ctl2 = readl(intel_private.registers+I965_PGETBL_CTL2);
+	pgetbl_ctl2 &= ~I810_PGETBL_ENABLED;
+	writel(pgetbl_ctl2, intel_private.registers+I965_PGETBL_CTL2);
+
+	/* write the new ggtt size */
+	pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
+	pgetbl_ctl &= ~I965_PGETBL_SIZE_MASK;
+	pgetbl_ctl |= size_flag;
+	writel(pgetbl_ctl, intel_private.registers+I810_PGETBL_CTL);
 }
 
-static unsigned int intel_gtt_total_entries(void)
+static unsigned int i965_gtt_total_entries(void)
 {
 	int size;
+	u32 pgetbl_ctl;
+	u16 gmch_ctl;
 
-	if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5) {
-		u32 pgetbl_ctl;
-		pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
+	pci_read_config_word(intel_private.bridge_dev,
+			     I830_GMCH_CTRL, &gmch_ctl);
 
-		switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
-		case I965_PGETBL_SIZE_128KB:
-			size = KB(128);
-			break;
-		case I965_PGETBL_SIZE_256KB:
-			size = KB(256);
+	if (INTEL_GTT_GEN == 5) {
+		switch (gmch_ctl & G4x_GMCH_SIZE_MASK) {
+		case G4x_GMCH_SIZE_1M:
+		case G4x_GMCH_SIZE_VT_1M:
+			i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1MB);
 			break;
-		case I965_PGETBL_SIZE_512KB:
-			size = KB(512);
+		case G4x_GMCH_SIZE_VT_1_5M:
+			i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1_5MB);
 			break;
-		case I965_PGETBL_SIZE_1MB:
-			size = KB(1024);
+		case G4x_GMCH_SIZE_2M:
+		case G4x_GMCH_SIZE_VT_2M:
+			i965_adjust_pgetbl_size(I965_PGETBL_SIZE_2MB);
 			break;
-		case I965_PGETBL_SIZE_2MB:
-			size = KB(2048);
-			break;
-		case I965_PGETBL_SIZE_1_5MB:
-			size = KB(1024 + 512);
-			break;
-		default:
-			dev_info(&intel_private.pcidev->dev,
-				 "unknown page table size, assuming 512KB\n");
-			size = KB(512);
 		}
+	}
 
-		return size/4;
-	} else if (INTEL_GTT_GEN == 6) {
+	pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
+
+	switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
+	case I965_PGETBL_SIZE_128KB:
+		size = KB(128);
+		break;
+	case I965_PGETBL_SIZE_256KB:
+		size = KB(256);
+		break;
+	case I965_PGETBL_SIZE_512KB:
+		size = KB(512);
+		break;
+	/* GTT pagetable sizes bigger than 512KB are not possible on G33! */
+	case I965_PGETBL_SIZE_1MB:
+		size = KB(1024);
+		break;
+	case I965_PGETBL_SIZE_2MB:
+		size = KB(2048);
+		break;
+	case I965_PGETBL_SIZE_1_5MB:
+		size = KB(1024 + 512);
+		break;
+	default:
+		dev_info(&intel_private.pcidev->dev,
+			 "unknown page table size, assuming 512KB\n");
+		size = KB(512);
+	}
+
+	return size/4;
+}
+
+static unsigned int intel_gtt_total_entries(void)
+{
+	int size;
+
+	if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5)
+		return i965_gtt_total_entries();
+	else if (INTEL_GTT_GEN == 6) {
 		u16 snb_gmch_ctl;
 
 		pci_read_config_word(intel_private.pcidev,
				     SNB_GMCH_CTRL, &snb_gmch_ctl);
@@ -706,7 +601,18 @@ static unsigned int intel_gtt_mappable_entries(void)
 {
 	unsigned int aperture_size;
 
-	if (INTEL_GTT_GEN == 2) {
+	if (INTEL_GTT_GEN == 1) {
+		u32 smram_miscc;
+
+		pci_read_config_dword(intel_private.bridge_dev,
+				      I810_SMRAM_MISCC, &smram_miscc);
+
+		if ((smram_miscc & I810_GFX_MEM_WIN_SIZE)
+				== I810_GFX_MEM_WIN_32M)
+			aperture_size = MB(32);
+		else
+			aperture_size = MB(64);
+	} else if (INTEL_GTT_GEN == 2) {
 		u16 gmch_ctrl;
 
 		pci_read_config_word(intel_private.bridge_dev,
@@ -739,7 +645,7 @@ static void intel_gtt_cleanup(void)
 
 	iounmap(intel_private.gtt);
 	iounmap(intel_private.registers);
-
+	
 	intel_gtt_teardown_scratch_page();
 }
 
@@ -755,6 +661,14 @@ static int intel_gtt_init(void)
 	intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries();
 	intel_private.base.gtt_total_entries = intel_gtt_total_entries();
 
+	/* save the PGETBL reg for resume */
+	intel_private.PGETBL_save =
+		readl(intel_private.registers+I810_PGETBL_CTL)
+			& ~I810_PGETBL_ENABLED;
+	/* we only ever restore the register when enabling the PGTBL... */
+	if (HAS_PGTBL_EN)
+		intel_private.PGETBL_save |= I810_PGETBL_ENABLED;
+
 	dev_info(&intel_private.bridge_dev->dev,
 			"detected gtt size: %dK total, %dK mappable\n",
 			intel_private.base.gtt_total_entries * 4,
@@ -772,14 +686,7 @@ static int intel_gtt_init(void)
 
 	global_cache_flush();   /* FIXME: ? */
 
-	/* we have to call this as early as possible after the MMIO base address is known */
-	intel_private.base.gtt_stolen_entries = intel_gtt_stolen_entries();
-	if (intel_private.base.gtt_stolen_entries == 0) {
-		intel_private.driver->cleanup();
-		iounmap(intel_private.registers);
-		iounmap(intel_private.gtt);
-		return -ENOMEM;
-	}
+	intel_private.base.stolen_size = intel_gtt_stolen_size();
 
 	ret = intel_gtt_setup_scratch_page();
 	if (ret != 0) {
@@ -787,6 +694,8 @@ static int intel_gtt_init(void)
 		return ret;
 	}
 
+	intel_private.base.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;
+
 	return 0;
 }
 
@@ -812,8 +721,10 @@ static int intel_fake_agp_fetch_size(void)
 
 static void i830_cleanup(void)
 {
-	kunmap(intel_private.i8xx_page);
-	intel_private.i8xx_flush_page = NULL;
+	if (intel_private.i8xx_flush_page) {
+		kunmap(intel_private.i8xx_flush_page);
+		intel_private.i8xx_flush_page = NULL;
+	}
 
 	__free_page(intel_private.i8xx_page);
 	intel_private.i8xx_page = NULL;
@@ -860,25 +771,19 @@ static void i830_write_entry(dma_addr_t addr, unsigned int entry,
 			     unsigned int flags)
 {
 	u32 pte_flags = I810_PTE_VALID;
-
-	switch (flags) {
-	case AGP_DCACHE_MEMORY:
-		pte_flags |= I810_PTE_LOCAL;
-		break;
-	case AGP_USER_CACHED_MEMORY:
+
+	if (flags ==  AGP_USER_CACHED_MEMORY)
 		pte_flags |= I830_PTE_SYSTEM_CACHED;
-		break;
-	}
 
 	writel(addr | pte_flags, intel_private.gtt + entry);
 }
 
-static void intel_enable_gtt(void)
+static bool intel_enable_gtt(void)
 {
 	u32 gma_addr;
-	u16 gmch_ctrl;
+	u8 __iomem *reg;
 
-	if (INTEL_GTT_GEN == 2)
+	if (INTEL_GTT_GEN <= 2)
 		pci_read_config_dword(intel_private.pcidev, I810_GMADDR,
				      &gma_addr);
 	else
@@ -887,13 +792,38 @@ static bool intel_enable_gtt(void)
 
 	intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);
 
-	pci_read_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, &gmch_ctrl);
-	gmch_ctrl |= I830_GMCH_ENABLED;
-	pci_write_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, gmch_ctrl);
+	if (INTEL_GTT_GEN >= 6)
+		return true;
 
-	writel(intel_private.pte_bus_addr|I810_PGETBL_ENABLED,
-	       intel_private.registers+I810_PGETBL_CTL);
-	readl(intel_private.registers+I810_PGETBL_CTL);	/* PCI Posting. */
+	if (INTEL_GTT_GEN == 2) {
+		u16 gmch_ctrl;
+
+		pci_read_config_word(intel_private.bridge_dev,
+				     I830_GMCH_CTRL, &gmch_ctrl);
+		gmch_ctrl |= I830_GMCH_ENABLED;
+		pci_write_config_word(intel_private.bridge_dev,
+				      I830_GMCH_CTRL, gmch_ctrl);
+
+		pci_read_config_word(intel_private.bridge_dev,
+				     I830_GMCH_CTRL, &gmch_ctrl);
+		if ((gmch_ctrl & I830_GMCH_ENABLED) == 0) {
+			dev_err(&intel_private.pcidev->dev,
+				"failed to enable the GTT: GMCH_CTRL=%x\n",
+				gmch_ctrl);
+			return false;
+		}
+	}
+
+	reg = intel_private.registers+I810_PGETBL_CTL;
+	writel(intel_private.PGETBL_save, reg);
+	if (HAS_PGTBL_EN && (readl(reg) & I810_PGETBL_ENABLED) == 0) {
+		dev_err(&intel_private.pcidev->dev,
+			"failed to enable the GTT: PGETBL=%x [expected %x]\n",
+			readl(reg), intel_private.PGETBL_save);
+		return false;
+	}
+
+	return true;
 }
 
 static int i830_setup(void)
@@ -908,8 +838,6 @@ static int i830_setup(void)
 		return -ENOMEM;
 
 	intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE;
-	intel_private.pte_bus_addr =
-		readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
 
 	intel_i830_setup_flush();
 
@@ -934,12 +862,12 @@ static int intel_fake_agp_configure(void)
 {
 	int i;
 
-	intel_enable_gtt();
+	if (!intel_enable_gtt())
+		return -EIO;
 
 	agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;
 
-	for (i = intel_private.base.gtt_stolen_entries;
-			i < intel_private.base.gtt_total_entries; i++) {
+	for (i = 0; i < intel_private.base.gtt_total_entries; i++) {
 		intel_private.driver->write_entry(intel_private.scratch_page_dma,
						  i, 0);
 	}
@@ -963,10 +891,10 @@ static bool i830_check_flags(unsigned int flags)
 	return false;
 }
 
-static void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
-					unsigned int sg_len,
-					unsigned int pg_start,
-					unsigned int flags)
+void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
+				 unsigned int sg_len,
+				 unsigned int pg_start,
+				 unsigned int flags)
 {
 	struct scatterlist *sg;
 	unsigned int len, m;
@@ -987,27 +915,34 @@ static void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
 	}
 	readl(intel_private.gtt+j-1);
 }
+EXPORT_SYMBOL(intel_gtt_insert_sg_entries);
+
+void intel_gtt_insert_pages(unsigned int first_entry, unsigned int num_entries,
+			    struct page **pages, unsigned int flags)
+{
+	int i, j;
+
+	for (i = 0, j = first_entry; i < num_entries; i++, j++) {
+		dma_addr_t addr = page_to_phys(pages[i]);
+		intel_private.driver->write_entry(addr,
+						  j, flags);
+	}
+	readl(intel_private.gtt+j-1);
+}
+EXPORT_SYMBOL(intel_gtt_insert_pages);
 
 static int intel_fake_agp_insert_entries(struct agp_memory *mem,
					 off_t pg_start, int type)
 {
-	int i, j;
 	int ret = -EINVAL;
 
+	if (INTEL_GTT_GEN == 1 && type == AGP_DCACHE_MEMORY)
+		return i810_insert_dcache_entries(mem, pg_start, type);
+
 	if (mem->page_count == 0)
 		goto out;
 
-	if (pg_start < intel_private.base.gtt_stolen_entries) {
-		dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
-			   "pg_start == 0x%.8lx, gtt_stolen_entries == 0x%.8x\n",
-			   pg_start, intel_private.base.gtt_stolen_entries);
-
-		dev_info(&intel_private.pcidev->dev,
-			 "trying to insert into local/stolen memory\n");
-		goto out_err;
-	}
-
-	if ((pg_start + mem->page_count) > intel_private.base.gtt_total_entries)
+	if (pg_start + mem->page_count > intel_private.base.gtt_total_entries)
 		goto out_err;
 
 	if (type != mem->type)
@@ -1019,21 +954,17 @@ static int intel_fake_agp_insert_entries(struct agp_memory *mem,
 	if (!mem->is_flushed)
 		global_cache_flush();
 
-	if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2) {
-		ret = intel_agp_map_memory(mem);
+	if (intel_private.base.needs_dmar) {
+		ret = intel_gtt_map_memory(mem->pages, mem->page_count,
+					   &mem->sg_list, &mem->num_sg);
 		if (ret != 0)
 			return ret;
 
 		intel_gtt_insert_sg_entries(mem->sg_list, mem->num_sg,
					    pg_start, type);
-	} else {
-		for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
-			dma_addr_t addr = page_to_phys(mem->pages[i]);
-			intel_private.driver->write_entry(addr,
-							  j, type);
-		}
-		readl(intel_private.gtt+j-1);
-	}
+	} else
+		intel_gtt_insert_pages(pg_start, mem->page_count, mem->pages,
				       type);
 
 out:
 	ret = 0;
@@ -1042,40 +973,54 @@ out_err:
 	return ret;
 }
 
+void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
+{
+	unsigned int i;
+
+	for (i = first_entry; i < (first_entry + num_entries); i++) {
+		intel_private.driver->write_entry(intel_private.scratch_page_dma,
+						  i, 0);
+	}
+	readl(intel_private.gtt+i-1);
+}
+EXPORT_SYMBOL(intel_gtt_clear_range);
+
 static int intel_fake_agp_remove_entries(struct agp_memory *mem,
					 off_t pg_start, int type)
 {
-	int i;
-
 	if (mem->page_count == 0)
 		return 0;
 
-	if (pg_start < intel_private.base.gtt_stolen_entries) {
-		dev_info(&intel_private.pcidev->dev,
-			 "trying to disable local/stolen memory\n");
-		return -EINVAL;
+	if (intel_private.base.needs_dmar) {
+		intel_gtt_unmap_memory(mem->sg_list, mem->num_sg);
+		mem->sg_list = NULL;
+		mem->num_sg = 0;
 	}
 
-	if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2)
-		intel_agp_unmap_memory(mem);
-
-	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
-		intel_private.driver->write_entry(intel_private.scratch_page_dma,
-						  i, 0);
-	}
-	readl(intel_private.gtt+i-1);
+	intel_gtt_clear_range(pg_start, mem->page_count);
 
 	return 0;
 }
 
-static void intel_fake_agp_chipset_flush(struct agp_bridge_data *bridge)
-{
-	intel_private.driver->chipset_flush();
-}
-
 static struct agp_memory *intel_fake_agp_alloc_by_type(size_t pg_count,
						       int type)
 {
+	struct agp_memory *new;
+
+	if (type == AGP_DCACHE_MEMORY && INTEL_GTT_GEN == 1) {
+		if (pg_count != intel_private.num_dcache_entries)
+			return NULL;
+
+		new = agp_create_memory(1);
+		if (new == NULL)
+			return NULL;
+
+		new->type = AGP_DCACHE_MEMORY;
+		new->page_count = pg_count;
+		new->num_scratch_pages = 0;
+		agp_free_page_array(new);
+		return new;
+	}
 	if (type == AGP_PHYS_MEMORY)
 		return alloc_agpphysmem_i8xx(pg_count, type);
 	/* always return NULL for other allocation types for now */
@@ -1190,12 +1135,19 @@ static void i9xx_chipset_flush(void)
 		writel(1, intel_private.i9xx_flush_page);
 }
 
-static void i965_write_entry(dma_addr_t addr, unsigned int entry,
+static void i965_write_entry(dma_addr_t addr,
+			     unsigned int entry,
 			     unsigned int flags)
 {
+	u32 pte_flags;
+
+	pte_flags = I810_PTE_VALID;
+	if (flags == AGP_USER_CACHED_MEMORY)
+		pte_flags |= I830_PTE_SYSTEM_CACHED;
+
 	/* Shift high bits down */
 	addr |= (addr >> 28) & 0xf0;
-	writel(addr | I810_PTE_VALID, intel_private.gtt + entry);
+	writel(addr | pte_flags, intel_private.gtt + entry);
 }
 
 static bool gen6_check_flags(unsigned int flags)
@@ -1265,40 +1217,11 @@ static int i9xx_setup(void)
 		intel_private.gtt_bus_addr = reg_addr + gtt_offset;
 	}
 
-	intel_private.pte_bus_addr =
-		readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
-
 	intel_i9xx_setup_flush();
 
 	return 0;
 }
 
-static const struct agp_bridge_driver intel_810_driver = {
-	.owner			= THIS_MODULE,
-	.aperture_sizes		= intel_i810_sizes,
-	.size_type		= FIXED_APER_SIZE,
-	.num_aperture_sizes	= 2,
-	.needs_scratch_page	= true,
-	.configure		= intel_i810_configure,
-	.fetch_size		= intel_i810_fetch_size,
-	.cleanup		= intel_i810_cleanup,
-	.mask_memory		= intel_i810_mask_memory,
-	.masks			= intel_i810_masks,
-	.agp_enable		= intel_fake_agp_enable,
-	.cache_flush		= global_cache_flush,
-	.create_gatt_table	= agp_generic_create_gatt_table,
-	.free_gatt_table	= agp_generic_free_gatt_table,
-	.insert_memory		= intel_i810_insert_entries,
-	.remove_memory		= intel_i810_remove_entries,
-	.alloc_by_type		= intel_i810_alloc_by_type,
-	.free_by_type		= intel_i810_free_by_type,
-	.agp_alloc_page		= agp_generic_alloc_page,
-	.agp_alloc_pages	= agp_generic_alloc_pages,
-	.agp_destroy_page	= agp_generic_destroy_page,
-	.agp_destroy_pages	= agp_generic_destroy_pages,
-	.agp_type_to_mask_type	= agp_generic_type_to_mask_type,
-};
-
 static const struct agp_bridge_driver intel_fake_agp_driver = {
 	.owner			= THIS_MODULE,
 	.size_type		= FIXED_APER_SIZE,
@@ -1319,15 +1242,20 @@ static const struct agp_bridge_driver intel_fake_agp_driver = {
 	.agp_alloc_pages	= agp_generic_alloc_pages,
 	.agp_destroy_page	= agp_generic_destroy_page,
 	.agp_destroy_pages	= agp_generic_destroy_pages,
-	.chipset_flush		= intel_fake_agp_chipset_flush,
 };
 
 static const struct intel_gtt_driver i81x_gtt_driver = {
 	.gen = 1,
+	.has_pgtbl_enable = 1,
 	.dma_mask_size = 32,
+	.setup = i810_setup,
+	.cleanup = i810_cleanup,
+	.check_flags = i830_check_flags,
+	.write_entry = i810_write_entry,
 };
 static const struct intel_gtt_driver i8xx_gtt_driver = {
 	.gen = 2,
+	.has_pgtbl_enable = 1,
 	.setup = i830_setup,
 	.cleanup = i830_cleanup,
 	.write_entry = i830_write_entry,
@@ -1337,10 +1265,11 @@ static const struct intel_gtt_driver i8xx_gtt_driver = {
 };
 static const struct intel_gtt_driver i915_gtt_driver = {
 	.gen = 3,
+	.has_pgtbl_enable = 1,
 	.setup = i9xx_setup,
 	.cleanup = i9xx_cleanup,
 	/* i945 is the last gpu to need phys mem (for overlay and cursors). */
-	.write_entry = i830_write_entry, 
+	.write_entry = i830_write_entry,
 	.dma_mask_size = 32,
 	.check_flags = i830_check_flags,
 	.chipset_flush = i9xx_chipset_flush,
@@ -1367,6 +1296,7 @@ static const struct intel_gtt_driver pineview_gtt_driver = {
 };
 static const struct intel_gtt_driver i965_gtt_driver = {
 	.gen = 4,
+	.has_pgtbl_enable = 1,
 	.setup = i9xx_setup,
 	.cleanup = i9xx_cleanup,
 	.write_entry = i965_write_entry,
@@ -1410,93 +1340,92 @@ static const struct intel_gtt_driver sandybridge_gtt_driver = {
 static const struct intel_gtt_driver_description {
 	unsigned int gmch_chip_id;
 	char *name;
-	const struct agp_bridge_driver *gmch_driver;
 	const struct intel_gtt_driver *gtt_driver;
 } intel_gtt_chipsets[] = {
-	{ PCI_DEVICE_ID_INTEL_82810_IG1, "i810", &intel_810_driver,
+	{ PCI_DEVICE_ID_INTEL_82810_IG1, "i810",
 		&i81x_gtt_driver},
-	{ PCI_DEVICE_ID_INTEL_82810_IG3, "i810", &intel_810_driver,
+	{ PCI_DEVICE_ID_INTEL_82810_IG3, "i810",
 		&i81x_gtt_driver},
-	{ PCI_DEVICE_ID_INTEL_82810E_IG, "i810", &intel_810_driver,
+	{ PCI_DEVICE_ID_INTEL_82810E_IG, "i810",
 		&i81x_gtt_driver},
-	{ PCI_DEVICE_ID_INTEL_82815_CGC, "i815", &intel_810_driver,
+	{ PCI_DEVICE_ID_INTEL_82815_CGC, "i815",
 		&i81x_gtt_driver},
 	{ PCI_DEVICE_ID_INTEL_82830_CGC, "830M",
-		&intel_fake_agp_driver, &i8xx_gtt_driver},
+		&i8xx_gtt_driver},
 	{ PCI_DEVICE_ID_INTEL_82845G_IG, "830M",
-		&intel_fake_agp_driver, &i8xx_gtt_driver},
+		&i8xx_gtt_driver},
 	{ PCI_DEVICE_ID_INTEL_82854_IG, "854",
-		&intel_fake_agp_driver, &i8xx_gtt_driver},
+		&i8xx_gtt_driver},
 	{ PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM",
-		&intel_fake_agp_driver, &i8xx_gtt_driver},
+		&i8xx_gtt_driver},
 	{ PCI_DEVICE_ID_INTEL_82865_IG, "865",
-		&intel_fake_agp_driver, &i8xx_gtt_driver},
+		&i8xx_gtt_driver},
 	{ PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)",
-		&intel_fake_agp_driver,
&i915_gtt_driver }, + &i915_gtt_driver }, { PCI_DEVICE_ID_INTEL_82915G_IG, "915G", - &intel_fake_agp_driver, &i915_gtt_driver }, + &i915_gtt_driver }, { PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM", - &intel_fake_agp_driver, &i915_gtt_driver }, + &i915_gtt_driver }, { PCI_DEVICE_ID_INTEL_82945G_IG, "945G", - &intel_fake_agp_driver, &i915_gtt_driver }, + &i915_gtt_driver }, { PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM", - &intel_fake_agp_driver, &i915_gtt_driver }, + &i915_gtt_driver }, { PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME", - &intel_fake_agp_driver, &i915_gtt_driver }, + &i915_gtt_driver }, { PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ", - &intel_fake_agp_driver, &i965_gtt_driver }, + &i965_gtt_driver }, { PCI_DEVICE_ID_INTEL_82G35_IG, "G35", - &intel_fake_agp_driver, &i965_gtt_driver }, + &i965_gtt_driver }, { PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q", - &intel_fake_agp_driver, &i965_gtt_driver }, + &i965_gtt_driver }, { PCI_DEVICE_ID_INTEL_82965G_IG, "965G", - &intel_fake_agp_driver, &i965_gtt_driver }, + &i965_gtt_driver }, { PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM", - &intel_fake_agp_driver, &i965_gtt_driver }, + &i965_gtt_driver }, { PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE", - &intel_fake_agp_driver, &i965_gtt_driver }, + &i965_gtt_driver }, { PCI_DEVICE_ID_INTEL_G33_IG, "G33", - &intel_fake_agp_driver, &g33_gtt_driver }, + &g33_gtt_driver }, { PCI_DEVICE_ID_INTEL_Q35_IG, "Q35", - &intel_fake_agp_driver, &g33_gtt_driver }, + &g33_gtt_driver }, { PCI_DEVICE_ID_INTEL_Q33_IG, "Q33", - &intel_fake_agp_driver, &g33_gtt_driver }, + &g33_gtt_driver }, { PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150", - &intel_fake_agp_driver, &pineview_gtt_driver }, + &pineview_gtt_driver }, { PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150", - &intel_fake_agp_driver, &pineview_gtt_driver }, + &pineview_gtt_driver }, { PCI_DEVICE_ID_INTEL_GM45_IG, "GM45", - &intel_fake_agp_driver, &g4x_gtt_driver }, + &g4x_gtt_driver }, { PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake", - &intel_fake_agp_driver, &g4x_gtt_driver }, + &g4x_gtt_driver }, { PCI_DEVICE_ID_INTEL_Q45_IG, "Q45/Q43", - &intel_fake_agp_driver, &g4x_gtt_driver }, + &g4x_gtt_driver }, { PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43", - &intel_fake_agp_driver, &g4x_gtt_driver }, + &g4x_gtt_driver }, { PCI_DEVICE_ID_INTEL_B43_IG, "B43", - &intel_fake_agp_driver, &g4x_gtt_driver }, + &g4x_gtt_driver }, { PCI_DEVICE_ID_INTEL_B43_1_IG, "B43", - &intel_fake_agp_driver, &g4x_gtt_driver }, + &g4x_gtt_driver }, { PCI_DEVICE_ID_INTEL_G41_IG, "G41", - &intel_fake_agp_driver, &g4x_gtt_driver }, + &g4x_gtt_driver }, { PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG, - "HD Graphics", &intel_fake_agp_driver, &ironlake_gtt_driver }, + "HD Graphics", &ironlake_gtt_driver }, { PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, - "HD Graphics", &intel_fake_agp_driver, &ironlake_gtt_driver }, + "HD Graphics", &ironlake_gtt_driver }, { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG, - "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver }, + "Sandybridge", &sandybridge_gtt_driver }, { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG, - "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver }, + "Sandybridge", &sandybridge_gtt_driver }, { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG, - "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver }, + "Sandybridge", &sandybridge_gtt_driver }, { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG, - "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver }, + "Sandybridge", &sandybridge_gtt_driver }, { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG, - "Sandybridge", &intel_fake_agp_driver, 
&sandybridge_gtt_driver }, + "Sandybridge", &sandybridge_gtt_driver }, { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG, - "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver }, + "Sandybridge", &sandybridge_gtt_driver }, { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG, - "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver }, + "Sandybridge", &sandybridge_gtt_driver }, { 0, NULL, NULL } }; @@ -1521,21 +1450,20 @@ int intel_gmch_probe(struct pci_dev *pdev, struct agp_bridge_data *bridge) { int i, mask; - bridge->driver = NULL; + intel_private.driver = NULL; for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) { if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) { - bridge->driver = - intel_gtt_chipsets[i].gmch_driver; - intel_private.driver = + intel_private.driver = intel_gtt_chipsets[i].gtt_driver; break; } } - if (!bridge->driver) + if (!intel_private.driver) return 0; + bridge->driver = &intel_fake_agp_driver; bridge->dev_private_data = &intel_private; bridge->dev = pdev; @@ -1551,8 +1479,8 @@ int intel_gmch_probe(struct pci_dev *pdev, pci_set_consistent_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)); - if (bridge->driver == &intel_810_driver) - return 1; + /*if (bridge->driver == &intel_810_driver) + return 1;*/ if (intel_gtt_init() != 0) return 0; @@ -1561,12 +1489,19 @@ int intel_gmch_probe(struct pci_dev *pdev, } EXPORT_SYMBOL(intel_gmch_probe); -struct intel_gtt *intel_gtt_get(void) +const struct intel_gtt *intel_gtt_get(void) { return &intel_private.base; } EXPORT_SYMBOL(intel_gtt_get); +void intel_gtt_chipset_flush(void) +{ + if (intel_private.driver->chipset_flush) + intel_private.driver->chipset_flush(); +} +EXPORT_SYMBOL(intel_gtt_chipset_flush); + void intel_gmch_remove(struct pci_dev *pdev) { if (intel_private.pcidev) diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c index 1030f8420137..c17a305ecb28 100644 --- a/drivers/char/tpm/tpm_tis.c +++ b/drivers/char/tpm/tpm_tis.c @@ -25,6 +25,7 @@ #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/wait.h> +#include <linux/acpi.h> #include "tpm.h" #define TPM_HEADER_SIZE 10 @@ -78,6 +79,26 @@ enum tis_defaults { static LIST_HEAD(tis_chips); static DEFINE_SPINLOCK(tis_lock); +#ifdef CONFIG_ACPI +static int is_itpm(struct pnp_dev *dev) +{ + struct acpi_device *acpi = pnp_acpi_device(dev); + struct acpi_hardware_id *id; + + list_for_each_entry(id, &acpi->pnp.ids, list) { + if (!strcmp("INTC0102", id->id)) + return 1; + } + + return 0; +} +#else +static int is_itpm(struct pnp_dev *dev) +{ + return 0; +} +#endif + static int check_locality(struct tpm_chip *chip, int l) { if ((ioread8(chip->vendor.iobase + TPM_ACCESS(l)) & @@ -472,6 +493,9 @@ static int tpm_tis_init(struct device *dev, resource_size_t start, "1.2 TPM (device-id 0x%X, rev-id %d)\n", vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0))); + if (is_itpm(to_pnp_dev(dev))) + itpm = 1; + if (itpm) dev_info(dev, "Intel iTPM workaround enabled\n"); diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 6c1b676643a9..896a2ced1d27 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -1547,31 +1547,16 @@ static int init_vqs(struct ports_device *portdev) nr_queues = use_multiport(portdev) ? 
(nr_ports + 1) * 2 : 2; vqs = kmalloc(nr_queues * sizeof(struct virtqueue *), GFP_KERNEL); - if (!vqs) { - err = -ENOMEM; - goto fail; - } io_callbacks = kmalloc(nr_queues * sizeof(vq_callback_t *), GFP_KERNEL); - if (!io_callbacks) { - err = -ENOMEM; - goto free_vqs; - } io_names = kmalloc(nr_queues * sizeof(char *), GFP_KERNEL); - if (!io_names) { - err = -ENOMEM; - goto free_callbacks; - } portdev->in_vqs = kmalloc(nr_ports * sizeof(struct virtqueue *), GFP_KERNEL); - if (!portdev->in_vqs) { - err = -ENOMEM; - goto free_names; - } portdev->out_vqs = kmalloc(nr_ports * sizeof(struct virtqueue *), GFP_KERNEL); - if (!portdev->out_vqs) { + if (!vqs || !io_callbacks || !io_names || !portdev->in_vqs || + !portdev->out_vqs) { err = -ENOMEM; - goto free_invqs; + goto free; } /* @@ -1605,7 +1590,7 @@ static int init_vqs(struct ports_device *portdev) io_callbacks, (const char **)io_names); if (err) - goto free_outvqs; + goto free; j = 0; portdev->in_vqs[0] = vqs[0]; @@ -1621,23 +1606,19 @@ static int init_vqs(struct ports_device *portdev) portdev->out_vqs[i] = vqs[j + 1]; } } - kfree(io_callbacks); kfree(io_names); + kfree(io_callbacks); kfree(vqs); return 0; -free_names: - kfree(io_names); -free_callbacks: - kfree(io_callbacks); -free_outvqs: +free: kfree(portdev->out_vqs); -free_invqs: kfree(portdev->in_vqs); -free_vqs: + kfree(io_names); + kfree(io_callbacks); kfree(vqs); -fail: + return err; } diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c index eb6b54dbb806..85ffd5e38c50 100644 --- a/drivers/dma/shdma.c +++ b/drivers/dma/shdma.c @@ -1213,3 +1213,4 @@ module_exit(sh_dmae_exit); MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>"); MODULE_DESCRIPTION("Renesas SH DMA Engine driver"); MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:sh-dma-engine"); diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile index b3781399b38a..ba2898b3639b 100644 --- a/drivers/edac/Makefile +++ b/drivers/edac/Makefile @@ -10,16 +10,16 @@ obj-$(CONFIG_EDAC) := edac_stub.o obj-$(CONFIG_EDAC_MM_EDAC) += edac_core.o obj-$(CONFIG_EDAC_MCE) += edac_mce.o -edac_core-objs := edac_mc.o edac_device.o edac_mc_sysfs.o edac_pci_sysfs.o -edac_core-objs += edac_module.o edac_device_sysfs.o +edac_core-y := edac_mc.o edac_device.o edac_mc_sysfs.o edac_pci_sysfs.o +edac_core-y += edac_module.o edac_device_sysfs.o ifdef CONFIG_PCI -edac_core-objs += edac_pci.o edac_pci_sysfs.o +edac_core-y += edac_pci.o edac_pci_sysfs.o endif obj-$(CONFIG_EDAC_MCE_INJ) += mce_amd_inj.o -edac_mce_amd-objs := mce_amd.o +edac_mce_amd-y := mce_amd.o obj-$(CONFIG_EDAC_DECODE_MCE) += edac_mce_amd.o obj-$(CONFIG_EDAC_AMD76X) += amd76x_edac.o diff --git a/drivers/edac/mce_amd_inj.c b/drivers/edac/mce_amd_inj.c index 8d0688f36d4c..39faded3cadd 100644 --- a/drivers/edac/mce_amd_inj.c +++ b/drivers/edac/mce_amd_inj.c @@ -139,7 +139,7 @@ static int __init edac_init_mce_inject(void) return 0; err_sysfs_create: - while (i-- >= 0) + while (--i >= 0) sysfs_remove_file(mce_kobj, &sysfs_attrs[i]->attr); kobject_del(mce_kobj); diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c index 18fdd9703b48..1a467a91fb0b 100644 --- a/drivers/firewire/net.c +++ b/drivers/firewire/net.c @@ -7,6 +7,7 @@ */ #include <linux/bug.h> +#include <linux/delay.h> #include <linux/device.h> #include <linux/firewire.h> #include <linux/firewire-constants.h> @@ -26,8 +27,14 @@ #include <asm/unaligned.h> #include <net/arp.h> -#define FWNET_MAX_FRAGMENTS 25 /* arbitrary limit */ -#define FWNET_ISO_PAGE_COUNT (PAGE_SIZE < 16 * 1024 ? 
4 : 2) +/* rx limits */ +#define FWNET_MAX_FRAGMENTS 30 /* arbitrary, > TX queue depth */ +#define FWNET_ISO_PAGE_COUNT (PAGE_SIZE < 16*1024 ? 4 : 2) + +/* tx limits */ +#define FWNET_MAX_QUEUED_DATAGRAMS 20 /* < 64 = number of tlabels */ +#define FWNET_MIN_QUEUED_DATAGRAMS 10 /* should keep AT DMA busy enough */ +#define FWNET_TX_QUEUE_LEN FWNET_MAX_QUEUED_DATAGRAMS /* ? */ #define IEEE1394_BROADCAST_CHANNEL 31 #define IEEE1394_ALL_NODES (0xffc0 | 0x003f) @@ -169,15 +176,8 @@ struct fwnet_device { struct fw_address_handler handler; u64 local_fifo; - /* List of packets to be sent */ - struct list_head packet_list; - /* - * List of packets that were broadcasted. When we get an ISO interrupt - * one of them has been sent - */ - struct list_head broadcasted_list; - /* List of packets that have been sent but not yet acked */ - struct list_head sent_list; + /* Number of tx datagrams that have been queued but not yet acked */ + int queued_datagrams; struct list_head peer_list; struct fw_card *card; @@ -195,7 +195,7 @@ struct fwnet_peer { unsigned pdg_size; /* pd_list size */ u16 datagram_label; /* outgoing datagram label */ - unsigned max_payload; /* includes RFC2374_FRAG_HDR_SIZE overhead */ + u16 max_payload; /* includes RFC2374_FRAG_HDR_SIZE overhead */ int node_id; int generation; unsigned speed; @@ -203,22 +203,18 @@ struct fwnet_peer { /* This is our task struct. It's used for the packet complete callback. */ struct fwnet_packet_task { - /* - * ptask can actually be on dev->packet_list, dev->broadcasted_list, - * or dev->sent_list depending on its current state. - */ - struct list_head pt_link; struct fw_transaction transaction; struct rfc2734_header hdr; struct sk_buff *skb; struct fwnet_device *dev; int outstanding_pkts; - unsigned max_payload; u64 fifo_addr; u16 dest_node; + u16 max_payload; u8 generation; u8 speed; + u8 enqueued; }; /* @@ -650,8 +646,6 @@ static int fwnet_finish_incoming_packet(struct net_device *net, net->stats.rx_packets++; net->stats.rx_bytes += skb->len; } - if (netif_queue_stopped(net)) - netif_wake_queue(net); return 0; @@ -660,8 +654,6 @@ static int fwnet_finish_incoming_packet(struct net_device *net, net->stats.rx_dropped++; dev_kfree_skb_any(skb); - if (netif_queue_stopped(net)) - netif_wake_queue(net); return -ENOENT; } @@ -793,15 +785,10 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len, * Datagram is not complete, we're done for the * moment. */ - spin_unlock_irqrestore(&dev->lock, flags); - - return 0; + retval = 0; fail: spin_unlock_irqrestore(&dev->lock, flags); - if (netif_queue_stopped(net)) - netif_wake_queue(net); - return retval; } @@ -901,11 +888,19 @@ static void fwnet_free_ptask(struct fwnet_packet_task *ptask) kmem_cache_free(fwnet_packet_task_cache, ptask); } +/* Caller must hold dev->lock. */ +static void dec_queued_datagrams(struct fwnet_device *dev) +{ + if (--dev->queued_datagrams == FWNET_MIN_QUEUED_DATAGRAMS) + netif_wake_queue(dev->netdev); +} + static int fwnet_send_packet(struct fwnet_packet_task *ptask); static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask) { struct fwnet_device *dev = ptask->dev; + struct sk_buff *skb = ptask->skb; unsigned long flags; bool free; @@ -914,10 +909,14 @@ static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask) ptask->outstanding_pkts--; /* Check whether we or the networking TX soft-IRQ is last user. 
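Whichever of the two runs last frees the ptask: the completion only frees it once fwnet_send_packet() has set ptask->enqueued, i.e. the TX path is done with it.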
*/ - free = (ptask->outstanding_pkts == 0 && !list_empty(&ptask->pt_link)); + free = (ptask->outstanding_pkts == 0 && ptask->enqueued); + if (free) + dec_queued_datagrams(dev); - if (ptask->outstanding_pkts == 0) - list_del(&ptask->pt_link); + if (ptask->outstanding_pkts == 0) { + dev->netdev->stats.tx_packets++; + dev->netdev->stats.tx_bytes += skb->len; + } spin_unlock_irqrestore(&dev->lock, flags); @@ -926,7 +925,6 @@ static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask) u16 fg_off; u16 datagram_label; u16 lf; - struct sk_buff *skb; /* Update the ptask to point to the next fragment and send it */ lf = fwnet_get_hdr_lf(&ptask->hdr); @@ -953,7 +951,7 @@ static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask) datagram_label = fwnet_get_hdr_dgl(&ptask->hdr); break; } - skb = ptask->skb; + skb_pull(skb, ptask->max_payload); if (ptask->outstanding_pkts > 1) { fwnet_make_sf_hdr(&ptask->hdr, RFC2374_HDR_INTFRAG, @@ -970,6 +968,31 @@ static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask) fwnet_free_ptask(ptask); } +static void fwnet_transmit_packet_failed(struct fwnet_packet_task *ptask) +{ + struct fwnet_device *dev = ptask->dev; + unsigned long flags; + bool free; + + spin_lock_irqsave(&dev->lock, flags); + + /* One fragment failed; don't try to send remaining fragments. */ + ptask->outstanding_pkts = 0; + + /* Check whether we or the networking TX soft-IRQ is last user. */ + free = ptask->enqueued; + if (free) + dec_queued_datagrams(dev); + + dev->netdev->stats.tx_dropped++; + dev->netdev->stats.tx_errors++; + + spin_unlock_irqrestore(&dev->lock, flags); + + if (free) + fwnet_free_ptask(ptask); +} + static void fwnet_write_complete(struct fw_card *card, int rcode, void *payload, size_t length, void *data) { @@ -977,11 +1000,12 @@ static void fwnet_write_complete(struct fw_card *card, int rcode, ptask = data; - if (rcode == RCODE_COMPLETE) + if (rcode == RCODE_COMPLETE) { fwnet_transmit_packet_done(ptask); - else + } else { fw_error("fwnet_write_complete: failed: %x\n", rcode); - /* ??? error recovery */ + fwnet_transmit_packet_failed(ptask); + } } static int fwnet_send_packet(struct fwnet_packet_task *ptask) @@ -1039,9 +1063,11 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask) spin_lock_irqsave(&dev->lock, flags); /* If the AT tasklet already ran, we may be last user. */ - free = (ptask->outstanding_pkts == 0 && list_empty(&ptask->pt_link)); + free = (ptask->outstanding_pkts == 0 && !ptask->enqueued); if (!free) - list_add_tail(&ptask->pt_link, &dev->broadcasted_list); + ptask->enqueued = true; + else + dec_queued_datagrams(dev); spin_unlock_irqrestore(&dev->lock, flags); @@ -1056,9 +1082,11 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask) spin_lock_irqsave(&dev->lock, flags); /* If the AT tasklet already ran, we may be last user. */ - free = (ptask->outstanding_pkts == 0 && list_empty(&ptask->pt_link)); + free = (ptask->outstanding_pkts == 0 && !ptask->enqueued); if (!free) - list_add_tail(&ptask->pt_link, &dev->sent_list); + ptask->enqueued = true; + else + dec_queued_datagrams(dev); spin_unlock_irqrestore(&dev->lock, flags); @@ -1224,6 +1252,15 @@ static netdev_tx_t fwnet_tx(struct sk_buff *skb, struct net_device *net) struct fwnet_peer *peer; unsigned long flags; + spin_lock_irqsave(&dev->lock, flags); + + /* Can this happen? 
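It can: the queue is stopped under dev->lock once FWNET_MAX_QUEUED_DATAGRAMS are in flight, but the networking core may already have entered fwnet_tx() on another CPU, so back off with NETDEV_TX_BUSY and let the stack requeue the skb.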
*/ + if (netif_queue_stopped(dev->netdev)) { + spin_unlock_irqrestore(&dev->lock, flags); + + return NETDEV_TX_BUSY; + } + ptask = kmem_cache_alloc(fwnet_packet_task_cache, GFP_ATOMIC); if (ptask == NULL) goto fail; @@ -1242,9 +1279,6 @@ static netdev_tx_t fwnet_tx(struct sk_buff *skb, struct net_device *net) proto = hdr_buf.h_proto; dg_size = skb->len; - /* serialize access to peer, including peer->datagram_label */ - spin_lock_irqsave(&dev->lock, flags); - /* * Set the transmission type for the packet. ARP packets and IP * broadcast packets are sent via GASP. @@ -1266,7 +1300,7 @@ static netdev_tx_t fwnet_tx(struct sk_buff *skb, struct net_device *net) peer = fwnet_peer_find_by_guid(dev, be64_to_cpu(guid)); if (!peer || peer->fifo == FWNET_NO_FIFO_ADDR) - goto fail_unlock; + goto fail; generation = peer->generation; dest_node = peer->node_id; @@ -1320,18 +1354,21 @@ static netdev_tx_t fwnet_tx(struct sk_buff *skb, struct net_device *net) max_payload += RFC2374_FRAG_HDR_SIZE; } + if (++dev->queued_datagrams == FWNET_MAX_QUEUED_DATAGRAMS) + netif_stop_queue(dev->netdev); + spin_unlock_irqrestore(&dev->lock, flags); ptask->max_payload = max_payload; - INIT_LIST_HEAD(&ptask->pt_link); + ptask->enqueued = 0; fwnet_send_packet(ptask); return NETDEV_TX_OK; - fail_unlock: - spin_unlock_irqrestore(&dev->lock, flags); fail: + spin_unlock_irqrestore(&dev->lock, flags); + if (ptask) kmem_cache_free(fwnet_packet_task_cache, ptask); @@ -1377,7 +1414,7 @@ static void fwnet_init_dev(struct net_device *net) net->addr_len = FWNET_ALEN; net->hard_header_len = FWNET_HLEN; net->type = ARPHRD_IEEE1394; - net->tx_queue_len = 10; + net->tx_queue_len = FWNET_TX_QUEUE_LEN; } /* caller must hold fwnet_device_mutex */ @@ -1457,14 +1494,9 @@ static int fwnet_probe(struct device *_dev) dev->broadcast_rcv_context = NULL; dev->broadcast_xmt_max_payload = 0; dev->broadcast_xmt_datagramlabel = 0; - dev->local_fifo = FWNET_NO_FIFO_ADDR; - - INIT_LIST_HEAD(&dev->packet_list); - INIT_LIST_HEAD(&dev->broadcasted_list); - INIT_LIST_HEAD(&dev->sent_list); + dev->queued_datagrams = 0; INIT_LIST_HEAD(&dev->peer_list); - dev->card = card; dev->netdev = net; @@ -1522,7 +1554,7 @@ static int fwnet_remove(struct device *_dev) struct fwnet_peer *peer = dev_get_drvdata(_dev); struct fwnet_device *dev = peer->dev; struct net_device *net; - struct fwnet_packet_task *ptask, *pt_next; + int i; mutex_lock(&fwnet_device_mutex); @@ -1540,21 +1572,9 @@ static int fwnet_remove(struct device *_dev) dev->card); fw_iso_context_destroy(dev->broadcast_rcv_context); } - list_for_each_entry_safe(ptask, pt_next, - &dev->packet_list, pt_link) { - dev_kfree_skb_any(ptask->skb); - kmem_cache_free(fwnet_packet_task_cache, ptask); - } - list_for_each_entry_safe(ptask, pt_next, - &dev->broadcasted_list, pt_link) { - dev_kfree_skb_any(ptask->skb); - kmem_cache_free(fwnet_packet_task_cache, ptask); - } - list_for_each_entry_safe(ptask, pt_next, - &dev->sent_list, pt_link) { - dev_kfree_skb_any(ptask->skb); - kmem_cache_free(fwnet_packet_task_cache, ptask); - } + for (i = 0; dev->queued_datagrams && i < 5; i++) + ssleep(1); + WARN_ON(dev->queued_datagrams); list_del(&dev->dev_link); free_netdev(net); diff --git a/drivers/gpio/cs5535-gpio.c b/drivers/gpio/cs5535-gpio.c index e23c06893d19..599f6c9e0fbf 100644 --- a/drivers/gpio/cs5535-gpio.c +++ b/drivers/gpio/cs5535-gpio.c @@ -56,6 +56,18 @@ static struct cs5535_gpio_chip { * registers, see include/linux/cs5535.h. 
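GPIOs 0-15 sit in the low bank; GPIOs 16-31 are reached through the high bank at base + 0x80, which is the bank subject to the suspend erratum worked around below.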
*/ +static void errata_outl(u32 val, unsigned long addr) +{ + /* + * According to the CS5536 errata (#36), after suspend + * a write to the high bank GPIO register will clear all + * non-selected bits; the recommended workaround is a + * read-modify-write operation. + */ + val |= inl(addr); + outl(val, addr); +} + static void __cs5535_gpio_set(struct cs5535_gpio_chip *chip, unsigned offset, unsigned int reg) { @@ -64,7 +76,7 @@ static void __cs5535_gpio_set(struct cs5535_gpio_chip *chip, unsigned offset, outl(1 << offset, chip->base + reg); else /* high bank register */ - outl(1 << (offset - 16), chip->base + 0x80 + reg); + errata_outl(1 << (offset - 16), chip->base + 0x80 + reg); } void cs5535_gpio_set(unsigned offset, unsigned int reg) @@ -86,7 +98,7 @@ static void __cs5535_gpio_clear(struct cs5535_gpio_chip *chip, unsigned offset, outl(1 << (offset + 16), chip->base + reg); else /* high bank register */ - outl(1 << offset, chip->base + 0x80 + reg); + errata_outl(1 << offset, chip->base + 0x80 + reg); } void cs5535_gpio_clear(unsigned offset, unsigned int reg) diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c index 252fdb98b73a..0cb2ba50af53 100644 --- a/drivers/gpu/drm/drm_agpsupport.c +++ b/drivers/gpu/drm/drm_agpsupport.c @@ -466,10 +466,4 @@ drm_agp_bind_pages(struct drm_device *dev, } EXPORT_SYMBOL(drm_agp_bind_pages); -void drm_agp_chipset_flush(struct drm_device *dev) -{ - agp_flush_chipset(dev->agp->bridge); -} -EXPORT_SYMBOL(drm_agp_chipset_flush); - #endif /* __OS_HAS_AGP */ diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index 4c200931a6bc..c6b2e27a446a 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c @@ -241,7 +241,7 @@ void drm_helper_disable_unused_functions(struct drm_device *dev) } list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { - if (!drm_helper_encoder_in_use(encoder)) { + if (encoder->crtc && !drm_helper_encoder_in_use(encoder)) { drm_encoder_disable(encoder); /* disconnector encoder from any connector */ encoder->crtc = NULL; @@ -482,6 +482,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) int count = 0, ro, fail = 0; struct drm_crtc_helper_funcs *crtc_funcs; int ret = 0; + int i; DRM_DEBUG_KMS("\n"); @@ -677,6 +678,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) if (ret != 0) goto fail; } + DRM_DEBUG_KMS("Setting connector DPMS state to on\n"); + for (i = 0; i < set->num_connectors; i++) { + DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id, + drm_get_connector_name(set->connectors[i])); + set->connectors[i]->dpms = DRM_MODE_DPMS_ON; + } kfree(save_connectors); kfree(save_encoders); @@ -852,7 +859,7 @@ static void output_poll_execute(struct work_struct *work) struct delayed_work *delayed_work = to_delayed_work(work); struct drm_device *dev = container_of(delayed_work, struct drm_device, mode_config.output_poll_work); struct drm_connector *connector; - enum drm_connector_status old_status, status; + enum drm_connector_status old_status; bool repoll = false, changed = false; if (!drm_kms_helper_poll) @@ -877,8 +884,9 @@ static void output_poll_execute(struct work_struct *work) !(connector->polled & DRM_CONNECTOR_POLL_HPD)) continue; - status = connector->funcs->detect(connector, false); - if (old_status != status) + connector->status = connector->funcs->detect(connector, false); + DRM_DEBUG_KMS("connector status updated to %d\n", connector->status); + if (old_status != connector->status) 
changed = true; } diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index 55160d7c38b6..8304c42195fc 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c @@ -1047,10 +1047,13 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe, struct timeval now; unsigned long flags; unsigned int seq; + int ret; e = kzalloc(sizeof *e, GFP_KERNEL); - if (e == NULL) - return -ENOMEM; + if (e == NULL) { + ret = -ENOMEM; + goto err_put; + } e->pipe = pipe; e->base.pid = current->pid; @@ -1064,9 +1067,8 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe, spin_lock_irqsave(&dev->event_lock, flags); if (file_priv->event_space < sizeof e->event) { - spin_unlock_irqrestore(&dev->event_lock, flags); - kfree(e); - return -ENOMEM; + ret = -EBUSY; + goto err_unlock; } file_priv->event_space -= sizeof e->event; @@ -1103,6 +1105,13 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe, spin_unlock_irqrestore(&dev->event_lock, flags); return 0; + +err_unlock: + spin_unlock_irqrestore(&dev->event_lock, flags); + kfree(e); +err_put: + drm_vblank_put(dev, e->pipe); + return ret; } /** diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index a6bfc302ed90..c59515ba7e69 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c @@ -392,10 +392,36 @@ void drm_mm_init_scan(struct drm_mm *mm, unsigned long size, mm->scanned_blocks = 0; mm->scan_hit_start = 0; mm->scan_hit_size = 0; + mm->scan_check_range = 0; } EXPORT_SYMBOL(drm_mm_init_scan); /** + * Initialize lru scanning. + * + * This simply sets up the scanning routines with the parameters for the desired + * hole. This version is for range-restricted scans. + * + * Warning: As long as the scan list is non-empty, no other operations than + * adding/removing nodes to/from the scan list are allowed. + */ +void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size, + unsigned alignment, + unsigned long start, + unsigned long end) +{ + mm->scan_alignment = alignment; + mm->scan_size = size; + mm->scanned_blocks = 0; + mm->scan_hit_start = 0; + mm->scan_hit_size = 0; + mm->scan_start = start; + mm->scan_end = end; + mm->scan_check_range = 1; +} +EXPORT_SYMBOL(drm_mm_init_scan_with_range); + +/** * Add a node to the scan list that might be freed to make space for the desired * hole. * @@ -406,6 +432,8 @@ int drm_mm_scan_add_block(struct drm_mm_node *node) struct drm_mm *mm = node->mm; struct list_head *prev_free, *next_free; struct drm_mm_node *prev_node, *next_node; + unsigned long adj_start; + unsigned long adj_end; mm->scanned_blocks++; @@ -452,7 +480,17 @@ int drm_mm_scan_add_block(struct drm_mm_node *node) node->free_stack.prev = prev_free; node->free_stack.next = next_free; - if (check_free_hole(node->start, node->start + node->size, + if (mm->scan_check_range) { + adj_start = node->start < mm->scan_start ? + mm->scan_start : node->start; + adj_end = node->start + node->size > mm->scan_end ? 
+ mm->scan_end : node->start + node->size; + } else { + adj_start = node->start; + adj_end = node->start + node->size; + } + + if (check_free_hole(adj_start , adj_end, mm->scan_size, mm->scan_alignment)) { mm->scan_hit_start = node->start; mm->scan_hit_size = node->size; diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index fdc833d5cc7b..0ae6a7c5020f 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -9,6 +9,8 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \ i915_gem.o \ i915_gem_debug.o \ i915_gem_evict.o \ + i915_gem_execbuffer.o \ + i915_gem_gtt.o \ i915_gem_tiling.o \ i915_trace_points.o \ intel_display.o \ diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 1f4f3ceb63c7..92f75782c332 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -32,6 +32,7 @@ #include "drmP.h" #include "drm.h" #include "intel_drv.h" +#include "intel_ringbuffer.h" #include "i915_drm.h" #include "i915_drv.h" @@ -72,7 +73,6 @@ static int i915_capabilities(struct seq_file *m, void *data) B(is_broadwater); B(is_crestline); B(has_fbc); - B(has_rc6); B(has_pipe_cxsr); B(has_hotplug); B(cursor_needs_physical); @@ -86,19 +86,19 @@ static int i915_capabilities(struct seq_file *m, void *data) return 0; } -static const char *get_pin_flag(struct drm_i915_gem_object *obj_priv) +static const char *get_pin_flag(struct drm_i915_gem_object *obj) { - if (obj_priv->user_pin_count > 0) + if (obj->user_pin_count > 0) return "P"; - else if (obj_priv->pin_count > 0) + else if (obj->pin_count > 0) return "p"; else return " "; } -static const char *get_tiling_flag(struct drm_i915_gem_object *obj_priv) +static const char *get_tiling_flag(struct drm_i915_gem_object *obj) { - switch (obj_priv->tiling_mode) { + switch (obj->tiling_mode) { default: case I915_TILING_NONE: return " "; case I915_TILING_X: return "X"; @@ -109,7 +109,7 @@ static const char *get_tiling_flag(struct drm_i915_gem_object *obj_priv) static void describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) { - seq_printf(m, "%p: %s%s %8zd %08x %08x %d%s%s", + seq_printf(m, "%p: %s%s %8zd %04x %04x %d %d%s%s", &obj->base, get_pin_flag(obj), get_tiling_flag(obj), @@ -117,6 +117,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) obj->base.read_domains, obj->base.write_domain, obj->last_rendering_seqno, + obj->last_fenced_seqno, obj->dirty ? " dirty" : "", obj->madv == I915_MADV_DONTNEED ? 
" purgeable" : ""); if (obj->base.name) @@ -124,7 +125,17 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) if (obj->fence_reg != I915_FENCE_REG_NONE) seq_printf(m, " (fence: %d)", obj->fence_reg); if (obj->gtt_space != NULL) - seq_printf(m, " (gtt_offset: %08x)", obj->gtt_offset); + seq_printf(m, " (gtt offset: %08x, size: %08x)", + obj->gtt_offset, (unsigned int)obj->gtt_space->size); + if (obj->pin_mappable || obj->fault_mappable) { + char s[3], *t = s; + if (obj->pin_mappable) + *t++ = 'p'; + if (obj->fault_mappable) + *t++ = 'f'; + *t = '\0'; + seq_printf(m, " (%s mappable)", s); + } if (obj->ring != NULL) seq_printf(m, " (%s)", obj->ring->name); } @@ -136,7 +147,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) struct list_head *head; struct drm_device *dev = node->minor->dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; size_t total_obj_size, total_gtt_size; int count, ret; @@ -171,12 +182,12 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) } total_obj_size = total_gtt_size = count = 0; - list_for_each_entry(obj_priv, head, mm_list) { + list_for_each_entry(obj, head, mm_list) { seq_printf(m, " "); - describe_obj(m, obj_priv); + describe_obj(m, obj); seq_printf(m, "\n"); - total_obj_size += obj_priv->base.size; - total_gtt_size += obj_priv->gtt_space->size; + total_obj_size += obj->base.size; + total_gtt_size += obj->gtt_space->size; count++; } mutex_unlock(&dev->struct_mutex); @@ -186,24 +197,79 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) return 0; } +#define count_objects(list, member) do { \ + list_for_each_entry(obj, list, member) { \ + size += obj->gtt_space->size; \ + ++count; \ + if (obj->map_and_fenceable) { \ + mappable_size += obj->gtt_space->size; \ + ++mappable_count; \ + } \ + } \ +} while(0) + static int i915_gem_object_info(struct seq_file *m, void* data) { struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; struct drm_i915_private *dev_priv = dev->dev_private; + u32 count, mappable_count; + size_t size, mappable_size; + struct drm_i915_gem_object *obj; int ret; ret = mutex_lock_interruptible(&dev->struct_mutex); if (ret) return ret; - seq_printf(m, "%u objects\n", dev_priv->mm.object_count); - seq_printf(m, "%zu object bytes\n", dev_priv->mm.object_memory); - seq_printf(m, "%u pinned\n", dev_priv->mm.pin_count); - seq_printf(m, "%zu pin bytes\n", dev_priv->mm.pin_memory); - seq_printf(m, "%u objects in gtt\n", dev_priv->mm.gtt_count); - seq_printf(m, "%zu gtt bytes\n", dev_priv->mm.gtt_memory); - seq_printf(m, "%zu gtt total\n", dev_priv->mm.gtt_total); + seq_printf(m, "%u objects, %zu bytes\n", + dev_priv->mm.object_count, + dev_priv->mm.object_memory); + + size = count = mappable_size = mappable_count = 0; + count_objects(&dev_priv->mm.gtt_list, gtt_list); + seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n", + count, mappable_count, size, mappable_size); + + size = count = mappable_size = mappable_count = 0; + count_objects(&dev_priv->mm.active_list, mm_list); + count_objects(&dev_priv->mm.flushing_list, mm_list); + seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n", + count, mappable_count, size, mappable_size); + + size = count = mappable_size = mappable_count = 0; + count_objects(&dev_priv->mm.pinned_list, mm_list); + seq_printf(m, " %u [%u] pinned objects, %zu [%zu] bytes\n", + count, mappable_count, size, 
mappable_size); + + size = count = mappable_size = mappable_count = 0; + count_objects(&dev_priv->mm.inactive_list, mm_list); + seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n", + count, mappable_count, size, mappable_size); + + size = count = mappable_size = mappable_count = 0; + count_objects(&dev_priv->mm.deferred_free_list, mm_list); + seq_printf(m, " %u [%u] freed objects, %zu [%zu] bytes\n", + count, mappable_count, size, mappable_size); + + size = count = mappable_size = mappable_count = 0; + list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { + if (obj->fault_mappable) { + size += obj->gtt_space->size; + ++count; + } + if (obj->pin_mappable) { + mappable_size += obj->gtt_space->size; + ++mappable_count; + } + } + seq_printf(m, "%u pinned mappable objects, %zu bytes\n", + mappable_count, mappable_size); + seq_printf(m, "%u fault mappable objects, %zu bytes\n", + count, size); + + seq_printf(m, "%zu [%zu] gtt total\n", + dev_priv->mm.gtt_total, dev_priv->mm.mappable_gtt_total); mutex_unlock(&dev->struct_mutex); @@ -243,14 +309,14 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data) seq_printf(m, "%d prepares\n", work->pending); if (work->old_fb_obj) { - struct drm_i915_gem_object *obj_priv = to_intel_bo(work->old_fb_obj); - if(obj_priv) - seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset ); + struct drm_i915_gem_object *obj = work->old_fb_obj; + if (obj) + seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset); } if (work->pending_flip_obj) { - struct drm_i915_gem_object *obj_priv = to_intel_bo(work->pending_flip_obj); - if(obj_priv) - seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset ); + struct drm_i915_gem_object *obj = work->pending_flip_obj; + if (obj) + seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset); } } spin_unlock_irqrestore(&dev->event_lock, flags); @@ -265,44 +331,80 @@ static int i915_gem_request_info(struct seq_file *m, void *data) struct drm_device *dev = node->minor->dev; drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_gem_request *gem_request; - int ret; + int ret, count; ret = mutex_lock_interruptible(&dev->struct_mutex); if (ret) return ret; - seq_printf(m, "Request:\n"); - list_for_each_entry(gem_request, &dev_priv->render_ring.request_list, - list) { - seq_printf(m, " %d @ %d\n", - gem_request->seqno, - (int) (jiffies - gem_request->emitted_jiffies)); + count = 0; + if (!list_empty(&dev_priv->ring[RCS].request_list)) { + seq_printf(m, "Render requests:\n"); + list_for_each_entry(gem_request, + &dev_priv->ring[RCS].request_list, + list) { + seq_printf(m, " %d @ %d\n", + gem_request->seqno, + (int) (jiffies - gem_request->emitted_jiffies)); + } + count++; + } + if (!list_empty(&dev_priv->ring[VCS].request_list)) { + seq_printf(m, "BSD requests:\n"); + list_for_each_entry(gem_request, + &dev_priv->ring[VCS].request_list, + list) { + seq_printf(m, " %d @ %d\n", + gem_request->seqno, + (int) (jiffies - gem_request->emitted_jiffies)); + } + count++; + } + if (!list_empty(&dev_priv->ring[BCS].request_list)) { + seq_printf(m, "BLT requests:\n"); + list_for_each_entry(gem_request, + &dev_priv->ring[BCS].request_list, + list) { + seq_printf(m, " %d @ %d\n", + gem_request->seqno, + (int) (jiffies - gem_request->emitted_jiffies)); + } + count++; } mutex_unlock(&dev->struct_mutex); + if (count == 0) + seq_printf(m, "No requests\n"); + return 0; } +static void i915_ring_seqno_info(struct seq_file *m, + struct intel_ring_buffer *ring) +{ + if 
(ring->get_seqno) { + seq_printf(m, "Current sequence (%s): %d\n", + ring->name, ring->get_seqno(ring)); + seq_printf(m, "Waiter sequence (%s): %d\n", + ring->name, ring->waiting_seqno); + seq_printf(m, "IRQ sequence (%s): %d\n", + ring->name, ring->irq_seqno); + } +} + static int i915_gem_seqno_info(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; drm_i915_private_t *dev_priv = dev->dev_private; - int ret; + int ret, i; ret = mutex_lock_interruptible(&dev->struct_mutex); if (ret) return ret; - if (dev_priv->render_ring.status_page.page_addr != NULL) { - seq_printf(m, "Current sequence: %d\n", - dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring)); - } else { - seq_printf(m, "Current sequence: hws uninitialized\n"); - } - seq_printf(m, "Waiter sequence: %d\n", - dev_priv->mm.waiting_gem_seqno); - seq_printf(m, "IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno); + for (i = 0; i < I915_NUM_RINGS; i++) + i915_ring_seqno_info(m, &dev_priv->ring[i]); mutex_unlock(&dev->struct_mutex); @@ -315,7 +417,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data) struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; drm_i915_private_t *dev_priv = dev->dev_private; - int ret; + int ret, i; ret = mutex_lock_interruptible(&dev->struct_mutex); if (ret) @@ -354,16 +456,8 @@ static int i915_interrupt_info(struct seq_file *m, void *data) } seq_printf(m, "Interrupts received: %d\n", atomic_read(&dev_priv->irq_received)); - if (dev_priv->render_ring.status_page.page_addr != NULL) { - seq_printf(m, "Current sequence: %d\n", - dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring)); - } else { - seq_printf(m, "Current sequence: hws uninitialized\n"); - } - seq_printf(m, "Waiter sequence: %d\n", - dev_priv->mm.waiting_gem_seqno); - seq_printf(m, "IRQ sequence: %d\n", - dev_priv->mm.irq_gem_seqno); + for (i = 0; i < I915_NUM_RINGS; i++) + i915_ring_seqno_info(m, &dev_priv->ring[i]); mutex_unlock(&dev->struct_mutex); return 0; @@ -383,29 +477,17 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data) seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start); seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs); for (i = 0; i < dev_priv->num_fence_regs; i++) { - struct drm_gem_object *obj = dev_priv->fence_regs[i].obj; + struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj; - if (obj == NULL) { - seq_printf(m, "Fenced object[%2d] = unused\n", i); - } else { - struct drm_i915_gem_object *obj_priv; - - obj_priv = to_intel_bo(obj); - seq_printf(m, "Fenced object[%2d] = %p: %s " - "%08x %08zx %08x %s %08x %08x %d", - i, obj, get_pin_flag(obj_priv), - obj_priv->gtt_offset, - obj->size, obj_priv->stride, - get_tiling_flag(obj_priv), - obj->read_domains, obj->write_domain, - obj_priv->last_rendering_seqno); - if (obj->name) - seq_printf(m, " (name: %d)", obj->name); - seq_printf(m, "\n"); - } + seq_printf(m, "Fenced object[%2d] = ", i); + if (obj == NULL) + seq_printf(m, "unused"); + else + describe_obj(m, obj); + seq_printf(m, "\n"); } - mutex_unlock(&dev->struct_mutex); + mutex_unlock(&dev->struct_mutex); return 0; } @@ -414,10 +496,12 @@ static int i915_hws_info(struct seq_file *m, void *data) struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; drm_i915_private_t *dev_priv = dev->dev_private; - int i; + struct intel_ring_buffer *ring; volatile u32 *hws; 
+ int i; - hws = (volatile u32 *)dev_priv->render_ring.status_page.page_addr; + ring = &dev_priv->ring[(uintptr_t)node->info_ent->data]; + hws = (volatile u32 *)ring->status_page.page_addr; if (hws == NULL) return 0; @@ -431,14 +515,14 @@ static int i915_hws_info(struct seq_file *m, void *data) static void i915_dump_object(struct seq_file *m, struct io_mapping *mapping, - struct drm_i915_gem_object *obj_priv) + struct drm_i915_gem_object *obj) { int page, page_count, i; - page_count = obj_priv->base.size / PAGE_SIZE; + page_count = obj->base.size / PAGE_SIZE; for (page = 0; page < page_count; page++) { u32 *mem = io_mapping_map_wc(mapping, - obj_priv->gtt_offset + page * PAGE_SIZE); + obj->gtt_offset + page * PAGE_SIZE); for (i = 0; i < PAGE_SIZE; i += 4) seq_printf(m, "%08x : %08x\n", i, mem[i / 4]); io_mapping_unmap(mem); @@ -450,25 +534,21 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data) struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_gem_object *obj; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; int ret; ret = mutex_lock_interruptible(&dev->struct_mutex); if (ret) return ret; - list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) { - obj = &obj_priv->base; - if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) { - seq_printf(m, "--- gtt_offset = 0x%08x\n", - obj_priv->gtt_offset); - i915_dump_object(m, dev_priv->mm.gtt_mapping, obj_priv); + list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { + if (obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) { + seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset); + i915_dump_object(m, dev_priv->mm.gtt_mapping, obj); } } mutex_unlock(&dev->struct_mutex); - return 0; } @@ -477,19 +557,21 @@ static int i915_ringbuffer_data(struct seq_file *m, void *data) struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring; int ret; ret = mutex_lock_interruptible(&dev->struct_mutex); if (ret) return ret; - if (!dev_priv->render_ring.gem_object) { + ring = &dev_priv->ring[(uintptr_t)node->info_ent->data]; + if (!ring->obj) { seq_printf(m, "No ringbuffer setup\n"); } else { - u8 *virt = dev_priv->render_ring.virtual_start; + u8 *virt = ring->virtual_start; uint32_t off; - for (off = 0; off < dev_priv->render_ring.size; off += 4) { + for (off = 0; off < ring->size; off += 4) { uint32_t *ptr = (uint32_t *)(virt + off); seq_printf(m, "%08x : %08x\n", off, *ptr); } @@ -504,19 +586,38 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data) struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; drm_i915_private_t *dev_priv = dev->dev_private; - unsigned int head, tail; + struct intel_ring_buffer *ring; - head = I915_READ(PRB0_HEAD) & HEAD_ADDR; - tail = I915_READ(PRB0_TAIL) & TAIL_ADDR; + ring = &dev_priv->ring[(uintptr_t)node->info_ent->data]; + if (ring->size == 0) + return 0; - seq_printf(m, "RingHead : %08x\n", head); - seq_printf(m, "RingTail : %08x\n", tail); - seq_printf(m, "RingSize : %08lx\n", dev_priv->render_ring.size); - seq_printf(m, "Acthd : %08x\n", I915_READ(INTEL_INFO(dev)->gen >= 4 ? 
ACTHD_I965 : ACTHD)); + seq_printf(m, "Ring %s:\n", ring->name); + seq_printf(m, " Head : %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR); + seq_printf(m, " Tail : %08x\n", I915_READ_TAIL(ring) & TAIL_ADDR); + seq_printf(m, " Size : %08x\n", ring->size); + seq_printf(m, " Active : %08x\n", intel_ring_get_active_head(ring)); + seq_printf(m, " NOPID : %08x\n", I915_READ_NOPID(ring)); + if (IS_GEN6(dev)) { + seq_printf(m, " Sync 0 : %08x\n", I915_READ_SYNC_0(ring)); + seq_printf(m, " Sync 1 : %08x\n", I915_READ_SYNC_1(ring)); + } + seq_printf(m, " Control : %08x\n", I915_READ_CTL(ring)); + seq_printf(m, " Start : %08x\n", I915_READ_START(ring)); return 0; } +static const char *ring_str(int ring) +{ + switch (ring) { + case RING_RENDER: return " render"; + case RING_BSD: return " bsd"; + case RING_BLT: return " blt"; + default: return ""; + } +} + static const char *pin_flag(int pinned) { if (pinned > 0) @@ -547,6 +648,36 @@ static const char *purgeable_flag(int purgeable) return purgeable ? " purgeable" : ""; } +static void print_error_buffers(struct seq_file *m, + const char *name, + struct drm_i915_error_buffer *err, + int count) +{ + seq_printf(m, "%s [%d]:\n", name, count); + + while (count--) { + seq_printf(m, " %08x %8zd %04x %04x %08x%s%s%s%s%s", + err->gtt_offset, + err->size, + err->read_domains, + err->write_domain, + err->seqno, + pin_flag(err->pinned), + tiling_flag(err->tiling), + dirty_flag(err->dirty), + purgeable_flag(err->purgeable), + ring_str(err->ring)); + + if (err->name) + seq_printf(m, " (name: %d)", err->name); + if (err->fence_reg != I915_FENCE_REG_NONE) + seq_printf(m, " (fence: %d)", err->fence_reg); + + seq_printf(m, "\n"); + err++; + } +} + static int i915_error_state(struct seq_file *m, void *unused) { struct drm_info_node *node = (struct drm_info_node *) m->private; @@ -568,41 +699,46 @@ static int i915_error_state(struct seq_file *m, void *unused) error->time.tv_usec); seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device); seq_printf(m, "EIR: 0x%08x\n", error->eir); - seq_printf(m, " PGTBL_ER: 0x%08x\n", error->pgtbl_er); - seq_printf(m, " INSTPM: 0x%08x\n", error->instpm); + seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er); + if (INTEL_INFO(dev)->gen >= 6) { + seq_printf(m, "ERROR: 0x%08x\n", error->error); + seq_printf(m, "Blitter command stream:\n"); + seq_printf(m, " ACTHD: 0x%08x\n", error->bcs_acthd); + seq_printf(m, " IPEIR: 0x%08x\n", error->bcs_ipeir); + seq_printf(m, " IPEHR: 0x%08x\n", error->bcs_ipehr); + seq_printf(m, " INSTDONE: 0x%08x\n", error->bcs_instdone); + seq_printf(m, " seqno: 0x%08x\n", error->bcs_seqno); + seq_printf(m, "Video (BSD) command stream:\n"); + seq_printf(m, " ACTHD: 0x%08x\n", error->vcs_acthd); + seq_printf(m, " IPEIR: 0x%08x\n", error->vcs_ipeir); + seq_printf(m, " IPEHR: 0x%08x\n", error->vcs_ipehr); + seq_printf(m, " INSTDONE: 0x%08x\n", error->vcs_instdone); + seq_printf(m, " seqno: 0x%08x\n", error->vcs_seqno); + } + seq_printf(m, "Render command stream:\n"); + seq_printf(m, " ACTHD: 0x%08x\n", error->acthd); seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir); seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr); seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone); - seq_printf(m, " ACTHD: 0x%08x\n", error->acthd); if (INTEL_INFO(dev)->gen >= 4) { - seq_printf(m, " INSTPS: 0x%08x\n", error->instps); seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1); + seq_printf(m, " INSTPS: 0x%08x\n", error->instps); } - seq_printf(m, "seqno: 0x%08x\n", error->seqno); - - if (error->active_bo_count) { - seq_printf(m, "Buffers [%d]:\n", 
error->active_bo_count); - - for (i = 0; i < error->active_bo_count; i++) { - seq_printf(m, " %08x %8zd %08x %08x %08x%s%s%s%s", - error->active_bo[i].gtt_offset, - error->active_bo[i].size, - error->active_bo[i].read_domains, - error->active_bo[i].write_domain, - error->active_bo[i].seqno, - pin_flag(error->active_bo[i].pinned), - tiling_flag(error->active_bo[i].tiling), - dirty_flag(error->active_bo[i].dirty), - purgeable_flag(error->active_bo[i].purgeable)); - - if (error->active_bo[i].name) - seq_printf(m, " (name: %d)", error->active_bo[i].name); - if (error->active_bo[i].fence_reg != I915_FENCE_REG_NONE) - seq_printf(m, " (fence: %d)", error->active_bo[i].fence_reg); - - seq_printf(m, "\n"); - } - } + seq_printf(m, " INSTPM: 0x%08x\n", error->instpm); + seq_printf(m, " seqno: 0x%08x\n", error->seqno); + + for (i = 0; i < 16; i++) + seq_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]); + + if (error->active_bo) + print_error_buffers(m, "Active", + error->active_bo, + error->active_bo_count); + + if (error->pinned_bo) + print_error_buffers(m, "Pinned", + error->pinned_bo, + error->pinned_bo_count); for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++) { if (error->batchbuffer[i]) { @@ -635,6 +771,9 @@ static int i915_error_state(struct seq_file *m, void *unused) if (error->overlay) intel_overlay_print_error_state(m, error->overlay); + if (error->display) + intel_display_print_error_state(m, dev, error->display); + out: spin_unlock_irqrestore(&dev_priv->error_lock, flags); @@ -658,15 +797,51 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused) struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; drm_i915_private_t *dev_priv = dev->dev_private; - u16 rgvswctl = I915_READ16(MEMSWCTL); - u16 rgvstat = I915_READ16(MEMSTAT_ILK); - seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf); - seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f); - seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >> - MEMSTAT_VID_SHIFT); - seq_printf(m, "Current P-state: %d\n", - (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT); + if (IS_GEN5(dev)) { + u16 rgvswctl = I915_READ16(MEMSWCTL); + u16 rgvstat = I915_READ16(MEMSTAT_ILK); + + seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf); + seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f); + seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >> + MEMSTAT_VID_SHIFT); + seq_printf(m, "Current P-state: %d\n", + (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT); + } else if (IS_GEN6(dev)) { + u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); + u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS); + u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); + int max_freq; + + /* RPSTAT1 is in the GT power well */ + __gen6_force_wake_get(dev_priv); + + seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status); + seq_printf(m, "RPSTAT1: 0x%08x\n", I915_READ(GEN6_RPSTAT1)); + seq_printf(m, "Render p-state ratio: %d\n", + (gt_perf_status & 0xff00) >> 8); + seq_printf(m, "Render p-state VID: %d\n", + gt_perf_status & 0xff); + seq_printf(m, "Render p-state limit: %d\n", + rp_state_limits & 0xff); + + max_freq = (rp_state_cap & 0xff0000) >> 16; + seq_printf(m, "Lowest (RPN) frequency: %dMHz\n", + max_freq * 100); + + max_freq = (rp_state_cap & 0xff00) >> 8; + seq_printf(m, "Nominal (RP1) frequency: %dMHz\n", + max_freq * 100); + + max_freq = rp_state_cap & 0xff; + seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", + max_freq * 
@@ -794,7 +969,7 @@ static int i915_sr_status(struct seq_file *m, void *unused)
     drm_i915_private_t *dev_priv = dev->dev_private;
     bool sr_enabled = false;
 
-    if (IS_GEN5(dev))
+    if (HAS_PCH_SPLIT(dev))
         sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
     else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
         sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
@@ -886,7 +1061,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
            fb->base.height,
            fb->base.depth,
            fb->base.bits_per_pixel);
-    describe_obj(m, to_intel_bo(fb->obj));
+    describe_obj(m, fb->obj);
     seq_printf(m, "\n");
 
     list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
@@ -898,7 +1073,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
               fb->base.height,
               fb->base.depth,
               fb->base.bits_per_pixel);
-        describe_obj(m, to_intel_bo(fb->obj));
+        describe_obj(m, fb->obj);
         seq_printf(m, "\n");
     }
 
@@ -943,7 +1118,6 @@ i915_wedged_write(struct file *filp,
           loff_t *ppos)
 {
     struct drm_device *dev = filp->private_data;
-    drm_i915_private_t *dev_priv = dev->dev_private;
     char buf[20];
     int val = 1;
 
@@ -959,12 +1133,7 @@ i915_wedged_write(struct file *filp,
     }
 
     DRM_INFO("Manually setting wedged to %d\n", val);
-
-    atomic_set(&dev_priv->mm.wedged, val);
-    if (val) {
-        wake_up_all(&dev_priv->irq_queue);
-        queue_work(dev_priv->wq, &dev_priv->error_work);
-    }
+    i915_handle_error(dev, val);
 
     return cnt;
 }
@@ -1028,9 +1197,15 @@ static struct drm_info_list i915_debugfs_list[] = {
     {"i915_gem_seqno", i915_gem_seqno_info, 0},
     {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
     {"i915_gem_interrupt", i915_interrupt_info, 0},
-    {"i915_gem_hws", i915_hws_info, 0},
-    {"i915_ringbuffer_data", i915_ringbuffer_data, 0},
-    {"i915_ringbuffer_info", i915_ringbuffer_info, 0},
+    {"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
+    {"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
+    {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
+    {"i915_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RCS},
+    {"i915_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RCS},
+    {"i915_bsd_ringbuffer_data", i915_ringbuffer_data, 0, (void *)VCS},
+    {"i915_bsd_ringbuffer_info", i915_ringbuffer_info, 0, (void *)VCS},
+    {"i915_blt_ringbuffer_data", i915_ringbuffer_data, 0, (void *)BCS},
+    {"i915_blt_ringbuffer_info", i915_ringbuffer_info, 0, (void *)BCS},
     {"i915_batchbuffers", i915_batchbuffer_info, 0},
     {"i915_error_state", i915_error_state, 0},
     {"i915_rstdby_delays", i915_rstdby_delays, 0},
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 7a26f4dd21ae..18746e6cb129 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -49,6 +49,8 @@ static int i915_init_phys_hws(struct drm_device *dev)
 {
     drm_i915_private_t *dev_priv = dev->dev_private;
+    struct intel_ring_buffer *ring = LP_RING(dev_priv);
+
     /* Program Hardware Status Page */
     dev_priv->status_page_dmah =
         drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);
@@ -57,11 +59,10 @@ static int i915_init_phys_hws(struct drm_device *dev)
         DRM_ERROR("Can not allocate hardware status page\n");
         return -ENOMEM;
     }
-    dev_priv->render_ring.status_page.page_addr
-        = dev_priv->status_page_dmah->vaddr;
+    ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
     dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
 
-    memset(dev_priv->render_ring.status_page.page_addr, 0, PAGE_SIZE);
+    memset(ring->status_page.page_addr, 0, PAGE_SIZE);
 
     if (INTEL_INFO(dev)->gen >= 4)
         dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) &
@@ -79,13 +80,15 @@ static int i915_init_phys_hws(struct drm_device *dev)
 static void i915_free_hws(struct drm_device *dev)
 {
     drm_i915_private_t *dev_priv = dev->dev_private;
+    struct intel_ring_buffer *ring = LP_RING(dev_priv);
+
     if (dev_priv->status_page_dmah) {
         drm_pci_free(dev, dev_priv->status_page_dmah);
         dev_priv->status_page_dmah = NULL;
     }
 
-    if (dev_priv->render_ring.status_page.gfx_addr) {
-        dev_priv->render_ring.status_page.gfx_addr = 0;
+    if (ring->status_page.gfx_addr) {
+        ring->status_page.gfx_addr = 0;
         drm_core_ioremapfree(&dev_priv->hws_map, dev);
     }
 
@@ -97,7 +100,7 @@ void i915_kernel_lost_context(struct drm_device * dev)
 {
     drm_i915_private_t *dev_priv = dev->dev_private;
     struct drm_i915_master_private *master_priv;
-    struct intel_ring_buffer *ring = &dev_priv->render_ring;
+    struct intel_ring_buffer *ring = LP_RING(dev_priv);
 
     /*
      * We should never lose context on the ring with modesetting
@@ -106,8 +109,8 @@ void i915_kernel_lost_context(struct drm_device * dev)
     if (drm_core_check_feature(dev, DRIVER_MODESET))
         return;
 
-    ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-    ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
+    ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
+    ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
     ring->space = ring->head - (ring->tail + 8);
     if (ring->space < 0)
         ring->space += ring->size;
@@ -123,6 +126,8 @@ static int i915_dma_cleanup(struct drm_device * dev)
 {
     drm_i915_private_t *dev_priv = dev->dev_private;
+    int i;
+
     /* Make sure interrupts are disabled here because the uninstall ioctl
      * may not have been called from userspace and after dev_private
      * is freed, it's too late.
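i915_kernel_lost_context() above recomputes the free space in the ring from the head and tail offsets, wrapping around the buffer and reserving an 8-byte gap. A self-contained sketch of that arithmetic; the sizes and offsets here are made up for illustration:

    #include <stdio.h>

    /* Free space in a circular ring, keeping an 8-byte gap so that
     * head == tail stays unambiguous -- the same rule the driver
     * applies above. */
    static int ring_space(int head, int tail, int size)
    {
        int space = head - (tail + 8);
        if (space < 0)
            space += size;
        return space;
    }

    int main(void)
    {
        printf("%d\n", ring_space(4096, 256, 32768));  /* tail behind head */
        printf("%d\n", ring_space(256, 4096, 32768));  /* tail wrapped past head */
        return 0;
    }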
@@ -131,9 +136,8 @@ static int i915_dma_cleanup(struct drm_device * dev)
         drm_irq_uninstall(dev);
 
     mutex_lock(&dev->struct_mutex);
-    intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
-    intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
-    intel_cleanup_ring_buffer(dev, &dev_priv->blt_ring);
+    for (i = 0; i < I915_NUM_RINGS; i++)
+        intel_cleanup_ring_buffer(&dev_priv->ring[i]);
     mutex_unlock(&dev->struct_mutex);
 
     /* Clear the HWS virtual address at teardown */
@@ -147,6 +151,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
 {
     drm_i915_private_t *dev_priv = dev->dev_private;
     struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
+    struct intel_ring_buffer *ring = LP_RING(dev_priv);
 
     master_priv->sarea = drm_getsarea(dev);
     if (master_priv->sarea) {
@@ -157,24 +162,24 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
     }
 
     if (init->ring_size != 0) {
-        if (dev_priv->render_ring.gem_object != NULL) {
+        if (ring->obj != NULL) {
             i915_dma_cleanup(dev);
             DRM_ERROR("Client tried to initialize ringbuffer in "
                   "GEM mode\n");
             return -EINVAL;
         }
 
-        dev_priv->render_ring.size = init->ring_size;
+        ring->size = init->ring_size;
 
-        dev_priv->render_ring.map.offset = init->ring_start;
-        dev_priv->render_ring.map.size = init->ring_size;
-        dev_priv->render_ring.map.type = 0;
-        dev_priv->render_ring.map.flags = 0;
-        dev_priv->render_ring.map.mtrr = 0;
+        ring->map.offset = init->ring_start;
+        ring->map.size = init->ring_size;
+        ring->map.type = 0;
+        ring->map.flags = 0;
+        ring->map.mtrr = 0;
 
-        drm_core_ioremap_wc(&dev_priv->render_ring.map, dev);
+        drm_core_ioremap_wc(&ring->map, dev);
 
-        if (dev_priv->render_ring.map.handle == NULL) {
+        if (ring->map.handle == NULL) {
             i915_dma_cleanup(dev);
             DRM_ERROR("can not ioremap virtual address for"
                   " ring buffer\n");
@@ -182,7 +187,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
         }
     }
 
-    dev_priv->render_ring.virtual_start = dev_priv->render_ring.map.handle;
+    ring->virtual_start = ring->map.handle;
 
     dev_priv->cpp = init->cpp;
     dev_priv->back_offset = init->back_offset;
@@ -201,12 +206,10 @@ static int i915_dma_resume(struct drm_device * dev)
 {
     drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+    struct intel_ring_buffer *ring = LP_RING(dev_priv);
 
-    struct intel_ring_buffer *ring;
     DRM_DEBUG_DRIVER("%s\n", __func__);
 
-    ring = &dev_priv->render_ring;
-
     if (ring->map.handle == NULL) {
         DRM_ERROR("can not ioremap virtual address for"
               " ring buffer\n");
@@ -221,7 +224,7 @@ static int i915_dma_resume(struct drm_device * dev)
     DRM_DEBUG_DRIVER("hw status page @ %p\n",
             ring->status_page.page_addr);
     if (ring->status_page.gfx_addr != 0)
-        intel_ring_setup_status_page(dev, ring);
+        intel_ring_setup_status_page(ring);
     else
         I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
 
@@ -263,7 +266,7 @@ static int i915_dma_init(struct drm_device *dev, void *data,
  * instruction detected will be given a size of zero, which is a
  * signal to abort the rest of the buffer.
  */
-static int do_validate_cmd(int cmd)
+static int validate_cmd(int cmd)
 {
     switch (((cmd >> 29) & 0x7)) {
     case 0x0:
@@ -321,40 +324,27 @@ static int do_validate_cmd(int cmd)
     return 0;
 }
 
-static int validate_cmd(int cmd)
-{
-    int ret = do_validate_cmd(cmd);
-
-/*     printk("validate_cmd( %x ): %d\n", cmd, ret); */
-
-    return ret;
-}
-
 static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
 {
     drm_i915_private_t *dev_priv = dev->dev_private;
-    int i;
+    int i, ret;
 
-    if ((dwords+1) * sizeof(int) >= dev_priv->render_ring.size - 8)
+    if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
         return -EINVAL;
 
-    BEGIN_LP_RING((dwords+1)&~1);
-
     for (i = 0; i < dwords;) {
-        int cmd, sz;
-
-        cmd = buffer[i];
-
-        if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
+        int sz = validate_cmd(buffer[i]);
+        if (sz == 0 || i + sz > dwords)
             return -EINVAL;
-
-        OUT_RING(cmd);
-
-        while (++i, --sz) {
-            OUT_RING(buffer[i]);
-        }
+        i += sz;
     }
 
+    ret = BEGIN_LP_RING((dwords+1)&~1);
+    if (ret)
+        return ret;
+
+    for (i = 0; i < dwords; i++)
+        OUT_RING(buffer[i]);
     if (dwords & 1)
         OUT_RING(0);
 
@@ -365,34 +355,41 @@ static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
 
 int
 i915_emit_box(struct drm_device *dev,
-          struct drm_clip_rect *boxes,
-          int i, int DR1, int DR4)
+          struct drm_clip_rect *box,
+          int DR1, int DR4)
 {
-    struct drm_clip_rect box = boxes[i];
+    struct drm_i915_private *dev_priv = dev->dev_private;
+    int ret;
 
-    if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
+    if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
+        box->y2 <= 0 || box->x2 <= 0) {
         DRM_ERROR("Bad box %d,%d..%d,%d\n",
-              box.x1, box.y1, box.x2, box.y2);
+              box->x1, box->y1, box->x2, box->y2);
         return -EINVAL;
     }
 
     if (INTEL_INFO(dev)->gen >= 4) {
-        BEGIN_LP_RING(4);
+        ret = BEGIN_LP_RING(4);
+        if (ret)
+            return ret;
+
         OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
-        OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
-        OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
+        OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
+        OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
        OUT_RING(DR4);
-        ADVANCE_LP_RING();
     } else {
-        BEGIN_LP_RING(6);
+        ret = BEGIN_LP_RING(6);
+        if (ret)
+            return ret;
+
        OUT_RING(GFX_OP_DRAWRECT_INFO);
        OUT_RING(DR1);
-        OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
-        OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
+        OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
+        OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
        OUT_RING(DR4);
        OUT_RING(0);
-        ADVANCE_LP_RING();
     }
+    ADVANCE_LP_RING();
 
     return 0;
 }
@@ -412,12 +409,13 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
     if (master_priv->sarea_priv)
         master_priv->sarea_priv->last_enqueue = dev_priv->counter;
 
-    BEGIN_LP_RING(4);
-    OUT_RING(MI_STORE_DWORD_INDEX);
-    OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-    OUT_RING(dev_priv->counter);
-    OUT_RING(0);
-    ADVANCE_LP_RING();
+    if (BEGIN_LP_RING(4) == 0) {
+        OUT_RING(MI_STORE_DWORD_INDEX);
+        OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+        OUT_RING(dev_priv->counter);
+        OUT_RING(0);
+        ADVANCE_LP_RING();
+    }
 }
 
 static int i915_dispatch_cmdbuffer(struct drm_device * dev,
@@ -439,7 +437,7 @@ static int i915_dispatch_cmdbuffer(struct drm_device * dev,
 
     for (i = 0; i < count; i++) {
         if (i < nbox) {
-            ret = i915_emit_box(dev, cliprects, i,
+            ret = i915_emit_box(dev, &cliprects[i],
                         cmd->DR1, cmd->DR4);
             if (ret)
                 return ret;
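The hunks above convert every BEGIN_LP_RING()/OUT_RING()/ADVANCE_LP_RING() sequence to check the return value of the ring-begin step, so an emit can fail cleanly instead of assuming space. A minimal sketch of that pattern with stand-in functions (the opcode values and index are placeholders, not the driver's real constants):

    #include <errno.h>
    #include <stdio.h>

    /* Stand-ins for the driver's ring-emit macros: begin() reserves
     * space and can fail, so callers check it before emitting. */
    static int ring_begin(int dwords) { return dwords <= 64 ? 0 : -ENOSPC; }
    static void ring_emit(unsigned int dw) { printf("emit %08x\n", dw); }
    static void ring_advance(void) { }

    static int emit_breadcrumb(unsigned int seqno)
    {
        int ret = ring_begin(4);   /* was an unchecked begin */
        if (ret)
            return ret;

        ring_emit(0x10u << 23);    /* hypothetical store-dword opcode */
        ring_emit(5u << 2);        /* hypothetical breadcrumb index */
        ring_emit(seqno);
        ring_emit(0);
        ring_advance();
        return 0;
    }

    int main(void) { return emit_breadcrumb(1); }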
@@ -458,8 +456,9 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
                      drm_i915_batchbuffer_t * batch,
                      struct drm_clip_rect *cliprects)
 {
+    struct drm_i915_private *dev_priv = dev->dev_private;
     int nbox = batch->num_cliprects;
-    int i = 0, count;
+    int i, count, ret;
 
     if ((batch->start | batch->used) & 0x7) {
         DRM_ERROR("alignment");
@@ -469,17 +468,19 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
     i915_kernel_lost_context(dev);
 
     count = nbox ? nbox : 1;
-
     for (i = 0; i < count; i++) {
         if (i < nbox) {
-            int ret = i915_emit_box(dev, cliprects, i,
-                        batch->DR1, batch->DR4);
+            ret = i915_emit_box(dev, &cliprects[i],
+                        batch->DR1, batch->DR4);
             if (ret)
                 return ret;
         }
 
         if (!IS_I830(dev) && !IS_845G(dev)) {
-            BEGIN_LP_RING(2);
+            ret = BEGIN_LP_RING(2);
+            if (ret)
+                return ret;
+
             if (INTEL_INFO(dev)->gen >= 4) {
                 OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
                 OUT_RING(batch->start);
@@ -487,26 +488,29 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
                 OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
                 OUT_RING(batch->start | MI_BATCH_NON_SECURE);
             }
-            ADVANCE_LP_RING();
         } else {
-            BEGIN_LP_RING(4);
+            ret = BEGIN_LP_RING(4);
+            if (ret)
+                return ret;
+
             OUT_RING(MI_BATCH_BUFFER);
             OUT_RING(batch->start | MI_BATCH_NON_SECURE);
             OUT_RING(batch->start + batch->used - 4);
             OUT_RING(0);
-            ADVANCE_LP_RING();
         }
+        ADVANCE_LP_RING();
     }
 
     if (IS_G4X(dev) || IS_GEN5(dev)) {
-        BEGIN_LP_RING(2);
-        OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
-        OUT_RING(MI_NOOP);
-        ADVANCE_LP_RING();
+        if (BEGIN_LP_RING(2) == 0) {
+            OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
+            OUT_RING(MI_NOOP);
+            ADVANCE_LP_RING();
+        }
     }
 
-    i915_emit_breadcrumb(dev);
+    i915_emit_breadcrumb(dev);
 
     return 0;
 }
@@ -515,6 +519,7 @@ static int i915_dispatch_flip(struct drm_device * dev)
     drm_i915_private_t *dev_priv = dev->dev_private;
     struct drm_i915_master_private *master_priv =
         dev->primary->master->driver_priv;
+    int ret;
 
     if (!master_priv->sarea_priv)
         return -EINVAL;
@@ -526,12 +531,13 @@ static int i915_dispatch_flip(struct drm_device * dev)
 
     i915_kernel_lost_context(dev);
 
-    BEGIN_LP_RING(2);
+    ret = BEGIN_LP_RING(10);
+    if (ret)
+        return ret;
+
     OUT_RING(MI_FLUSH | MI_READ_FLUSH);
     OUT_RING(0);
-    ADVANCE_LP_RING();
 
-    BEGIN_LP_RING(6);
     OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
     OUT_RING(0);
     if (dev_priv->current_page == 0) {
@@ -542,33 +548,32 @@ static int i915_dispatch_flip(struct drm_device * dev)
         dev_priv->current_page = 0;
     }
     OUT_RING(0);
-    ADVANCE_LP_RING();
 
-    BEGIN_LP_RING(2);
     OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
     OUT_RING(0);
+
     ADVANCE_LP_RING();
 
     master_priv->sarea_priv->last_enqueue = dev_priv->counter++;
 
-    BEGIN_LP_RING(4);
-    OUT_RING(MI_STORE_DWORD_INDEX);
-    OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-    OUT_RING(dev_priv->counter);
-    OUT_RING(0);
-    ADVANCE_LP_RING();
+    if (BEGIN_LP_RING(4) == 0) {
+        OUT_RING(MI_STORE_DWORD_INDEX);
+        OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+        OUT_RING(dev_priv->counter);
+        OUT_RING(0);
+        ADVANCE_LP_RING();
+    }
 
     master_priv->sarea_priv->pf_current_page = dev_priv->current_page;
     return 0;
 }
 
-static int i915_quiescent(struct drm_device * dev)
+static int i915_quiescent(struct drm_device *dev)
 {
-    drm_i915_private_t *dev_priv = dev->dev_private;
+    struct intel_ring_buffer *ring = LP_RING(dev->dev_private);
 
     i915_kernel_lost_context(dev);
-    return intel_wait_ring_buffer(dev, &dev_priv->render_ring,
-                      dev_priv->render_ring.size - 8);
+    return intel_wait_ring_buffer(ring, ring->size - 8);
 }
 
 static int i915_flush_ioctl(struct drm_device *dev, void *data,
@@ -767,6 +772,15 @@ static int i915_getparam(struct drm_device *dev, void *data,
     case I915_PARAM_HAS_BLT:
         value = HAS_BLT(dev);
         break;
+    case I915_PARAM_HAS_RELAXED_FENCING:
+        value = 1;
+        break;
+    case I915_PARAM_HAS_COHERENT_RINGS:
+        value = 1;
+        break;
+    case I915_PARAM_HAS_EXEC_CONSTANTS:
+        value = INTEL_INFO(dev)->gen >= 4;
+        break;
     default:
         DRM_DEBUG_DRIVER("Unknown parameter %d\n",
                  param->param);
@@ -822,7 +836,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
 {
     drm_i915_private_t *dev_priv = dev->dev_private;
     drm_i915_hws_addr_t *hws = data;
-    struct intel_ring_buffer *ring = &dev_priv->render_ring;
+    struct intel_ring_buffer *ring = LP_RING(dev_priv);
 
     if (!I915_NEED_GFX_HWS(dev))
         return -EINVAL;
@@ -1001,73 +1015,47 @@ intel_teardown_mchbar(struct drm_device *dev)
 #define PTE_VALID            (1 << 0)
 
 /**
- * i915_gtt_to_phys - take a GTT address and turn it into a physical one
+ * i915_stolen_to_phys - take an offset into stolen memory and turn it into
+ *                       a physical one
  * @dev: drm device
- * @gtt_addr: address to translate
+ * @offset: address to translate
  *
- * Some chip functions require allocations from stolen space but need the
- * physical address of the memory in question. We use this routine
- * to get a physical address suitable for register programming from a given
- * GTT address.
+ * Some chip functions require allocations from stolen space and need the
+ * physical address of the memory in question.
  */
-static unsigned long i915_gtt_to_phys(struct drm_device *dev,
-                      unsigned long gtt_addr)
+static unsigned long i915_stolen_to_phys(struct drm_device *dev, u32 offset)
 {
-    unsigned long *gtt;
-    unsigned long entry, phys;
-    int gtt_bar = IS_GEN2(dev) ? 1 : 0;
-    int gtt_offset, gtt_size;
-
-    if (INTEL_INFO(dev)->gen >= 4) {
-        if (IS_G4X(dev) || INTEL_INFO(dev)->gen > 4) {
-            gtt_offset = 2*1024*1024;
-            gtt_size = 2*1024*1024;
-        } else {
-            gtt_offset = 512*1024;
-            gtt_size = 512*1024;
-        }
+    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct pci_dev *pdev = dev_priv->bridge_dev;
+    u32 base;
+
+#if 0
+    /* On the machines I have tested the Graphics Base of Stolen Memory
+     * is unreliable, so compute the base by subtracting the stolen memory
+     * from the Top of Low Usable DRAM which is where the BIOS places
+     * the graphics stolen memory.
+     */
+    if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
+        /* top 32bits are reserved = 0 */
+        pci_read_config_dword(pdev, 0xA4, &base);
     } else {
-        gtt_bar = 3;
-        gtt_offset = 0;
-        gtt_size = pci_resource_len(dev->pdev, gtt_bar);
-    }
-
-    gtt = ioremap_wc(pci_resource_start(dev->pdev, gtt_bar) + gtt_offset,
-             gtt_size);
-    if (!gtt) {
-        DRM_ERROR("ioremap of GTT failed\n");
-        return 0;
-    }
-
-    entry = *(volatile u32 *)(gtt + (gtt_addr / 1024));
-
-    DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, PTE: 0x%08lx\n", gtt_addr, entry);
-
-    /* Mask out these reserved bits on this hardware. */
-    if (INTEL_INFO(dev)->gen < 4 && !IS_G33(dev))
-        entry &= ~PTE_ADDRESS_MASK_HIGH;
-
-    /* If it's not a mapping type we know, then bail.
-     */
-    if ((entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_UNCACHED &&
-        (entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_CACHED) {
-        iounmap(gtt);
-        return 0;
-    }
-
-    if (!(entry & PTE_VALID)) {
-        DRM_ERROR("bad GTT entry in stolen space\n");
-        iounmap(gtt);
-        return 0;
+        /* XXX presume 8xx is the same as i915 */
+        pci_bus_read_config_dword(pdev->bus, 2, 0x5C, &base);
+    }
+#else
+    if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
+        u16 val;
+        pci_read_config_word(pdev, 0xb0, &val);
+        base = val >> 4 << 20;
+    } else {
+        u8 val;
+        pci_read_config_byte(pdev, 0x9c, &val);
+        base = val >> 3 << 27;
     }
+    base -= dev_priv->mm.gtt->stolen_size;
+#endif
 
-    iounmap(gtt);
-
-    phys =(entry & PTE_ADDRESS_MASK) |
-          ((uint64_t)(entry & PTE_ADDRESS_MASK_HIGH) << (32 - 4));
-
-    DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, phys addr: 0x%08lx\n", gtt_addr, phys);
-
-    return phys;
+    return base + offset;
 }
 
 static void i915_warn_stolen(struct drm_device *dev)
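The replacement i915_stolen_to_phys() above computes the stolen-memory base directly from bridge PCI config space instead of walking GTT entries. A standalone sketch of the same bit manipulation; the register offsets are those used in the code above and the sample config values are made up:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* gen4+/G33 path: base lives in the upper bits of a 16-bit
         * config word, in 1 MiB units (val >> 4 << 20 above). */
        uint16_t val16 = 0x7f10;               /* hypothetical config value */
        uint32_t base = (uint32_t)(val16 >> 4) << 20;
        printf("gen4+ stolen base: 0x%08x\n", base);

        /* older parts: a byte register, in 128 MiB units (val >> 3 << 27). */
        uint8_t val8 = 0x18;                   /* hypothetical config value */
        base = (uint32_t)(val8 >> 3) << 27;
        printf("gen2/3 stolen base: 0x%08x\n", base);
        return 0;
    }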
@@ -1083,54 +1071,35 @@ static void i915_setup_compression(struct drm_device *dev, int size)
     unsigned long cfb_base;
     unsigned long ll_base = 0;
 
-    /* Leave 1M for line length buffer & misc. */
-    compressed_fb = drm_mm_search_free(&dev_priv->mm.vram, size, 4096, 0);
-    if (!compressed_fb) {
-        dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
-        i915_warn_stolen(dev);
-        return;
-    }
+    compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
+    if (compressed_fb)
+        compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
+    if (!compressed_fb)
+        goto err;
 
-    compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
-    if (!compressed_fb) {
-        i915_warn_stolen(dev);
-        dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
-        return;
-    }
+    cfb_base = i915_stolen_to_phys(dev, compressed_fb->start);
+    if (!cfb_base)
+        goto err_fb;
 
-    cfb_base = i915_gtt_to_phys(dev, compressed_fb->start);
-    if (!cfb_base) {
-        DRM_ERROR("failed to get stolen phys addr, disabling FBC\n");
-        drm_mm_put_block(compressed_fb);
-    }
+    if (!(IS_GM45(dev) || HAS_PCH_SPLIT(dev))) {
+        compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen,
+                            4096, 4096, 0);
+        if (compressed_llb)
+            compressed_llb = drm_mm_get_block(compressed_llb,
+                              4096, 4096);
+        if (!compressed_llb)
+            goto err_fb;
 
-    if (!(IS_GM45(dev) || IS_IRONLAKE_M(dev))) {
-        compressed_llb = drm_mm_search_free(&dev_priv->mm.vram, 4096,
-                            4096, 0);
-        if (!compressed_llb) {
-            i915_warn_stolen(dev);
-            return;
-        }
-
-        compressed_llb = drm_mm_get_block(compressed_llb, 4096, 4096);
-        if (!compressed_llb) {
-            i915_warn_stolen(dev);
-            return;
-        }
-
-        ll_base = i915_gtt_to_phys(dev, compressed_llb->start);
-        if (!ll_base) {
-            DRM_ERROR("failed to get stolen phys addr, disabling FBC\n");
-            drm_mm_put_block(compressed_fb);
-            drm_mm_put_block(compressed_llb);
-        }
+        ll_base = i915_stolen_to_phys(dev, compressed_llb->start);
+        if (!ll_base)
+            goto err_llb;
     }
 
     dev_priv->cfb_size = size;
 
     intel_disable_fbc(dev);
     dev_priv->compressed_fb = compressed_fb;
-    if (IS_IRONLAKE_M(dev))
+    if (HAS_PCH_SPLIT(dev))
         I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
     else if (IS_GM45(dev)) {
         I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
@@ -1140,8 +1109,17 @@ static void i915_setup_compression(struct drm_device *dev, int size)
         dev_priv->compressed_llb = compressed_llb;
     }
 
-    DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base,
-              ll_base, size >> 20);
+    DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n",
+              cfb_base, ll_base, size >> 20);
+    return;
+
+err_llb:
+    drm_mm_put_block(compressed_llb);
+err_fb:
+    drm_mm_put_block(compressed_fb);
+err:
+    dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
+    i915_warn_stolen(dev);
 }
 
 static void i915_cleanup_compression(struct drm_device *dev)
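The rewrite above collapses the repeated inline cleanup into the kernel's usual goto-unwind ladder: each label undoes exactly the allocations that succeeded before the failure, in reverse order. A minimal userspace sketch of the idiom with placeholder resources:

    #include <errno.h>
    #include <stdlib.h>

    /* err_fb frees only the first allocation; err frees nothing --
     * mirroring the err_llb/err_fb/err ladder in the FBC setup above. */
    static int setup(void)
    {
        void *fb, *llb;

        fb = malloc(64);
        if (!fb)
            goto err;

        llb = malloc(16);
        if (!llb)
            goto err_fb;

        /* ... use fb and llb, then release them normally ... */
        free(llb);
        free(fb);
        return 0;

    err_fb:
        free(fb);
    err:
        return -ENOMEM;
    }

    int main(void) { return setup(); }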
@@ -1192,17 +1170,20 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
     return can_switch;
 }
 
-static int i915_load_modeset_init(struct drm_device *dev,
-                  unsigned long prealloc_size,
-                  unsigned long agp_size)
+static int i915_load_modeset_init(struct drm_device *dev)
 {
     struct drm_i915_private *dev_priv = dev->dev_private;
+    unsigned long prealloc_size, gtt_size, mappable_size;
     int ret = 0;
 
-    /* Basic memrange allocator for stolen space (aka mm.vram) */
-    drm_mm_init(&dev_priv->mm.vram, 0, prealloc_size);
+    prealloc_size = dev_priv->mm.gtt->stolen_size;
+    gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
+    mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+
+    /* Basic memrange allocator for stolen space */
+    drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
 
-    /* Let GEM Manage from end of prealloc space to end of aperture.
+    /* Let GEM Manage all of the aperture.
      *
      * However, leave one page at the end still bound to the scratch page.
      * There are a number of places where the hardware apparently
@@ -1211,7 +1192,7 @@ static int i915_load_modeset_init(struct drm_device *dev,
      * at the last page of the aperture.  One page should be enough to
      * keep any prefetching inside of the aperture.
      */
-    i915_gem_do_init(dev, prealloc_size, agp_size - 4096);
+    i915_gem_do_init(dev, 0, mappable_size, gtt_size - PAGE_SIZE);
 
     mutex_lock(&dev->struct_mutex);
     ret = i915_gem_init_ringbuffer(dev);
@@ -1223,16 +1204,17 @@ static int i915_load_modeset_init(struct drm_device *dev,
     if (I915_HAS_FBC(dev) && i915_powersave) {
         int cfb_size;
 
-        /* Try to get an 8M buffer... */
-        if (prealloc_size > (9*1024*1024))
-            cfb_size = 8*1024*1024;
+        /* Leave 1M for line length buffer & misc. */
+
+        /* Try to get a 32M buffer... */
+        if (prealloc_size > (36*1024*1024))
+            cfb_size = 32*1024*1024;
         else /* fall back to 7/8 of the stolen space */
             cfb_size = prealloc_size * 7 / 8;
         i915_setup_compression(dev, cfb_size);
     }
 
-    /* Allow hardware batchbuffers unless told otherwise.
-     */
+    /* Allow hardware batchbuffers unless told otherwise. */
     dev_priv->allow_batchbuffer = 1;
 
     ret = intel_parse_bios(dev);
@@ -1422,152 +1404,12 @@ static void i915_ironlake_get_mem_freq(struct drm_device *dev)
     }
 }
 
-struct v_table {
-    u8 vid;
-    unsigned long vd; /* in .1 mil */
-    unsigned long vm; /* in .1 mil */
-    u8 pvid;
-};
-
-static struct v_table v_table[] = {
-    { 0, 16125, 15000, 0x7f, },
-    { 1, 16000, 14875, 0x7e, },
-    { 2, 15875, 14750, 0x7d, },
-    { 3, 15750, 14625, 0x7c, },
-    { 4, 15625, 14500, 0x7b, },
-    { 5, 15500, 14375, 0x7a, },
-    { 6, 15375, 14250, 0x79, },
-    { 7, 15250, 14125, 0x78, },
-    { 8, 15125, 14000, 0x77, },
-    { 9, 15000, 13875, 0x76, },
-    { 10, 14875, 13750, 0x75, },
-    { 11, 14750, 13625, 0x74, },
-    { 12, 14625, 13500, 0x73, },
-    { 13, 14500, 13375, 0x72, },
-    { 14, 14375, 13250, 0x71, },
-    { 15, 14250, 13125, 0x70, },
-    { 16, 14125, 13000, 0x6f, },
-    { 17, 14000, 12875, 0x6e, },
-    { 18, 13875, 12750, 0x6d, },
-    { 19, 13750, 12625, 0x6c, },
-    { 20, 13625, 12500, 0x6b, },
-    { 21, 13500, 12375, 0x6a, },
-    { 22, 13375, 12250, 0x69, },
-    { 23, 13250, 12125, 0x68, },
-    { 24, 13125, 12000, 0x67, },
-    { 25, 13000, 11875, 0x66, },
-    { 26, 12875, 11750, 0x65, },
-    { 27, 12750, 11625, 0x64, },
-    { 28, 12625, 11500, 0x63, },
-    { 29, 12500, 11375, 0x62, },
-    { 30, 12375, 11250, 0x61, },
-    { 31, 12250, 11125, 0x60, },
-    { 32, 12125, 11000, 0x5f, },
-    { 33, 12000, 10875, 0x5e, },
-    { 34, 11875, 10750, 0x5d, },
-    { 35, 11750, 10625, 0x5c, },
-    { 36, 11625, 10500, 0x5b, },
-    { 37, 11500, 10375, 0x5a, },
-    { 38, 11375, 10250, 0x59, },
-    { 39, 11250, 10125, 0x58, },
-    { 40, 11125, 10000, 0x57, },
-    { 41, 11000, 9875, 0x56, },
-    { 42, 10875, 9750, 0x55, },
-    { 43, 10750, 9625, 0x54, },
-    { 44, 10625, 9500, 0x53, },
-    { 45, 10500, 9375, 0x52, },
-    { 46, 10375, 9250, 0x51, },
-    { 47, 10250, 9125, 0x50, },
-    { 48, 10125, 9000, 0x4f, },
-    { 49, 10000, 8875, 0x4e, },
-    { 50, 9875, 8750, 0x4d, },
-    { 51, 9750, 8625, 0x4c, },
-    { 52, 9625, 8500, 0x4b, },
-    { 53, 9500, 8375, 0x4a, },
-    { 54, 9375, 8250, 0x49, },
-    { 55, 9250, 8125, 0x48, },
-    { 56, 9125, 8000, 0x47, },
-    { 57, 9000, 7875, 0x46, },
-    { 58, 8875, 7750, 0x45, },
-    { 59, 8750, 7625, 0x44, },
-    { 60, 8625, 7500, 0x43, },
-    { 61, 8500, 7375, 0x42, },
-    { 62, 8375, 7250, 0x41, },
-    { 63, 8250, 7125, 0x40, },
-    { 64, 8125, 7000, 0x3f, },
-    { 65, 8000, 6875, 0x3e, },
-    { 66, 7875, 6750, 0x3d, },
-    { 67, 7750, 6625, 0x3c, },
-    { 68, 7625, 6500, 0x3b, },
-    { 69, 7500, 6375, 0x3a, },
-    { 70, 7375, 6250, 0x39, },
-    { 71, 7250, 6125, 0x38, },
-    { 72, 7125, 6000, 0x37, },
-    { 73, 7000, 5875, 0x36, },
-    { 74, 6875, 5750, 0x35, },
-    { 75, 6750, 5625, 0x34, },
-    { 76, 6625, 5500, 0x33, },
-    { 77, 6500, 5375, 0x32, },
-    { 78, 6375, 5250, 0x31, },
-    { 79, 6250, 5125, 0x30, },
-    { 80, 6125, 5000, 0x2f, },
-    { 81, 6000, 4875, 0x2e, },
-    { 82, 5875, 4750, 0x2d, },
-    { 83, 5750, 4625, 0x2c, },
-    { 84, 5625, 4500, 0x2b, },
-    { 85, 5500, 4375, 0x2a, },
-    { 86, 5375, 4250, 0x29, },
-    { 87, 5250, 4125, 0x28, },
-    { 88, 5125, 4000, 0x27, },
-    { 89, 5000, 3875, 0x26, },
-    { 90, 4875, 3750, 0x25, },
-    { 91, 4750, 3625, 0x24, },
-    { 92, 4625, 3500, 0x23, },
-    { 93, 4500, 3375, 0x22, },
-    { 94, 4375, 3250, 0x21, },
-    { 95, 4250, 3125, 0x20, },
-    { 96, 4125, 3000, 0x1f, },
-    { 97, 4125, 3000, 0x1e, },
-    { 98, 4125, 3000, 0x1d, },
-    { 99, 4125, 3000, 0x1c, },
-    { 100, 4125, 3000, 0x1b, },
-    { 101, 4125, 3000, 0x1a, },
-    { 102, 4125, 3000, 0x19, },
-    { 103, 4125, 3000, 0x18, },
-    { 104, 4125, 3000, 0x17, },
-    { 105, 4125, 3000, 0x16, },
-    { 106, 4125, 3000, 0x15, },
-    { 107, 4125, 3000, 0x14, },
-    { 108, 4125, 3000, 0x13, },
-    { 109, 4125, 3000, 0x12, },
-    { 110, 4125, 3000, 0x11, },
-    { 111, 4125, 3000, 0x10, },
-    { 112, 4125, 3000, 0x0f, },
-    { 113, 4125, 3000, 0x0e, },
-    { 114, 4125, 3000, 0x0d, },
-    { 115, 4125, 3000, 0x0c, },
-    { 116, 4125, 3000, 0x0b, },
-    { 117, 4125, 3000, 0x0a, },
-    { 118, 4125, 3000, 0x09, },
-    { 119, 4125, 3000, 0x08, },
-    { 120, 1125, 0, 0x07, },
-    { 121, 1000, 0, 0x06, },
-    { 122, 875, 0, 0x05, },
-    { 123, 750, 0, 0x04, },
-    { 124, 625, 0, 0x03, },
-    { 125, 500, 0, 0x02, },
-    { 126, 375, 0, 0x01, },
-    { 127, 0, 0, 0x00, },
-};
-
-struct cparams {
-    int i;
-    int t;
-    int m;
-    int c;
-};
-
-static struct cparams cparams[] = {
+static const struct cparams {
+    u16 i;
+    u16 t;
+    u16 m;
+    u16 c;
+} cparams[] = {
     { 1, 1333, 301, 28664 },
     { 1, 1066, 294, 24460 },
     { 1, 800, 294, 25192 },
@@ -1633,21 +1475,145 @@ unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
     return ((m * x) / 127) - b;
 }
 
-static unsigned long pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
+static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
 {
-    unsigned long val = 0;
-    int i;
-
-    for (i = 0; i < ARRAY_SIZE(v_table); i++) {
-        if (v_table[i].pvid == pxvid) {
-            if (IS_MOBILE(dev_priv->dev))
-                val = v_table[i].vm;
-            else
-                val = v_table[i].vd;
-        }
-    }
-
-    return val;
+    static const struct v_table {
+        u16 vd; /* in .1 mil */
+        u16 vm; /* in .1 mil */
+    } v_table[] = {
+        { 0, 0, },
+        { 375, 0, },
+        { 500, 0, },
+        { 625, 0, },
+        { 750, 0, },
+        { 875, 0, },
+        { 1000, 0, },
+        { 1125, 0, },
+        { 4125, 3000, },
+        { 4125, 3000, },
+        { 4125, 3000, },
+        { 4125, 3000, },
+        { 4125, 3000, },
+        { 4125, 3000, },
+        { 4125, 3000, },
+        { 4125, 3000, },
+        { 4125, 3000, },
+        { 4125, 3000, },
+        { 4125, 3000, },
+        { 4125, 3000, },
+        { 4125, 3000, },
+        { 4125, 3000, },
+        { 4125, 3000, },
+        { 4125, 3000, },
+        { 4125, 3000, },
+        { 4125, 3000, },
+        { 4125, 3000, },
+        { 4125, 3000, },
+        { 4125, 3000, },
+        { 4125, 3000, },
+        { 4125, 3000, },
+        { 4125, 3000, },
+        { 4250, 3125, },
+        { 4375, 3250, },
+        { 4500, 3375, },
+        { 4625, 3500, },
+        { 4750, 3625, },
+        { 4875, 3750, },
+        { 5000, 3875, },
+        { 5125, 4000, },
+        { 5250, 4125, },
+        { 5375, 4250, },
+        { 5500, 4375, },
+        { 5625, 4500, },
+        { 5750, 4625, },
+        { 5875, 4750, },
+        { 6000, 4875, },
+        { 6125, 5000, },
+        { 6250, 5125, },
+        { 6375, 5250, },
+        { 6500, 5375, },
+        { 6625, 5500, },
+        { 6750, 5625, },
+        { 6875, 5750, },
+        { 7000, 5875, },
+        { 7125, 6000, },
+        { 7250, 6125, },
+        { 7375, 6250, },
+        { 7500, 6375, },
+        { 7625, 6500, },
+        { 7750, 6625, },
+        { 7875, 6750, },
+        { 8000, 6875, },
+        { 8125, 7000, },
+        { 8250, 7125, },
+        { 8375, 7250, },
+        { 8500, 7375, },
+        { 8625, 7500, },
+        { 8750, 7625, },
+        { 8875, 7750, },
+        { 9000, 7875, },
+        { 9125, 8000, },
+        { 9250, 8125, },
+        { 9375, 8250, },
+        { 9500, 8375, },
+        { 9625, 8500, },
+        { 9750, 8625, },
+        { 9875, 8750, },
+        { 10000, 8875, },
+        { 10125, 9000, },
+        { 10250, 9125, },
+        { 10375, 9250, },
+        { 10500, 9375, },
+        { 10625, 9500, },
+        { 10750, 9625, },
+        { 10875, 9750, },
+        { 11000, 9875, },
+        { 11125, 10000, },
+        { 11250, 10125, },
+        { 11375, 10250, },
+        { 11500, 10375, },
+        { 11625, 10500, },
+        { 11750, 10625, },
+        { 11875, 10750, },
+        { 12000, 10875, },
+        { 12125, 11000, },
+        { 12250, 11125, },
+        { 12375, 11250, },
+        { 12500, 11375, },
+        { 12625, 11500, },
+        { 12750, 11625, },
+        { 12875, 11750, },
+        { 13000, 11875, },
+        { 13125, 12000, },
+        { 13250, 12125, },
+        { 13375, 12250, },
+        { 13500, 12375, },
+        { 13625, 12500, },
+        { 13750, 12625, },
+        { 13875, 12750, },
+        { 14000, 12875, },
+        { 14125, 13000, },
+        { 14250, 13125, },
+        { 14375, 13250, },
+        { 14500, 13375, },
+        { 14625, 13500, },
+        { 14750, 13625, },
+        { 14875, 13750, },
+        { 15000, 13875, },
+        { 15125, 14000, },
+        { 15250, 14125, },
+        { 15375, 14250, },
+        { 15500, 14375, },
+        { 15625, 14500, },
+        { 15750, 14625, },
+        { 15875, 14750, },
+        { 16000, 14875, },
+        { 16125, 15000, },
+    };
+    if (dev_priv->info->is_mobile)
+        return v_table[pxvid].vm;
+    else
+        return v_table[pxvid].vd;
 }
 
 void i915_update_gfx_val(struct drm_i915_private *dev_priv)
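pvid_to_extvid() now treats PXVID as a direct index into the voltage table instead of scanning every entry for a matching pvid field. A compact sketch of that direct-index lookup; the three-entry table here is illustrative, not the real calibration data, and the caller is responsible for keeping the index in range:

    #include <stdio.h>
    #include <stdint.h>

    struct v_entry { uint16_t vd, vm; };   /* desktop / mobile voltage, .1 mV */

    static const struct v_entry v_table[] = {
        { 0, 0 }, { 375, 0 }, { 500, 0 },  /* illustrative subset only */
    };

    static uint16_t pvid_to_extvid(uint8_t pxvid, int is_mobile)
    {
        /* PXVID is the index: O(1) instead of the old linear search. */
        return is_mobile ? v_table[pxvid].vm : v_table[pxvid].vd;
    }

    int main(void)
    {
        printf("%u\n", pvid_to_extvid(1, 0));   /* prints 375 */
        return 0;
    }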
@@ -1881,9 +1847,9 @@ EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
 int i915_driver_load(struct drm_device *dev, unsigned long flags)
 {
     struct drm_i915_private *dev_priv;
-    resource_size_t base, size;
     int ret = 0, mmio_bar;
-    uint32_t agp_size, prealloc_size;
+    uint32_t agp_size;
+
     /* i915 has 4 more counters */
     dev->counters += 4;
     dev->types[6] = _DRM_STAT_IRQ;
@@ -1899,11 +1865,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
     dev_priv->dev = dev;
     dev_priv->info = (struct intel_device_info *) flags;
 
-    /* Add register map (needed for suspend/resume) */
-    mmio_bar = IS_GEN2(dev) ? 1 : 0;
-    base = pci_resource_start(dev->pdev, mmio_bar);
-    size = pci_resource_len(dev->pdev, mmio_bar);
-
     if (i915_get_bridge_dev(dev)) {
         ret = -EIO;
         goto free_priv;
@@ -1913,16 +1874,25 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
     if (IS_GEN2(dev))
         dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
 
-    dev_priv->regs = ioremap(base, size);
+    mmio_bar = IS_GEN2(dev) ? 1 : 0;
+    dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0);
     if (!dev_priv->regs) {
         DRM_ERROR("failed to map registers\n");
         ret = -EIO;
         goto put_bridge;
     }
 
+    dev_priv->mm.gtt = intel_gtt_get();
+    if (!dev_priv->mm.gtt) {
+        DRM_ERROR("Failed to initialize GTT\n");
+        ret = -ENODEV;
+        goto out_iomapfree;
+    }
+
+    agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+
     dev_priv->mm.gtt_mapping =
-        io_mapping_create_wc(dev->agp->base,
-                     dev->agp->agp_info.aper_size * 1024*1024);
+        io_mapping_create_wc(dev->agp->base, agp_size);
     if (dev_priv->mm.gtt_mapping == NULL) {
         ret = -EIO;
         goto out_rmmap;
@@ -1934,24 +1904,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
      * MTRR if present.  Even if a UC MTRR isn't present.
      */
     dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base,
-                     dev->agp->agp_info.aper_size *
-                     1024 * 1024,
+                     agp_size,
                      MTRR_TYPE_WRCOMB, 1);
     if (dev_priv->mm.gtt_mtrr < 0) {
         DRM_INFO("MTRR allocation failed.  Graphics "
             "performance may suffer.\n");
     }
 
-    dev_priv->mm.gtt = intel_gtt_get();
-    if (!dev_priv->mm.gtt) {
-        DRM_ERROR("Failed to initialize GTT\n");
-        ret = -ENODEV;
-        goto out_iomapfree;
-    }
-
-    prealloc_size = dev_priv->mm.gtt->gtt_stolen_entries << PAGE_SHIFT;
-    agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
-
     /* The i915 workqueue is primarily used for batched retirement of
      * requests (and thus managing bo) once the task has been completed
      * by the GPU. i915_gem_retire_requests() is called directly when we
@@ -1959,7 +1918,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
      * bo.
      *
      * It is also used for periodic low-priority events, such as
-     * idle-timers and hangcheck.
+     * idle-timers and recording error state.
      *
      * All tasks on the workqueue are expected to acquire the dev mutex
     * so there is no point in running more than one instance of the
@@ -1977,20 +1936,11 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
     /* enable GEM by default */
     dev_priv->has_gem = 1;
 
-    if (prealloc_size > agp_size * 3 / 4) {
-        DRM_ERROR("Detected broken video BIOS with %d/%dkB of video "
-              "memory stolen.\n",
-              prealloc_size / 1024, agp_size / 1024);
-        DRM_ERROR("Disabling GEM. (try reducing stolen memory or "
-              "updating the BIOS to fix).\n");
-        dev_priv->has_gem = 0;
-    }
-
     if (dev_priv->has_gem == 0 &&
         drm_core_check_feature(dev, DRIVER_MODESET)) {
         DRM_ERROR("kernel modesetting requires GEM, disabling driver.\n");
         ret = -ENODEV;
-        goto out_iomapfree;
+        goto out_workqueue_free;
     }
 
     dev->driver->get_vblank_counter = i915_get_vblank_counter;
@@ -2013,8 +1963,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
     /* Init HWS */
     if (!I915_NEED_GFX_HWS(dev)) {
         ret = i915_init_phys_hws(dev);
-        if (ret != 0)
-            goto out_workqueue_free;
+        if (ret)
+            goto out_gem_unload;
     }
 
     if (IS_PINEVIEW(dev))
@@ -2036,16 +1986,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
     if (!IS_I945G(dev) && !IS_I945GM(dev))
         pci_enable_msi(dev->pdev);
 
-    spin_lock_init(&dev_priv->user_irq_lock);
+    spin_lock_init(&dev_priv->irq_lock);
     spin_lock_init(&dev_priv->error_lock);
     dev_priv->trace_irq_seqno = 0;
 
     ret = drm_vblank_init(dev, I915_NUM_PIPE);
-
-    if (ret) {
-        (void) i915_driver_unload(dev);
-        return ret;
-    }
+    if (ret)
+        goto out_gem_unload;
 
     /* Start out suspended */
     dev_priv->mm.suspended = 1;
@@ -2053,10 +2000,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
     intel_detect_pch(dev);
 
     if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-        ret = i915_load_modeset_init(dev, prealloc_size, agp_size);
+        ret = i915_load_modeset_init(dev);
         if (ret < 0) {
             DRM_ERROR("failed to init modeset\n");
-            goto out_workqueue_free;
+            goto out_gem_unload;
         }
     }
 
@@ -2074,12 +2021,18 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
     return 0;
 
+out_gem_unload:
+    if (dev->pdev->msi_enabled)
+        pci_disable_msi(dev->pdev);
+
+    intel_teardown_gmbus(dev);
+    intel_teardown_mchbar(dev);
 out_workqueue_free:
     destroy_workqueue(dev_priv->wq);
 out_iomapfree:
     io_mapping_free(dev_priv->mm.gtt_mapping);
 out_rmmap:
-    iounmap(dev_priv->regs);
+    pci_iounmap(dev->pdev, dev_priv->regs);
 put_bridge:
     pci_dev_put(dev_priv->bridge_dev);
 free_priv:
@@ -2096,6 +2049,9 @@ int i915_driver_unload(struct drm_device *dev)
     i915_mch_dev = NULL;
     spin_unlock(&mchdev_lock);
 
+    if (dev_priv->mm.inactive_shrinker.shrink)
+        unregister_shrinker(&dev_priv->mm.inactive_shrinker);
+
     mutex_lock(&dev->struct_mutex);
     ret = i915_gpu_idle(dev);
     if (ret)
@@ -2153,7 +2109,7 @@ int i915_driver_unload(struct drm_device *dev)
         mutex_unlock(&dev->struct_mutex);
         if (I915_HAS_FBC(dev) && i915_powersave)
             i915_cleanup_compression(dev);
-        drm_mm_takedown(&dev_priv->mm.vram);
+        drm_mm_takedown(&dev_priv->mm.stolen);
 
         intel_cleanup_overlay(dev);
 
@@ -2162,7 +2118,7 @@ int i915_driver_unload(struct drm_device *dev)
     }
 
     if (dev_priv->regs != NULL)
-        iounmap(dev_priv->regs);
+        pci_iounmap(dev->pdev, dev_priv->regs);
 
     intel_teardown_gmbus(dev);
     intel_teardown_mchbar(dev);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index f737960712e6..9eee6cf7901e 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -111,7 +111,7 @@ static const struct intel_device_info intel_i965g_info = {
 static const struct intel_device_info intel_i965gm_info = {
     .gen = 4, .is_crestline = 1,
-    .is_mobile = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1,
+    .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
     .has_overlay = 1,
     .supports_tv = 1,
 };
@@ -130,7 +130,7 @@ static const struct intel_device_info intel_g45_info = {
 static const struct intel_device_info intel_gm45_info = {
     .gen = 4, .is_g4x = 1,
-    .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
+    .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
     .has_pipe_cxsr = 1, .has_hotplug = 1,
     .supports_tv = 1,
     .has_bsd_ring = 1,
@@ -150,7 +150,7 @@ static const struct intel_device_info intel_ironlake_d_info = {
 static const struct intel_device_info intel_ironlake_m_info = {
     .gen = 5, .is_mobile = 1,
-    .need_gfx_hws = 1, .has_rc6 = 1, .has_hotplug = 1,
+    .need_gfx_hws = 1, .has_hotplug = 1,
     .has_fbc = 0, /* disabled due to buggy hardware */
     .has_bsd_ring = 1,
 };
@@ -165,6 +165,7 @@ static const struct intel_device_info intel_sandybridge_d_info = {
 static const struct intel_device_info intel_sandybridge_m_info = {
     .gen = 6, .is_mobile = 1,
     .need_gfx_hws = 1, .has_hotplug = 1,
+    .has_fbc = 1,
     .has_bsd_ring = 1,
     .has_blt_ring = 1,
 };
@@ -244,6 +245,28 @@ void intel_detect_pch (struct drm_device *dev)
     }
 }
 
+void __gen6_force_wake_get(struct drm_i915_private *dev_priv)
+{
+    int count;
+
+    count = 0;
+    while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
+        udelay(10);
+
+    I915_WRITE_NOTRACE(FORCEWAKE, 1);
+    POSTING_READ(FORCEWAKE);
+
+    count = 0;
+    while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0)
+        udelay(10);
+}
+
+void __gen6_force_wake_put(struct drm_i915_private *dev_priv)
+{
+    I915_WRITE_NOTRACE(FORCEWAKE, 0);
+    POSTING_READ(FORCEWAKE);
+}
+
 static int i915_drm_freeze(struct drm_device *dev)
 {
     struct drm_i915_private *dev_priv = dev->dev_private;
@@ -304,6 +327,12 @@ static int i915_drm_thaw(struct drm_device *dev)
     struct drm_i915_private *dev_priv = dev->dev_private;
     int error = 0;
 
+    if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+        mutex_lock(&dev->struct_mutex);
+        i915_gem_restore_gtt_mappings(dev);
+        mutex_unlock(&dev->struct_mutex);
+    }
+
     i915_restore_state(dev);
     intel_opregion_setup(dev);
 
@@ -405,6 +434,14 @@ static int ironlake_do_reset(struct drm_device *dev, u8 flags)
     return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
 }
 
+static int gen6_do_reset(struct drm_device *dev, u8 flags)
+{
+    struct drm_i915_private *dev_priv = dev->dev_private;
+
+    I915_WRITE(GEN6_GDRST, GEN6_GRDOM_FULL);
+    return wait_for((I915_READ(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
+}
+
 /**
  * i965_reset - reset chip after a hang
  * @dev: drm device to reset
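The __gen6_force_wake_get()/put() pair added above implements a simple ack-polled handshake: wait for the ACK bit to go idle, assert FORCEWAKE, then poll until the hardware acknowledges. A userspace-style sketch of the same loop against a fake register pair; the udelay stand-in crudely models the hardware latching the request, which real hardware does asynchronously:

    #include <stdio.h>
    #include <stdint.h>

    /* Fake registers standing in for FORCEWAKE / FORCEWAKE_ACK. */
    static uint32_t forcewake, forcewake_ack;
    static void udelay(int us) { (void)us; forcewake_ack = forcewake; }

    static void force_wake_get(void)
    {
        int count = 0;
        while (count++ < 50 && (forcewake_ack & 1))       /* wait for idle */
            udelay(10);

        forcewake = 1;                                    /* request the GT power well */

        count = 0;
        while (count++ < 50 && (forcewake_ack & 1) == 0)  /* wait for the ack */
            udelay(10);
    }

    static void force_wake_put(void)
    {
        forcewake = 0;                                    /* release the well */
    }

    int main(void)
    {
        force_wake_get();
        printf("ack = %u\n", forcewake_ack);              /* prints 1 */
        force_wake_put();
        return 0;
    }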
@@ -431,7 +468,8 @@ int i915_reset(struct drm_device *dev, u8 flags)
     bool need_display = true;
     int ret;
 
-    mutex_lock(&dev->struct_mutex);
+    if (!mutex_trylock(&dev->struct_mutex))
+        return -EBUSY;
 
     i915_gem_reset(dev);
 
@@ -439,6 +477,9 @@ int i915_reset(struct drm_device *dev, u8 flags)
     if (get_seconds() - dev_priv->last_gpu_reset < 5) {
         DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
     } else switch (INTEL_INFO(dev)->gen) {
+    case 6:
+        ret = gen6_do_reset(dev, flags);
+        break;
     case 5:
         ret = ironlake_do_reset(dev, flags);
         break;
@@ -472,9 +513,14 @@ int i915_reset(struct drm_device *dev, u8 flags)
      */
     if (drm_core_check_feature(dev, DRIVER_MODESET) ||
             !dev_priv->mm.suspended) {
-        struct intel_ring_buffer *ring = &dev_priv->render_ring;
         dev_priv->mm.suspended = 0;
-        ring->init(dev, ring);
+
+        dev_priv->ring[RCS].init(&dev_priv->ring[RCS]);
+        if (HAS_BSD(dev))
+            dev_priv->ring[VCS].init(&dev_priv->ring[VCS]);
+        if (HAS_BLT(dev))
+            dev_priv->ring[BCS].init(&dev_priv->ring[BCS]);
+
         mutex_unlock(&dev->struct_mutex);
         drm_irq_uninstall(dev);
         drm_irq_install(dev);
@@ -606,6 +652,8 @@ static struct drm_driver driver = {
     .device_is_agp = i915_driver_device_is_agp,
     .enable_vblank = i915_enable_vblank,
     .disable_vblank = i915_disable_vblank,
+    .get_vblank_timestamp = i915_get_vblank_timestamp,
+    .get_scanout_position = i915_get_crtc_scanoutpos,
     .irq_preinstall = i915_driver_irq_preinstall,
     .irq_postinstall = i915_driver_irq_postinstall,
     .irq_uninstall = i915_driver_irq_uninstall,
@@ -661,8 +709,6 @@ static int __init i915_init(void)
 
     driver.num_ioctls = i915_max_ioctl;
 
-    i915_gem_shrinker_init();
-
     /*
      * If CONFIG_DRM_I915_KMS is set, default to KMS unless
      * explicitly disabled with the module parameter.
@@ -684,17 +730,11 @@ static int __init i915_init(void)
         driver.driver_features &= ~DRIVER_MODESET;
 #endif
 
-    if (!(driver.driver_features & DRIVER_MODESET)) {
-        driver.suspend = i915_suspend;
-        driver.resume = i915_resume;
-    }
-
     return drm_init(&driver);
 }
 
 static void __exit i915_exit(void)
 {
-    i915_gem_shrinker_exit();
     drm_exit(&driver);
 }
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 409826da3099..aac1bf332f75 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -89,7 +89,7 @@ struct drm_i915_gem_phys_object {
     int id;
     struct page **page_list;
     drm_dma_handle_t *handle;
-    struct drm_gem_object *cur_obj;
+    struct drm_i915_gem_object *cur_obj;
 };
 
 struct mem_block {
@@ -124,9 +124,9 @@ struct drm_i915_master_private {
 #define I915_FENCE_REG_NONE -1
 
 struct drm_i915_fence_reg {
-    struct drm_gem_object *obj;
     struct list_head lru_list;
-    bool gpu;
+    struct drm_i915_gem_object *obj;
+    uint32_t setup_seqno;
 };
 
 struct sdvo_device_mapping {
@@ -139,6 +139,8 @@ struct sdvo_device_mapping {
     u8 ddc_pin;
 };
 
+struct intel_display_error_state;
+
 struct drm_i915_error_state {
     u32 eir;
     u32 pgtbl_er;
@@ -148,11 +150,23 @@ struct drm_i915_error_state {
     u32 ipehr;
     u32 instdone;
     u32 acthd;
+    u32 error; /* gen6+ */
+    u32 bcs_acthd; /* gen6+ blt engine */
+    u32 bcs_ipehr;
+    u32 bcs_ipeir;
+    u32 bcs_instdone;
+    u32 bcs_seqno;
+    u32 vcs_acthd; /* gen6+ bsd engine */
+    u32 vcs_ipehr;
+    u32 vcs_ipeir;
+    u32 vcs_instdone;
+    u32 vcs_seqno;
     u32 instpm;
     u32 instps;
     u32 instdone1;
     u32 seqno;
     u64 bbaddr;
+    u64 fence[16];
     struct timeval time;
     struct drm_i915_error_object {
         int page_count;
@@ -171,9 +185,11 @@ struct drm_i915_error_state {
         u32 tiling:2;
         u32 dirty:1;
         u32 purgeable:1;
-    } *active_bo;
-    u32 active_bo_count;
+        u32 ring:4;
+    } *active_bo, *pinned_bo;
+    u32 active_bo_count, pinned_bo_count;
     struct intel_overlay_error_state *overlay;
+    struct intel_display_error_state *display;
 };
 
 struct drm_i915_display_funcs {
@@ -207,7 +223,6 @@ struct intel_device_info {
     u8 is_broadwater : 1;
     u8 is_crestline : 1;
     u8 has_fbc : 1;
-    u8 has_rc6 : 1;
     u8 has_pipe_cxsr : 1;
     u8 has_hotplug : 1;
     u8 cursor_needs_physical : 1;
@@ -243,6 +258,7 @@ typedef struct drm_i915_private {
     const struct intel_device_info *info;
 
     int has_gem;
+    int relative_constants_mode;
 
     void __iomem *regs;
 
@@ -253,20 +269,15 @@ typedef struct drm_i915_private {
     } *gmbus;
 
     struct pci_dev *bridge_dev;
-    struct intel_ring_buffer render_ring;
-    struct intel_ring_buffer bsd_ring;
-    struct intel_ring_buffer blt_ring;
+    struct intel_ring_buffer ring[I915_NUM_RINGS];
     uint32_t next_seqno;
 
     drm_dma_handle_t *status_page_dmah;
-    void *seqno_page;
     dma_addr_t dma_status_page;
     uint32_t counter;
-    unsigned int seqno_gfx_addr;
     drm_local_map_t hws_map;
-    struct drm_gem_object *seqno_obj;
-    struct drm_gem_object *pwrctx;
-    struct drm_gem_object *renderctx;
+    struct drm_i915_gem_object *pwrctx;
+    struct drm_i915_gem_object *renderctx;
 
     struct resource mch_res;
 
@@ -275,25 +286,17 @@ typedef struct drm_i915_private {
     int front_offset;
     int current_page;
     int page_flipping;
-#define I915_DEBUG_READ (1<<0)
-#define I915_DEBUG_WRITE (1<<1)
-    unsigned long debug_flags;
 
-    wait_queue_head_t irq_queue;
     atomic_t irq_received;
-    /** Protects user_irq_refcount and irq_mask_reg */
-    spinlock_t user_irq_lock;
     u32 trace_irq_seqno;
+
+    /* protects the irq masks */
+    spinlock_t irq_lock;
     /** Cached value of IMR to avoid reads in updating the bitfield */
-    u32 irq_mask_reg;
     u32 pipestat[2];
-    /** splitted irq regs for graphics and display engine on Ironlake,
-        irq_mask_reg is still used for display irq. */
-    u32 gt_irq_mask_reg;
-    u32 gt_irq_enable_reg;
-    u32 de_irq_enable_reg;
-    u32 pch_irq_mask_reg;
-    u32 pch_irq_enable_reg;
+    u32 irq_mask;
+    u32 gt_irq_mask;
+    u32 pch_irq_mask;
 
     u32 hotplug_supported_mask;
     struct work_struct hotplug_work;
@@ -306,7 +309,7 @@ typedef struct drm_i915_private {
     int num_pipe;
 
     /* For hangcheck timer */
-#define DRM_I915_HANGCHECK_PERIOD 250 /* in ms */
+#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
     struct timer_list hangcheck_timer;
     int hangcheck_count;
     uint32_t last_acthd;
@@ -530,23 +533,21 @@ typedef struct drm_i915_private {
 
     struct {
         /** Bridge to intel-gtt-ko */
-        struct intel_gtt *gtt;
+        const struct intel_gtt *gtt;
         /** Memory allocator for GTT stolen memory */
-        struct drm_mm vram;
+        struct drm_mm stolen;
         /** Memory allocator for GTT */
         struct drm_mm gtt_space;
+        /** List of all objects in gtt_space. Used to restore gtt
+         * mappings on resume */
+        struct list_head gtt_list;
+        /** End of mappable part of GTT */
+        unsigned long gtt_mappable_end;
 
         struct io_mapping *gtt_mapping;
         int gtt_mtrr;
 
-        /**
-         * Membership on list of all loaded devices, used to evict
-         * inactive buffers under memory pressure.
-         *
-         * Modifications should only be done whilst holding the
-         * shrink_list_lock spinlock.
-         */
-        struct list_head shrink_list;
+        struct shrinker inactive_shrinker;
 
         /**
         * List of objects currently involved in rendering.
@@ -609,16 +610,6 @@ typedef struct drm_i915_private {
         struct delayed_work retire_work;
 
         /**
-         * Waiting sequence number, if any
-         */
-        uint32_t waiting_gem_seqno;
-
-        /**
-         * Last seq seen at irq time
-         */
-        uint32_t irq_gem_seqno;
-
-        /**
         * Flag if the X Server, and thus DRM, is not currently in
         * control of the device.
        *
@@ -645,16 +636,11 @@ typedef struct drm_i915_private {
         /* storage for physical objects */
         struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
 
-        uint32_t flush_rings;
-
         /* accounting, useful for userland debugging */
-        size_t object_memory;
-        size_t pin_memory;
-        size_t gtt_memory;
         size_t gtt_total;
+        size_t mappable_gtt_total;
+        size_t object_memory;
         u32 object_count;
-        u32 pin_count;
-        u32 gtt_count;
     } mm;
     struct sdvo_device_mapping sdvo_mappings[2];
     /* indicate whether the LVDS_BORDER should be enabled or not */
@@ -688,14 +674,14 @@ typedef struct drm_i915_private {
     u8 fmax;
     u8 fstart;
 
-    u64 last_count1;
-    unsigned long last_time1;
-    u64 last_count2;
-    struct timespec last_time2;
-    unsigned long gfx_power;
-    int c_m;
-    int r_t;
-    u8 corr;
+    u64 last_count1;
+    unsigned long last_time1;
+    u64 last_count2;
+    struct timespec last_time2;
+    unsigned long gfx_power;
+    int c_m;
+    int r_t;
+    u8 corr;
     spinlock_t *mchdev_lock;
 
     enum no_fbc_reason no_fbc_reason;
@@ -709,20 +695,20 @@ typedef struct drm_i915_private {
     struct intel_fbdev *fbdev;
 } drm_i915_private_t;
 
-/** driver private structure attached to each drm_gem_object */
 struct drm_i915_gem_object {
     struct drm_gem_object base;
 
     /** Current space allocated to this object in the GTT, if any. */
     struct drm_mm_node *gtt_space;
+    struct list_head gtt_list;
 
     /** This object's place on the active/flushing/inactive lists */
     struct list_head ring_list;
     struct list_head mm_list;
     /** This object's place on GPU write list */
     struct list_head gpu_write_list;
-    /** This object's place on eviction list */
-    struct list_head evict_list;
+    /** This object's place in the batchbuffer or on the eviction list */
+    struct list_head exec_list;
 
     /**
     * This is set if the object is on the active or flushing lists
@@ -738,6 +724,12 @@ struct drm_i915_gem_object {
     unsigned int dirty : 1;
 
     /**
+     * This is set if the object has been written to since the last
+     * GPU flush.
+     */
+    unsigned int pending_gpu_write : 1;
+
+    /**
     * Fence register bits (if any) for this object.  Will be set
     * as needed when mapped into the GTT.
     * Protected by dev->struct_mutex.
@@ -747,29 +739,15 @@ struct drm_i915_gem_object {
     signed int fence_reg : 5;
 
     /**
-     * Used for checking the object doesn't appear more than once
-     * in an execbuffer object list.
-     */
-    unsigned int in_execbuffer : 1;
-
-    /**
     * Advice: are the backing pages purgeable?
     */
     unsigned int madv : 2;
 
     /**
-     * Refcount for the pages array. With the current locking scheme, there
-     * are at most two concurrent users: Binding a bo to the gtt and
-     * pwrite/pread using physical addresses. So two bits for a maximum
-     * of two users are enough.
-     */
-    unsigned int pages_refcount : 2;
-#define DRM_I915_GEM_OBJECT_MAX_PAGES_REFCOUNT 0x3
-
-    /**
     * Current tiling mode for the object.
    */
     unsigned int tiling_mode : 2;
+    unsigned int tiling_changed : 1;
 
     /** How many users have pinned this object in GTT space. The following
     * users can each hold at most one reference: pwrite/pread, pin_ioctl
@@ -783,28 +761,54 @@ struct drm_i915_gem_object {
     unsigned int pin_count : 4;
#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
 
-    /** AGP memory structure for our GTT binding. */
-    DRM_AGP_MEM *agp_mem;
+    /**
+     * Is the object at the current location in the gtt mappable and
+     * fenceable? Used to avoid costly recalculations.
+     */
+    unsigned int map_and_fenceable : 1;
+
+    /**
+     * Whether the current gtt mapping needs to be mappable (and isn't just
+     * mappable by accident). Track pin and fault separately for a more
+     * accurate mappable working set.
+     */
+    unsigned int fault_mappable : 1;
+    unsigned int pin_mappable : 1;
+
+    /*
+     * Is the GPU currently using a fence to access this buffer?
+     */
+    unsigned int pending_fenced_gpu_access:1;
+    unsigned int fenced_gpu_access:1;
 
     struct page **pages;
 
     /**
-     * Current offset of the object in GTT space.
-     *
-     * This is the same as gtt_space->start
+     * DMAR support
     */
-    uint32_t gtt_offset;
+    struct scatterlist *sg_list;
+    int num_sg;
 
-    /* Which ring is refering to is this object */
-    struct intel_ring_buffer *ring;
+    /**
+     * Used for performing relocations during execbuffer insertion.
+     */
+    struct hlist_node exec_node;
+    unsigned long exec_handle;
 
     /**
-     * Fake offset for use by mmap(2)
+     * Current offset of the object in GTT space.
+     *
+     * This is the same as gtt_space->start
     */
-    uint64_t mmap_offset;
+    uint32_t gtt_offset;
 
     /** Breadcrumb of last rendering to the buffer. */
     uint32_t last_rendering_seqno;
+    struct intel_ring_buffer *ring;
+
+    /** Breadcrumb of last fenced GPU access to the buffer. */
+    uint32_t last_fenced_seqno;
+    struct intel_ring_buffer *last_fenced_ring;
 
     /** Current tiling stride for the object, if it's tiled. */
     uint32_t stride;
@@ -880,6 +884,68 @@ enum intel_chip_family {
     CHIP_I965 = 0x08,
 };
 
+#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info)
+
+#define IS_I830(dev)        ((dev)->pci_device == 0x3577)
+#define IS_845G(dev)        ((dev)->pci_device == 0x2562)
+#define IS_I85X(dev)        (INTEL_INFO(dev)->is_i85x)
+#define IS_I865G(dev)       ((dev)->pci_device == 0x2572)
+#define IS_I915G(dev)       (INTEL_INFO(dev)->is_i915g)
+#define IS_I915GM(dev)      ((dev)->pci_device == 0x2592)
+#define IS_I945G(dev)       ((dev)->pci_device == 0x2772)
+#define IS_I945GM(dev)      (INTEL_INFO(dev)->is_i945gm)
+#define IS_BROADWATER(dev)  (INTEL_INFO(dev)->is_broadwater)
+#define IS_CRESTLINE(dev)   (INTEL_INFO(dev)->is_crestline)
+#define IS_GM45(dev)        ((dev)->pci_device == 0x2A42)
+#define IS_G4X(dev)     (INTEL_INFO(dev)->is_g4x)
+#define IS_PINEVIEW_G(dev)  ((dev)->pci_device == 0xa001)
+#define IS_PINEVIEW_M(dev)  ((dev)->pci_device == 0xa011)
+#define IS_PINEVIEW(dev)    (INTEL_INFO(dev)->is_pineview)
+#define IS_G33(dev)     (INTEL_INFO(dev)->is_g33)
+#define IS_IRONLAKE_D(dev)  ((dev)->pci_device == 0x0042)
+#define IS_IRONLAKE_M(dev)  ((dev)->pci_device == 0x0046)
+#define IS_MOBILE(dev)      (INTEL_INFO(dev)->is_mobile)
+
+#define IS_GEN2(dev)    (INTEL_INFO(dev)->gen == 2)
+#define IS_GEN3(dev)    (INTEL_INFO(dev)->gen == 3)
+#define IS_GEN4(dev)    (INTEL_INFO(dev)->gen == 4)
+#define IS_GEN5(dev)    (INTEL_INFO(dev)->gen == 5)
+#define IS_GEN6(dev)    (INTEL_INFO(dev)->gen == 6)
+
+#define HAS_BSD(dev)            (INTEL_INFO(dev)->has_bsd_ring)
+#define HAS_BLT(dev)            (INTEL_INFO(dev)->has_blt_ring)
+#define I915_NEED_GFX_HWS(dev)  (INTEL_INFO(dev)->need_gfx_hws)
+
+#define HAS_OVERLAY(dev)        (INTEL_INFO(dev)->has_overlay)
+#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
+
+/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
+ * rows, which changed the alignment requirements and fence programming.
+ */
+#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
                        IS_I915GM(dev)))
+#define SUPPORTS_DIGITAL_OUTPUTS(dev)   (!IS_GEN2(dev) && !IS_PINEVIEW(dev))
+#define SUPPORTS_INTEGRATED_HDMI(dev)   (IS_G4X(dev) || IS_GEN5(dev))
+#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev))
+#define SUPPORTS_EDP(dev)       (IS_IRONLAKE_M(dev))
+#define SUPPORTS_TV(dev)        (INTEL_INFO(dev)->supports_tv)
+#define I915_HAS_HOTPLUG(dev)        (INTEL_INFO(dev)->has_hotplug)
+/* dsparb controlled by hw only */
+#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
+
+#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
+#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
+#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
+
+#define HAS_PCH_SPLIT(dev) (IS_GEN5(dev) || IS_GEN6(dev))
+#define HAS_PIPE_CONTROL(dev) (IS_GEN5(dev) || IS_GEN6(dev))
+
+#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
+#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
+#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
+
+#include "i915_trace.h"
+
 extern struct drm_ioctl_desc i915_ioctls[];
 extern int i915_max_ioctl;
 extern unsigned int i915_fbpercrtc;
@@ -907,8 +973,8 @@ extern int i915_driver_device_is_agp(struct drm_device * dev);
 extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
                   unsigned long arg);
 extern int i915_emit_box(struct drm_device *dev,
-             struct drm_clip_rect *boxes,
-             int i, int DR1, int DR4);
+             struct drm_clip_rect *box,
+             int DR1, int DR4);
 extern int i915_reset(struct drm_device *dev, u8 flags);
 extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
 extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
@@ -918,6 +984,7 @@ extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
 
 /* i915_irq.c */
 void i915_hangcheck_elapsed(unsigned long data);
+void i915_handle_error(struct drm_device *dev, bool wedged);
 extern int i915_irq_emit(struct drm_device *dev, void *data,
             struct drm_file *file_priv);
 extern int i915_irq_wait(struct drm_device *dev, void *data,
@@ -953,6 +1020,13 @@ void
 i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
 
 void intel_enable_asle (struct drm_device *dev);
+int i915_get_vblank_timestamp(struct drm_device *dev, int crtc,
+                  int *max_error,
+                  struct timeval *vblank_time,
+                  unsigned flags);
+
+int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
+                 int *vpos, int *hpos);
 
 #ifdef CONFIG_DEBUG_FS
 extern void i915_destroy_error_state(struct drm_device *dev);
@@ -1017,15 +1091,28 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
                 struct drm_file *file_priv);
 void i915_gem_load(struct drm_device *dev);
 int i915_gem_init_object(struct drm_gem_object *obj);
-struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
-                          size_t size);
+void i915_gem_flush_ring(struct drm_device *dev,
+             struct intel_ring_buffer *ring,
+             uint32_t invalidate_domains,
+             uint32_t flush_domains);
+struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
+                          size_t size);
 void i915_gem_free_object(struct drm_gem_object *obj);
-int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment);
-void i915_gem_object_unpin(struct drm_gem_object *obj);
-int i915_gem_object_unbind(struct drm_gem_object *obj);
-void i915_gem_release_mmap(struct drm_gem_object *obj);
+int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
+                     uint32_t alignment,
+                     bool map_and_fenceable);
i915_gem_object_unpin(struct drm_i915_gem_object *obj); +int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj); +void i915_gem_release_mmap(struct drm_i915_gem_object *obj); void i915_gem_lastclose(struct drm_device *dev); +int __must_check i915_mutex_lock_interruptible(struct drm_device *dev); +int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, + bool interruptible); +void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, + struct intel_ring_buffer *ring, + u32 seqno); + /** * Returns true if seq1 is later than seq2. */ @@ -1035,73 +1122,88 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2) return (int32_t)(seq1 - seq2) >= 0; } -int i915_gem_object_get_fence_reg(struct drm_gem_object *obj, - bool interruptible); -int i915_gem_object_put_fence_reg(struct drm_gem_object *obj, - bool interruptible); +static inline u32 +i915_gem_next_request_seqno(struct drm_device *dev, + struct intel_ring_buffer *ring) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + return ring->outstanding_lazy_request = dev_priv->next_seqno; +} + +int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj, + struct intel_ring_buffer *pipelined, + bool interruptible); +int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj); + void i915_gem_retire_requests(struct drm_device *dev); void i915_gem_reset(struct drm_device *dev); -void i915_gem_clflush_object(struct drm_gem_object *obj); -int i915_gem_object_set_domain(struct drm_gem_object *obj, - uint32_t read_domains, - uint32_t write_domain); -int i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj, - bool interruptible); -int i915_gem_init_ringbuffer(struct drm_device *dev); +void i915_gem_clflush_object(struct drm_i915_gem_object *obj); +int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj, + uint32_t read_domains, + uint32_t write_domain); +int __must_check i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj, + bool interruptible); +int __must_check i915_gem_init_ringbuffer(struct drm_device *dev); void i915_gem_cleanup_ringbuffer(struct drm_device *dev); -int i915_gem_do_init(struct drm_device *dev, unsigned long start, - unsigned long end); -int i915_gpu_idle(struct drm_device *dev); -int i915_gem_idle(struct drm_device *dev); -uint32_t i915_add_request(struct drm_device *dev, - struct drm_file *file_priv, - struct drm_i915_gem_request *request, - struct intel_ring_buffer *ring); -int i915_do_wait_request(struct drm_device *dev, - uint32_t seqno, - bool interruptible, - struct intel_ring_buffer *ring); +void i915_gem_do_init(struct drm_device *dev, + unsigned long start, + unsigned long mappable_end, + unsigned long end); +int __must_check i915_gpu_idle(struct drm_device *dev); +int __must_check i915_gem_idle(struct drm_device *dev); +int __must_check i915_add_request(struct drm_device *dev, + struct drm_file *file_priv, + struct drm_i915_gem_request *request, + struct intel_ring_buffer *ring); +int __must_check i915_do_wait_request(struct drm_device *dev, + uint32_t seqno, + bool interruptible, + struct intel_ring_buffer *ring); int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); -int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, - int write); -int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj, - bool pipelined); +int __must_check +i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, + bool write); +int __must_check +i915_gem_object_set_to_display_plane(struct 
drm_i915_gem_object *obj, + struct intel_ring_buffer *pipelined); int i915_gem_attach_phys_object(struct drm_device *dev, - struct drm_gem_object *obj, + struct drm_i915_gem_object *obj, int id, int align); void i915_gem_detach_phys_object(struct drm_device *dev, - struct drm_gem_object *obj); + struct drm_i915_gem_object *obj); void i915_gem_free_all_phys_object(struct drm_device *dev); -void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv); +void i915_gem_release(struct drm_device *dev, struct drm_file *file); -void i915_gem_shrinker_init(void); -void i915_gem_shrinker_exit(void); +/* i915_gem_gtt.c */ +void i915_gem_restore_gtt_mappings(struct drm_device *dev); +int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj); +void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj); /* i915_gem_evict.c */ -int i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment); -int i915_gem_evict_everything(struct drm_device *dev); -int i915_gem_evict_inactive(struct drm_device *dev); +int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size, + unsigned alignment, bool mappable); +int __must_check i915_gem_evict_everything(struct drm_device *dev, + bool purgeable_only); +int __must_check i915_gem_evict_inactive(struct drm_device *dev, + bool purgeable_only); /* i915_gem_tiling.c */ void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); -void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj); -void i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj); -bool i915_tiling_ok(struct drm_device *dev, int stride, int size, - int tiling_mode); -bool i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, - int tiling_mode); +void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj); +void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj); /* i915_gem_debug.c */ -void i915_gem_dump_object(struct drm_gem_object *obj, int len, +void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len, const char *where, uint32_t mark); #if WATCH_LISTS int i915_verify_lists(struct drm_device *dev); #else #define i915_verify_lists(dev) 0 #endif -void i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle); -void i915_gem_dump_object(struct drm_gem_object *obj, int len, +void i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, + int handle); +void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len, const char *where, uint32_t mark); /* i915_debugfs.c */ @@ -1163,6 +1265,7 @@ extern void intel_disable_fbc(struct drm_device *dev); extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval); extern bool intel_fbc_enabled(struct drm_device *dev); extern bool ironlake_set_drps(struct drm_device *dev, u8 val); +extern void gen6_set_rps(struct drm_device *dev, u8 val); extern void intel_detect_pch (struct drm_device *dev); extern int intel_trans_dp_port_sel (struct drm_crtc *crtc); @@ -1170,79 +1273,120 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc); #ifdef CONFIG_DEBUG_FS extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev); extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error); + +extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev); +extern void intel_display_print_error_state(struct seq_file *m, + struct drm_device *dev, + struct intel_display_error_state 
*error); #endif +#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS]) + +#define BEGIN_LP_RING(n) \ + intel_ring_begin(LP_RING(dev_priv), (n)) + +#define OUT_RING(x) \ + intel_ring_emit(LP_RING(dev_priv), x) + +#define ADVANCE_LP_RING() \ + intel_ring_advance(LP_RING(dev_priv)) + /** * Lock test for when it's just for synchronization of ring access. * * In that case, we don't need to do it when GEM is initialized as nobody else * has access to the ring. */ -#define RING_LOCK_TEST_WITH_RETURN(dev, file_priv) do { \ - if (((drm_i915_private_t *)dev->dev_private)->render_ring.gem_object \ - == NULL) \ - LOCK_TEST_WITH_RETURN(dev, file_priv); \ +#define RING_LOCK_TEST_WITH_RETURN(dev, file) do { \ + if (LP_RING(dev->dev_private)->obj == NULL) \ + LOCK_TEST_WITH_RETURN(dev, file); \ } while (0) -static inline u32 i915_read(struct drm_i915_private *dev_priv, u32 reg) + +#define __i915_read(x, y) \ +static inline u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \ + u##x val = read##y(dev_priv->regs + reg); \ + trace_i915_reg_rw('R', reg, val, sizeof(val)); \ + return val; \ +} +__i915_read(8, b) +__i915_read(16, w) +__i915_read(32, l) +__i915_read(64, q) +#undef __i915_read + +#define __i915_write(x, y) \ +static inline void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \ + trace_i915_reg_rw('W', reg, val, sizeof(val)); \ + write##y(val, dev_priv->regs + reg); \ +} +__i915_write(8, b) +__i915_write(16, w) +__i915_write(32, l) +__i915_write(64, q) +#undef __i915_write + +#define I915_READ8(reg) i915_read8(dev_priv, (reg)) +#define I915_WRITE8(reg, val) i915_write8(dev_priv, (reg), (val)) + +#define I915_READ16(reg) i915_read16(dev_priv, (reg)) +#define I915_WRITE16(reg, val) i915_write16(dev_priv, (reg), (val)) +#define I915_READ16_NOTRACE(reg) readw(dev_priv->regs + (reg)) +#define I915_WRITE16_NOTRACE(reg, val) writew(val, dev_priv->regs + (reg)) + +#define I915_READ(reg) i915_read32(dev_priv, (reg)) +#define I915_WRITE(reg, val) i915_write32(dev_priv, (reg), (val)) +#define I915_READ_NOTRACE(reg) readl(dev_priv->regs + (reg)) +#define I915_WRITE_NOTRACE(reg, val) writel(val, dev_priv->regs + (reg)) + +#define I915_WRITE64(reg, val) i915_write64(dev_priv, (reg), (val)) +#define I915_READ64(reg) i915_read64(dev_priv, (reg)) + +#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg) +#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg) + + +/* On SNB platform, before reading ring registers forcewake bit + * must be set to prevent GT core from power down and stale values being + * returned. 
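
The __i915_read()/__i915_write() generators above stamp out one traced MMIO accessor per width. Instantiating __i915_read(32, l), for example, produces (modulo whitespace):

static inline u32 i915_read32(struct drm_i915_private *dev_priv, u32 reg)
{
        u32 val = readl(dev_priv->regs + reg);  /* 32-bit MMIO read */
        trace_i915_reg_rw('R', reg, val, sizeof(val));
        return val;
}

The NOTRACE variants bypass the tracepoint for hot paths such as POSTING_READ(), whose only purpose is to flush a preceding write. On Sandybridge, as the comment above notes, ring-register reads additionally need the forcewake handshake that i915_safe_read() below wraps around I915_READ().
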
+ */ +void __gen6_force_wake_get(struct drm_i915_private *dev_priv); +void __gen6_force_wake_put (struct drm_i915_private *dev_priv); +static inline u32 i915_safe_read(struct drm_i915_private *dev_priv, u32 reg) { u32 val; - val = readl(dev_priv->regs + reg); - if (dev_priv->debug_flags & I915_DEBUG_READ) - printk(KERN_ERR "read 0x%08x from 0x%08x\n", val, reg); + if (dev_priv->info->gen >= 6) { + __gen6_force_wake_get(dev_priv); + val = I915_READ(reg); + __gen6_force_wake_put(dev_priv); + } else + val = I915_READ(reg); + return val; } -static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg, - u32 val) +static inline void +i915_write(struct drm_i915_private *dev_priv, u32 reg, u64 val, int len) { - writel(val, dev_priv->regs + reg); - if (dev_priv->debug_flags & I915_DEBUG_WRITE) - printk(KERN_ERR "wrote 0x%08x to 0x%08x\n", val, reg); + /* Trace down the write operation before the real write */ + trace_i915_reg_rw('W', reg, val, len); + switch (len) { + case 8: + writeq(val, dev_priv->regs + reg); + break; + case 4: + writel(val, dev_priv->regs + reg); + break; + case 2: + writew(val, dev_priv->regs + reg); + break; + case 1: + writeb(val, dev_priv->regs + reg); + break; + } } -#define I915_READ(reg) i915_read(dev_priv, (reg)) -#define I915_WRITE(reg, val) i915_write(dev_priv, (reg), (val)) -#define I915_READ16(reg) readw(dev_priv->regs + (reg)) -#define I915_WRITE16(reg, val) writel(val, dev_priv->regs + (reg)) -#define I915_READ8(reg) readb(dev_priv->regs + (reg)) -#define I915_WRITE8(reg, val) writeb(val, dev_priv->regs + (reg)) -#define I915_WRITE64(reg, val) writeq(val, dev_priv->regs + (reg)) -#define I915_READ64(reg) readq(dev_priv->regs + (reg)) -#define POSTING_READ(reg) (void)I915_READ(reg) -#define POSTING_READ16(reg) (void)I915_READ16(reg) - -#define I915_DEBUG_ENABLE_IO() (dev_priv->debug_flags |= I915_DEBUG_READ | \ - I915_DEBUG_WRITE) -#define I915_DEBUG_DISABLE_IO() (dev_priv->debug_flags &= ~(I915_DEBUG_READ | \ - I915_DEBUG_WRITE)) - -#define I915_VERBOSE 0 - -#define BEGIN_LP_RING(n) do { \ - drm_i915_private_t *dev_priv__ = dev->dev_private; \ - if (I915_VERBOSE) \ - DRM_DEBUG(" BEGIN_LP_RING %x\n", (int)(n)); \ - intel_ring_begin(dev, &dev_priv__->render_ring, (n)); \ -} while (0) - - -#define OUT_RING(x) do { \ - drm_i915_private_t *dev_priv__ = dev->dev_private; \ - if (I915_VERBOSE) \ - DRM_DEBUG(" OUT_RING %x\n", (int)(x)); \ - intel_ring_emit(dev, &dev_priv__->render_ring, x); \ -} while (0) - -#define ADVANCE_LP_RING() do { \ - drm_i915_private_t *dev_priv__ = dev->dev_private; \ - if (I915_VERBOSE) \ - DRM_DEBUG("ADVANCE_LP_RING %x\n", \ - dev_priv__->render_ring.tail); \ - intel_ring_advance(dev, &dev_priv__->render_ring); \ -} while(0) - /** * Reads a dword out of the status page, which is written to from the command * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or @@ -1259,72 +1403,9 @@ static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg, * The area from dword 0x20 to 0x3ff is available for driver usage. 
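
Because the GPU itself writes the breadcrumb seqno into the status page, request completion can be tested without touching MMIO, and the signed subtraction in i915_seqno_passed() keeps the comparison valid across 32-bit wrap-around. A deliberately naive sketch (hypothetical helper; the real path, i915_do_wait_request(), sleeps on the user interrupt instead of spinning):

static void spin_until_seqno(drm_i915_private_t *dev_priv, u32 seqno)
{
        /* Dword 0x21 of the status page is updated by MI_STORE_DATA_INDEX. */
        while (!i915_seqno_passed(READ_BREADCRUMB(dev_priv), seqno))
                cpu_relax();
}
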
*/ #define READ_HWSP(dev_priv, reg) (((volatile u32 *)\ - (dev_priv->render_ring.status_page.page_addr))[reg]) + (LP_RING(dev_priv)->status_page.page_addr))[reg]) #define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX) #define I915_GEM_HWS_INDEX 0x20 #define I915_BREADCRUMB_INDEX 0x21 -#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info) - -#define IS_I830(dev) ((dev)->pci_device == 0x3577) -#define IS_845G(dev) ((dev)->pci_device == 0x2562) -#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x) -#define IS_I865G(dev) ((dev)->pci_device == 0x2572) -#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g) -#define IS_I915GM(dev) ((dev)->pci_device == 0x2592) -#define IS_I945G(dev) ((dev)->pci_device == 0x2772) -#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm) -#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater) -#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline) -#define IS_GM45(dev) ((dev)->pci_device == 0x2A42) -#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x) -#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001) -#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011) -#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview) -#define IS_G33(dev) (INTEL_INFO(dev)->is_g33) -#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042) -#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046) -#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) - -#define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2) -#define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3) -#define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4) -#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5) -#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6) - -#define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring) -#define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring) -#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) - -#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay) -#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical) - -/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte - * rows, which changed the alignment requirements and fence programming. 
- */ -#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \ - IS_I915GM(dev))) -#define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) -#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev)) -#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev)) -#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev)) -#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv) -#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug) -/* dsparb controlled by hw only */ -#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev)) - -#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2) -#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr) -#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) -#define I915_HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6) - -#define HAS_PCH_SPLIT(dev) (IS_GEN5(dev) || IS_GEN6(dev)) -#define HAS_PIPE_CONTROL(dev) (IS_GEN5(dev) || IS_GEN6(dev)) - -#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type) -#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) -#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX) - -#define PRIMARY_RINGBUFFER_SIZE (128*1024) - #endif diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 17b1cba3b5f1..c79c0b62ef60 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -34,39 +34,31 @@ #include <linux/slab.h> #include <linux/swap.h> #include <linux/pci.h> -#include <linux/intel-gtt.h> -static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj); - -static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj, - bool pipelined); -static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj); -static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj); -static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, - int write); -static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, +static void i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj); +static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj); +static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj); +static int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, + bool write); +static int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj, uint64_t offset, uint64_t size); -static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj); -static int i915_gem_object_wait_rendering(struct drm_gem_object *obj, - bool interruptible); -static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, - unsigned alignment); -static void i915_gem_clear_fence_reg(struct drm_gem_object *obj); -static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, +static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj); +static int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, + unsigned alignment, + bool map_and_fenceable); +static void i915_gem_clear_fence_reg(struct drm_device *dev, + struct drm_i915_fence_reg *reg); +static int i915_gem_phys_pwrite(struct drm_device *dev, + struct drm_i915_gem_object *obj, struct drm_i915_gem_pwrite *args, - struct drm_file *file_priv); -static void i915_gem_free_object_tail(struct drm_gem_object *obj); + struct drm_file *file); +static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj); -static int 
-i915_gem_object_get_pages(struct drm_gem_object *obj, - gfp_t gfpmask); +static int i915_gem_inactive_shrink(struct shrinker *shrinker, + int nr_to_scan, + gfp_t gfp_mask); -static void -i915_gem_object_put_pages(struct drm_gem_object *obj); - -static LIST_HEAD(shrink_list); -static DEFINE_SPINLOCK(shrink_list_lock); /* some bookkeeping */ static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv, @@ -83,34 +75,6 @@ static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv, dev_priv->mm.object_memory -= size; } -static void i915_gem_info_add_gtt(struct drm_i915_private *dev_priv, - size_t size) -{ - dev_priv->mm.gtt_count++; - dev_priv->mm.gtt_memory += size; -} - -static void i915_gem_info_remove_gtt(struct drm_i915_private *dev_priv, - size_t size) -{ - dev_priv->mm.gtt_count--; - dev_priv->mm.gtt_memory -= size; -} - -static void i915_gem_info_add_pin(struct drm_i915_private *dev_priv, - size_t size) -{ - dev_priv->mm.pin_count++; - dev_priv->mm.pin_memory += size; -} - -static void i915_gem_info_remove_pin(struct drm_i915_private *dev_priv, - size_t size) -{ - dev_priv->mm.pin_count--; - dev_priv->mm.pin_memory -= size; -} - int i915_gem_check_is_wedged(struct drm_device *dev) { @@ -141,7 +105,7 @@ i915_gem_check_is_wedged(struct drm_device *dev) return -EIO; } -static int i915_mutex_lock_interruptible(struct drm_device *dev) +int i915_mutex_lock_interruptible(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int ret; @@ -164,75 +128,76 @@ static int i915_mutex_lock_interruptible(struct drm_device *dev) } static inline bool -i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv) +i915_gem_object_is_inactive(struct drm_i915_gem_object *obj) { - return obj_priv->gtt_space && - !obj_priv->active && - obj_priv->pin_count == 0; + return obj->gtt_space && !obj->active && obj->pin_count == 0; } -int i915_gem_do_init(struct drm_device *dev, - unsigned long start, - unsigned long end) +void i915_gem_do_init(struct drm_device *dev, + unsigned long start, + unsigned long mappable_end, + unsigned long end) { drm_i915_private_t *dev_priv = dev->dev_private; - if (start >= end || - (start & (PAGE_SIZE - 1)) != 0 || - (end & (PAGE_SIZE - 1)) != 0) { - return -EINVAL; - } - drm_mm_init(&dev_priv->mm.gtt_space, start, end - start); dev_priv->mm.gtt_total = end - start; - - return 0; + dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start; + dev_priv->mm.gtt_mappable_end = mappable_end; } int i915_gem_init_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) + struct drm_file *file) { struct drm_i915_gem_init *args = data; - int ret; + + if (args->gtt_start >= args->gtt_end || + (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1)) + return -EINVAL; mutex_lock(&dev->struct_mutex); - ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end); + i915_gem_do_init(dev, args->gtt_start, args->gtt_end, args->gtt_end); mutex_unlock(&dev->struct_mutex); - return ret; + return 0; } int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) + struct drm_file *file) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_get_aperture *args = data; + struct drm_i915_gem_object *obj; + size_t pinned; if (!(dev->driver->driver_features & DRIVER_GEM)) return -ENODEV; + pinned = 0; mutex_lock(&dev->struct_mutex); - args->aper_size = dev_priv->mm.gtt_total; - args->aper_available_size = args->aper_size - dev_priv->mm.pin_memory; + list_for_each_entry(obj, 
&dev_priv->mm.pinned_list, mm_list) + pinned += obj->gtt_space->size; mutex_unlock(&dev->struct_mutex); + args->aper_size = dev_priv->mm.gtt_total; + args->aper_available_size = args->aper_size -pinned; + return 0; } - /** * Creates a new mm object and returns a handle to it. */ int i915_gem_create_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) + struct drm_file *file) { struct drm_i915_gem_create *args = data; - struct drm_gem_object *obj; + struct drm_i915_gem_object *obj; int ret; u32 handle; @@ -243,45 +208,28 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data, if (obj == NULL) return -ENOMEM; - ret = drm_gem_handle_create(file_priv, obj, &handle); + ret = drm_gem_handle_create(file, &obj->base, &handle); if (ret) { - drm_gem_object_release(obj); - i915_gem_info_remove_obj(dev->dev_private, obj->size); + drm_gem_object_release(&obj->base); + i915_gem_info_remove_obj(dev->dev_private, obj->base.size); kfree(obj); return ret; } /* drop reference from allocate - handle holds it now */ - drm_gem_object_unreference(obj); + drm_gem_object_unreference(&obj->base); trace_i915_gem_object_create(obj); args->handle = handle; return 0; } -static inline int -fast_shmem_read(struct page **pages, - loff_t page_base, int page_offset, - char __user *data, - int length) +static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj) { - char *vaddr; - int ret; - - vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT]); - ret = __copy_to_user_inatomic(data, vaddr + page_offset, length); - kunmap_atomic(vaddr); - - return ret; -} - -static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj) -{ - drm_i915_private_t *dev_priv = obj->dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + drm_i915_private_t *dev_priv = obj->base.dev->dev_private; return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && - obj_priv->tiling_mode != I915_TILING_NONE; + obj->tiling_mode != I915_TILING_NONE; } static inline void @@ -357,38 +305,51 @@ slow_shmem_bit17_copy(struct page *gpu_page, * fault, it fails so we can fall back to i915_gem_shmem_pwrite_slow(). 
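
The "fails on fault" contract works because the copy runs under kmap_atomic(), where sleeping is forbidden: the _inatomic copy variants return the number of uncopied bytes instead of faulting the page in. Condensed from the loop body that follows:

        vaddr = kmap_atomic(page);      /* no sleeping from here... */
        ret = __copy_to_user_inatomic(user_data, vaddr + page_offset,
                                      page_length);
        kunmap_atomic(vaddr);           /* ...to here */
        if (ret)
                return -EFAULT; /* unreadable user page: retry via slow path */
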
*/ static int -i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj, +i915_gem_shmem_pread_fast(struct drm_device *dev, + struct drm_i915_gem_object *obj, struct drm_i915_gem_pread *args, - struct drm_file *file_priv) + struct drm_file *file) { - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; ssize_t remain; - loff_t offset, page_base; + loff_t offset; char __user *user_data; int page_offset, page_length; user_data = (char __user *) (uintptr_t) args->data_ptr; remain = args->size; - obj_priv = to_intel_bo(obj); offset = args->offset; while (remain > 0) { + struct page *page; + char *vaddr; + int ret; + /* Operation in this page * - * page_base = page offset within aperture * page_offset = offset within page * page_length = bytes to copy for this page */ - page_base = (offset & ~(PAGE_SIZE-1)); page_offset = offset & (PAGE_SIZE-1); page_length = remain; if ((page_offset + remain) > PAGE_SIZE) page_length = PAGE_SIZE - page_offset; - if (fast_shmem_read(obj_priv->pages, - page_base, page_offset, - user_data, page_length)) + page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT, + GFP_HIGHUSER | __GFP_RECLAIMABLE); + if (IS_ERR(page)) + return PTR_ERR(page); + + vaddr = kmap_atomic(page); + ret = __copy_to_user_inatomic(user_data, + vaddr + page_offset, + page_length); + kunmap_atomic(vaddr); + + mark_page_accessed(page); + page_cache_release(page); + if (ret) return -EFAULT; remain -= page_length; @@ -399,30 +360,6 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj, return 0; } -static int -i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj) -{ - int ret; - - ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN); - - /* If we've insufficient memory to map in the pages, attempt - * to make some space by throwing out some old buffers. - */ - if (ret == -ENOMEM) { - struct drm_device *dev = obj->dev; - - ret = i915_gem_evict_something(dev, obj->size, - i915_gem_get_gtt_alignment(obj)); - if (ret) - return ret; - - ret = i915_gem_object_get_pages(obj, 0); - } - - return ret; -} - /** * This is the fallback shmem pread path, which allocates temporary storage * in kernel space to copy_to_user into outside of the struct_mutex, so we @@ -430,18 +367,19 @@ i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj) * and not take page faults. 
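
The slow path may hold struct_mutex while copying precisely because it pins the destination pages up front; the setup, elided from the hunk above, follows the standard pattern (a sketch using the get_user_pages() signature of this kernel era):

        down_read(&mm->mmap_sem);
        pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
                                      num_pages, 1 /* write */, 0 /* force */,
                                      user_pages, NULL);
        up_read(&mm->mmap_sem);
        if (pinned_pages < num_pages) {
                ret = -EFAULT;
                goto out;       /* releases the pages already pinned */
        }
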
*/ static int -i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, +i915_gem_shmem_pread_slow(struct drm_device *dev, + struct drm_i915_gem_object *obj, struct drm_i915_gem_pread *args, - struct drm_file *file_priv) + struct drm_file *file) { - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; struct mm_struct *mm = current->mm; struct page **user_pages; ssize_t remain; loff_t offset, pinned_pages, i; loff_t first_data_page, last_data_page, num_pages; - int shmem_page_index, shmem_page_offset; - int data_page_index, data_page_offset; + int shmem_page_offset; + int data_page_index, data_page_offset; int page_length; int ret; uint64_t data_ptr = args->data_ptr; @@ -480,19 +418,18 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); - obj_priv = to_intel_bo(obj); offset = args->offset; while (remain > 0) { + struct page *page; + /* Operation in this page * - * shmem_page_index = page number within shmem file * shmem_page_offset = offset within page in shmem file * data_page_index = page number in get_user_pages return * data_page_offset = offset with data_page_index page. * page_length = bytes to copy for this page */ - shmem_page_index = offset / PAGE_SIZE; shmem_page_offset = offset & ~PAGE_MASK; data_page_index = data_ptr / PAGE_SIZE - first_data_page; data_page_offset = data_ptr & ~PAGE_MASK; @@ -503,8 +440,13 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, if ((data_page_offset + page_length) > PAGE_SIZE) page_length = PAGE_SIZE - data_page_offset; + page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT, + GFP_HIGHUSER | __GFP_RECLAIMABLE); + if (IS_ERR(page)) + return PTR_ERR(page); + if (do_bit17_swizzling) { - slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index], + slow_shmem_bit17_copy(page, shmem_page_offset, user_pages[data_page_index], data_page_offset, @@ -513,11 +455,14 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, } else { slow_shmem_copy(user_pages[data_page_index], data_page_offset, - obj_priv->pages[shmem_page_index], + page, shmem_page_offset, page_length); } + mark_page_accessed(page); + page_cache_release(page); + remain -= page_length; data_ptr += page_length; offset += page_length; @@ -526,6 +471,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, out: for (i = 0; i < pinned_pages; i++) { SetPageDirty(user_pages[i]); + mark_page_accessed(user_pages[i]); page_cache_release(user_pages[i]); } drm_free_large(user_pages); @@ -540,11 +486,10 @@ out: */ int i915_gem_pread_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) + struct drm_file *file) { struct drm_i915_gem_pread *args = data; - struct drm_gem_object *obj; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; int ret = 0; if (args->size == 0) @@ -564,39 +509,33 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, if (ret) return ret; - obj = drm_gem_object_lookup(dev, file_priv, args->handle); + obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); if (obj == NULL) { ret = -ENOENT; goto unlock; } - obj_priv = to_intel_bo(obj); /* Bounds check source. 
*/ - if (args->offset > obj->size || args->size > obj->size - args->offset) { + if (args->offset > obj->base.size || + args->size > obj->base.size - args->offset) { ret = -EINVAL; goto out; } - ret = i915_gem_object_get_pages_or_evict(obj); - if (ret) - goto out; - ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset, args->size); if (ret) - goto out_put; + goto out; ret = -EFAULT; if (!i915_gem_object_needs_bit17_swizzle(obj)) - ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv); + ret = i915_gem_shmem_pread_fast(dev, obj, args, file); if (ret == -EFAULT) - ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv); + ret = i915_gem_shmem_pread_slow(dev, obj, args, file); -out_put: - i915_gem_object_put_pages(obj); out: - drm_gem_object_unreference(obj); + drm_gem_object_unreference(&obj->base); unlock: mutex_unlock(&dev->struct_mutex); return ret; @@ -646,32 +585,16 @@ slow_kernel_write(struct io_mapping *mapping, io_mapping_unmap(dst_vaddr); } -static inline int -fast_shmem_write(struct page **pages, - loff_t page_base, int page_offset, - char __user *data, - int length) -{ - char *vaddr; - int ret; - - vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT]); - ret = __copy_from_user_inatomic(vaddr + page_offset, data, length); - kunmap_atomic(vaddr); - - return ret; -} - /** * This is the fast pwrite path, where we copy the data directly from the * user into the GTT, uncached. */ static int -i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, +i915_gem_gtt_pwrite_fast(struct drm_device *dev, + struct drm_i915_gem_object *obj, struct drm_i915_gem_pwrite *args, - struct drm_file *file_priv) + struct drm_file *file) { - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); drm_i915_private_t *dev_priv = dev->dev_private; ssize_t remain; loff_t offset, page_base; @@ -681,8 +604,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, user_data = (char __user *) (uintptr_t) args->data_ptr; remain = args->size; - obj_priv = to_intel_bo(obj); - offset = obj_priv->gtt_offset + args->offset; + offset = obj->gtt_offset + args->offset; while (remain > 0) { /* Operation in this page @@ -722,11 +644,11 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit). */ static int -i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, +i915_gem_gtt_pwrite_slow(struct drm_device *dev, + struct drm_i915_gem_object *obj, struct drm_i915_gem_pwrite *args, - struct drm_file *file_priv) + struct drm_file *file) { - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); drm_i915_private_t *dev_priv = dev->dev_private; ssize_t remain; loff_t gtt_page_base, offset; @@ -763,12 +685,15 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, goto out_unpin_pages; } - ret = i915_gem_object_set_to_gtt_domain(obj, 1); + ret = i915_gem_object_set_to_gtt_domain(obj, true); + if (ret) + goto out_unpin_pages; + + ret = i915_gem_object_put_fence(obj); if (ret) goto out_unpin_pages; - obj_priv = to_intel_bo(obj); - offset = obj_priv->gtt_offset + args->offset; + offset = obj->gtt_offset + args->offset; while (remain > 0) { /* Operation in this page @@ -814,39 +739,58 @@ out_unpin_pages: * copy_from_user into the kmapped pages backing the object. 
*/ static int -i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, +i915_gem_shmem_pwrite_fast(struct drm_device *dev, + struct drm_i915_gem_object *obj, struct drm_i915_gem_pwrite *args, - struct drm_file *file_priv) + struct drm_file *file) { - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; ssize_t remain; - loff_t offset, page_base; + loff_t offset; char __user *user_data; int page_offset, page_length; user_data = (char __user *) (uintptr_t) args->data_ptr; remain = args->size; - obj_priv = to_intel_bo(obj); offset = args->offset; - obj_priv->dirty = 1; + obj->dirty = 1; while (remain > 0) { + struct page *page; + char *vaddr; + int ret; + /* Operation in this page * - * page_base = page offset within aperture * page_offset = offset within page * page_length = bytes to copy for this page */ - page_base = (offset & ~(PAGE_SIZE-1)); page_offset = offset & (PAGE_SIZE-1); page_length = remain; if ((page_offset + remain) > PAGE_SIZE) page_length = PAGE_SIZE - page_offset; - if (fast_shmem_write(obj_priv->pages, - page_base, page_offset, - user_data, page_length)) + page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT, + GFP_HIGHUSER | __GFP_RECLAIMABLE); + if (IS_ERR(page)) + return PTR_ERR(page); + + vaddr = kmap_atomic(page, KM_USER0); + ret = __copy_from_user_inatomic(vaddr + page_offset, + user_data, + page_length); + kunmap_atomic(vaddr, KM_USER0); + + set_page_dirty(page); + mark_page_accessed(page); + page_cache_release(page); + + /* If we get a fault while copying data, then (presumably) our + * source page isn't available. Return the error and we'll + * retry in the slow path. + */ + if (ret) return -EFAULT; remain -= page_length; @@ -865,17 +809,18 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, * struct_mutex is held. */ static int -i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, +i915_gem_shmem_pwrite_slow(struct drm_device *dev, + struct drm_i915_gem_object *obj, struct drm_i915_gem_pwrite *args, - struct drm_file *file_priv) + struct drm_file *file) { - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; struct mm_struct *mm = current->mm; struct page **user_pages; ssize_t remain; loff_t offset, pinned_pages, i; loff_t first_data_page, last_data_page, num_pages; - int shmem_page_index, shmem_page_offset; + int shmem_page_offset; int data_page_index, data_page_offset; int page_length; int ret; @@ -913,20 +858,19 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); - obj_priv = to_intel_bo(obj); offset = args->offset; - obj_priv->dirty = 1; + obj->dirty = 1; while (remain > 0) { + struct page *page; + /* Operation in this page * - * shmem_page_index = page number within shmem file * shmem_page_offset = offset within page in shmem file * data_page_index = page number in get_user_pages return * data_page_offset = offset with data_page_index page. 
* page_length = bytes to copy for this page */ - shmem_page_index = offset / PAGE_SIZE; shmem_page_offset = offset & ~PAGE_MASK; data_page_index = data_ptr / PAGE_SIZE - first_data_page; data_page_offset = data_ptr & ~PAGE_MASK; @@ -937,21 +881,32 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, if ((data_page_offset + page_length) > PAGE_SIZE) page_length = PAGE_SIZE - data_page_offset; + page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT, + GFP_HIGHUSER | __GFP_RECLAIMABLE); + if (IS_ERR(page)) { + ret = PTR_ERR(page); + goto out; + } + if (do_bit17_swizzling) { - slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index], + slow_shmem_bit17_copy(page, shmem_page_offset, user_pages[data_page_index], data_page_offset, page_length, 0); } else { - slow_shmem_copy(obj_priv->pages[shmem_page_index], + slow_shmem_copy(page, shmem_page_offset, user_pages[data_page_index], data_page_offset, page_length); } + set_page_dirty(page); + mark_page_accessed(page); + page_cache_release(page); + remain -= page_length; data_ptr += page_length; offset += page_length; @@ -975,8 +930,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { struct drm_i915_gem_pwrite *args = data; - struct drm_gem_object *obj; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; int ret; if (args->size == 0) @@ -996,15 +950,15 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, if (ret) return ret; - obj = drm_gem_object_lookup(dev, file, args->handle); + obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); if (obj == NULL) { ret = -ENOENT; goto unlock; } - obj_priv = to_intel_bo(obj); /* Bounds check destination. */ - if (args->offset > obj->size || args->size > obj->size - args->offset) { + if (args->offset > obj->base.size || + args->size > obj->base.size - args->offset) { ret = -EINVAL; goto out; } @@ -1015,16 +969,19 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, * pread/pwrite currently are reading and writing from the CPU * perspective, requiring manual detiling by the client. 
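
From userspace the phys/GTT/shmem split is invisible; all three paths are reached through the same ioctl. A libdrm-style sketch (bo_upload() is hypothetical; fd and handle are assumed to exist; struct fields are from i915_drm.h):

#include <stdint.h>
#include <xf86drm.h>
#include <drm/i915_drm.h>

static int bo_upload(int fd, uint32_t handle, const void *data, uint64_t len)
{
        struct drm_i915_gem_pwrite pwrite = {
                .handle   = handle,     /* from DRM_IOCTL_I915_GEM_CREATE */
                .offset   = 0,
                .size     = len,
                .data_ptr = (uintptr_t)data,
        };
        return drmIoctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
}
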
*/ - if (obj_priv->phys_obj) + if (obj->phys_obj) ret = i915_gem_phys_pwrite(dev, obj, args, file); - else if (obj_priv->tiling_mode == I915_TILING_NONE && - obj_priv->gtt_space && - obj->write_domain != I915_GEM_DOMAIN_CPU) { - ret = i915_gem_object_pin(obj, 0); + else if (obj->gtt_space && + obj->base.write_domain != I915_GEM_DOMAIN_CPU) { + ret = i915_gem_object_pin(obj, 0, true); if (ret) goto out; - ret = i915_gem_object_set_to_gtt_domain(obj, 1); + ret = i915_gem_object_set_to_gtt_domain(obj, true); + if (ret) + goto out_unpin; + + ret = i915_gem_object_put_fence(obj); if (ret) goto out_unpin; @@ -1035,26 +992,19 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, out_unpin: i915_gem_object_unpin(obj); } else { - ret = i915_gem_object_get_pages_or_evict(obj); - if (ret) - goto out; - ret = i915_gem_object_set_to_cpu_domain(obj, 1); if (ret) - goto out_put; + goto out; ret = -EFAULT; if (!i915_gem_object_needs_bit17_swizzle(obj)) ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file); if (ret == -EFAULT) ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file); - -out_put: - i915_gem_object_put_pages(obj); } out: - drm_gem_object_unreference(obj); + drm_gem_object_unreference(&obj->base); unlock: mutex_unlock(&dev->struct_mutex); return ret; @@ -1066,12 +1016,10 @@ unlock: */ int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) + struct drm_file *file) { - struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_set_domain *args = data; - struct drm_gem_object *obj; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; uint32_t read_domains = args->read_domains; uint32_t write_domain = args->write_domain; int ret; @@ -1096,28 +1044,15 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, if (ret) return ret; - obj = drm_gem_object_lookup(dev, file_priv, args->handle); + obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); if (obj == NULL) { ret = -ENOENT; goto unlock; } - obj_priv = to_intel_bo(obj); - - intel_mark_busy(dev, obj); if (read_domains & I915_GEM_DOMAIN_GTT) { ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0); - /* Update the LRU on the fence for the CPU access that's - * about to occur. - */ - if (obj_priv->fence_reg != I915_FENCE_REG_NONE) { - struct drm_i915_fence_reg *reg = - &dev_priv->fence_regs[obj_priv->fence_reg]; - list_move_tail(®->lru_list, - &dev_priv->mm.fence_list); - } - /* Silently promote "you're not bound, there was nothing to do" * to success, since the client was just asking us to * make sure everything was done. 
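
A sketch of the userspace call this handler services, typically issued before CPU access to a mapping (constants from i915_drm.h; fd and handle assumed):

        struct drm_i915_gem_set_domain sd = {
                .handle       = handle,
                .read_domains = I915_GEM_DOMAIN_GTT,
                .write_domain = I915_GEM_DOMAIN_GTT,    /* 0 if only reading */
        };
        drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
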
@@ -1128,11 +1063,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0); } - /* Maintain LRU order of "inactive" objects */ - if (ret == 0 && i915_gem_object_is_inactive(obj_priv)) - list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list); - - drm_gem_object_unreference(obj); + drm_gem_object_unreference(&obj->base); unlock: mutex_unlock(&dev->struct_mutex); return ret; @@ -1143,10 +1074,10 @@ unlock: */ int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) + struct drm_file *file) { struct drm_i915_gem_sw_finish *args = data; - struct drm_gem_object *obj; + struct drm_i915_gem_object *obj; int ret = 0; if (!(dev->driver->driver_features & DRIVER_GEM)) @@ -1156,17 +1087,17 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, if (ret) return ret; - obj = drm_gem_object_lookup(dev, file_priv, args->handle); + obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); if (obj == NULL) { ret = -ENOENT; goto unlock; } /* Pinned buffers may be scanout, so flush the cache */ - if (to_intel_bo(obj)->pin_count) + if (obj->pin_count) i915_gem_object_flush_cpu_write_domain(obj); - drm_gem_object_unreference(obj); + drm_gem_object_unreference(&obj->base); unlock: mutex_unlock(&dev->struct_mutex); return ret; @@ -1181,8 +1112,9 @@ unlock: */ int i915_gem_mmap_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) + struct drm_file *file) { + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_mmap *args = data; struct drm_gem_object *obj; loff_t offset; @@ -1191,10 +1123,15 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, if (!(dev->driver->driver_features & DRIVER_GEM)) return -ENODEV; - obj = drm_gem_object_lookup(dev, file_priv, args->handle); + obj = drm_gem_object_lookup(dev, file, args->handle); if (obj == NULL) return -ENOENT; + if (obj->size > dev_priv->mm.gtt_mappable_end) { + drm_gem_object_unreference_unlocked(obj); + return -E2BIG; + } + offset = args->offset; down_write(¤t->mm->mmap_sem); @@ -1229,10 +1166,9 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, */ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { - struct drm_gem_object *obj = vma->vm_private_data; - struct drm_device *dev = obj->dev; + struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data); + struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); pgoff_t page_offset; unsigned long pfn; int ret = 0; @@ -1244,27 +1180,35 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) /* Now bind it into the GTT if needed */ mutex_lock(&dev->struct_mutex); - if (!obj_priv->gtt_space) { - ret = i915_gem_object_bind_to_gtt(obj, 0); - if (ret) - goto unlock; - ret = i915_gem_object_set_to_gtt_domain(obj, write); + if (!obj->map_and_fenceable) { + ret = i915_gem_object_unbind(obj); if (ret) goto unlock; } - - /* Need a new fence register? 
*/ - if (obj_priv->tiling_mode != I915_TILING_NONE) { - ret = i915_gem_object_get_fence_reg(obj, true); + if (!obj->gtt_space) { + ret = i915_gem_object_bind_to_gtt(obj, 0, true); if (ret) goto unlock; } - if (i915_gem_object_is_inactive(obj_priv)) - list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list); + ret = i915_gem_object_set_to_gtt_domain(obj, write); + if (ret) + goto unlock; + + if (obj->tiling_mode == I915_TILING_NONE) + ret = i915_gem_object_put_fence(obj); + else + ret = i915_gem_object_get_fence(obj, NULL, true); + if (ret) + goto unlock; + + if (i915_gem_object_is_inactive(obj)) + list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); + + obj->fault_mappable = true; - pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) + + pfn = ((dev->agp->base + obj->gtt_offset) >> PAGE_SHIFT) + page_offset; /* Finally, remap it using the new GTT offset */ @@ -1273,11 +1217,12 @@ unlock: mutex_unlock(&dev->struct_mutex); switch (ret) { + case -EAGAIN: + set_need_resched(); case 0: case -ERESTARTSYS: return VM_FAULT_NOPAGE; case -ENOMEM: - case -EAGAIN: return VM_FAULT_OOM; default: return VM_FAULT_SIGBUS; @@ -1296,37 +1241,39 @@ unlock: * This routine allocates and attaches a fake offset for @obj. */ static int -i915_gem_create_mmap_offset(struct drm_gem_object *obj) +i915_gem_create_mmap_offset(struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj->dev; + struct drm_device *dev = obj->base.dev; struct drm_gem_mm *mm = dev->mm_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); struct drm_map_list *list; struct drm_local_map *map; int ret = 0; /* Set the object up for mmap'ing */ - list = &obj->map_list; + list = &obj->base.map_list; list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL); if (!list->map) return -ENOMEM; map = list->map; map->type = _DRM_GEM; - map->size = obj->size; + map->size = obj->base.size; map->handle = obj; /* Get a DRM GEM mmap offset allocated... */ list->file_offset_node = drm_mm_search_free(&mm->offset_manager, - obj->size / PAGE_SIZE, 0, 0); + obj->base.size / PAGE_SIZE, + 0, 0); if (!list->file_offset_node) { - DRM_ERROR("failed to allocate offset for bo %d\n", obj->name); + DRM_ERROR("failed to allocate offset for bo %d\n", + obj->base.name); ret = -ENOSPC; goto out_free_list; } list->file_offset_node = drm_mm_get_block(list->file_offset_node, - obj->size / PAGE_SIZE, 0); + obj->base.size / PAGE_SIZE, + 0); if (!list->file_offset_node) { ret = -ENOMEM; goto out_free_list; @@ -1339,16 +1286,13 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj) goto out_free_mm; } - /* By now we should be all set, any drm_mmap request on the offset - * below will get to our mmap & fault handler */ - obj_priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT; - return 0; out_free_mm: drm_mm_put_block(list->file_offset_node); out_free_list: kfree(list->map); + list->map = NULL; return ret; } @@ -1368,38 +1312,51 @@ out_free_list: * fixup by i915_gem_fault(). 
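
Userspace reaches this fault handler through the GTT-mmap two-step: ask the kernel for the fake offset, then mmap that offset on the DRM fd. After i915_gem_release_mmap() below tears down the PTEs, the next CPU access simply refaults through the path above. A sketch (fd, handle, size and ptr are assumed; needs sys/mman.h and i915_drm.h):

        struct drm_i915_gem_mmap_gtt mg = { .handle = handle };
        drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mg);
        ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
                   MAP_SHARED, fd, mg.offset);  /* access faults bo into GTT */
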
*/ void -i915_gem_release_mmap(struct drm_gem_object *obj) +i915_gem_release_mmap(struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj->dev; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + if (!obj->fault_mappable) + return; + + unmap_mapping_range(obj->base.dev->dev_mapping, + (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT, + obj->base.size, 1); - if (dev->dev_mapping) - unmap_mapping_range(dev->dev_mapping, - obj_priv->mmap_offset, obj->size, 1); + obj->fault_mappable = false; } static void -i915_gem_free_mmap_offset(struct drm_gem_object *obj) +i915_gem_free_mmap_offset(struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj->dev; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + struct drm_device *dev = obj->base.dev; struct drm_gem_mm *mm = dev->mm_private; - struct drm_map_list *list; + struct drm_map_list *list = &obj->base.map_list; - list = &obj->map_list; drm_ht_remove_item(&mm->offset_hash, &list->hash); + drm_mm_put_block(list->file_offset_node); + kfree(list->map); + list->map = NULL; +} - if (list->file_offset_node) { - drm_mm_put_block(list->file_offset_node); - list->file_offset_node = NULL; - } +static uint32_t +i915_gem_get_gtt_size(struct drm_i915_gem_object *obj) +{ + struct drm_device *dev = obj->base.dev; + uint32_t size; - if (list->map) { - kfree(list->map); - list->map = NULL; - } + if (INTEL_INFO(dev)->gen >= 4 || + obj->tiling_mode == I915_TILING_NONE) + return obj->base.size; - obj_priv->mmap_offset = 0; + /* Previous chips need a power-of-two fence region when tiling */ + if (INTEL_INFO(dev)->gen == 3) + size = 1024*1024; + else + size = 512*1024; + + while (size < obj->base.size) + size <<= 1; + + return size; } /** @@ -1407,42 +1364,68 @@ i915_gem_free_mmap_offset(struct drm_gem_object *obj) * @obj: object to check * * Return the required GTT alignment for an object, taking into account - * potential fence register mapping if needed. + * potential fence register mapping. */ static uint32_t -i915_gem_get_gtt_alignment(struct drm_gem_object *obj) +i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj->dev; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - int start, i; + struct drm_device *dev = obj->base.dev; /* * Minimum alignment is 4k (GTT page size), but might be greater * if a fence register is needed for the object. */ - if (INTEL_INFO(dev)->gen >= 4 || obj_priv->tiling_mode == I915_TILING_NONE) + if (INTEL_INFO(dev)->gen >= 4 || + obj->tiling_mode == I915_TILING_NONE) return 4096; /* * Previous chips need to be aligned to the size of the smallest * fence register that can contain the object. */ - if (INTEL_INFO(dev)->gen == 3) - start = 1024*1024; - else - start = 512*1024; + return i915_gem_get_gtt_size(obj); +} - for (i = start; i < obj->size; i <<= 1) - ; +/** + * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an + * unfenced object + * @obj: object to check + * + * Return the required GTT alignment for an object, only taking into account + * unfenced tiled surface requirements. + */ +static uint32_t +i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj) +{ + struct drm_device *dev = obj->base.dev; + int tile_height; + + /* + * Minimum alignment is 4k (GTT page size) for sane hw. 
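
A worked example of the fenced sizing above: pre-965 fence regions must be power-of-two sized, so on gen3 a 1.5 MiB tiled object grows from the 1 MiB floor to a 2 MiB region. Mirrored as a standalone helper:

static u32 gen3_fence_size(u32 obj_size)
{
        u32 size = 1024*1024;   /* gen3 minimum fence region */
        while (size < obj_size)
                size <<= 1;     /* 1.5 MiB in -> 2 MiB out */
        return size;
}

The unfenced alignment introduced below instead rounds to an even tile row: on gen2 (32-row tiles) with a 2048-byte stride, that is 32 * 2048 * 2 = 128 KiB.
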
+ */ + if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) || + obj->tiling_mode == I915_TILING_NONE) + return 4096; + + /* + * Older chips need unfenced tiled buffers to be aligned to the left + * edge of an even tile row (where tile rows are counted as if the bo is + * placed in a fenced gtt region). + */ + if (IS_GEN2(dev) || + (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))) + tile_height = 32; + else + tile_height = 8; - return i; + return tile_height * obj->stride * 2; } /** * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing * @dev: DRM device * @data: GTT mapping ioctl data - * @file_priv: GEM object info + * @file: GEM object info * * Simply returns the fake offset to userspace so it can mmap it. * The mmap call will end up in drm_gem_mmap(), which will set things @@ -1455,11 +1438,11 @@ i915_gem_get_gtt_alignment(struct drm_gem_object *obj) */ int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) + struct drm_file *file) { + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_mmap_gtt *args = data; - struct drm_gem_object *obj; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; int ret; if (!(dev->driver->driver_features & DRIVER_GEM)) @@ -1469,130 +1452,196 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, if (ret) return ret; - obj = drm_gem_object_lookup(dev, file_priv, args->handle); + obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); if (obj == NULL) { ret = -ENOENT; goto unlock; } - obj_priv = to_intel_bo(obj); - if (obj_priv->madv != I915_MADV_WILLNEED) { + if (obj->base.size > dev_priv->mm.gtt_mappable_end) { + ret = -E2BIG; + goto unlock; + } + + if (obj->madv != I915_MADV_WILLNEED) { DRM_ERROR("Attempting to mmap a purgeable buffer\n"); ret = -EINVAL; goto out; } - if (!obj_priv->mmap_offset) { + if (!obj->base.map_list.map) { ret = i915_gem_create_mmap_offset(obj); if (ret) goto out; } - args->offset = obj_priv->mmap_offset; - - /* - * Pull it into the GTT so that we have a page list (makes the - * initial fault faster and any subsequent flushing possible). - */ - if (!obj_priv->agp_mem) { - ret = i915_gem_object_bind_to_gtt(obj, 0); - if (ret) - goto out; - } + args->offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT; out: - drm_gem_object_unreference(obj); + drm_gem_object_unreference(&obj->base); unlock: mutex_unlock(&dev->struct_mutex); return ret; } +static int +i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj, + gfp_t gfpmask) +{ + int page_count, i; + struct address_space *mapping; + struct inode *inode; + struct page *page; + + /* Get the list of pages out of our struct file. They'll be pinned + * at this point until we release them. 
+ */ + page_count = obj->base.size / PAGE_SIZE; + BUG_ON(obj->pages != NULL); + obj->pages = drm_malloc_ab(page_count, sizeof(struct page *)); + if (obj->pages == NULL) + return -ENOMEM; + + inode = obj->base.filp->f_path.dentry->d_inode; + mapping = inode->i_mapping; + for (i = 0; i < page_count; i++) { + page = read_cache_page_gfp(mapping, i, + GFP_HIGHUSER | + __GFP_COLD | + __GFP_RECLAIMABLE | + gfpmask); + if (IS_ERR(page)) + goto err_pages; + + obj->pages[i] = page; + } + + if (obj->tiling_mode != I915_TILING_NONE) + i915_gem_object_do_bit_17_swizzle(obj); + + return 0; + +err_pages: + while (i--) + page_cache_release(obj->pages[i]); + + drm_free_large(obj->pages); + obj->pages = NULL; + return PTR_ERR(page); +} + static void -i915_gem_object_put_pages(struct drm_gem_object *obj) +i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj) { - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - int page_count = obj->size / PAGE_SIZE; + int page_count = obj->base.size / PAGE_SIZE; int i; - BUG_ON(obj_priv->pages_refcount == 0); - BUG_ON(obj_priv->madv == __I915_MADV_PURGED); + BUG_ON(obj->madv == __I915_MADV_PURGED); - if (--obj_priv->pages_refcount != 0) - return; - - if (obj_priv->tiling_mode != I915_TILING_NONE) + if (obj->tiling_mode != I915_TILING_NONE) i915_gem_object_save_bit_17_swizzle(obj); - if (obj_priv->madv == I915_MADV_DONTNEED) - obj_priv->dirty = 0; + if (obj->madv == I915_MADV_DONTNEED) + obj->dirty = 0; for (i = 0; i < page_count; i++) { - if (obj_priv->dirty) - set_page_dirty(obj_priv->pages[i]); + if (obj->dirty) + set_page_dirty(obj->pages[i]); - if (obj_priv->madv == I915_MADV_WILLNEED) - mark_page_accessed(obj_priv->pages[i]); + if (obj->madv == I915_MADV_WILLNEED) + mark_page_accessed(obj->pages[i]); - page_cache_release(obj_priv->pages[i]); + page_cache_release(obj->pages[i]); } - obj_priv->dirty = 0; - - drm_free_large(obj_priv->pages); - obj_priv->pages = NULL; -} - -static uint32_t -i915_gem_next_request_seqno(struct drm_device *dev, - struct intel_ring_buffer *ring) -{ - drm_i915_private_t *dev_priv = dev->dev_private; + obj->dirty = 0; - ring->outstanding_lazy_request = true; - return dev_priv->next_seqno; + drm_free_large(obj->pages); + obj->pages = NULL; } -static void -i915_gem_object_move_to_active(struct drm_gem_object *obj, - struct intel_ring_buffer *ring) +void +i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, + struct intel_ring_buffer *ring, + u32 seqno) { - struct drm_device *dev = obj->dev; + struct drm_device *dev = obj->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - uint32_t seqno = i915_gem_next_request_seqno(dev, ring); BUG_ON(ring == NULL); - obj_priv->ring = ring; + obj->ring = ring; /* Add a reference if we're newly entering the active list. */ - if (!obj_priv->active) { - drm_gem_object_reference(obj); - obj_priv->active = 1; + if (!obj->active) { + drm_gem_object_reference(&obj->base); + obj->active = 1; } /* Move from whatever list we were on to the tail of execution. 
*/ - list_move_tail(&obj_priv->mm_list, &dev_priv->mm.active_list); - list_move_tail(&obj_priv->ring_list, &ring->active_list); - obj_priv->last_rendering_seqno = seqno; + list_move_tail(&obj->mm_list, &dev_priv->mm.active_list); + list_move_tail(&obj->ring_list, &ring->active_list); + + obj->last_rendering_seqno = seqno; + if (obj->fenced_gpu_access) { + struct drm_i915_fence_reg *reg; + + BUG_ON(obj->fence_reg == I915_FENCE_REG_NONE); + + obj->last_fenced_seqno = seqno; + obj->last_fenced_ring = ring; + + reg = &dev_priv->fence_regs[obj->fence_reg]; + list_move_tail(®->lru_list, &dev_priv->mm.fence_list); + } +} + +static void +i915_gem_object_move_off_active(struct drm_i915_gem_object *obj) +{ + list_del_init(&obj->ring_list); + obj->last_rendering_seqno = 0; } static void -i915_gem_object_move_to_flushing(struct drm_gem_object *obj) +i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj->dev; + struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - BUG_ON(!obj_priv->active); - list_move_tail(&obj_priv->mm_list, &dev_priv->mm.flushing_list); - list_del_init(&obj_priv->ring_list); - obj_priv->last_rendering_seqno = 0; + BUG_ON(!obj->active); + list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list); + + i915_gem_object_move_off_active(obj); +} + +static void +i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) +{ + struct drm_device *dev = obj->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + + if (obj->pin_count != 0) + list_move_tail(&obj->mm_list, &dev_priv->mm.pinned_list); + else + list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); + + BUG_ON(!list_empty(&obj->gpu_write_list)); + BUG_ON(!obj->active); + obj->ring = NULL; + + i915_gem_object_move_off_active(obj); + obj->fenced_gpu_access = false; + + obj->active = 0; + obj->pending_gpu_write = false; + drm_gem_object_unreference(&obj->base); + + WARN_ON(i915_verify_lists(dev)); } /* Immediately discard the backing storage */ static void -i915_gem_object_truncate(struct drm_gem_object *obj) +i915_gem_object_truncate(struct drm_i915_gem_object *obj) { - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); struct inode *inode; /* Our goal here is to return as much of the memory as @@ -1601,42 +1650,18 @@ i915_gem_object_truncate(struct drm_gem_object *obj) * backing pages, *now*. Here we mirror the actions taken * when by shmem_delete_inode() to release the backing store. 
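
Truncation is the kernel half of a userspace protocol: a client marks idle buffers I915_MADV_DONTNEED so the shrinker may purge them, then re-marks them I915_MADV_WILLNEED before reuse and checks whether the contents survived. A sketch (reupload_contents() is hypothetical; fields from i915_drm.h):

        struct drm_i915_gem_madvise madv = {
                .handle = handle,
                .madv   = I915_MADV_WILLNEED,   /* reclaim before reuse */
        };
        drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
        if (!madv.retained)
                reupload_contents(handle);      /* backing store was purged */
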
*/ - inode = obj->filp->f_path.dentry->d_inode; + inode = obj->base.filp->f_path.dentry->d_inode; truncate_inode_pages(inode->i_mapping, 0); if (inode->i_op->truncate_range) inode->i_op->truncate_range(inode, 0, (loff_t)-1); - obj_priv->madv = __I915_MADV_PURGED; + obj->madv = __I915_MADV_PURGED; } static inline int -i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv) -{ - return obj_priv->madv == I915_MADV_DONTNEED; -} - -static void -i915_gem_object_move_to_inactive(struct drm_gem_object *obj) +i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj->dev; - drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - - if (obj_priv->pin_count != 0) - list_move_tail(&obj_priv->mm_list, &dev_priv->mm.pinned_list); - else - list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list); - list_del_init(&obj_priv->ring_list); - - BUG_ON(!list_empty(&obj_priv->gpu_write_list)); - - obj_priv->last_rendering_seqno = 0; - obj_priv->ring = NULL; - if (obj_priv->active) { - obj_priv->active = 0; - drm_gem_object_unreference(obj); - } - WARN_ON(i915_verify_lists(dev)); + return obj->madv == I915_MADV_DONTNEED; } static void @@ -1644,37 +1669,27 @@ i915_gem_process_flushing_list(struct drm_device *dev, uint32_t flush_domains, struct intel_ring_buffer *ring) { - drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv, *next; + struct drm_i915_gem_object *obj, *next; - list_for_each_entry_safe(obj_priv, next, + list_for_each_entry_safe(obj, next, &ring->gpu_write_list, gpu_write_list) { - struct drm_gem_object *obj = &obj_priv->base; - - if (obj->write_domain & flush_domains) { - uint32_t old_write_domain = obj->write_domain; + if (obj->base.write_domain & flush_domains) { + uint32_t old_write_domain = obj->base.write_domain; - obj->write_domain = 0; - list_del_init(&obj_priv->gpu_write_list); - i915_gem_object_move_to_active(obj, ring); - - /* update the fence lru list */ - if (obj_priv->fence_reg != I915_FENCE_REG_NONE) { - struct drm_i915_fence_reg *reg = - &dev_priv->fence_regs[obj_priv->fence_reg]; - list_move_tail(®->lru_list, - &dev_priv->mm.fence_list); - } + obj->base.write_domain = 0; + list_del_init(&obj->gpu_write_list); + i915_gem_object_move_to_active(obj, ring, + i915_gem_next_request_seqno(dev, ring)); trace_i915_gem_object_change_domain(obj, - obj->read_domains, + obj->base.read_domains, old_write_domain); } } } -uint32_t +int i915_add_request(struct drm_device *dev, struct drm_file *file, struct drm_i915_gem_request *request, @@ -1684,17 +1699,17 @@ i915_add_request(struct drm_device *dev, struct drm_i915_file_private *file_priv = NULL; uint32_t seqno; int was_empty; + int ret; + + BUG_ON(request == NULL); if (file != NULL) file_priv = file->driver_priv; - if (request == NULL) { - request = kzalloc(sizeof(*request), GFP_KERNEL); - if (request == NULL) - return 0; - } + ret = ring->add_request(ring, &seqno); + if (ret) + return ret; - seqno = ring->add_request(dev, ring, 0); ring->outstanding_lazy_request = false; request->seqno = seqno; @@ -1718,26 +1733,7 @@ i915_add_request(struct drm_device *dev, queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); } - return seqno; -} - -/** - * Command execution barrier - * - * Ensures that all commands in the ring are finished - * before signalling the CPU - */ -static void -i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring) -{ - uint32_t flush_domains = 0; - - /* The 
sampler always gets flushed on i965 (sigh) */ - if (INTEL_INFO(dev)->gen >= 4) - flush_domains |= I915_GEM_DOMAIN_SAMPLER; - - ring->flush(dev, ring, - I915_GEM_DOMAIN_COMMAND, flush_domains); + return 0; } static inline void @@ -1770,62 +1766,76 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv, } while (!list_empty(&ring->active_list)) { - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; - obj_priv = list_first_entry(&ring->active_list, - struct drm_i915_gem_object, - ring_list); + obj = list_first_entry(&ring->active_list, + struct drm_i915_gem_object, + ring_list); - obj_priv->base.write_domain = 0; - list_del_init(&obj_priv->gpu_write_list); - i915_gem_object_move_to_inactive(&obj_priv->base); + obj->base.write_domain = 0; + list_del_init(&obj->gpu_write_list); + i915_gem_object_move_to_inactive(obj); + } +} + +static void i915_gem_reset_fences(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int i; + + for (i = 0; i < 16; i++) { + struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i]; + struct drm_i915_gem_object *obj = reg->obj; + + if (!obj) + continue; + + if (obj->tiling_mode) + i915_gem_release_mmap(obj); + + reg->obj->fence_reg = I915_FENCE_REG_NONE; + reg->obj->fenced_gpu_access = false; + reg->obj->last_fenced_seqno = 0; + reg->obj->last_fenced_ring = NULL; + i915_gem_clear_fence_reg(dev, reg); } } void i915_gem_reset(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; int i; - i915_gem_reset_ring_lists(dev_priv, &dev_priv->render_ring); - i915_gem_reset_ring_lists(dev_priv, &dev_priv->bsd_ring); - i915_gem_reset_ring_lists(dev_priv, &dev_priv->blt_ring); + for (i = 0; i < I915_NUM_RINGS; i++) + i915_gem_reset_ring_lists(dev_priv, &dev_priv->ring[i]); /* Remove anything from the flushing lists. The GPU cache is likely * to be lost on reset along with the data, so simply move the * lost bo to the inactive list. */ while (!list_empty(&dev_priv->mm.flushing_list)) { - obj_priv = list_first_entry(&dev_priv->mm.flushing_list, - struct drm_i915_gem_object, - mm_list); + obj = list_first_entry(&dev_priv->mm.flushing_list, + struct drm_i915_gem_object, + mm_list); - obj_priv->base.write_domain = 0; - list_del_init(&obj_priv->gpu_write_list); - i915_gem_object_move_to_inactive(&obj_priv->base); + obj->base.write_domain = 0; + list_del_init(&obj->gpu_write_list); + i915_gem_object_move_to_inactive(obj); } /* Move everything out of the GPU domains to ensure we do any * necessary invalidation upon reuse. 
*/ - list_for_each_entry(obj_priv, + list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) { - obj_priv->base.read_domains &= ~I915_GEM_GPU_DOMAINS; + obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS; } /* The fence registers are invalidated so clear them out */ - for (i = 0; i < 16; i++) { - struct drm_i915_fence_reg *reg; - - reg = &dev_priv->fence_regs[i]; - if (!reg->obj) - continue; - - i915_gem_clear_fence_reg(reg->obj); - } + i915_gem_reset_fences(dev); } /** @@ -1837,6 +1847,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev, { drm_i915_private_t *dev_priv = dev->dev_private; uint32_t seqno; + int i; if (!ring->status_page.page_addr || list_empty(&ring->request_list)) @@ -1844,7 +1855,12 @@ i915_gem_retire_requests_ring(struct drm_device *dev, WARN_ON(i915_verify_lists(dev)); - seqno = ring->get_seqno(dev, ring); + seqno = ring->get_seqno(ring); + + for (i = 0; i < I915_NUM_RINGS; i++) + if (seqno >= ring->sync_seqno[i]) + ring->sync_seqno[i] = 0; + while (!list_empty(&ring->request_list)) { struct drm_i915_gem_request *request; @@ -1866,18 +1882,16 @@ i915_gem_retire_requests_ring(struct drm_device *dev, * by the ringbuffer to the flushing/inactive lists as appropriate. */ while (!list_empty(&ring->active_list)) { - struct drm_gem_object *obj; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; - obj_priv = list_first_entry(&ring->active_list, - struct drm_i915_gem_object, - ring_list); + obj = list_first_entry(&ring->active_list, + struct drm_i915_gem_object, + ring_list); - if (!i915_seqno_passed(seqno, obj_priv->last_rendering_seqno)) + if (!i915_seqno_passed(seqno, obj->last_rendering_seqno)) break; - obj = &obj_priv->base; - if (obj->write_domain != 0) + if (obj->base.write_domain != 0) i915_gem_object_move_to_flushing(obj); else i915_gem_object_move_to_inactive(obj); @@ -1885,7 +1899,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev, if (unlikely (dev_priv->trace_irq_seqno && i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) { - ring->user_irq_put(dev, ring); + ring->irq_put(ring); dev_priv->trace_irq_seqno = 0; } @@ -1896,24 +1910,24 @@ void i915_gem_retire_requests(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; + int i; if (!list_empty(&dev_priv->mm.deferred_free_list)) { - struct drm_i915_gem_object *obj_priv, *tmp; + struct drm_i915_gem_object *obj, *next; /* We must be careful that during unbind() we do not * accidentally infinitely recurse into retire requests. 
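Both retirement loops above stop at the first object whose last_rendering_seqno has not been reached yet. That comparison has to survive the 32-bit seqno counter wrapping, which the driver's i915_seqno_passed() helper handles with a signed subtraction; a standalone model of the same trick:

    #include <assert.h>
    #include <stdint.h>

    /* True if seq1 is at or after seq2, even across a 32-bit wrap: the
     * unsigned difference is reinterpreted as signed, so any two values
     * less than 2^31 apart compare correctly. */
    static int seqno_passed(uint32_t seq1, uint32_t seq2)
    {
        return (int32_t)(seq1 - seq2) >= 0;
    }

    int main(void)
    {
        assert(seqno_passed(100, 100));        /* equal counts as passed */
        assert(seqno_passed(101, 100));
        assert(!seqno_passed(100, 101));
        /* Just past the wrap: 3 is "after" 0xfffffffd. */
        assert(seqno_passed(3, 0xfffffffdu));
        assert(!seqno_passed(0xfffffffdu, 3));
        return 0;
    }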
* Currently: * retire -> free -> unbind -> wait -> retire_ring */ - list_for_each_entry_safe(obj_priv, tmp, + list_for_each_entry_safe(obj, next, &dev_priv->mm.deferred_free_list, mm_list) - i915_gem_free_object_tail(&obj_priv->base); + i915_gem_free_object_tail(obj); } - i915_gem_retire_requests_ring(dev, &dev_priv->render_ring); - i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring); - i915_gem_retire_requests_ring(dev, &dev_priv->blt_ring); + for (i = 0; i < I915_NUM_RINGS; i++) + i915_gem_retire_requests_ring(dev, &dev_priv->ring[i]); } static void @@ -1935,9 +1949,9 @@ i915_gem_retire_work_handler(struct work_struct *work) i915_gem_retire_requests(dev); if (!dev_priv->mm.suspended && - (!list_empty(&dev_priv->render_ring.request_list) || - !list_empty(&dev_priv->bsd_ring.request_list) || - !list_empty(&dev_priv->blt_ring.request_list))) + (!list_empty(&dev_priv->ring[RCS].request_list) || + !list_empty(&dev_priv->ring[VCS].request_list) || + !list_empty(&dev_priv->ring[BCS].request_list))) queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); mutex_unlock(&dev->struct_mutex); } @@ -1955,14 +1969,23 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, if (atomic_read(&dev_priv->mm.wedged)) return -EAGAIN; - if (ring->outstanding_lazy_request) { - seqno = i915_add_request(dev, NULL, NULL, ring); - if (seqno == 0) + if (seqno == ring->outstanding_lazy_request) { + struct drm_i915_gem_request *request; + + request = kzalloc(sizeof(*request), GFP_KERNEL); + if (request == NULL) return -ENOMEM; + + ret = i915_add_request(dev, NULL, request, ring); + if (ret) { + kfree(request); + return ret; + } + + seqno = request->seqno; } - BUG_ON(seqno == dev_priv->next_seqno); - if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) { + if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) { if (HAS_PCH_SPLIT(dev)) ier = I915_READ(DEIER) | I915_READ(GTIER); else @@ -1976,21 +1999,23 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, trace_i915_gem_request_wait_begin(dev, seqno); - ring->waiting_gem_seqno = seqno; - ring->user_irq_get(dev, ring); - if (interruptible) - ret = wait_event_interruptible(ring->irq_queue, - i915_seqno_passed( - ring->get_seqno(dev, ring), seqno) - || atomic_read(&dev_priv->mm.wedged)); - else - wait_event(ring->irq_queue, - i915_seqno_passed( - ring->get_seqno(dev, ring), seqno) - || atomic_read(&dev_priv->mm.wedged)); + ring->waiting_seqno = seqno; + if (ring->irq_get(ring)) { + if (interruptible) + ret = wait_event_interruptible(ring->irq_queue, + i915_seqno_passed(ring->get_seqno(ring), seqno) + || atomic_read(&dev_priv->mm.wedged)); + else + wait_event(ring->irq_queue, + i915_seqno_passed(ring->get_seqno(ring), seqno) + || atomic_read(&dev_priv->mm.wedged)); - ring->user_irq_put(dev, ring); - ring->waiting_gem_seqno = 0; + ring->irq_put(ring); + } else if (wait_for(i915_seqno_passed(ring->get_seqno(ring), + seqno) || + atomic_read(&dev_priv->mm.wedged), 3000)) + ret = -EBUSY; + ring->waiting_seqno = 0; trace_i915_gem_request_wait_end(dev, seqno); } @@ -1999,7 +2024,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, if (ret && ret != -ERESTARTSYS) DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n", - __func__, ret, seqno, ring->get_seqno(dev, ring), + __func__, ret, seqno, ring->get_seqno(ring), dev_priv->next_seqno); /* Directly dispatch request retiring. 
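When ring->irq_get() fails in the wait path above, the code degrades to the bounded-polling arm, the wait_for(..., 3000) call that eventually yields -EBUSY, instead of sleeping on the IRQ queue. A userspace sketch of that fallback shape, where the condition callback is a hypothetical stand-in for the seqno check:

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    /* Poll cond(arg) until it holds or timeout_ms elapses. */
    static bool poll_until(bool (*cond)(void *), void *arg, long timeout_ms)
    {
        struct timespec start, now;
        long elapsed;

        clock_gettime(CLOCK_MONOTONIC, &start);
        for (;;) {
            if (cond(arg))
                return true;
            clock_gettime(CLOCK_MONOTONIC, &now);
            elapsed = (now.tv_sec - start.tv_sec) * 1000 +
                      (now.tv_nsec - start.tv_nsec) / 1000000;
            if (elapsed > timeout_ms)
                return false;      /* the caller maps this to -EBUSY */
        }
    }

    static bool counter_done(void *arg)
    {
        return --*(int *)arg <= 0;    /* "completes" after a few polls */
    }

    int main(void)
    {
        int polls = 5;
        printf("%s\n", poll_until(counter_done, &polls, 100)
               ? "done" : "timeout");
        return 0;
    }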
While we have the work queue @@ -2024,70 +2049,30 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno, return i915_do_wait_request(dev, seqno, 1, ring); } -static void -i915_gem_flush_ring(struct drm_device *dev, - struct drm_file *file_priv, - struct intel_ring_buffer *ring, - uint32_t invalidate_domains, - uint32_t flush_domains) -{ - ring->flush(dev, ring, invalidate_domains, flush_domains); - i915_gem_process_flushing_list(dev, flush_domains, ring); -} - -static void -i915_gem_flush(struct drm_device *dev, - struct drm_file *file_priv, - uint32_t invalidate_domains, - uint32_t flush_domains, - uint32_t flush_rings) -{ - drm_i915_private_t *dev_priv = dev->dev_private; - - if (flush_domains & I915_GEM_DOMAIN_CPU) - drm_agp_chipset_flush(dev); - - if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) { - if (flush_rings & RING_RENDER) - i915_gem_flush_ring(dev, file_priv, - &dev_priv->render_ring, - invalidate_domains, flush_domains); - if (flush_rings & RING_BSD) - i915_gem_flush_ring(dev, file_priv, - &dev_priv->bsd_ring, - invalidate_domains, flush_domains); - if (flush_rings & RING_BLT) - i915_gem_flush_ring(dev, file_priv, - &dev_priv->blt_ring, - invalidate_domains, flush_domains); - } -} - /** * Ensures that all rendering to the object has completed and the object is * safe to unbind from the GTT or access from the CPU. */ -static int -i915_gem_object_wait_rendering(struct drm_gem_object *obj, +int +i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, bool interruptible) { - struct drm_device *dev = obj->dev; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + struct drm_device *dev = obj->base.dev; int ret; /* This function only exists to support waiting for existing rendering, * not for emitting required flushes. */ - BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0); + BUG_ON((obj->base.write_domain & I915_GEM_GPU_DOMAINS) != 0); /* If there is rendering queued on the buffer being evicted, wait for * it. */ - if (obj_priv->active) { + if (obj->active) { ret = i915_do_wait_request(dev, - obj_priv->last_rendering_seqno, + obj->last_rendering_seqno, interruptible, - obj_priv->ring); + obj->ring); if (ret) return ret; } @@ -2099,17 +2084,14 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj, * Unbinds an object from the GTT aperture. 
*/ int -i915_gem_object_unbind(struct drm_gem_object *obj) +i915_gem_object_unbind(struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); int ret = 0; - if (obj_priv->gtt_space == NULL) + if (obj->gtt_space == NULL) return 0; - if (obj_priv->pin_count != 0) { + if (obj->pin_count != 0) { DRM_ERROR("Attempting to unbind pinned buffer\n"); return -EINVAL; } @@ -2132,27 +2114,27 @@ i915_gem_object_unbind(struct drm_gem_object *obj) */ if (ret) { i915_gem_clflush_object(obj); - obj->read_domains = obj->write_domain = I915_GEM_DOMAIN_CPU; + obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU; } /* release the fence reg _after_ flushing */ - if (obj_priv->fence_reg != I915_FENCE_REG_NONE) - i915_gem_clear_fence_reg(obj); - - drm_unbind_agp(obj_priv->agp_mem); - drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE); + ret = i915_gem_object_put_fence(obj); + if (ret == -ERESTARTSYS) + return ret; - i915_gem_object_put_pages(obj); - BUG_ON(obj_priv->pages_refcount); + i915_gem_gtt_unbind_object(obj); + i915_gem_object_put_pages_gtt(obj); - i915_gem_info_remove_gtt(dev_priv, obj->size); - list_del_init(&obj_priv->mm_list); + list_del_init(&obj->gtt_list); + list_del_init(&obj->mm_list); + /* Avoid an unnecessary call to unbind on rebind. */ + obj->map_and_fenceable = true; - drm_mm_put_block(obj_priv->gtt_space); - obj_priv->gtt_space = NULL; - obj_priv->gtt_offset = 0; + drm_mm_put_block(obj->gtt_space); + obj->gtt_space = NULL; + obj->gtt_offset = 0; - if (i915_gem_object_is_purgeable(obj_priv)) + if (i915_gem_object_is_purgeable(obj)) i915_gem_object_truncate(obj); trace_i915_gem_object_unbind(obj); @@ -2160,14 +2142,25 @@ i915_gem_object_unbind(struct drm_gem_object *obj) return ret; } +void +i915_gem_flush_ring(struct drm_device *dev, + struct intel_ring_buffer *ring, + uint32_t invalidate_domains, + uint32_t flush_domains) +{ + ring->flush(ring, invalidate_domains, flush_domains); + i915_gem_process_flushing_list(dev, flush_domains, ring); +} + static int i915_ring_idle(struct drm_device *dev, struct intel_ring_buffer *ring) { if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list)) return 0; - i915_gem_flush_ring(dev, NULL, ring, - I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); + if (!list_empty(&ring->gpu_write_list)) + i915_gem_flush_ring(dev, ring, + I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); return i915_wait_request(dev, i915_gem_next_request_seqno(dev, ring), ring); @@ -2178,7 +2171,7 @@ i915_gpu_idle(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; bool lists_empty; - int ret; + int ret, i; lists_empty = (list_empty(&dev_priv->mm.flushing_list) && list_empty(&dev_priv->mm.active_list)); @@ -2186,258 +2179,296 @@ i915_gpu_idle(struct drm_device *dev) return 0; /* Flush everything onto the inactive list. 
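i915_ring_idle() above now emits a flush only when the ring actually has entries on its gpu_write_list, then waits for the ring's next request seqno; i915_gpu_idle() simply applies that to every ring. A toy model of the flush-then-wait ordering (every type and field here is invented for the illustration):

    #include <stdbool.h>
    #include <stdio.h>

    struct toy_ring {
        const char *name;
        bool queued_writes;      /* anything on the gpu_write_list? */
        unsigned int submitted;  /* last seqno handed to the ring */
        unsigned int completed;  /* last seqno the "GPU" finished */
    };

    /* A flush is itself a request, so it advances the submitted seqno
     * and the wait below covers it. */
    static void toy_flush(struct toy_ring *r)
    {
        r->submitted++;
        r->queued_writes = false;
    }

    static void toy_ring_idle(struct toy_ring *r)
    {
        if (r->queued_writes)     /* flush only when actually needed */
            toy_flush(r);
        while (r->completed < r->submitted)
            r->completed++;       /* stand-in for the real seqno wait */
    }

    int main(void)
    {
        struct toy_ring rings[] = {
            { "render", true, 10, 8 },
            { "bsd",    false, 4, 4 },
            { "blt",    true,  7, 7 },
        };
        for (int i = 0; i < 3; i++) {
            toy_ring_idle(&rings[i]);
            printf("%s idle at seqno %u\n", rings[i].name,
                   rings[i].completed);
        }
        return 0;
    }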
*/ - ret = i915_ring_idle(dev, &dev_priv->render_ring); - if (ret) - return ret; - - ret = i915_ring_idle(dev, &dev_priv->bsd_ring); - if (ret) - return ret; - - ret = i915_ring_idle(dev, &dev_priv->blt_ring); - if (ret) - return ret; - - return 0; -} - -static int -i915_gem_object_get_pages(struct drm_gem_object *obj, - gfp_t gfpmask) -{ - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - int page_count, i; - struct address_space *mapping; - struct inode *inode; - struct page *page; - - BUG_ON(obj_priv->pages_refcount - == DRM_I915_GEM_OBJECT_MAX_PAGES_REFCOUNT); - - if (obj_priv->pages_refcount++ != 0) - return 0; - - /* Get the list of pages out of our struct file. They'll be pinned - * at this point until we release them. - */ - page_count = obj->size / PAGE_SIZE; - BUG_ON(obj_priv->pages != NULL); - obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *)); - if (obj_priv->pages == NULL) { - obj_priv->pages_refcount--; - return -ENOMEM; - } - - inode = obj->filp->f_path.dentry->d_inode; - mapping = inode->i_mapping; - for (i = 0; i < page_count; i++) { - page = read_cache_page_gfp(mapping, i, - GFP_HIGHUSER | - __GFP_COLD | - __GFP_RECLAIMABLE | - gfpmask); - if (IS_ERR(page)) - goto err_pages; - - obj_priv->pages[i] = page; + for (i = 0; i < I915_NUM_RINGS; i++) { + ret = i915_ring_idle(dev, &dev_priv->ring[i]); + if (ret) + return ret; } - if (obj_priv->tiling_mode != I915_TILING_NONE) - i915_gem_object_do_bit_17_swizzle(obj); - return 0; - -err_pages: - while (i--) - page_cache_release(obj_priv->pages[i]); - - drm_free_large(obj_priv->pages); - obj_priv->pages = NULL; - obj_priv->pages_refcount--; - return PTR_ERR(page); } -static void sandybridge_write_fence_reg(struct drm_i915_fence_reg *reg) +static int sandybridge_write_fence_reg(struct drm_i915_gem_object *obj, + struct intel_ring_buffer *pipelined) { - struct drm_gem_object *obj = reg->obj; - struct drm_device *dev = obj->dev; + struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - int regnum = obj_priv->fence_reg; + u32 size = obj->gtt_space->size; + int regnum = obj->fence_reg; uint64_t val; - val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) & - 0xfffff000) << 32; - val |= obj_priv->gtt_offset & 0xfffff000; - val |= (uint64_t)((obj_priv->stride / 128) - 1) << + val = (uint64_t)((obj->gtt_offset + size - 4096) & + 0xfffff000) << 32; + val |= obj->gtt_offset & 0xfffff000; + val |= (uint64_t)((obj->stride / 128) - 1) << SANDYBRIDGE_FENCE_PITCH_SHIFT; - if (obj_priv->tiling_mode == I915_TILING_Y) + if (obj->tiling_mode == I915_TILING_Y) val |= 1 << I965_FENCE_TILING_Y_SHIFT; val |= I965_FENCE_REG_VALID; - I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (regnum * 8), val); + if (pipelined) { + int ret = intel_ring_begin(pipelined, 6); + if (ret) + return ret; + + intel_ring_emit(pipelined, MI_NOOP); + intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2)); + intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8); + intel_ring_emit(pipelined, (u32)val); + intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8 + 4); + intel_ring_emit(pipelined, (u32)(val >> 32)); + intel_ring_advance(pipelined); + } else + I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + regnum * 8, val); + + return 0; } -static void i965_write_fence_reg(struct drm_i915_fence_reg *reg) +static int i965_write_fence_reg(struct drm_i915_gem_object *obj, + struct intel_ring_buffer *pipelined) { - struct drm_gem_object *obj = reg->obj; - struct 
drm_device *dev = obj->dev; + struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - int regnum = obj_priv->fence_reg; + u32 size = obj->gtt_space->size; + int regnum = obj->fence_reg; uint64_t val; - val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) & + val = (uint64_t)((obj->gtt_offset + size - 4096) & 0xfffff000) << 32; - val |= obj_priv->gtt_offset & 0xfffff000; - val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT; - if (obj_priv->tiling_mode == I915_TILING_Y) + val |= obj->gtt_offset & 0xfffff000; + val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT; + if (obj->tiling_mode == I915_TILING_Y) val |= 1 << I965_FENCE_TILING_Y_SHIFT; val |= I965_FENCE_REG_VALID; - I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val); + if (pipelined) { + int ret = intel_ring_begin(pipelined, 6); + if (ret) + return ret; + + intel_ring_emit(pipelined, MI_NOOP); + intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2)); + intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8); + intel_ring_emit(pipelined, (u32)val); + intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8 + 4); + intel_ring_emit(pipelined, (u32)(val >> 32)); + intel_ring_advance(pipelined); + } else + I915_WRITE64(FENCE_REG_965_0 + regnum * 8, val); + + return 0; } -static void i915_write_fence_reg(struct drm_i915_fence_reg *reg) +static int i915_write_fence_reg(struct drm_i915_gem_object *obj, + struct intel_ring_buffer *pipelined) { - struct drm_gem_object *obj = reg->obj; - struct drm_device *dev = obj->dev; + struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - int regnum = obj_priv->fence_reg; + u32 size = obj->gtt_space->size; + u32 fence_reg, val, pitch_val; int tile_width; - uint32_t fence_reg, val; - uint32_t pitch_val; - if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) || - (obj_priv->gtt_offset & (obj->size - 1))) { - WARN(1, "%s: object 0x%08x not 1M or size (0x%zx) aligned\n", - __func__, obj_priv->gtt_offset, obj->size); - return; - } + if (WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) || + (size & -size) != size || + (obj->gtt_offset & (size - 1)), + "object 0x%08x [fenceable? 
%d] not 1M or pot-size (0x%08x) aligned\n", + obj->gtt_offset, obj->map_and_fenceable, size)) + return -EINVAL; - if (obj_priv->tiling_mode == I915_TILING_Y && - HAS_128_BYTE_Y_TILING(dev)) + if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)) tile_width = 128; else tile_width = 512; /* Note: pitch better be a power of two tile widths */ - pitch_val = obj_priv->stride / tile_width; + pitch_val = obj->stride / tile_width; pitch_val = ffs(pitch_val) - 1; - if (obj_priv->tiling_mode == I915_TILING_Y && - HAS_128_BYTE_Y_TILING(dev)) - WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL); - else - WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL); - - val = obj_priv->gtt_offset; - if (obj_priv->tiling_mode == I915_TILING_Y) + val = obj->gtt_offset; + if (obj->tiling_mode == I915_TILING_Y) val |= 1 << I830_FENCE_TILING_Y_SHIFT; - val |= I915_FENCE_SIZE_BITS(obj->size); + val |= I915_FENCE_SIZE_BITS(size); val |= pitch_val << I830_FENCE_PITCH_SHIFT; val |= I830_FENCE_REG_VALID; - if (regnum < 8) - fence_reg = FENCE_REG_830_0 + (regnum * 4); + fence_reg = obj->fence_reg; + if (fence_reg < 8) + fence_reg = FENCE_REG_830_0 + fence_reg * 4; else - fence_reg = FENCE_REG_945_8 + ((regnum - 8) * 4); - I915_WRITE(fence_reg, val); + fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4; + + if (pipelined) { + int ret = intel_ring_begin(pipelined, 4); + if (ret) + return ret; + + intel_ring_emit(pipelined, MI_NOOP); + intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1)); + intel_ring_emit(pipelined, fence_reg); + intel_ring_emit(pipelined, val); + intel_ring_advance(pipelined); + } else + I915_WRITE(fence_reg, val); + + return 0; } -static void i830_write_fence_reg(struct drm_i915_fence_reg *reg) +static int i830_write_fence_reg(struct drm_i915_gem_object *obj, + struct intel_ring_buffer *pipelined) { - struct drm_gem_object *obj = reg->obj; - struct drm_device *dev = obj->dev; + struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - int regnum = obj_priv->fence_reg; + u32 size = obj->gtt_space->size; + int regnum = obj->fence_reg; uint32_t val; uint32_t pitch_val; - uint32_t fence_size_bits; - if ((obj_priv->gtt_offset & ~I830_FENCE_START_MASK) || - (obj_priv->gtt_offset & (obj->size - 1))) { - WARN(1, "%s: object 0x%08x not 512K or size aligned\n", - __func__, obj_priv->gtt_offset); - return; - } + if (WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) || + (size & -size) != size || + (obj->gtt_offset & (size - 1)), + "object 0x%08x not 512K or pot-size 0x%08x aligned\n", + obj->gtt_offset, size)) + return -EINVAL; - pitch_val = obj_priv->stride / 128; + pitch_val = obj->stride / 128; pitch_val = ffs(pitch_val) - 1; - WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL); - val = obj_priv->gtt_offset; - if (obj_priv->tiling_mode == I915_TILING_Y) + val = obj->gtt_offset; + if (obj->tiling_mode == I915_TILING_Y) val |= 1 << I830_FENCE_TILING_Y_SHIFT; - fence_size_bits = I830_FENCE_SIZE_BITS(obj->size); - WARN_ON(fence_size_bits & ~0x00000f00); - val |= fence_size_bits; + val |= I830_FENCE_SIZE_BITS(size); val |= pitch_val << I830_FENCE_PITCH_SHIFT; val |= I830_FENCE_REG_VALID; - I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val); + if (pipelined) { + int ret = intel_ring_begin(pipelined, 4); + if (ret) + return ret; + + intel_ring_emit(pipelined, MI_NOOP); + intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1)); + intel_ring_emit(pipelined, FENCE_REG_830_0 + regnum*4); + intel_ring_emit(pipelined, val); + 
intel_ring_advance(pipelined); + } else + I915_WRITE(FENCE_REG_830_0 + regnum * 4, val); + + return 0; } -static int i915_find_fence_reg(struct drm_device *dev, - bool interruptible) +static bool ring_passed_seqno(struct intel_ring_buffer *ring, u32 seqno) +{ + return i915_seqno_passed(ring->get_seqno(ring), seqno); +} + +static int +i915_gem_object_flush_fence(struct drm_i915_gem_object *obj, + struct intel_ring_buffer *pipelined, + bool interruptible) +{ + int ret; + + if (obj->fenced_gpu_access) { + if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) + i915_gem_flush_ring(obj->base.dev, + obj->last_fenced_ring, + 0, obj->base.write_domain); + + obj->fenced_gpu_access = false; + } + + if (obj->last_fenced_seqno && pipelined != obj->last_fenced_ring) { + if (!ring_passed_seqno(obj->last_fenced_ring, + obj->last_fenced_seqno)) { + ret = i915_do_wait_request(obj->base.dev, + obj->last_fenced_seqno, + interruptible, + obj->last_fenced_ring); + if (ret) + return ret; + } + + obj->last_fenced_seqno = 0; + obj->last_fenced_ring = NULL; + } + + return 0; +} + +int +i915_gem_object_put_fence(struct drm_i915_gem_object *obj) +{ + int ret; + + if (obj->tiling_mode) + i915_gem_release_mmap(obj); + + ret = i915_gem_object_flush_fence(obj, NULL, true); + if (ret) + return ret; + + if (obj->fence_reg != I915_FENCE_REG_NONE) { + struct drm_i915_private *dev_priv = obj->base.dev->dev_private; + i915_gem_clear_fence_reg(obj->base.dev, + &dev_priv->fence_regs[obj->fence_reg]); + + obj->fence_reg = I915_FENCE_REG_NONE; + } + + return 0; +} + +static struct drm_i915_fence_reg * +i915_find_fence_reg(struct drm_device *dev, + struct intel_ring_buffer *pipelined) { - struct drm_i915_fence_reg *reg = NULL; - struct drm_i915_gem_object *obj_priv = NULL; struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_gem_object *obj = NULL; - int i, avail, ret; + struct drm_i915_fence_reg *reg, *first, *avail; + int i; /* First try to find a free reg */ - avail = 0; + avail = NULL; for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) { reg = &dev_priv->fence_regs[i]; if (!reg->obj) - return i; + return reg; - obj_priv = to_intel_bo(reg->obj); - if (!obj_priv->pin_count) - avail++; + if (!reg->obj->pin_count) + avail = reg; } - if (avail == 0) - return -ENOSPC; + if (avail == NULL) + return NULL; /* None available, try to steal one or wait for a user to finish */ - i = I915_FENCE_REG_NONE; - list_for_each_entry(reg, &dev_priv->mm.fence_list, - lru_list) { - obj = reg->obj; - obj_priv = to_intel_bo(obj); - - if (obj_priv->pin_count) + avail = first = NULL; + list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) { + if (reg->obj->pin_count) continue; - /* found one! */ - i = obj_priv->fence_reg; - break; + if (first == NULL) + first = reg; + + if (!pipelined || + !reg->obj->last_fenced_ring || + reg->obj->last_fenced_ring == pipelined) { + avail = reg; + break; + } } - BUG_ON(i == I915_FENCE_REG_NONE); + if (avail == NULL) + avail = first; - /* We only have a reference on obj from the active list. put_fence_reg - * might drop that one, causing a use-after-free in it. So hold a - * private reference to obj like the other callers of put_fence_reg - * (set_tiling ioctl) do. 
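i915_find_fence_reg() above settles on a register in three steps: any register with no object attached wins outright; otherwise the fence LRU is walked skipping pinned objects, preferring a victim whose last fenced ring matches the requested one; failing that, the plain LRU head is taken. A compact model of that ordering over an array (the struct and its fields are invented for the sketch):

    #include <stdio.h>

    struct toy_fence {
        int in_use;      /* object attached */
        int pinned;      /* may not be stolen */
        int last_ring;   /* ring that last fenced through it, -1 none */
        int lru_rank;    /* 0 = least recently used */
    };

    /* Return the index to (re)use, or -1 if everything is pinned. */
    static int find_fence(struct toy_fence *regs, int n, int wanted_ring)
    {
        int i, rank, first = -1;

        for (i = 0; i < n; i++)          /* 1) a completely free reg */
            if (!regs[i].in_use)
                return i;

        for (rank = 0; rank < n; rank++) /* 2) walk in LRU order */
            for (i = 0; i < n; i++) {
                if (regs[i].lru_rank != rank || regs[i].pinned)
                    continue;
                if (first < 0)
                    first = i;           /* plain LRU fallback */
                if (wanted_ring < 0 ||
                    regs[i].last_ring < 0 ||
                    regs[i].last_ring == wanted_ring)
                    return i;            /* avoids a cross-ring stall */
            }

        return first;                    /* 3) may still be -1 */
    }

    int main(void)
    {
        struct toy_fence regs[] = {
            { 1, 1, 0, 0 },   /* pinned, never stolen */
            { 1, 0, 1, 1 },   /* older, but last used by ring 1 */
            { 1, 0, 2, 2 },   /* younger, last used by ring 2 */
        };
        printf("steal reg %d\n", find_fence(regs, 3, 2));   /* 2 */
        return 0;
    }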
*/ - drm_gem_object_reference(obj); - ret = i915_gem_object_put_fence_reg(obj, interruptible); - drm_gem_object_unreference(obj); - if (ret != 0) - return ret; - - return i; + return avail; } /** - * i915_gem_object_get_fence_reg - set up a fence reg for an object + * i915_gem_object_get_fence - set up a fence reg for an object * @obj: object to map through a fence reg + * @pipelined: ring on which to queue the change, or NULL for CPU access + * @interruptible: whether waiting for the register to retire may be interrupted * * When mapping objects through the GTT, userspace wants to be able to write * to them without having to worry about swizzling if the object is tiled. @@ -2449,72 +2480,138 @@ static int i915_find_fence_reg(struct drm_device *dev, * and tiling format. */ int -i915_gem_object_get_fence_reg(struct drm_gem_object *obj, - bool interruptible) +i915_gem_object_get_fence(struct drm_i915_gem_object *obj, + struct intel_ring_buffer *pipelined, + bool interruptible) { - struct drm_device *dev = obj->dev; + struct drm_device *dev = obj->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - struct drm_i915_fence_reg *reg = NULL; + struct drm_i915_fence_reg *reg; int ret; - /* Just update our place in the LRU if our fence is getting used. */ - if (obj_priv->fence_reg != I915_FENCE_REG_NONE) { - reg = &dev_priv->fence_regs[obj_priv->fence_reg]; + /* XXX disable pipelining. There are bugs. Shocking. */ + pipelined = NULL; + + /* Just update our place in the LRU if our fence is getting reused. */ + if (obj->fence_reg != I915_FENCE_REG_NONE) { + reg = &dev_priv->fence_regs[obj->fence_reg]; list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list); + + if (!obj->fenced_gpu_access && !obj->last_fenced_seqno) + pipelined = NULL; + + if (!pipelined) { + if (reg->setup_seqno) { + if (!ring_passed_seqno(obj->last_fenced_ring, + reg->setup_seqno)) { + ret = i915_do_wait_request(obj->base.dev, + reg->setup_seqno, + interruptible, + obj->last_fenced_ring); + if (ret) + return ret; + } + + reg->setup_seqno = 0; + } + } else if (obj->last_fenced_ring && + obj->last_fenced_ring != pipelined) { + ret = i915_gem_object_flush_fence(obj, + pipelined, + interruptible); + if (ret) + return ret; + } else if (obj->tiling_changed) { + if (obj->fenced_gpu_access) { + if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) + i915_gem_flush_ring(obj->base.dev, obj->ring, + 0, obj->base.write_domain); + + obj->fenced_gpu_access = false; + } + } + + if (!obj->fenced_gpu_access && !obj->last_fenced_seqno) + pipelined = NULL; + BUG_ON(!pipelined && reg->setup_seqno); + + if (obj->tiling_changed) { + if (pipelined) { + reg->setup_seqno = + i915_gem_next_request_seqno(dev, pipelined); + obj->last_fenced_seqno = reg->setup_seqno; + obj->last_fenced_ring = pipelined; + } + goto update; + } + + return 0; } - switch (obj_priv->tiling_mode) { - case I915_TILING_NONE: - WARN(1, "allocating a fence for non-tiled object?\n"); - break; - case I915_TILING_X: - if (!obj_priv->stride) - return -EINVAL; - WARN((obj_priv->stride & (512 - 1)), - "object 0x%08x is X tiled but has non-512B pitch\n", - obj_priv->gtt_offset); - break; - case I915_TILING_Y: - if (!obj_priv->stride) - return -EINVAL; - WARN((obj_priv->stride & (128 - 1)), - "object 0x%08x is Y tiled but has non-128B pitch\n", - obj_priv->gtt_offset); - break; - } + reg = i915_find_fence_reg(dev, pipelined); + if (reg == NULL) + return -ENOSPC; - ret = i915_find_fence_reg(dev, interruptible); - if (ret < 0) + ret = 
i915_gem_object_flush_fence(obj, pipelined, interruptible); + if (ret) + return ret; - obj_priv->fence_reg = ret; - reg = &dev_priv->fence_regs[obj_priv->fence_reg]; - list_add_tail(&reg->lru_list, &dev_priv->mm.fence_list); + if (reg->obj) { + struct drm_i915_gem_object *old = reg->obj; + + drm_gem_object_reference(&old->base); + + if (old->tiling_mode) + i915_gem_release_mmap(old); + + ret = i915_gem_object_flush_fence(old, + pipelined, + interruptible); + if (ret) { + drm_gem_object_unreference(&old->base); + return ret; + } + + if (old->last_fenced_seqno == 0 && obj->last_fenced_seqno == 0) + pipelined = NULL; + + old->fence_reg = I915_FENCE_REG_NONE; + old->last_fenced_ring = pipelined; + old->last_fenced_seqno = + pipelined ? i915_gem_next_request_seqno(dev, pipelined) : 0; + + drm_gem_object_unreference(&old->base); + } else if (obj->last_fenced_seqno == 0) + pipelined = NULL; reg->obj = obj; + list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list); + obj->fence_reg = reg - dev_priv->fence_regs; + obj->last_fenced_ring = pipelined; + + reg->setup_seqno = + pipelined ? i915_gem_next_request_seqno(dev, pipelined) : 0; + obj->last_fenced_seqno = reg->setup_seqno; +update: + obj->tiling_changed = false; switch (INTEL_INFO(dev)->gen) { case 6: - sandybridge_write_fence_reg(reg); + ret = sandybridge_write_fence_reg(obj, pipelined); break; case 5: case 4: - i965_write_fence_reg(reg); + ret = i965_write_fence_reg(obj, pipelined); break; case 3: - i915_write_fence_reg(reg); + ret = i915_write_fence_reg(obj, pipelined); break; case 2: - i830_write_fence_reg(reg); + ret = i830_write_fence_reg(obj, pipelined); break; } - trace_i915_gem_object_get_fence(obj, obj_priv->fence_reg, - obj_priv->tiling_mode); - - return 0; + return ret; } /** @@ -2522,154 +2619,127 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj, * @obj: object to clear * * Zeroes out the fence register itself and clears out the associated - * data structures in dev_priv and obj_priv. + * data structures in dev_priv and obj. */ static void -i915_gem_clear_fence_reg(struct drm_gem_object *obj) +i915_gem_clear_fence_reg(struct drm_device *dev, + struct drm_i915_fence_reg *reg) { - struct drm_device *dev = obj->dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - struct drm_i915_fence_reg *reg = - &dev_priv->fence_regs[obj_priv->fence_reg]; - uint32_t fence_reg; + uint32_t fence_reg = reg - dev_priv->fence_regs; switch (INTEL_INFO(dev)->gen) { case 6: - I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + - (obj_priv->fence_reg * 8), 0); + I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + fence_reg*8, 0); break; case 5: case 4: - I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0); + I915_WRITE64(FENCE_REG_965_0 + fence_reg*8, 0); break; case 3: - if (obj_priv->fence_reg >= 8) - fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg - 8) * 4; + if (fence_reg >= 8) + fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4; else case 2: - fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4; + fence_reg = FENCE_REG_830_0 + fence_reg * 4; I915_WRITE(fence_reg, 0); break; } - reg->obj = NULL; - obj_priv->fence_reg = I915_FENCE_REG_NONE; list_del_init(&reg->lru_list); -} - -/** - * i915_gem_object_put_fence_reg - waits on outstanding fenced access - * to the buffer to finish, and then resets the fence register. - * @obj: tiled object holding a fence register. 
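Each write_fence_reg variant dispatched above packs the same ingredients into a fence register: the start and end of the fenced GTT range, the pitch in tile widths, a Y-tiling flag and a valid bit; the i965-style layout uses a full 64-bit register with the end address in the high dword. A standalone encoder using the same arithmetic as the i965 path (the shift positions are illustrative placeholders, not the real register layout from i915_reg.h):

    #include <stdint.h>
    #include <stdio.h>

    /* Placeholder bit positions for the sketch. */
    #define FENCE_PITCH_SHIFT   2
    #define FENCE_TILING_Y_BIT  (1u << 1)
    #define FENCE_VALID_BIT     (1u << 0)

    /* High dword: last page of the range; low dword: first page,
     * pitch in 128-byte units minus one, tiling mode, valid bit. */
    static uint64_t pack_fence(uint32_t gtt_offset, uint32_t size,
                               uint32_t stride, int tiling_y)
    {
        uint64_t val;

        val = (uint64_t)((gtt_offset + size - 4096) & 0xfffff000) << 32;
        val |= gtt_offset & 0xfffff000;
        val |= (uint64_t)(stride / 128 - 1) << FENCE_PITCH_SHIFT;
        if (tiling_y)
            val |= FENCE_TILING_Y_BIT;
        val |= FENCE_VALID_BIT;
        return val;
    }

    int main(void)
    {
        /* A 1MiB Y-tiled object at offset 2MiB, 512-byte stride. */
        printf("fence = 0x%016llx\n", (unsigned long long)
               pack_fence(2u << 20, 1u << 20, 512, 1));
        return 0;
    }

The pipelined variants in the hunk emit this same value through MI_LOAD_REGISTER_IMM on the ring instead of an immediate MMIO write, so the register update can be ordered with work already queued on that ring.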
- * @bool: whether the wait upon the fence is interruptible - * - * Zeroes out the fence register itself and clears out the associated - * data structures in dev_priv and obj_priv. - */ -int -i915_gem_object_put_fence_reg(struct drm_gem_object *obj, - bool interruptible) -{ - struct drm_device *dev = obj->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - struct drm_i915_fence_reg *reg; - - if (obj_priv->fence_reg == I915_FENCE_REG_NONE) - return 0; - - /* If we've changed tiling, GTT-mappings of the object - * need to re-fault to ensure that the correct fence register - * setup is in place. - */ - i915_gem_release_mmap(obj); - - /* On the i915, GPU access to tiled buffers is via a fence, - * therefore we must wait for any outstanding access to complete - * before clearing the fence. - */ - reg = &dev_priv->fence_regs[obj_priv->fence_reg]; - if (reg->gpu) { - int ret; - - ret = i915_gem_object_flush_gpu_write_domain(obj, true); - if (ret) - return ret; - - ret = i915_gem_object_wait_rendering(obj, interruptible); - if (ret) - return ret; - - reg->gpu = false; - } - - i915_gem_object_flush_gtt_write_domain(obj); - i915_gem_clear_fence_reg(obj); - - return 0; + reg->obj = NULL; + reg->setup_seqno = 0; } /** * Finds free space in the GTT aperture and binds the object there. */ static int -i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) +i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, + unsigned alignment, + bool map_and_fenceable) { - struct drm_device *dev = obj->dev; + struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); struct drm_mm_node *free_space; - gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN; + gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN; + u32 size, fence_size, fence_alignment, unfenced_alignment; + bool mappable, fenceable; int ret; - if (obj_priv->madv != I915_MADV_WILLNEED) { + if (obj->madv != I915_MADV_WILLNEED) { DRM_ERROR("Attempting to bind a purgeable object\n"); return -EINVAL; } + fence_size = i915_gem_get_gtt_size(obj); + fence_alignment = i915_gem_get_gtt_alignment(obj); + unfenced_alignment = i915_gem_get_unfenced_gtt_alignment(obj); + if (alignment == 0) - alignment = i915_gem_get_gtt_alignment(obj); - if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) { + alignment = map_and_fenceable ? fence_alignment : + unfenced_alignment; + if (map_and_fenceable && alignment & (fence_alignment - 1)) { DRM_ERROR("Invalid object alignment requested %u\n", alignment); return -EINVAL; } + size = map_and_fenceable ? fence_size : obj->base.size; + /* If the object is bigger than the entire aperture, reject it early * before evicting everything in a vain attempt to find space. */ - if (obj->size > dev_priv->mm.gtt_total) { + if (obj->base.size > + (map_and_fenceable ? 
dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) { DRM_ERROR("Attempting to bind an object larger than the aperture\n"); return -E2BIG; } search_free: - free_space = drm_mm_search_free(&dev_priv->mm.gtt_space, - obj->size, alignment, 0); - if (free_space != NULL) - obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size, - alignment); - if (obj_priv->gtt_space == NULL) { + if (map_and_fenceable) + free_space = + drm_mm_search_free_in_range(&dev_priv->mm.gtt_space, + size, alignment, 0, + dev_priv->mm.gtt_mappable_end, + 0); + else + free_space = drm_mm_search_free(&dev_priv->mm.gtt_space, + size, alignment, 0); + + if (free_space != NULL) { + if (map_and_fenceable) + obj->gtt_space = + drm_mm_get_block_range_generic(free_space, + size, alignment, 0, + dev_priv->mm.gtt_mappable_end, + 0); + else + obj->gtt_space = + drm_mm_get_block(free_space, size, alignment); + } + if (obj->gtt_space == NULL) { /* If the gtt is empty and we're still having trouble * fitting our object in, we're out of memory. */ - ret = i915_gem_evict_something(dev, obj->size, alignment); + ret = i915_gem_evict_something(dev, size, alignment, + map_and_fenceable); if (ret) return ret; goto search_free; } - ret = i915_gem_object_get_pages(obj, gfpmask); + ret = i915_gem_object_get_pages_gtt(obj, gfpmask); if (ret) { - drm_mm_put_block(obj_priv->gtt_space); - obj_priv->gtt_space = NULL; + drm_mm_put_block(obj->gtt_space); + obj->gtt_space = NULL; if (ret == -ENOMEM) { /* first try to clear up some space from the GTT */ - ret = i915_gem_evict_something(dev, obj->size, - alignment); + ret = i915_gem_evict_something(dev, size, + alignment, + map_and_fenceable); if (ret) { /* now try to shrink everyone else */ if (gfpmask) { @@ -2686,126 +2756,113 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) return ret; } - /* Create an AGP memory structure pointing at our pages, and bind it - * into the GTT. - */ - obj_priv->agp_mem = drm_agp_bind_pages(dev, - obj_priv->pages, - obj->size >> PAGE_SHIFT, - obj_priv->gtt_space->start, - obj_priv->agp_type); - if (obj_priv->agp_mem == NULL) { - i915_gem_object_put_pages(obj); - drm_mm_put_block(obj_priv->gtt_space); - obj_priv->gtt_space = NULL; - - ret = i915_gem_evict_something(dev, obj->size, alignment); + ret = i915_gem_gtt_bind_object(obj); + if (ret) { + i915_gem_object_put_pages_gtt(obj); + drm_mm_put_block(obj->gtt_space); + obj->gtt_space = NULL; + + ret = i915_gem_evict_something(dev, size, + alignment, map_and_fenceable); if (ret) return ret; goto search_free; } - /* keep track of bounds object by adding it to the inactive list */ - list_add_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list); - i915_gem_info_add_gtt(dev_priv, obj->size); + list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list); + list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list); /* Assert that the object is not currently in any GPU domain. 
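From this hunk on, a binding is either map_and_fenceable, sized and aligned for a fence register and kept inside the CPU-mappable part of the GTT (below gtt_mappable_end), or loose, using the object's natural size and the unfenced alignment. The fence paths earlier also rely on the fenced size being a power of two, which the (size & -size) != size test in the WARNs checks. A small model of both decisions (names invented for the sketch):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* A nonzero value is a power of two iff its lowest set bit is the
     * whole value; this is the (size & -size) != size test inverted. */
    static bool is_pot(uint32_t size)
    {
        return size != 0 && (size & -size) == size;
    }

    /* Mirror the map_and_fenceable size/alignment selection. */
    static void choose_placement(uint32_t obj_size, uint32_t fence_size,
                                 uint32_t fence_align,
                                 uint32_t unfenced_align,
                                 bool map_and_fenceable,
                                 uint32_t *size, uint32_t *align)
    {
        *size = map_and_fenceable ? fence_size : obj_size;
        *align = map_and_fenceable ? fence_align : unfenced_align;
    }

    int main(void)
    {
        uint32_t size, align;

        assert(is_pot(4096) && is_pot(1u << 20) && !is_pot(12288));

        /* Fenceable: a 12KiB object padded out to a 16KiB fence. */
        choose_placement(12288, 16384, 16384, 4096, true, &size, &align);
        assert(size == 16384 && align == 16384);

        /* Unfenced GPU-only binding keeps the natural size. */
        choose_placement(12288, 16384, 16384, 4096, false, &size, &align);
        assert(size == 12288 && align == 4096);
        return 0;
    }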
As it * wasn't in the GTT, there shouldn't be any way it could have been in * a GPU cache */ - BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS); - BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS); + BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS); + BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); - obj_priv->gtt_offset = obj_priv->gtt_space->start; - trace_i915_gem_object_bind(obj, obj_priv->gtt_offset); + obj->gtt_offset = obj->gtt_space->start; + fenceable = + obj->gtt_space->size == fence_size && + (obj->gtt_space->start & (fence_alignment -1)) == 0; + + mappable = + obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end; + + obj->map_and_fenceable = mappable && fenceable; + + trace_i915_gem_object_bind(obj, obj->gtt_offset, map_and_fenceable); return 0; } void -i915_gem_clflush_object(struct drm_gem_object *obj) +i915_gem_clflush_object(struct drm_i915_gem_object *obj) { - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - /* If we don't have a page list set up, then we're not pinned * to GPU, and we can ignore the cache flush because it'll happen * again at bind time. */ - if (obj_priv->pages == NULL) + if (obj->pages == NULL) return; trace_i915_gem_object_clflush(obj); - drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE); + drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE); } /** Flushes any GPU write domain for the object if it's dirty. */ -static int -i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj, - bool pipelined) +static void +i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj->dev; - uint32_t old_write_domain; + struct drm_device *dev = obj->base.dev; - if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0) - return 0; + if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0) + return; /* Queue the GPU write cache flushing we need. */ - old_write_domain = obj->write_domain; - i915_gem_flush_ring(dev, NULL, - to_intel_bo(obj)->ring, - 0, obj->write_domain); - BUG_ON(obj->write_domain); - - trace_i915_gem_object_change_domain(obj, - obj->read_domains, - old_write_domain); - - if (pipelined) - return 0; - - return i915_gem_object_wait_rendering(obj, true); + i915_gem_flush_ring(dev, obj->ring, 0, obj->base.write_domain); + BUG_ON(obj->base.write_domain); } /** Flushes the GTT write domain for the object if it's dirty. */ static void -i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj) +i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj) { uint32_t old_write_domain; - if (obj->write_domain != I915_GEM_DOMAIN_GTT) + if (obj->base.write_domain != I915_GEM_DOMAIN_GTT) return; /* No actual flushing is required for the GTT write domain. Writes * to it immediately go to main memory as far as we know, so there's * no chipset flush. It also doesn't land in render cache. */ - old_write_domain = obj->write_domain; - obj->write_domain = 0; + i915_gem_release_mmap(obj); + + old_write_domain = obj->base.write_domain; + obj->base.write_domain = 0; trace_i915_gem_object_change_domain(obj, - obj->read_domains, + obj->base.read_domains, old_write_domain); } /** Flushes the CPU write domain for the object if it's dirty. 
*/ static void -i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj) +i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj->dev; uint32_t old_write_domain; - if (obj->write_domain != I915_GEM_DOMAIN_CPU) + if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) return; i915_gem_clflush_object(obj); - drm_agp_chipset_flush(dev); - old_write_domain = obj->write_domain; - obj->write_domain = 0; + intel_gtt_chipset_flush(); + old_write_domain = obj->base.write_domain; + obj->base.write_domain = 0; trace_i915_gem_object_change_domain(obj, - obj->read_domains, + obj->base.read_domains, old_write_domain); } @@ -2816,40 +2873,36 @@ i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj) * flushes to occur. */ int -i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) +i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) { - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); uint32_t old_write_domain, old_read_domains; int ret; /* Not valid to be called on unbound objects. */ - if (obj_priv->gtt_space == NULL) + if (obj->gtt_space == NULL) return -EINVAL; - ret = i915_gem_object_flush_gpu_write_domain(obj, false); - if (ret != 0) - return ret; - - i915_gem_object_flush_cpu_write_domain(obj); - - if (write) { + i915_gem_object_flush_gpu_write_domain(obj); + if (obj->pending_gpu_write || write) { ret = i915_gem_object_wait_rendering(obj, true); if (ret) return ret; } - old_write_domain = obj->write_domain; - old_read_domains = obj->read_domains; + i915_gem_object_flush_cpu_write_domain(obj); + + old_write_domain = obj->base.write_domain; + old_read_domains = obj->base.read_domains; /* It should now be out of any other write domains, and we can update * the domain values for our changes. */ - BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0); - obj->read_domains |= I915_GEM_DOMAIN_GTT; + BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0); + obj->base.read_domains |= I915_GEM_DOMAIN_GTT; if (write) { - obj->read_domains = I915_GEM_DOMAIN_GTT; - obj->write_domain = I915_GEM_DOMAIN_GTT; - obj_priv->dirty = 1; + obj->base.read_domains = I915_GEM_DOMAIN_GTT; + obj->base.write_domain = I915_GEM_DOMAIN_GTT; + obj->dirty = 1; } trace_i915_gem_object_change_domain(obj, @@ -2864,23 +2917,20 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) * wait, as in modesetting process we're not supposed to be interrupted. */ int -i915_gem_object_set_to_display_plane(struct drm_gem_object *obj, - bool pipelined) +i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj, + struct intel_ring_buffer *pipelined) { - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); uint32_t old_read_domains; int ret; /* Not valid to be called on unbound objects. */ - if (obj_priv->gtt_space == NULL) + if (obj->gtt_space == NULL) return -EINVAL; - ret = i915_gem_object_flush_gpu_write_domain(obj, true); - if (ret) - return ret; + i915_gem_object_flush_gpu_write_domain(obj); /* Currently, we are always called from an non-interruptible context. 
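set_to_gtt_domain() above follows a fixed bookkeeping rule: the GTT bit is always added to the read domains, but a write collapses the object to exactly (GTT, GTT) and marks it dirty; the CPU variant further down applies the same rule with the CPU bit. A tiny bitmask model of that transition:

    #include <assert.h>
    #include <stdint.h>

    #define DOM_CPU (1u << 0)
    #define DOM_GTT (1u << 1)

    struct toy_obj {
        uint32_t read_domains;
        uint32_t write_domain;
        int dirty;
    };

    /* Reads accumulate domains; a write becomes exclusive. */
    static void set_to_gtt(struct toy_obj *o, int write)
    {
        o->read_domains |= DOM_GTT;
        if (write) {
            o->read_domains = DOM_GTT;
            o->write_domain = DOM_GTT;
            o->dirty = 1;
        }
    }

    int main(void)
    {
        struct toy_obj o = { DOM_CPU, 0, 0 };

        set_to_gtt(&o, 0);        /* read: CPU|GTT, no write domain */
        assert(o.read_domains == (DOM_CPU | DOM_GTT));
        assert(o.write_domain == 0);

        set_to_gtt(&o, 1);        /* write: collapses to (GTT, GTT) */
        assert(o.read_domains == DOM_GTT);
        assert(o.write_domain == DOM_GTT && o.dirty == 1);
        return 0;
    }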
*/ - if (!pipelined) { + if (pipelined != obj->ring) { ret = i915_gem_object_wait_rendering(obj, false); if (ret) return ret; @@ -2888,12 +2938,12 @@ i915_gem_object_set_to_display_plane(struct drm_gem_object *obj, i915_gem_object_flush_cpu_write_domain(obj); - old_read_domains = obj->read_domains; - obj->read_domains |= I915_GEM_DOMAIN_GTT; + old_read_domains = obj->base.read_domains; + obj->base.read_domains |= I915_GEM_DOMAIN_GTT; trace_i915_gem_object_change_domain(obj, old_read_domains, - obj->write_domain); + obj->base.write_domain); return 0; } @@ -2906,10 +2956,10 @@ i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj, return 0; if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) - i915_gem_flush_ring(obj->base.dev, NULL, obj->ring, + i915_gem_flush_ring(obj->base.dev, obj->ring, 0, obj->base.write_domain); - return i915_gem_object_wait_rendering(&obj->base, interruptible); + return i915_gem_object_wait_rendering(obj, interruptible); } /** @@ -2919,13 +2969,14 @@ i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj, * flushes to occur. */ static int -i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) +i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write) { uint32_t old_write_domain, old_read_domains; int ret; - ret = i915_gem_object_flush_gpu_write_domain(obj, false); - if (ret != 0) + i915_gem_object_flush_gpu_write_domain(obj); + ret = i915_gem_object_wait_rendering(obj, true); + if (ret) return ret; i915_gem_object_flush_gtt_write_domain(obj); @@ -2935,33 +2986,27 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) */ i915_gem_object_set_to_full_cpu_read_domain(obj); - if (write) { - ret = i915_gem_object_wait_rendering(obj, true); - if (ret) - return ret; - } - - old_write_domain = obj->write_domain; - old_read_domains = obj->read_domains; + old_write_domain = obj->base.write_domain; + old_read_domains = obj->base.read_domains; /* Flush the CPU cache if it's still invalid. */ - if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) { + if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) { i915_gem_clflush_object(obj); - obj->read_domains |= I915_GEM_DOMAIN_CPU; + obj->base.read_domains |= I915_GEM_DOMAIN_CPU; } /* It should now be out of any other write domains, and we can update * the domain values for our changes. */ - BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0); + BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0); /* If we're writing through the CPU, then the GPU read domains will * need to be invalidated at next use. */ if (write) { - obj->read_domains = I915_GEM_DOMAIN_CPU; - obj->write_domain = I915_GEM_DOMAIN_CPU; + obj->base.read_domains = I915_GEM_DOMAIN_CPU; + obj->base.write_domain = I915_GEM_DOMAIN_CPU; } trace_i915_gem_object_change_domain(obj, @@ -2971,184 +3016,6 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) return 0; } -/* - * Set the next domain for the specified object. This - * may not actually perform the necessary flushing/invaliding though, - * as that may want to be batched with other set_domain operations - * - * This is (we hope) the only really tricky part of gem. The goal - * is fairly simple -- track which caches hold bits of the object - * and make sure they remain coherent. A few concrete examples may - * help to explain how it works. For shorthand, we use the notation - * (read_domains, write_domain), e.g. (CPU, CPU) to indicate the - * a pair of read and write domain masks. 
- * - * Case 1: the batch buffer - * - * 1. Allocated - * 2. Written by CPU - * 3. Mapped to GTT - * 4. Read by GPU - * 5. Unmapped from GTT - * 6. Freed - * - * Let's take these a step at a time - * - * 1. Allocated - * Pages allocated from the kernel may still have - * cache contents, so we set them to (CPU, CPU) always. - * 2. Written by CPU (using pwrite) - * The pwrite function calls set_domain (CPU, CPU) and - * this function does nothing (as nothing changes) - * 3. Mapped by GTT - * This function asserts that the object is not - * currently in any GPU-based read or write domains - * 4. Read by GPU - * i915_gem_execbuffer calls set_domain (COMMAND, 0). - * As write_domain is zero, this function adds in the - * current read domains (CPU+COMMAND, 0). - * flush_domains is set to CPU. - * invalidate_domains is set to COMMAND - * clflush is run to get data out of the CPU caches - * then i915_dev_set_domain calls i915_gem_flush to - * emit an MI_FLUSH and drm_agp_chipset_flush - * 5. Unmapped from GTT - * i915_gem_object_unbind calls set_domain (CPU, CPU) - * flush_domains and invalidate_domains end up both zero - * so no flushing/invalidating happens - * 6. Freed - * yay, done - * - * Case 2: The shared render buffer - * - * 1. Allocated - * 2. Mapped to GTT - * 3. Read/written by GPU - * 4. set_domain to (CPU,CPU) - * 5. Read/written by CPU - * 6. Read/written by GPU - * - * 1. Allocated - * Same as last example, (CPU, CPU) - * 2. Mapped to GTT - * Nothing changes (assertions find that it is not in the GPU) - * 3. Read/written by GPU - * execbuffer calls set_domain (RENDER, RENDER) - * flush_domains gets CPU - * invalidate_domains gets GPU - * clflush (obj) - * MI_FLUSH and drm_agp_chipset_flush - * 4. set_domain (CPU, CPU) - * flush_domains gets GPU - * invalidate_domains gets CPU - * wait_rendering (obj) to make sure all drawing is complete. - * This will include an MI_FLUSH to get the data from GPU - * to memory - * clflush (obj) to invalidate the CPU cache - * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?) - * 5. Read/written by CPU - * cache lines are loaded and dirtied - * 6. Read written by GPU - * Same as last GPU access - * - * Case 3: The constant buffer - * - * 1. Allocated - * 2. Written by CPU - * 3. Read by GPU - * 4. Updated (written) by CPU again - * 5. Read by GPU - * - * 1. Allocated - * (CPU, CPU) - * 2. Written by CPU - * (CPU, CPU) - * 3. Read by GPU - * (CPU+RENDER, 0) - * flush_domains = CPU - * invalidate_domains = RENDER - * clflush (obj) - * MI_FLUSH - * drm_agp_chipset_flush - * 4. Updated (written) by CPU again - * (CPU, CPU) - * flush_domains = 0 (no previous write domain) - * invalidate_domains = 0 (no new read domains) - * 5. 
Read by GPU - * (CPU+RENDER, 0) - * flush_domains = CPU - * invalidate_domains = RENDER - * clflush (obj) - * MI_FLUSH - * drm_agp_chipset_flush - */ -static void -i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj, - struct intel_ring_buffer *ring) -{ - struct drm_device *dev = obj->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - uint32_t invalidate_domains = 0; - uint32_t flush_domains = 0; - uint32_t old_read_domains; - - intel_mark_busy(dev, obj); - - /* - * If the object isn't moving to a new write domain, - * let the object stay in multiple read domains - */ - if (obj->pending_write_domain == 0) - obj->pending_read_domains |= obj->read_domains; - else - obj_priv->dirty = 1; - - /* - * Flush the current write domain if - * the new read domains don't match. Invalidate - * any read domains which differ from the old - * write domain - */ - if (obj->write_domain && - (obj->write_domain != obj->pending_read_domains || - obj_priv->ring != ring)) { - flush_domains |= obj->write_domain; - invalidate_domains |= - obj->pending_read_domains & ~obj->write_domain; - } - /* - * Invalidate any read caches which may have - * stale data. That is, any new read domains. - */ - invalidate_domains |= obj->pending_read_domains & ~obj->read_domains; - if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) - i915_gem_clflush_object(obj); - - old_read_domains = obj->read_domains; - - /* The actual obj->write_domain will be updated with - * pending_write_domain after we emit the accumulated flush for all - * of our domain changes in execbuffers (which clears objects' - * write_domains). So if we have a current write domain that we - * aren't changing, set pending_write_domain to that. - */ - if (flush_domains == 0 && obj->pending_write_domain == 0) - obj->pending_write_domain = obj->write_domain; - obj->read_domains = obj->pending_read_domains; - - dev->invalidate_domains |= invalidate_domains; - dev->flush_domains |= flush_domains; - if (flush_domains & I915_GEM_GPU_DOMAINS) - dev_priv->mm.flush_rings |= obj_priv->ring->id; - if (invalidate_domains & I915_GEM_GPU_DOMAINS) - dev_priv->mm.flush_rings |= ring->id; - - trace_i915_gem_object_change_domain(obj, - old_read_domains, - obj->write_domain); -} - /** * Moves the object from a partially CPU read to a full one. * @@ -3156,30 +3023,28 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj, * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU). */ static void -i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj) +i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj) { - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - - if (!obj_priv->page_cpu_valid) + if (!obj->page_cpu_valid) return; /* If we're partially in the CPU read domain, finish moving it in. */ - if (obj->read_domains & I915_GEM_DOMAIN_CPU) { + if (obj->base.read_domains & I915_GEM_DOMAIN_CPU) { int i; - for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) { - if (obj_priv->page_cpu_valid[i]) + for (i = 0; i <= (obj->base.size - 1) / PAGE_SIZE; i++) { + if (obj->page_cpu_valid[i]) continue; - drm_clflush_pages(obj_priv->pages + i, 1); + drm_clflush_pages(obj->pages + i, 1); } } /* Free the page_cpu_valid mappings which are now stale, whether * or not we've got I915_GEM_DOMAIN_CPU. 
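The partial-read path around here keeps a byte-per-page page_cpu_valid map so a small read only clflushes the pages it actually touches; moving to the full CPU domain then flushes whatever the map still marks invalid and frees it. A userspace model of the per-page bookkeeping, with the cache flush simulated by a print:

    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-in for drm_clflush_pages() on one page. */
    static void flush_page(size_t i)
    {
        printf("clflush page %zu\n", i);
    }

    /* Make [offset, offset + size) CPU-valid, flushing only pages not
     * already marked valid -- the same loop bounds as the driver's
     * i <= (offset + size - 1) / PAGE_SIZE walk. */
    static void read_range(unsigned char *valid, size_t offset,
                           size_t size, size_t page_size)
    {
        size_t i;

        for (i = offset / page_size;
             i <= (offset + size - 1) / page_size; i++) {
            if (valid[i])
                continue;          /* already coherent, skip */
            flush_page(i);
            valid[i] = 1;
        }
    }

    int main(void)
    {
        unsigned char *valid = calloc(8, 1);

        read_range(valid, 4096, 8192, 4096);   /* flushes pages 1, 2 */
        read_range(valid, 4096, 4096, 4096);   /* flushes nothing */
        free(valid);
        return 0;
    }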
*/ - kfree(obj_priv->page_cpu_valid); - obj_priv->page_cpu_valid = NULL; + kfree(obj->page_cpu_valid); + obj->page_cpu_valid = NULL; } /** @@ -3195,354 +3060,62 @@ i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj) * flushes to occur. */ static int -i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, +i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj, uint64_t offset, uint64_t size) { - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); uint32_t old_read_domains; int i, ret; - if (offset == 0 && size == obj->size) + if (offset == 0 && size == obj->base.size) return i915_gem_object_set_to_cpu_domain(obj, 0); - ret = i915_gem_object_flush_gpu_write_domain(obj, false); - if (ret != 0) + i915_gem_object_flush_gpu_write_domain(obj); + ret = i915_gem_object_wait_rendering(obj, true); + if (ret) return ret; + i915_gem_object_flush_gtt_write_domain(obj); /* If we're already fully in the CPU read domain, we're done. */ - if (obj_priv->page_cpu_valid == NULL && - (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0) + if (obj->page_cpu_valid == NULL && + (obj->base.read_domains & I915_GEM_DOMAIN_CPU) != 0) return 0; /* Otherwise, create/clear the per-page CPU read domain flag if we're * newly adding I915_GEM_DOMAIN_CPU */ - if (obj_priv->page_cpu_valid == NULL) { - obj_priv->page_cpu_valid = kzalloc(obj->size / PAGE_SIZE, - GFP_KERNEL); - if (obj_priv->page_cpu_valid == NULL) + if (obj->page_cpu_valid == NULL) { + obj->page_cpu_valid = kzalloc(obj->base.size / PAGE_SIZE, + GFP_KERNEL); + if (obj->page_cpu_valid == NULL) return -ENOMEM; - } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) - memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE); + } else if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) + memset(obj->page_cpu_valid, 0, obj->base.size / PAGE_SIZE); /* Flush the cache on any pages that are still invalid from the CPU's * perspective. */ for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; i++) { - if (obj_priv->page_cpu_valid[i]) + if (obj->page_cpu_valid[i]) continue; - drm_clflush_pages(obj_priv->pages + i, 1); + drm_clflush_pages(obj->pages + i, 1); - obj_priv->page_cpu_valid[i] = 1; + obj->page_cpu_valid[i] = 1; } /* It should now be out of any other write domains, and we can update * the domain values for our changes. */ - BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0); + BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0); - old_read_domains = obj->read_domains; - obj->read_domains |= I915_GEM_DOMAIN_CPU; + old_read_domains = obj->base.read_domains; + obj->base.read_domains |= I915_GEM_DOMAIN_CPU; trace_i915_gem_object_change_domain(obj, old_read_domains, - obj->write_domain); - - return 0; -} - -/** - * Pin an object to the GTT and evaluate the relocations landing in it. 
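The page_cpu_valid array above lets a partial CPU read (a small pread, say) pay only for the cachelines it actually touches. A hedged sketch of the core loop, lifted out of the driver context; the helper name is made up:

/* Sketch: flush only the pages in [offset, offset + size) that have
 * not already been made valid for CPU reads. page_cpu_valid is one
 * byte per page, as in the driver code above.
 */
static void
clflush_invalid_pages(struct page **pages, u8 *page_cpu_valid,
                      u64 offset, u64 size)
{
        u64 i;

        for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; i++) {
                if (page_cpu_valid[i])
                        continue;
                drm_clflush_pages(&pages[i], 1);
                page_cpu_valid[i] = 1;
        }
}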
- */ -static int -i915_gem_execbuffer_relocate(struct drm_i915_gem_object *obj, - struct drm_file *file_priv, - struct drm_i915_gem_exec_object2 *entry) -{ - struct drm_device *dev = obj->base.dev; - drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_relocation_entry __user *user_relocs; - struct drm_gem_object *target_obj = NULL; - uint32_t target_handle = 0; - int i, ret = 0; - - user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr; - for (i = 0; i < entry->relocation_count; i++) { - struct drm_i915_gem_relocation_entry reloc; - uint32_t target_offset; - - if (__copy_from_user_inatomic(&reloc, - user_relocs+i, - sizeof(reloc))) { - ret = -EFAULT; - break; - } - - if (reloc.target_handle != target_handle) { - drm_gem_object_unreference(target_obj); - - target_obj = drm_gem_object_lookup(dev, file_priv, - reloc.target_handle); - if (target_obj == NULL) { - ret = -ENOENT; - break; - } - - target_handle = reloc.target_handle; - } - target_offset = to_intel_bo(target_obj)->gtt_offset; - -#if WATCH_RELOC - DRM_INFO("%s: obj %p offset %08x target %d " - "read %08x write %08x gtt %08x " - "presumed %08x delta %08x\n", - __func__, - obj, - (int) reloc.offset, - (int) reloc.target_handle, - (int) reloc.read_domains, - (int) reloc.write_domain, - (int) target_offset, - (int) reloc.presumed_offset, - reloc.delta); -#endif - - /* The target buffer should have appeared before us in the - * exec_object list, so it should have a GTT space bound by now. - */ - if (target_offset == 0) { - DRM_ERROR("No GTT space found for object %d\n", - reloc.target_handle); - ret = -EINVAL; - break; - } - - /* Validate that the target is in a valid r/w GPU domain */ - if (reloc.write_domain & (reloc.write_domain - 1)) { - DRM_ERROR("reloc with multiple write domains: " - "obj %p target %d offset %d " - "read %08x write %08x", - obj, reloc.target_handle, - (int) reloc.offset, - reloc.read_domains, - reloc.write_domain); - ret = -EINVAL; - break; - } - if (reloc.write_domain & I915_GEM_DOMAIN_CPU || - reloc.read_domains & I915_GEM_DOMAIN_CPU) { - DRM_ERROR("reloc with read/write CPU domains: " - "obj %p target %d offset %d " - "read %08x write %08x", - obj, reloc.target_handle, - (int) reloc.offset, - reloc.read_domains, - reloc.write_domain); - ret = -EINVAL; - break; - } - if (reloc.write_domain && target_obj->pending_write_domain && - reloc.write_domain != target_obj->pending_write_domain) { - DRM_ERROR("Write domain conflict: " - "obj %p target %d offset %d " - "new %08x old %08x\n", - obj, reloc.target_handle, - (int) reloc.offset, - reloc.write_domain, - target_obj->pending_write_domain); - ret = -EINVAL; - break; - } - - target_obj->pending_read_domains |= reloc.read_domains; - target_obj->pending_write_domain |= reloc.write_domain; - - /* If the relocation already has the right value in it, no - * more work needs to be done. - */ - if (target_offset == reloc.presumed_offset) - continue; - - /* Check that the relocation address is valid... */ - if (reloc.offset > obj->base.size - 4) { - DRM_ERROR("Relocation beyond object bounds: " - "obj %p target %d offset %d size %d.\n", - obj, reloc.target_handle, - (int) reloc.offset, (int) obj->base.size); - ret = -EINVAL; - break; - } - if (reloc.offset & 3) { - DRM_ERROR("Relocation not 4-byte aligned: " - "obj %p target %d offset %d.\n", - obj, reloc.target_handle, - (int) reloc.offset); - ret = -EINVAL; - break; - } - - /* and points to somewhere within the target object. 
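The relocation loop above rejects malformed entries before touching any pages. Collected in one place, the per-entry checks look roughly like this (error messages trimmed; the helper itself is illustrative, not a driver function):

/* Sketch of the per-relocation sanity checks performed above. */
static int
check_relocation(const struct drm_i915_gem_relocation_entry *reloc,
                 size_t obj_size)
{
        /* At most one write domain may be named. */
        if (reloc->write_domain & (reloc->write_domain - 1))
                return -EINVAL;

        /* CPU domains are never valid GPU relocation targets. */
        if ((reloc->write_domain | reloc->read_domains) & I915_GEM_DOMAIN_CPU)
                return -EINVAL;

        /* The 4-byte slot being patched must lie inside the object,
         * on a 4-byte boundary.
         */
        if (reloc->offset > obj_size - 4 || (reloc->offset & 3))
                return -EINVAL;

        return 0;
}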
*/ - if (reloc.delta >= target_obj->size) { - DRM_ERROR("Relocation beyond target object bounds: " - "obj %p target %d delta %d size %d.\n", - obj, reloc.target_handle, - (int) reloc.delta, (int) target_obj->size); - ret = -EINVAL; - break; - } - - reloc.delta += target_offset; - if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) { - uint32_t page_offset = reloc.offset & ~PAGE_MASK; - char *vaddr; - - vaddr = kmap_atomic(obj->pages[reloc.offset >> PAGE_SHIFT]); - *(uint32_t *)(vaddr + page_offset) = reloc.delta; - kunmap_atomic(vaddr); - } else { - uint32_t __iomem *reloc_entry; - void __iomem *reloc_page; - - ret = i915_gem_object_set_to_gtt_domain(&obj->base, 1); - if (ret) - break; - - /* Map the page containing the relocation we're going to perform. */ - reloc.offset += obj->gtt_offset; - reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, - reloc.offset & PAGE_MASK); - reloc_entry = (uint32_t __iomem *) - (reloc_page + (reloc.offset & ~PAGE_MASK)); - iowrite32(reloc.delta, reloc_entry); - io_mapping_unmap_atomic(reloc_page); - } - - /* and update the user's relocation entry */ - reloc.presumed_offset = target_offset; - if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset, - &reloc.presumed_offset, - sizeof(reloc.presumed_offset))) { - ret = -EFAULT; - break; - } - } - - drm_gem_object_unreference(target_obj); - return ret; -} - -static int -i915_gem_execbuffer_pin(struct drm_device *dev, - struct drm_file *file, - struct drm_gem_object **object_list, - struct drm_i915_gem_exec_object2 *exec_list, - int count) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - int ret, i, retry; - - /* attempt to pin all of the buffers into the GTT */ - for (retry = 0; retry < 2; retry++) { - ret = 0; - for (i = 0; i < count; i++) { - struct drm_i915_gem_exec_object2 *entry = &exec_list[i]; - struct drm_i915_gem_object *obj= to_intel_bo(object_list[i]); - bool need_fence = - entry->flags & EXEC_OBJECT_NEEDS_FENCE && - obj->tiling_mode != I915_TILING_NONE; - - /* Check fence reg constraints and rebind if necessary */ - if (need_fence && - !i915_gem_object_fence_offset_ok(&obj->base, - obj->tiling_mode)) { - ret = i915_gem_object_unbind(&obj->base); - if (ret) - break; - } - - ret = i915_gem_object_pin(&obj->base, entry->alignment); - if (ret) - break; - - /* - * Pre-965 chips need a fence register set up in order - * to properly handle blits to/from tiled surfaces. - */ - if (need_fence) { - ret = i915_gem_object_get_fence_reg(&obj->base, true); - if (ret) { - i915_gem_object_unpin(&obj->base); - break; - } - - dev_priv->fence_regs[obj->fence_reg].gpu = true; - } - - entry->offset = obj->gtt_offset; - } - - while (i--) - i915_gem_object_unpin(object_list[i]); - - if (ret == 0) - break; - - if (ret != -ENOSPC || retry) - return ret; - - ret = i915_gem_evict_everything(dev); - if (ret) - return ret; - } - - return 0; -} - -static int -i915_gem_execbuffer_move_to_gpu(struct drm_device *dev, - struct drm_file *file, - struct intel_ring_buffer *ring, - struct drm_gem_object **objects, - int count) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - int ret, i; - - /* Zero the global flush/invalidate flags. 
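Applying a relocation takes one of two paths above: an object already in the CPU write domain is patched through a kernel mapping of its backing page, while anything else is first moved to the GTT domain and written through the aperture with iowrite32, avoiding a second cache flush. A sketch of the CPU-side path only; the helper name is invented:

/* Sketch: patch a 32-bit value at 'offset' inside an object that is
 * currently in the CPU write domain, mirroring the kmap_atomic path
 * in the code above.
 */
static void
write_reloc_via_cpu(struct page **pages, u64 offset, u32 value)
{
        u32 page_offset = offset & ~PAGE_MASK;
        char *vaddr;

        vaddr = kmap_atomic(pages[offset >> PAGE_SHIFT]);
        *(u32 *)(vaddr + page_offset) = value;
        kunmap_atomic(vaddr);
}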
These - * will be modified as new domains are computed - * for each object - */ - dev->invalidate_domains = 0; - dev->flush_domains = 0; - dev_priv->mm.flush_rings = 0; - for (i = 0; i < count; i++) - i915_gem_object_set_to_gpu_domain(objects[i], ring); - - if (dev->invalidate_domains | dev->flush_domains) { -#if WATCH_EXEC - DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n", - __func__, - dev->invalidate_domains, - dev->flush_domains); -#endif - i915_gem_flush(dev, file, - dev->invalidate_domains, - dev->flush_domains, - dev_priv->mm.flush_rings); - } - - for (i = 0; i < count; i++) { - struct drm_i915_gem_object *obj = to_intel_bo(objects[i]); - /* XXX replace with semaphores */ - if (obj->ring && ring != obj->ring) { - ret = i915_gem_object_wait_rendering(&obj->base, true); - if (ret) - return ret; - } - } + obj->base.write_domain); return 0; } @@ -3582,586 +3155,129 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) return 0; ret = 0; - if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) { + if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) { /* And wait for the seqno passing without holding any locks and * causing extra latency for others. This is safe as the irq * generation is designed to be run atomically and so is * lockless. */ - ring->user_irq_get(dev, ring); - ret = wait_event_interruptible(ring->irq_queue, - i915_seqno_passed(ring->get_seqno(dev, ring), seqno) - || atomic_read(&dev_priv->mm.wedged)); - ring->user_irq_put(dev, ring); - - if (ret == 0 && atomic_read(&dev_priv->mm.wedged)) - ret = -EIO; - } - - if (ret == 0) - queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0); - - return ret; -} - -static int -i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec, - uint64_t exec_offset) -{ - uint32_t exec_start, exec_len; - - exec_start = (uint32_t) exec_offset + exec->batch_start_offset; - exec_len = (uint32_t) exec->batch_len; - - if ((exec_start | exec_len) & 0x7) - return -EINVAL; - - if (!exec_start) - return -EINVAL; - - return 0; -} - -static int -validate_exec_list(struct drm_i915_gem_exec_object2 *exec, - int count) -{ - int i; - - for (i = 0; i < count; i++) { - char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr; - size_t length = exec[i].relocation_count * sizeof(struct drm_i915_gem_relocation_entry); - - if (!access_ok(VERIFY_READ, ptr, length)) - return -EFAULT; - - /* we may also need to update the presumed offsets */ - if (!access_ok(VERIFY_WRITE, ptr, length)) - return -EFAULT; - - if (fault_in_pages_readable(ptr, length)) - return -EFAULT; - } - - return 0; -} - -static int -i915_gem_do_execbuffer(struct drm_device *dev, void *data, - struct drm_file *file, - struct drm_i915_gem_execbuffer2 *args, - struct drm_i915_gem_exec_object2 *exec_list) -{ - drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_gem_object **object_list = NULL; - struct drm_gem_object *batch_obj; - struct drm_i915_gem_object *obj_priv; - struct drm_clip_rect *cliprects = NULL; - struct drm_i915_gem_request *request = NULL; - int ret, i, flips; - uint64_t exec_offset; - - struct intel_ring_buffer *ring = NULL; - - ret = i915_gem_check_is_wedged(dev); - if (ret) - return ret; - - ret = validate_exec_list(exec_list, args->buffer_count); - if (ret) - return ret; - -#if WATCH_EXEC - DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", - (int) args->buffers_ptr, args->buffer_count, args->batch_len); -#endif - switch (args->flags & I915_EXEC_RING_MASK) { - case I915_EXEC_DEFAULT: - case I915_EXEC_RENDER: - 
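i915_gem_check_execbuffer, shown above, gates dispatch on the batch range: start and length must both be 8-byte aligned (the ring parser consumes qwords, which is presumably the motivation) and a zero start offset is rejected outright. Restated as a standalone sketch:

/* Sketch: the alignment test applied to the batch before dispatch. */
static int
check_batch_range(u64 exec_offset, u32 start_offset, u32 batch_len)
{
        u32 exec_start = (u32)exec_offset + start_offset;

        if ((exec_start | batch_len) & 0x7)     /* qword alignment */
                return -EINVAL;
        if (exec_start == 0)                    /* offset 0 never valid */
                return -EINVAL;
        return 0;
}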
ring = &dev_priv->render_ring; - break; - case I915_EXEC_BSD: - if (!HAS_BSD(dev)) { - DRM_ERROR("execbuf with invalid ring (BSD)\n"); - return -EINVAL; - } - ring = &dev_priv->bsd_ring; - break; - case I915_EXEC_BLT: - if (!HAS_BLT(dev)) { - DRM_ERROR("execbuf with invalid ring (BLT)\n"); - return -EINVAL; - } - ring = &dev_priv->blt_ring; - break; - default: - DRM_ERROR("execbuf with unknown ring: %d\n", - (int)(args->flags & I915_EXEC_RING_MASK)); - return -EINVAL; - } - - if (args->buffer_count < 1) { - DRM_ERROR("execbuf with %d buffers\n", args->buffer_count); - return -EINVAL; - } - object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count); - if (object_list == NULL) { - DRM_ERROR("Failed to allocate object list for %d buffers\n", - args->buffer_count); - ret = -ENOMEM; - goto pre_mutex_err; - } - - if (args->num_cliprects != 0) { - cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects), - GFP_KERNEL); - if (cliprects == NULL) { - ret = -ENOMEM; - goto pre_mutex_err; - } - - ret = copy_from_user(cliprects, - (struct drm_clip_rect __user *) - (uintptr_t) args->cliprects_ptr, - sizeof(*cliprects) * args->num_cliprects); - if (ret != 0) { - DRM_ERROR("copy %d cliprects failed: %d\n", - args->num_cliprects, ret); - ret = -EFAULT; - goto pre_mutex_err; - } - } - - request = kzalloc(sizeof(*request), GFP_KERNEL); - if (request == NULL) { - ret = -ENOMEM; - goto pre_mutex_err; - } - - ret = i915_mutex_lock_interruptible(dev); - if (ret) - goto pre_mutex_err; - - if (dev_priv->mm.suspended) { - mutex_unlock(&dev->struct_mutex); - ret = -EBUSY; - goto pre_mutex_err; - } - - /* Look up object handles */ - for (i = 0; i < args->buffer_count; i++) { - object_list[i] = drm_gem_object_lookup(dev, file, - exec_list[i].handle); - if (object_list[i] == NULL) { - DRM_ERROR("Invalid object handle %d at index %d\n", - exec_list[i].handle, i); - /* prevent error path from reading uninitialized data */ - args->buffer_count = i + 1; - ret = -ENOENT; - goto err; - } - - obj_priv = to_intel_bo(object_list[i]); - if (obj_priv->in_execbuffer) { - DRM_ERROR("Object %p appears more than once in object list\n", - object_list[i]); - /* prevent error path from reading uninitialized data */ - args->buffer_count = i + 1; - ret = -EINVAL; - goto err; - } - obj_priv->in_execbuffer = true; - } - - /* Move the objects en-masse into the GTT, evicting if necessary. */ - ret = i915_gem_execbuffer_pin(dev, file, - object_list, exec_list, - args->buffer_count); - if (ret) - goto err; - - /* The objects are in their final locations, apply the relocations. 
*/ - for (i = 0; i < args->buffer_count; i++) { - struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]); - obj->base.pending_read_domains = 0; - obj->base.pending_write_domain = 0; - ret = i915_gem_execbuffer_relocate(obj, file, &exec_list[i]); - if (ret) - goto err; - } - - /* Set the pending read domains for the batch buffer to COMMAND */ - batch_obj = object_list[args->buffer_count-1]; - if (batch_obj->pending_write_domain) { - DRM_ERROR("Attempting to use self-modifying batch buffer\n"); - ret = -EINVAL; - goto err; - } - batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND; - - /* Sanity check the batch buffer */ - exec_offset = to_intel_bo(batch_obj)->gtt_offset; - ret = i915_gem_check_execbuffer(args, exec_offset); - if (ret != 0) { - DRM_ERROR("execbuf with invalid offset/length\n"); - goto err; - } - - ret = i915_gem_execbuffer_move_to_gpu(dev, file, ring, - object_list, args->buffer_count); - if (ret) - goto err; - - for (i = 0; i < args->buffer_count; i++) { - struct drm_gem_object *obj = object_list[i]; - uint32_t old_write_domain = obj->write_domain; - obj->write_domain = obj->pending_write_domain; - trace_i915_gem_object_change_domain(obj, - obj->read_domains, - old_write_domain); - } - -#if WATCH_COHERENCY - for (i = 0; i < args->buffer_count; i++) { - i915_gem_object_check_coherency(object_list[i], - exec_list[i].handle); - } -#endif - -#if WATCH_EXEC - i915_gem_dump_object(batch_obj, - args->batch_len, - __func__, - ~0); -#endif - - /* Check for any pending flips. As we only maintain a flip queue depth - * of 1, we can simply insert a WAIT for the next display flip prior - * to executing the batch and avoid stalling the CPU. - */ - flips = 0; - for (i = 0; i < args->buffer_count; i++) { - if (object_list[i]->write_domain) - flips |= atomic_read(&to_intel_bo(object_list[i])->pending_flip); - } - if (flips) { - int plane, flip_mask; - - for (plane = 0; flips >> plane; plane++) { - if (((flips >> plane) & 1) == 0) - continue; - - if (plane) - flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; - else - flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; - - intel_ring_begin(dev, ring, 2); - intel_ring_emit(dev, ring, - MI_WAIT_FOR_EVENT | flip_mask); - intel_ring_emit(dev, ring, MI_NOOP); - intel_ring_advance(dev, ring); - } - } - - /* Exec the batchbuffer */ - ret = ring->dispatch_gem_execbuffer(dev, ring, args, - cliprects, exec_offset); - if (ret) { - DRM_ERROR("dispatch failed %d\n", ret); - goto err; - } - - /* - * Ensure that the commands in the batch buffer are - * finished before the interrupt fires - */ - i915_retire_commands(dev, ring); - - for (i = 0; i < args->buffer_count; i++) { - struct drm_gem_object *obj = object_list[i]; - - i915_gem_object_move_to_active(obj, ring); - if (obj->write_domain) - list_move_tail(&to_intel_bo(obj)->gpu_write_list, - &ring->gpu_write_list); - } - - i915_add_request(dev, file, request, ring); - request = NULL; - -err: - for (i = 0; i < args->buffer_count; i++) { - if (object_list[i]) { - obj_priv = to_intel_bo(object_list[i]); - obj_priv->in_execbuffer = false; - } - drm_gem_object_unreference(object_list[i]); - } - - mutex_unlock(&dev->struct_mutex); - -pre_mutex_err: - drm_free_large(object_list); - kfree(cliprects); - kfree(request); - - return ret; -} - -/* - * Legacy execbuffer just creates an exec2 list from the original exec object - * list array and passes it to the real function. 
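The comment above describes a purely mechanical widening: each legacy drm_i915_gem_exec_object becomes a drm_i915_gem_exec_object2, with flags synthesized because the old ABI had none. One entry's conversion, sketched (the helper itself is not in the driver; the code below does this inline):

/* Sketch: widen a legacy exec entry into the exec2 layout. Pre-gen4
 * hardware needs a fence register for tiled blits, so the conversion
 * assumes EXEC_OBJECT_NEEDS_FENCE there, as the loop below does.
 */
static void
exec_to_exec2(const struct drm_i915_gem_exec_object *in,
              struct drm_i915_gem_exec_object2 *out,
              bool pre_gen4)
{
        out->handle = in->handle;
        out->relocation_count = in->relocation_count;
        out->relocs_ptr = in->relocs_ptr;
        out->alignment = in->alignment;
        out->offset = in->offset;
        out->flags = pre_gen4 ? EXEC_OBJECT_NEEDS_FENCE : 0;
}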
- */ -int -i915_gem_execbuffer(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_i915_gem_execbuffer *args = data; - struct drm_i915_gem_execbuffer2 exec2; - struct drm_i915_gem_exec_object *exec_list = NULL; - struct drm_i915_gem_exec_object2 *exec2_list = NULL; - int ret, i; - -#if WATCH_EXEC - DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", - (int) args->buffers_ptr, args->buffer_count, args->batch_len); -#endif - - if (args->buffer_count < 1) { - DRM_ERROR("execbuf with %d buffers\n", args->buffer_count); - return -EINVAL; - } - - /* Copy in the exec list from userland */ - exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count); - exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count); - if (exec_list == NULL || exec2_list == NULL) { - DRM_ERROR("Failed to allocate exec list for %d buffers\n", - args->buffer_count); - drm_free_large(exec_list); - drm_free_large(exec2_list); - return -ENOMEM; - } - ret = copy_from_user(exec_list, - (struct drm_i915_relocation_entry __user *) - (uintptr_t) args->buffers_ptr, - sizeof(*exec_list) * args->buffer_count); - if (ret != 0) { - DRM_ERROR("copy %d exec entries failed %d\n", - args->buffer_count, ret); - drm_free_large(exec_list); - drm_free_large(exec2_list); - return -EFAULT; - } - - for (i = 0; i < args->buffer_count; i++) { - exec2_list[i].handle = exec_list[i].handle; - exec2_list[i].relocation_count = exec_list[i].relocation_count; - exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr; - exec2_list[i].alignment = exec_list[i].alignment; - exec2_list[i].offset = exec_list[i].offset; - if (INTEL_INFO(dev)->gen < 4) - exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE; - else - exec2_list[i].flags = 0; - } + if (ring->irq_get(ring)) { + ret = wait_event_interruptible(ring->irq_queue, + i915_seqno_passed(ring->get_seqno(ring), seqno) + || atomic_read(&dev_priv->mm.wedged)); + ring->irq_put(ring); - exec2.buffers_ptr = args->buffers_ptr; - exec2.buffer_count = args->buffer_count; - exec2.batch_start_offset = args->batch_start_offset; - exec2.batch_len = args->batch_len; - exec2.DR1 = args->DR1; - exec2.DR4 = args->DR4; - exec2.num_cliprects = args->num_cliprects; - exec2.cliprects_ptr = args->cliprects_ptr; - exec2.flags = I915_EXEC_RENDER; - - ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list); - if (!ret) { - /* Copy the new buffer offsets back to the user's exec list. */ - for (i = 0; i < args->buffer_count; i++) - exec_list[i].offset = exec2_list[i].offset; - /* ... 
and back out to userspace */ - ret = copy_to_user((struct drm_i915_relocation_entry __user *) - (uintptr_t) args->buffers_ptr, - exec_list, - sizeof(*exec_list) * args->buffer_count); - if (ret) { - ret = -EFAULT; - DRM_ERROR("failed to copy %d exec entries " - "back to user (%d)\n", - args->buffer_count, ret); + if (ret == 0 && atomic_read(&dev_priv->mm.wedged)) + ret = -EIO; } } - drm_free_large(exec_list); - drm_free_large(exec2_list); - return ret; -} - -int -i915_gem_execbuffer2(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_i915_gem_execbuffer2 *args = data; - struct drm_i915_gem_exec_object2 *exec2_list = NULL; - int ret; - -#if WATCH_EXEC - DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", - (int) args->buffers_ptr, args->buffer_count, args->batch_len); -#endif - - if (args->buffer_count < 1) { - DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count); - return -EINVAL; - } - - exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count); - if (exec2_list == NULL) { - DRM_ERROR("Failed to allocate exec list for %d buffers\n", - args->buffer_count); - return -ENOMEM; - } - ret = copy_from_user(exec2_list, - (struct drm_i915_relocation_entry __user *) - (uintptr_t) args->buffers_ptr, - sizeof(*exec2_list) * args->buffer_count); - if (ret != 0) { - DRM_ERROR("copy %d exec entries failed %d\n", - args->buffer_count, ret); - drm_free_large(exec2_list); - return -EFAULT; - } - - ret = i915_gem_do_execbuffer(dev, data, file_priv, args, exec2_list); - if (!ret) { - /* Copy the new buffer offsets back to the user's exec list. */ - ret = copy_to_user((struct drm_i915_relocation_entry __user *) - (uintptr_t) args->buffers_ptr, - exec2_list, - sizeof(*exec2_list) * args->buffer_count); - if (ret) { - ret = -EFAULT; - DRM_ERROR("failed to copy %d exec entries " - "back to user (%d)\n", - args->buffer_count, ret); - } - } + if (ret == 0) + queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0); - drm_free_large(exec2_list); return ret; } int -i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) +i915_gem_object_pin(struct drm_i915_gem_object *obj, + uint32_t alignment, + bool map_and_fenceable) { - struct drm_device *dev = obj->dev; + struct drm_device *dev = obj->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); int ret; - BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT); + BUG_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT); WARN_ON(i915_verify_lists(dev)); - if (obj_priv->gtt_space != NULL) { - if (alignment == 0) - alignment = i915_gem_get_gtt_alignment(obj); - if (obj_priv->gtt_offset & (alignment - 1)) { - WARN(obj_priv->pin_count, - "bo is already pinned with incorrect alignment: offset=%x, req.alignment=%x\n", - obj_priv->gtt_offset, alignment); + if (obj->gtt_space != NULL) { + if ((alignment && obj->gtt_offset & (alignment - 1)) || + (map_and_fenceable && !obj->map_and_fenceable)) { + WARN(obj->pin_count, + "bo is already pinned with incorrect alignment:" + " offset=%x, req.alignment=%x, req.map_and_fenceable=%d," + " obj->map_and_fenceable=%d\n", + obj->gtt_offset, alignment, + map_and_fenceable, + obj->map_and_fenceable); ret = i915_gem_object_unbind(obj); if (ret) return ret; } } - if (obj_priv->gtt_space == NULL) { - ret = i915_gem_object_bind_to_gtt(obj, alignment); + if (obj->gtt_space == NULL) { + ret = i915_gem_object_bind_to_gtt(obj, alignment, + map_and_fenceable); if (ret) return ret; } - 
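i915_gem_object_pin now carries a map_and_fenceable requirement alongside the alignment, and an object whose existing binding violates either is unbound and rebound rather than pinned in place. The decision, extracted as an illustrative sketch:

/* Sketch: does an existing GTT binding satisfy a new pin request?
 * If not, the object must be unbound and rebound, as above.
 */
static bool
binding_ok(u32 gtt_offset, bool bound_map_and_fenceable,
           u32 req_alignment, bool req_map_and_fenceable)
{
        if (req_alignment && (gtt_offset & (req_alignment - 1)))
                return false;
        if (req_map_and_fenceable && !bound_map_and_fenceable)
                return false;
        return true;
}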
obj_priv->pin_count++; - - /* If the object is not active and not pending a flush, - * remove it from the inactive list - */ - if (obj_priv->pin_count == 1) { - i915_gem_info_add_pin(dev_priv, obj->size); - if (!obj_priv->active) - list_move_tail(&obj_priv->mm_list, + if (obj->pin_count++ == 0) { + if (!obj->active) + list_move_tail(&obj->mm_list, &dev_priv->mm.pinned_list); } + obj->pin_mappable |= map_and_fenceable; WARN_ON(i915_verify_lists(dev)); return 0; } void -i915_gem_object_unpin(struct drm_gem_object *obj) +i915_gem_object_unpin(struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj->dev; + struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); WARN_ON(i915_verify_lists(dev)); - obj_priv->pin_count--; - BUG_ON(obj_priv->pin_count < 0); - BUG_ON(obj_priv->gtt_space == NULL); + BUG_ON(obj->pin_count == 0); + BUG_ON(obj->gtt_space == NULL); - /* If the object is no longer pinned, and is - * neither active nor being flushed, then stick it on - * the inactive list - */ - if (obj_priv->pin_count == 0) { - if (!obj_priv->active) - list_move_tail(&obj_priv->mm_list, + if (--obj->pin_count == 0) { + if (!obj->active) + list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); - i915_gem_info_remove_pin(dev_priv, obj->size); + obj->pin_mappable = false; } WARN_ON(i915_verify_lists(dev)); } int i915_gem_pin_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) + struct drm_file *file) { struct drm_i915_gem_pin *args = data; - struct drm_gem_object *obj; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; int ret; ret = i915_mutex_lock_interruptible(dev); if (ret) return ret; - obj = drm_gem_object_lookup(dev, file_priv, args->handle); + obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); if (obj == NULL) { ret = -ENOENT; goto unlock; } - obj_priv = to_intel_bo(obj); - if (obj_priv->madv != I915_MADV_WILLNEED) { + if (obj->madv != I915_MADV_WILLNEED) { DRM_ERROR("Attempting to pin a purgeable buffer\n"); ret = -EINVAL; goto out; } - if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) { + if (obj->pin_filp != NULL && obj->pin_filp != file) { DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n", args->handle); ret = -EINVAL; goto out; } - obj_priv->user_pin_count++; - obj_priv->pin_filp = file_priv; - if (obj_priv->user_pin_count == 1) { - ret = i915_gem_object_pin(obj, args->alignment); + obj->user_pin_count++; + obj->pin_filp = file; + if (obj->user_pin_count == 1) { + ret = i915_gem_object_pin(obj, args->alignment, true); if (ret) goto out; } @@ -4170,9 +3286,9 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data, * as the X server doesn't manage domains yet */ i915_gem_object_flush_cpu_write_domain(obj); - args->offset = obj_priv->gtt_offset; + args->offset = obj->gtt_offset; out: - drm_gem_object_unreference(obj); + drm_gem_object_unreference(&obj->base); unlock: mutex_unlock(&dev->struct_mutex); return ret; @@ -4180,38 +3296,36 @@ unlock: int i915_gem_unpin_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) + struct drm_file *file) { struct drm_i915_gem_pin *args = data; - struct drm_gem_object *obj; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; int ret; ret = i915_mutex_lock_interruptible(dev); if (ret) return ret; - obj = drm_gem_object_lookup(dev, file_priv, args->handle); + obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); if (obj == 
NULL) { ret = -ENOENT; goto unlock; } - obj_priv = to_intel_bo(obj); - if (obj_priv->pin_filp != file_priv) { + if (obj->pin_filp != file) { DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n", args->handle); ret = -EINVAL; goto out; } - obj_priv->user_pin_count--; - if (obj_priv->user_pin_count == 0) { - obj_priv->pin_filp = NULL; + obj->user_pin_count--; + if (obj->user_pin_count == 0) { + obj->pin_filp = NULL; i915_gem_object_unpin(obj); } out: - drm_gem_object_unreference(obj); + drm_gem_object_unreference(&obj->base); unlock: mutex_unlock(&dev->struct_mutex); return ret; @@ -4219,52 +3333,64 @@ unlock: int i915_gem_busy_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) + struct drm_file *file) { struct drm_i915_gem_busy *args = data; - struct drm_gem_object *obj; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; int ret; ret = i915_mutex_lock_interruptible(dev); if (ret) return ret; - obj = drm_gem_object_lookup(dev, file_priv, args->handle); + obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); if (obj == NULL) { ret = -ENOENT; goto unlock; } - obj_priv = to_intel_bo(obj); /* Count all active objects as busy, even if they are currently not used * by the gpu. Users of this interface expect objects to eventually * become non-busy without any further actions, therefore emit any * necessary flushes here. */ - args->busy = obj_priv->active; + args->busy = obj->active; if (args->busy) { /* Unconditionally flush objects, even when the gpu still uses this * object. Userspace calling this function indicates that it wants to * use this buffer rather sooner than later, so issuing the required * flush earlier is beneficial. */ - if (obj->write_domain & I915_GEM_GPU_DOMAINS) - i915_gem_flush_ring(dev, file_priv, - obj_priv->ring, - 0, obj->write_domain); + if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) { + i915_gem_flush_ring(dev, obj->ring, + 0, obj->base.write_domain); + } else if (obj->ring->outstanding_lazy_request == + obj->last_rendering_seqno) { + struct drm_i915_gem_request *request; + + /* This ring is not being cleared by active usage, + * so emit a request to do so. + */ + request = kzalloc(sizeof(*request), GFP_KERNEL); + if (request) + ret = i915_add_request(dev, + NULL, request, + obj->ring); + else + ret = -ENOMEM; + } /* Update the active list for the hardware's current position. * Otherwise this only updates on a delayed timer or when irqs * are actually unmasked, and our working set ends up being * larger than required. 
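The busy ioctl above promises userspace that a busy object becomes idle without further calls, so it proactively emits whatever is missing: a flush if the GPU still holds a write domain on the object, or a fresh request if the object's last rendering is covered only by the ring's outstanding lazy request. In outline (emit_flush and emit_request are stand-ins for the real helpers, not driver functions):

/* Stand-ins for the flush/request emission done inline above. */
static int emit_flush(struct intel_ring_buffer *ring, uint32_t domains);
static int emit_request(struct intel_ring_buffer *ring);

/* Sketch of the decision made in i915_gem_busy_ioctl above. */
static int
nudge_busy_object(struct drm_i915_gem_object *obj)
{
        if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
                return emit_flush(obj->ring, obj->base.write_domain);

        /* Rendering is queued but no request covers it yet: add one
         * so retirement can observe the seqno passing.
         */
        if (obj->ring->outstanding_lazy_request == obj->last_rendering_seqno)
                return emit_request(obj->ring);

        return 0;
}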
*/ - i915_gem_retire_requests_ring(dev, obj_priv->ring); + i915_gem_retire_requests_ring(dev, obj->ring); - args->busy = obj_priv->active; + args->busy = obj->active; } - drm_gem_object_unreference(obj); + drm_gem_object_unreference(&obj->base); unlock: mutex_unlock(&dev->struct_mutex); return ret; @@ -4282,8 +3408,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_i915_gem_madvise *args = data; - struct drm_gem_object *obj; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; int ret; switch (args->madv) { @@ -4298,37 +3423,36 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, if (ret) return ret; - obj = drm_gem_object_lookup(dev, file_priv, args->handle); + obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle)); if (obj == NULL) { ret = -ENOENT; goto unlock; } - obj_priv = to_intel_bo(obj); - if (obj_priv->pin_count) { + if (obj->pin_count) { ret = -EINVAL; goto out; } - if (obj_priv->madv != __I915_MADV_PURGED) - obj_priv->madv = args->madv; + if (obj->madv != __I915_MADV_PURGED) + obj->madv = args->madv; /* if the object is no longer bound, discard its backing storage */ - if (i915_gem_object_is_purgeable(obj_priv) && - obj_priv->gtt_space == NULL) + if (i915_gem_object_is_purgeable(obj) && + obj->gtt_space == NULL) i915_gem_object_truncate(obj); - args->retained = obj_priv->madv != __I915_MADV_PURGED; + args->retained = obj->madv != __I915_MADV_PURGED; out: - drm_gem_object_unreference(obj); + drm_gem_object_unreference(&obj->base); unlock: mutex_unlock(&dev->struct_mutex); return ret; } -struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev, - size_t size) +struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, + size_t size) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj; @@ -4351,11 +3475,15 @@ struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev, obj->base.driver_private = NULL; obj->fence_reg = I915_FENCE_REG_NONE; INIT_LIST_HEAD(&obj->mm_list); + INIT_LIST_HEAD(&obj->gtt_list); INIT_LIST_HEAD(&obj->ring_list); + INIT_LIST_HEAD(&obj->exec_list); INIT_LIST_HEAD(&obj->gpu_write_list); obj->madv = I915_MADV_WILLNEED; + /* Avoid an unnecessary call to unbind on the first bind. 
*/ + obj->map_and_fenceable = true; - return &obj->base; + return obj; } int i915_gem_init_object(struct drm_gem_object *obj) @@ -4365,42 +3493,41 @@ int i915_gem_init_object(struct drm_gem_object *obj) return 0; } -static void i915_gem_free_object_tail(struct drm_gem_object *obj) +static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj->dev; + struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); int ret; ret = i915_gem_object_unbind(obj); if (ret == -ERESTARTSYS) { - list_move(&obj_priv->mm_list, + list_move(&obj->mm_list, &dev_priv->mm.deferred_free_list); return; } - if (obj_priv->mmap_offset) + if (obj->base.map_list.map) i915_gem_free_mmap_offset(obj); - drm_gem_object_release(obj); - i915_gem_info_remove_obj(dev_priv, obj->size); + drm_gem_object_release(&obj->base); + i915_gem_info_remove_obj(dev_priv, obj->base.size); - kfree(obj_priv->page_cpu_valid); - kfree(obj_priv->bit_17); - kfree(obj_priv); + kfree(obj->page_cpu_valid); + kfree(obj->bit_17); + kfree(obj); } -void i915_gem_free_object(struct drm_gem_object *obj) +void i915_gem_free_object(struct drm_gem_object *gem_obj) { - struct drm_device *dev = obj->dev; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); + struct drm_device *dev = obj->base.dev; trace_i915_gem_object_destroy(obj); - while (obj_priv->pin_count > 0) + while (obj->pin_count > 0) i915_gem_object_unpin(obj); - if (obj_priv->phys_obj) + if (obj->phys_obj) i915_gem_detach_phys_object(dev, obj); i915_gem_free_object_tail(obj); @@ -4427,13 +3554,15 @@ i915_gem_idle(struct drm_device *dev) /* Under UMS, be paranoid and evict. */ if (!drm_core_check_feature(dev, DRIVER_MODESET)) { - ret = i915_gem_evict_inactive(dev); + ret = i915_gem_evict_inactive(dev, false); if (ret) { mutex_unlock(&dev->struct_mutex); return ret; } } + i915_gem_reset_fences(dev); + /* Hack! Don't let anybody do execbuf while we don't control the chip. * We need to replace this with a semaphore, or something. * And not confound mm.suspended! @@ -4452,82 +3581,15 @@ i915_gem_idle(struct drm_device *dev) return 0; } -/* - * 965+ support PIPE_CONTROL commands, which provide finer grained control - * over cache flushing. 
- */ -static int -i915_gem_init_pipe_control(struct drm_device *dev) -{ - drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_gem_object *obj; - struct drm_i915_gem_object *obj_priv; - int ret; - - obj = i915_gem_alloc_object(dev, 4096); - if (obj == NULL) { - DRM_ERROR("Failed to allocate seqno page\n"); - ret = -ENOMEM; - goto err; - } - obj_priv = to_intel_bo(obj); - obj_priv->agp_type = AGP_USER_CACHED_MEMORY; - - ret = i915_gem_object_pin(obj, 4096); - if (ret) - goto err_unref; - - dev_priv->seqno_gfx_addr = obj_priv->gtt_offset; - dev_priv->seqno_page = kmap(obj_priv->pages[0]); - if (dev_priv->seqno_page == NULL) - goto err_unpin; - - dev_priv->seqno_obj = obj; - memset(dev_priv->seqno_page, 0, PAGE_SIZE); - - return 0; - -err_unpin: - i915_gem_object_unpin(obj); -err_unref: - drm_gem_object_unreference(obj); -err: - return ret; -} - - -static void -i915_gem_cleanup_pipe_control(struct drm_device *dev) -{ - drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_gem_object *obj; - struct drm_i915_gem_object *obj_priv; - - obj = dev_priv->seqno_obj; - obj_priv = to_intel_bo(obj); - kunmap(obj_priv->pages[0]); - i915_gem_object_unpin(obj); - drm_gem_object_unreference(obj); - dev_priv->seqno_obj = NULL; - - dev_priv->seqno_page = NULL; -} - int i915_gem_init_ringbuffer(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; int ret; - if (HAS_PIPE_CONTROL(dev)) { - ret = i915_gem_init_pipe_control(dev); - if (ret) - return ret; - } - ret = intel_init_render_ring_buffer(dev); if (ret) - goto cleanup_pipe_control; + return ret; if (HAS_BSD(dev)) { ret = intel_init_bsd_ring_buffer(dev); @@ -4546,12 +3608,9 @@ i915_gem_init_ringbuffer(struct drm_device *dev) return 0; cleanup_bsd_ring: - intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring); + intel_cleanup_ring_buffer(&dev_priv->ring[VCS]); cleanup_render_ring: - intel_cleanup_ring_buffer(dev, &dev_priv->render_ring); -cleanup_pipe_control: - if (HAS_PIPE_CONTROL(dev)) - i915_gem_cleanup_pipe_control(dev); + intel_cleanup_ring_buffer(&dev_priv->ring[RCS]); return ret; } @@ -4559,12 +3618,10 @@ void i915_gem_cleanup_ringbuffer(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; + int i; - intel_cleanup_ring_buffer(dev, &dev_priv->render_ring); - intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring); - intel_cleanup_ring_buffer(dev, &dev_priv->blt_ring); - if (HAS_PIPE_CONTROL(dev)) - i915_gem_cleanup_pipe_control(dev); + for (i = 0; i < I915_NUM_RINGS; i++) + intel_cleanup_ring_buffer(&dev_priv->ring[i]); } int @@ -4572,7 +3629,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_i915_private_t *dev_priv = dev->dev_private; - int ret; + int ret, i; if (drm_core_check_feature(dev, DRIVER_MODESET)) return 0; @@ -4592,14 +3649,12 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data, } BUG_ON(!list_empty(&dev_priv->mm.active_list)); - BUG_ON(!list_empty(&dev_priv->render_ring.active_list)); - BUG_ON(!list_empty(&dev_priv->bsd_ring.active_list)); - BUG_ON(!list_empty(&dev_priv->blt_ring.active_list)); BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); BUG_ON(!list_empty(&dev_priv->mm.inactive_list)); - BUG_ON(!list_empty(&dev_priv->render_ring.request_list)); - BUG_ON(!list_empty(&dev_priv->bsd_ring.request_list)); - BUG_ON(!list_empty(&dev_priv->blt_ring.request_list)); + for (i = 0; i < I915_NUM_RINGS; i++) { + BUG_ON(!list_empty(&dev_priv->ring[i].active_list)); + BUG_ON(!list_empty(&dev_priv->ring[i].request_list)); + } 
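With the rings gathered into dev_priv->ring[], teardown becomes a loop over I915_NUM_RINGS instead of three hand-written cleanup paths. Initialisation can take the same shape; a sketch under the assumption that the HAS_BSD/HAS_BLT feature checks seen above are handled elsewhere:

/* Sketch: initialise rings in order and unwind on failure, the shape
 * the cleanup_bsd_ring/cleanup_render_ring labels above implement.
 */
static int
init_rings(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int (*init[])(struct drm_device *) = {
                intel_init_render_ring_buffer,
                intel_init_bsd_ring_buffer,
                intel_init_blt_ring_buffer,
        };
        int i, ret;

        for (i = 0; i < ARRAY_SIZE(init); i++) {
                ret = init[i](dev);
                if (ret)
                        goto unwind;
        }
        return 0;

unwind:
        while (i--)
                intel_cleanup_ring_buffer(&dev_priv->ring[i]);
        return ret;
}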
mutex_unlock(&dev->struct_mutex); ret = drm_irq_install(dev); @@ -4661,17 +3716,14 @@ i915_gem_load(struct drm_device *dev) INIT_LIST_HEAD(&dev_priv->mm.pinned_list); INIT_LIST_HEAD(&dev_priv->mm.fence_list); INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list); - init_ring_lists(&dev_priv->render_ring); - init_ring_lists(&dev_priv->bsd_ring); - init_ring_lists(&dev_priv->blt_ring); + INIT_LIST_HEAD(&dev_priv->mm.gtt_list); + for (i = 0; i < I915_NUM_RINGS; i++) + init_ring_lists(&dev_priv->ring[i]); for (i = 0; i < 16; i++) INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list); INIT_DELAYED_WORK(&dev_priv->mm.retire_work, i915_gem_retire_work_handler); init_completion(&dev_priv->error_completion); - spin_lock(&shrink_list_lock); - list_add(&dev_priv->mm.shrink_list, &shrink_list); - spin_unlock(&shrink_list_lock); /* On GEN3 we really need to make sure the ARB C3 LP bit is set */ if (IS_GEN3(dev)) { @@ -4683,6 +3735,8 @@ i915_gem_load(struct drm_device *dev) } } + dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL; + /* Old X drivers will take 0-2 for front, back, depth buffers */ if (!drm_core_check_feature(dev, DRIVER_MODESET)) dev_priv->fence_reg_start = 3; @@ -4714,6 +3768,10 @@ i915_gem_load(struct drm_device *dev) } i915_gem_detect_bit_6_swizzle(dev); init_waitqueue_head(&dev_priv->pending_flip_queue); + + dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink; + dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS; + register_shrinker(&dev_priv->mm.inactive_shrinker); } /* @@ -4783,47 +3841,47 @@ void i915_gem_free_all_phys_object(struct drm_device *dev) } void i915_gem_detach_phys_object(struct drm_device *dev, - struct drm_gem_object *obj) + struct drm_i915_gem_object *obj) { - struct drm_i915_gem_object *obj_priv; + struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; + char *vaddr; int i; - int ret; int page_count; - obj_priv = to_intel_bo(obj); - if (!obj_priv->phys_obj) + if (!obj->phys_obj) return; + vaddr = obj->phys_obj->handle->vaddr; - ret = i915_gem_object_get_pages(obj, 0); - if (ret) - goto out; - - page_count = obj->size / PAGE_SIZE; - + page_count = obj->base.size / PAGE_SIZE; for (i = 0; i < page_count; i++) { - char *dst = kmap_atomic(obj_priv->pages[i]); - char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE); - - memcpy(dst, src, PAGE_SIZE); - kunmap_atomic(dst); + struct page *page = read_cache_page_gfp(mapping, i, + GFP_HIGHUSER | __GFP_RECLAIMABLE); + if (!IS_ERR(page)) { + char *dst = kmap_atomic(page); + memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE); + kunmap_atomic(dst); + + drm_clflush_pages(&page, 1); + + set_page_dirty(page); + mark_page_accessed(page); + page_cache_release(page); + } } - drm_clflush_pages(obj_priv->pages, page_count); - drm_agp_chipset_flush(dev); + intel_gtt_chipset_flush(); - i915_gem_object_put_pages(obj); -out: - obj_priv->phys_obj->cur_obj = NULL; - obj_priv->phys_obj = NULL; + obj->phys_obj->cur_obj = NULL; + obj->phys_obj = NULL; } int i915_gem_attach_phys_object(struct drm_device *dev, - struct drm_gem_object *obj, + struct drm_i915_gem_object *obj, int id, int align) { + struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv; int ret = 0; int page_count; int i; @@ -4831,10 +3889,8 @@ i915_gem_attach_phys_object(struct drm_device *dev, if (id > I915_MAX_PHYS_OBJECT) return -EINVAL; - obj_priv = to_intel_bo(obj); - - if (obj_priv->phys_obj) { - if 
(obj_priv->phys_obj->id == id) + if (obj->phys_obj) { + if (obj->phys_obj->id == id) return 0; i915_gem_detach_phys_object(dev, obj); } @@ -4842,51 +3898,50 @@ i915_gem_attach_phys_object(struct drm_device *dev, /* create a new object */ if (!dev_priv->mm.phys_objs[id - 1]) { ret = i915_gem_init_phys_object(dev, id, - obj->size, align); + obj->base.size, align); if (ret) { - DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size); - goto out; + DRM_ERROR("failed to init phys object %d size: %zu\n", + id, obj->base.size); + return ret; } } /* bind to the object */ - obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1]; - obj_priv->phys_obj->cur_obj = obj; - - ret = i915_gem_object_get_pages(obj, 0); - if (ret) { - DRM_ERROR("failed to get page list\n"); - goto out; - } + obj->phys_obj = dev_priv->mm.phys_objs[id - 1]; + obj->phys_obj->cur_obj = obj; - page_count = obj->size / PAGE_SIZE; + page_count = obj->base.size / PAGE_SIZE; for (i = 0; i < page_count; i++) { - char *src = kmap_atomic(obj_priv->pages[i]); - char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE); + struct page *page; + char *dst, *src; + page = read_cache_page_gfp(mapping, i, + GFP_HIGHUSER | __GFP_RECLAIMABLE); + if (IS_ERR(page)) + return PTR_ERR(page); + + src = kmap_atomic(page); + dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE); memcpy(dst, src, PAGE_SIZE); kunmap_atomic(src); - } - i915_gem_object_put_pages(obj); + mark_page_accessed(page); + page_cache_release(page); + } return 0; -out: - return ret; } static int -i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, +i915_gem_phys_pwrite(struct drm_device *dev, + struct drm_i915_gem_object *obj, struct drm_i915_gem_pwrite *args, struct drm_file *file_priv) { - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - void *vaddr = obj_priv->phys_obj->handle->vaddr + args->offset; + void *vaddr = obj->phys_obj->handle->vaddr + args->offset; char __user *user_data = (char __user *) (uintptr_t) args->data_ptr; - DRM_DEBUG_DRIVER("vaddr %p, %lld\n", vaddr, args->size); - if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) { unsigned long unwritten; @@ -4901,7 +3956,7 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, return -EFAULT; } - drm_agp_chipset_flush(dev); + intel_gtt_chipset_flush(); return 0; } @@ -4939,144 +3994,68 @@ i915_gpu_is_active(struct drm_device *dev) } static int -i915_gem_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask) -{ - drm_i915_private_t *dev_priv, *next_dev; - struct drm_i915_gem_object *obj_priv, *next_obj; - int cnt = 0; - int would_deadlock = 1; +i915_gem_inactive_shrink(struct shrinker *shrinker, + int nr_to_scan, + gfp_t gfp_mask) +{ + struct drm_i915_private *dev_priv = + container_of(shrinker, + struct drm_i915_private, + mm.inactive_shrinker); + struct drm_device *dev = dev_priv->dev; + struct drm_i915_gem_object *obj, *next; + int cnt; + + if (!mutex_trylock(&dev->struct_mutex)) + return 0; /* "fast-path" to count number of available objects */ if (nr_to_scan == 0) { - spin_lock(&shrink_list_lock); - list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) { - struct drm_device *dev = dev_priv->dev; - - if (mutex_trylock(&dev->struct_mutex)) { - list_for_each_entry(obj_priv, - &dev_priv->mm.inactive_list, - mm_list) - cnt++; - mutex_unlock(&dev->struct_mutex); - } - } - spin_unlock(&shrink_list_lock); - - return (cnt / 100) * sysctl_vfs_cache_pressure; + cnt = 0; + list_for_each_entry(obj, + &dev_priv->mm.inactive_list, + 
mm_list) + cnt++; + mutex_unlock(&dev->struct_mutex); + return cnt / 100 * sysctl_vfs_cache_pressure; } - spin_lock(&shrink_list_lock); - rescan: /* first scan for clean buffers */ - list_for_each_entry_safe(dev_priv, next_dev, - &shrink_list, mm.shrink_list) { - struct drm_device *dev = dev_priv->dev; - - if (! mutex_trylock(&dev->struct_mutex)) - continue; - - spin_unlock(&shrink_list_lock); - i915_gem_retire_requests(dev); + i915_gem_retire_requests(dev); - list_for_each_entry_safe(obj_priv, next_obj, - &dev_priv->mm.inactive_list, - mm_list) { - if (i915_gem_object_is_purgeable(obj_priv)) { - i915_gem_object_unbind(&obj_priv->base); - if (--nr_to_scan <= 0) - break; - } + list_for_each_entry_safe(obj, next, + &dev_priv->mm.inactive_list, + mm_list) { + if (i915_gem_object_is_purgeable(obj)) { + if (i915_gem_object_unbind(obj) == 0 && + --nr_to_scan == 0) + break; } - - spin_lock(&shrink_list_lock); - mutex_unlock(&dev->struct_mutex); - - would_deadlock = 0; - - if (nr_to_scan <= 0) - break; } /* second pass, evict/count anything still on the inactive list */ - list_for_each_entry_safe(dev_priv, next_dev, - &shrink_list, mm.shrink_list) { - struct drm_device *dev = dev_priv->dev; - - if (! mutex_trylock(&dev->struct_mutex)) - continue; - - spin_unlock(&shrink_list_lock); - - list_for_each_entry_safe(obj_priv, next_obj, - &dev_priv->mm.inactive_list, - mm_list) { - if (nr_to_scan > 0) { - i915_gem_object_unbind(&obj_priv->base); - nr_to_scan--; - } else - cnt++; - } - - spin_lock(&shrink_list_lock); - mutex_unlock(&dev->struct_mutex); - - would_deadlock = 0; + cnt = 0; + list_for_each_entry_safe(obj, next, + &dev_priv->mm.inactive_list, + mm_list) { + if (nr_to_scan && + i915_gem_object_unbind(obj) == 0) + nr_to_scan--; + else + cnt++; } - if (nr_to_scan) { - int active = 0; - + if (nr_to_scan && i915_gpu_is_active(dev)) { /* * We are desperate for pages, so as a last resort, wait * for the GPU to finish and discard whatever we can. * This has a dramatic impact to reduce the number of * OOM-killer events whilst running the GPU aggressively. 
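The rewritten shrinker above is registered per device in i915_gem_load rather than through a global list, so the old shrink_list_lock dance disappears. Its skeleton: trylock struct_mutex (the allocator calling us may already hold it), treat nr_to_scan == 0 as a pure count, and otherwise run the two eviction passes. Reduced to the locking and counting, with the passes elided:

/* Skeleton of the shrink callback above; eviction passes elided. */
static int
shrink_skeleton(struct shrinker *shrinker, int nr_to_scan, gfp_t gfp_mask)
{
        struct drm_i915_private *dev_priv =
                container_of(shrinker, struct drm_i915_private,
                             mm.inactive_shrinker);
        struct drm_device *dev = dev_priv->dev;
        struct drm_i915_gem_object *obj;
        int cnt = 0;

        /* Never wait for the lock: the caller may already hold it. */
        if (!mutex_trylock(&dev->struct_mutex))
                return 0;

        if (nr_to_scan == 0) {
                /* Pure count: report inactive objects, scaled as above. */
                list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list)
                        cnt++;
                mutex_unlock(&dev->struct_mutex);
                return cnt / 100 * sysctl_vfs_cache_pressure;
        }

        /* ... purge purgeable objects, then evict, as in the code above ... */
        mutex_unlock(&dev->struct_mutex);
        return cnt / 100 * sysctl_vfs_cache_pressure;
}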
*/ - list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) { - struct drm_device *dev = dev_priv->dev; - - if (!mutex_trylock(&dev->struct_mutex)) - continue; - - spin_unlock(&shrink_list_lock); - - if (i915_gpu_is_active(dev)) { - i915_gpu_idle(dev); - active++; - } - - spin_lock(&shrink_list_lock); - mutex_unlock(&dev->struct_mutex); - } - - if (active) + if (i915_gpu_idle(dev) == 0) goto rescan; } - - spin_unlock(&shrink_list_lock); - - if (would_deadlock) - return -1; - else if (cnt > 0) - return (cnt / 100) * sysctl_vfs_cache_pressure; - else - return 0; -} - -static struct shrinker shrinker = { - .shrink = i915_gem_shrink, - .seeks = DEFAULT_SEEKS, -}; - -__init void -i915_gem_shrinker_init(void) -{ - register_shrinker(&shrinker); -} - -__exit void -i915_gem_shrinker_exit(void) -{ - unregister_shrinker(&shrinker); + mutex_unlock(&dev->struct_mutex); + return cnt / 100 * sysctl_vfs_cache_pressure; } diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c index 48644b840a8d..29d014c48ca2 100644 --- a/drivers/gpu/drm/i915/i915_gem_debug.c +++ b/drivers/gpu/drm/i915/i915_gem_debug.c @@ -152,13 +152,12 @@ i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end, } void -i915_gem_dump_object(struct drm_gem_object *obj, int len, +i915_gem_dump_object(struct drm_i915_gem_object *obj, int len, const char *where, uint32_t mark) { - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); int page; - DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset); + DRM_INFO("%s: object at offset %08x\n", where, obj->gtt_offset); for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) { int page_len, chunk, chunk_len; @@ -170,9 +169,9 @@ i915_gem_dump_object(struct drm_gem_object *obj, int len, chunk_len = page_len - chunk; if (chunk_len > 128) chunk_len = 128; - i915_gem_dump_page(obj_priv->pages[page], + i915_gem_dump_page(obj->pages[page], chunk, chunk + chunk_len, - obj_priv->gtt_offset + + obj->gtt_offset + page * PAGE_SIZE, mark); } @@ -182,21 +181,19 @@ i915_gem_dump_object(struct drm_gem_object *obj, int len, #if WATCH_COHERENCY void -i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle) +i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle) { - struct drm_device *dev = obj->dev; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + struct drm_device *dev = obj->base.dev; int page; uint32_t *gtt_mapping; uint32_t *backing_map = NULL; int bad_count = 0; DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %zdkb):\n", - __func__, obj, obj_priv->gtt_offset, handle, + __func__, obj, obj->gtt_offset, handle, obj->size / 1024); - gtt_mapping = ioremap(dev->agp->base + obj_priv->gtt_offset, - obj->size); + gtt_mapping = ioremap(dev->agp->base + obj->gtt_offset, obj->base.size); if (gtt_mapping == NULL) { DRM_ERROR("failed to map GTT space\n"); return; @@ -205,7 +202,7 @@ i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle) for (page = 0; page < obj->size / PAGE_SIZE; page++) { int i; - backing_map = kmap_atomic(obj_priv->pages[page], KM_USER0); + backing_map = kmap_atomic(obj->pages[page], KM_USER0); if (backing_map == NULL) { DRM_ERROR("failed to map backing page\n"); @@ -220,7 +217,7 @@ i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle) if (cpuval != gttval) { DRM_INFO("incoherent CPU vs GPU at 0x%08x: " "0x%08x vs 0x%08x\n", - (int)(obj_priv->gtt_offset + + (int)(obj->gtt_offset + page * PAGE_SIZE + i * 4), cpuval, gttval); if 
(bad_count++ >= 8) { diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index d8ae7d1d0cc6..78b8cf90c922 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -32,28 +32,36 @@ #include "i915_drm.h" static bool -mark_free(struct drm_i915_gem_object *obj_priv, - struct list_head *unwind) +mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind) { - list_add(&obj_priv->evict_list, unwind); - drm_gem_object_reference(&obj_priv->base); - return drm_mm_scan_add_block(obj_priv->gtt_space); + list_add(&obj->exec_list, unwind); + drm_gem_object_reference(&obj->base); + return drm_mm_scan_add_block(obj->gtt_space); } int -i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment) +i915_gem_evict_something(struct drm_device *dev, int min_size, + unsigned alignment, bool mappable) { drm_i915_private_t *dev_priv = dev->dev_private; struct list_head eviction_list, unwind_list; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; int ret = 0; i915_gem_retire_requests(dev); /* Re-check for free space after retiring requests */ - if (drm_mm_search_free(&dev_priv->mm.gtt_space, - min_size, alignment, 0)) - return 0; + if (mappable) { + if (drm_mm_search_free_in_range(&dev_priv->mm.gtt_space, + min_size, alignment, 0, + dev_priv->mm.gtt_mappable_end, + 0)) + return 0; + } else { + if (drm_mm_search_free(&dev_priv->mm.gtt_space, + min_size, alignment, 0)) + return 0; + } /* * The goal is to evict objects and amalgamate space in LRU order. @@ -79,45 +87,50 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen */ INIT_LIST_HEAD(&unwind_list); - drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment); + if (mappable) + drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space, min_size, + alignment, 0, + dev_priv->mm.gtt_mappable_end); + else + drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment); /* First see if there is a large enough contiguous idle region... */ - list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) { - if (mark_free(obj_priv, &unwind_list)) + list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) { + if (mark_free(obj, &unwind_list)) goto found; } /* Now merge in the soon-to-be-expired objects... */ - list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) { + list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { /* Does the object require an outstanding flush? */ - if (obj_priv->base.write_domain || obj_priv->pin_count) + if (obj->base.write_domain || obj->pin_count) continue; - if (mark_free(obj_priv, &unwind_list)) + if (mark_free(obj, &unwind_list)) goto found; } /* Finally add anything with a pending flush (in order of retirement) */ - list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) { - if (obj_priv->pin_count) + list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) { + if (obj->pin_count) continue; - if (mark_free(obj_priv, &unwind_list)) + if (mark_free(obj, &unwind_list)) goto found; } - list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) { - if (! obj_priv->base.write_domain || obj_priv->pin_count) + list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { + if (! obj->base.write_domain || obj->pin_count) continue; - if (mark_free(obj_priv, &unwind_list)) + if (mark_free(obj, &unwind_list)) goto found; } /* Nothing found, clean up and bail out! 
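Eviction above leans on the drm_mm scan API: candidates are speculatively added with drm_mm_scan_add_block() in LRU order until drm_mm reports that a large-enough contiguous hole can be assembled, and every candidate is later removed from the scan before anything is actually unbound. The add phase, sketched with the exec_list threading used above (the helper name is illustrative):

/* Sketch: feed eviction candidates to the drm_mm scanner until it
 * reports that a big-enough hole can be assembled.
 */
static bool
scan_for_hole(struct list_head *candidates, struct list_head *unwind)
{
        struct drm_i915_gem_object *obj;

        list_for_each_entry(obj, candidates, mm_list) {
                list_add(&obj->exec_list, unwind);
                drm_gem_object_reference(&obj->base); /* held across scan */
                if (drm_mm_scan_add_block(obj->gtt_space))
                        return true; /* hole found */
        }
        return false; /* caller unwinds and tries the next list */
}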
*/ - list_for_each_entry(obj_priv, &unwind_list, evict_list) { - ret = drm_mm_scan_remove_block(obj_priv->gtt_space); + list_for_each_entry(obj, &unwind_list, exec_list) { + ret = drm_mm_scan_remove_block(obj->gtt_space); BUG_ON(ret); - drm_gem_object_unreference(&obj_priv->base); + drm_gem_object_unreference(&obj->base); } /* We expect the caller to unpin, evict all and try again, or give up. @@ -131,33 +144,33 @@ found: * temporary list. */ INIT_LIST_HEAD(&eviction_list); while (!list_empty(&unwind_list)) { - obj_priv = list_first_entry(&unwind_list, - struct drm_i915_gem_object, - evict_list); - if (drm_mm_scan_remove_block(obj_priv->gtt_space)) { - list_move(&obj_priv->evict_list, &eviction_list); + obj = list_first_entry(&unwind_list, + struct drm_i915_gem_object, + exec_list); + if (drm_mm_scan_remove_block(obj->gtt_space)) { + list_move(&obj->exec_list, &eviction_list); continue; } - list_del(&obj_priv->evict_list); - drm_gem_object_unreference(&obj_priv->base); + list_del_init(&obj->exec_list); + drm_gem_object_unreference(&obj->base); } /* Unbinding will emit any required flushes */ while (!list_empty(&eviction_list)) { - obj_priv = list_first_entry(&eviction_list, - struct drm_i915_gem_object, - evict_list); + obj = list_first_entry(&eviction_list, + struct drm_i915_gem_object, + exec_list); if (ret == 0) - ret = i915_gem_object_unbind(&obj_priv->base); - list_del(&obj_priv->evict_list); - drm_gem_object_unreference(&obj_priv->base); + ret = i915_gem_object_unbind(obj); + list_del_init(&obj->exec_list); + drm_gem_object_unreference(&obj->base); } return ret; } int -i915_gem_evict_everything(struct drm_device *dev) +i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only) { drm_i915_private_t *dev_priv = dev->dev_private; int ret; @@ -176,36 +189,22 @@ i915_gem_evict_everything(struct drm_device *dev) BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); - ret = i915_gem_evict_inactive(dev); - if (ret) - return ret; - - lists_empty = (list_empty(&dev_priv->mm.inactive_list) && - list_empty(&dev_priv->mm.flushing_list) && - list_empty(&dev_priv->mm.active_list)); - BUG_ON(!lists_empty); - - return 0; + return i915_gem_evict_inactive(dev, purgeable_only); } /** Unbinds all inactive objects. 
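i915_gem_evict_inactive below gains a purgeable_only flag: set, it unbinds only buffers whose madv state says userspace can live without them; clear, it sweeps the whole inactive list. The filter on its own, as a sketch:

/* Sketch: the eviction filter used below. madv != I915_MADV_WILLNEED
 * means userspace marked the buffer purgeable (or it was purged).
 */
static bool
should_evict(const struct drm_i915_gem_object *obj, bool purgeable_only)
{
        return !purgeable_only || obj->madv != I915_MADV_WILLNEED;
}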
*/ int -i915_gem_evict_inactive(struct drm_device *dev) +i915_gem_evict_inactive(struct drm_device *dev, bool purgeable_only) { drm_i915_private_t *dev_priv = dev->dev_private; - - while (!list_empty(&dev_priv->mm.inactive_list)) { - struct drm_gem_object *obj; - int ret; - - obj = &list_first_entry(&dev_priv->mm.inactive_list, - struct drm_i915_gem_object, - mm_list)->base; - - ret = i915_gem_object_unbind(obj); - if (ret != 0) { - DRM_ERROR("Error unbinding object: %d\n", ret); - return ret; + struct drm_i915_gem_object *obj, *next; + + list_for_each_entry_safe(obj, next, + &dev_priv->mm.inactive_list, mm_list) { + if (!purgeable_only || obj->madv != I915_MADV_WILLNEED) { + int ret = i915_gem_object_unbind(obj); + if (ret) + return ret; } } diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c new file mode 100644 index 000000000000..61129e6759eb --- /dev/null +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -0,0 +1,1343 @@ +/* + * Copyright © 2008,2010 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Eric Anholt <eric@anholt.net> + * Chris Wilson <chris@chris-wilson.co.uk> + * + */ + +#include "drmP.h" +#include "drm.h" +#include "i915_drm.h" +#include "i915_drv.h" +#include "i915_trace.h" +#include "intel_drv.h" + +struct change_domains { + uint32_t invalidate_domains; + uint32_t flush_domains; + uint32_t flush_rings; +}; + +/* + * Set the next domain for the specified object. This + * may not actually perform the necessary flushing/invaliding though, + * as that may want to be batched with other set_domain operations + * + * This is (we hope) the only really tricky part of gem. The goal + * is fairly simple -- track which caches hold bits of the object + * and make sure they remain coherent. A few concrete examples may + * help to explain how it works. For shorthand, we use the notation + * (read_domains, write_domain), e.g. (CPU, CPU) to indicate the + * a pair of read and write domain masks. + * + * Case 1: the batch buffer + * + * 1. Allocated + * 2. Written by CPU + * 3. Mapped to GTT + * 4. Read by GPU + * 5. Unmapped from GTT + * 6. Freed + * + * Let's take these a step at a time + * + * 1. Allocated + * Pages allocated from the kernel may still have + * cache contents, so we set them to (CPU, CPU) always. + * 2. 
Written by CPU (using pwrite) + * The pwrite function calls set_domain (CPU, CPU) and + * this function does nothing (as nothing changes) + * 3. Mapped by GTT + * This function asserts that the object is not + * currently in any GPU-based read or write domains + * 4. Read by GPU + * i915_gem_execbuffer calls set_domain (COMMAND, 0). + * As write_domain is zero, this function adds in the + * current read domains (CPU+COMMAND, 0). + * flush_domains is set to CPU. + * invalidate_domains is set to COMMAND + * clflush is run to get data out of the CPU caches + * then i915_dev_set_domain calls i915_gem_flush to + * emit an MI_FLUSH and drm_agp_chipset_flush + * 5. Unmapped from GTT + * i915_gem_object_unbind calls set_domain (CPU, CPU) + * flush_domains and invalidate_domains end up both zero + * so no flushing/invalidating happens + * 6. Freed + * yay, done + * + * Case 2: The shared render buffer + * + * 1. Allocated + * 2. Mapped to GTT + * 3. Read/written by GPU + * 4. set_domain to (CPU,CPU) + * 5. Read/written by CPU + * 6. Read/written by GPU + * + * 1. Allocated + * Same as last example, (CPU, CPU) + * 2. Mapped to GTT + * Nothing changes (assertions find that it is not in the GPU) + * 3. Read/written by GPU + * execbuffer calls set_domain (RENDER, RENDER) + * flush_domains gets CPU + * invalidate_domains gets GPU + * clflush (obj) + * MI_FLUSH and drm_agp_chipset_flush + * 4. set_domain (CPU, CPU) + * flush_domains gets GPU + * invalidate_domains gets CPU + * wait_rendering (obj) to make sure all drawing is complete. + * This will include an MI_FLUSH to get the data from GPU + * to memory + * clflush (obj) to invalidate the CPU cache + * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?) + * 5. Read/written by CPU + * cache lines are loaded and dirtied + * 6. Read written by GPU + * Same as last GPU access + * + * Case 3: The constant buffer + * + * 1. Allocated + * 2. Written by CPU + * 3. Read by GPU + * 4. Updated (written) by CPU again + * 5. Read by GPU + * + * 1. Allocated + * (CPU, CPU) + * 2. Written by CPU + * (CPU, CPU) + * 3. Read by GPU + * (CPU+RENDER, 0) + * flush_domains = CPU + * invalidate_domains = RENDER + * clflush (obj) + * MI_FLUSH + * drm_agp_chipset_flush + * 4. Updated (written) by CPU again + * (CPU, CPU) + * flush_domains = 0 (no previous write domain) + * invalidate_domains = 0 (no new read domains) + * 5. Read by GPU + * (CPU+RENDER, 0) + * flush_domains = CPU + * invalidate_domains = RENDER + * clflush (obj) + * MI_FLUSH + * drm_agp_chipset_flush + */ +static void +i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj, + struct intel_ring_buffer *ring, + struct change_domains *cd) +{ + uint32_t invalidate_domains = 0, flush_domains = 0; + + /* + * If the object isn't moving to a new write domain, + * let the object stay in multiple read domains + */ + if (obj->base.pending_write_domain == 0) + obj->base.pending_read_domains |= obj->base.read_domains; + + /* + * Flush the current write domain if + * the new read domains don't match. Invalidate + * any read domains which differ from the old + * write domain + */ + if (obj->base.write_domain && + (((obj->base.write_domain != obj->base.pending_read_domains || + obj->ring != ring)) || + (obj->fenced_gpu_access && !obj->pending_fenced_gpu_access))) { + flush_domains |= obj->base.write_domain; + invalidate_domains |= + obj->base.pending_read_domains & ~obj->base.write_domain; + } + /* + * Invalidate any read caches which may have + * stale data. That is, any new read domains. 
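A worked instance of the rules above may help; take Case 2 step 3, a buffer cached as (CPU, CPU) being submitted for rendering, so the pending domains are (RENDER, RENDER). This is a simplified restatement of the mask arithmetic in the function body below, not extra logic:

    uint32_t read_domains         = I915_GEM_DOMAIN_CPU;     /* current */
    uint32_t write_domain         = I915_GEM_DOMAIN_CPU;
    uint32_t pending_read_domains = I915_GEM_DOMAIN_RENDER;  /* wanted  */
    uint32_t flush_domains = 0, invalidate_domains = 0;

    /* new readers differ from the old writer: flush the write domain */
    if (write_domain && write_domain != pending_read_domains)
            flush_domains |= write_domain;          /* flushes CPU    */

    /* any brand-new read domain must be invalidated */
    invalidate_domains |= pending_read_domains & ~read_domains;
                                                    /* invalidates RENDER */

    /* CPU on either side means a clflush before the GPU looks */
    if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
            ;       /* i915_gem_clflush_object(obj) in the real code */

which is exactly the "flush_domains gets CPU, invalidate_domains gets GPU, clflush" sequence narrated in Case 2.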
+ */ + invalidate_domains |= obj->base.pending_read_domains & ~obj->base.read_domains; + if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) + i915_gem_clflush_object(obj); + + /* blow away mappings if mapped through GTT */ + if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_GTT) + i915_gem_release_mmap(obj); + + /* The actual obj->write_domain will be updated with + * pending_write_domain after we emit the accumulated flush for all + * of our domain changes in execbuffers (which clears objects' + * write_domains). So if we have a current write domain that we + * aren't changing, set pending_write_domain to that. + */ + if (flush_domains == 0 && obj->base.pending_write_domain == 0) + obj->base.pending_write_domain = obj->base.write_domain; + + cd->invalidate_domains |= invalidate_domains; + cd->flush_domains |= flush_domains; + if (flush_domains & I915_GEM_GPU_DOMAINS) + cd->flush_rings |= obj->ring->id; + if (invalidate_domains & I915_GEM_GPU_DOMAINS) + cd->flush_rings |= ring->id; +} + +struct eb_objects { + int and; + struct hlist_head buckets[0]; +}; + +static struct eb_objects * +eb_create(int size) +{ + struct eb_objects *eb; + int count = PAGE_SIZE / sizeof(struct hlist_head) / 2; + while (count > size) + count >>= 1; + eb = kzalloc(count*sizeof(struct hlist_head) + + sizeof(struct eb_objects), + GFP_KERNEL); + if (eb == NULL) + return eb; + + eb->and = count - 1; + return eb; +} + +static void +eb_reset(struct eb_objects *eb) +{ + memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head)); +} + +static void +eb_add_object(struct eb_objects *eb, struct drm_i915_gem_object *obj) +{ + hlist_add_head(&obj->exec_node, + &eb->buckets[obj->exec_handle & eb->and]); +} + +static struct drm_i915_gem_object * +eb_get_object(struct eb_objects *eb, unsigned long handle) +{ + struct hlist_head *head; + struct hlist_node *node; + struct drm_i915_gem_object *obj; + + head = &eb->buckets[handle & eb->and]; + hlist_for_each(node, head) { + obj = hlist_entry(node, struct drm_i915_gem_object, exec_node); + if (obj->exec_handle == handle) + return obj; + } + + return NULL; +} + +static void +eb_destroy(struct eb_objects *eb) +{ + kfree(eb); +} + +static int +i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, + struct eb_objects *eb, + struct drm_i915_gem_exec_object2 *entry, + struct drm_i915_gem_relocation_entry *reloc) +{ + struct drm_device *dev = obj->base.dev; + struct drm_gem_object *target_obj; + uint32_t target_offset; + int ret = -EINVAL; + + /* we've already hold a reference to all valid objects */ + target_obj = &eb_get_object(eb, reloc->target_handle)->base; + if (unlikely(target_obj == NULL)) + return -ENOENT; + + target_offset = to_intel_bo(target_obj)->gtt_offset; + +#if WATCH_RELOC + DRM_INFO("%s: obj %p offset %08x target %d " + "read %08x write %08x gtt %08x " + "presumed %08x delta %08x\n", + __func__, + obj, + (int) reloc->offset, + (int) reloc->target_handle, + (int) reloc->read_domains, + (int) reloc->write_domain, + (int) target_offset, + (int) reloc->presumed_offset, + reloc->delta); +#endif + + /* The target buffer should have appeared before us in the + * exec_object list, so it should have a GTT space bound by now. 
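A note on the bucket array behind that lookup: eb_create() sizes the handle hash to the largest power of two that keeps the buckets within half a page, so that "handle & eb->and" replaces a modulo. Worked through for a 16-buffer execbuffer on a typical 64-bit build (4 KiB pages, 8-byte struct hlist_head):

    int count = PAGE_SIZE / sizeof(struct hlist_head) / 2; /* 4096/8/2 = 256 */
    while (count > size)            /* size = 16                    */
            count >>= 1;            /* 256 -> 128 -> 64 -> 32 -> 16 */
    eb->and = count - 1;            /* mask = 15                    */

    /* handle 0x2a hashes to bucket 0x2a & 15 == 10, and
     * eb_get_object() finds it again with the same mask. */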
+ */ + if (unlikely(target_offset == 0)) { + DRM_ERROR("No GTT space found for object %d\n", + reloc->target_handle); + return ret; + } + + /* Validate that the target is in a valid r/w GPU domain */ + if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) { + DRM_ERROR("reloc with multiple write domains: " + "obj %p target %d offset %d " + "read %08x write %08x", + obj, reloc->target_handle, + (int) reloc->offset, + reloc->read_domains, + reloc->write_domain); + return ret; + } + if (unlikely((reloc->write_domain | reloc->read_domains) & I915_GEM_DOMAIN_CPU)) { + DRM_ERROR("reloc with read/write CPU domains: " + "obj %p target %d offset %d " + "read %08x write %08x", + obj, reloc->target_handle, + (int) reloc->offset, + reloc->read_domains, + reloc->write_domain); + return ret; + } + if (unlikely(reloc->write_domain && target_obj->pending_write_domain && + reloc->write_domain != target_obj->pending_write_domain)) { + DRM_ERROR("Write domain conflict: " + "obj %p target %d offset %d " + "new %08x old %08x\n", + obj, reloc->target_handle, + (int) reloc->offset, + reloc->write_domain, + target_obj->pending_write_domain); + return ret; + } + + target_obj->pending_read_domains |= reloc->read_domains; + target_obj->pending_write_domain |= reloc->write_domain; + + /* If the relocation already has the right value in it, no + * more work needs to be done. + */ + if (target_offset == reloc->presumed_offset) + return 0; + + /* Check that the relocation address is valid... */ + if (unlikely(reloc->offset > obj->base.size - 4)) { + DRM_ERROR("Relocation beyond object bounds: " + "obj %p target %d offset %d size %d.\n", + obj, reloc->target_handle, + (int) reloc->offset, + (int) obj->base.size); + return ret; + } + if (unlikely(reloc->offset & 3)) { + DRM_ERROR("Relocation not 4-byte aligned: " + "obj %p target %d offset %d.\n", + obj, reloc->target_handle, + (int) reloc->offset); + return ret; + } + + /* and points to somewhere within the target object. */ + if (unlikely(reloc->delta >= target_obj->size)) { + DRM_ERROR("Relocation beyond target object bounds: " + "obj %p target %d delta %d size %d.\n", + obj, reloc->target_handle, + (int) reloc->delta, + (int) target_obj->size); + return ret; + } + + reloc->delta += target_offset; + if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) { + uint32_t page_offset = reloc->offset & ~PAGE_MASK; + char *vaddr; + + vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]); + *(uint32_t *)(vaddr + page_offset) = reloc->delta; + kunmap_atomic(vaddr); + } else { + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t __iomem *reloc_entry; + void __iomem *reloc_page; + + ret = i915_gem_object_set_to_gtt_domain(obj, 1); + if (ret) + return ret; + + /* Map the page containing the relocation we're going to perform. 
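For reference, a relocation value reaches memory by one of two routes, selected on obj->base.write_domain just below: a kmap_atomic() store when the object's pages are CPU-coherent, or the write-combined io_mapping path when the write must go through the GTT. Both split the offset the same way (a sketch, with illustrative variable names):

    unsigned long page_index  = reloc->offset >> PAGE_SHIFT;
    unsigned long page_offset = reloc->offset & ~PAGE_MASK;

    /* CPU path: store reloc->delta at
     *     kmap_atomic(obj->pages[page_index]) + page_offset
     * GTT path: iowrite32() at obj->gtt_offset + reloc->offset,
     *     mapped one page at a time with io_mapping_map_atomic_wc(). */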
*/ + reloc->offset += obj->gtt_offset; + reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, + reloc->offset & PAGE_MASK); + reloc_entry = (uint32_t __iomem *) + (reloc_page + (reloc->offset & ~PAGE_MASK)); + iowrite32(reloc->delta, reloc_entry); + io_mapping_unmap_atomic(reloc_page); + } + + /* and update the user's relocation entry */ + reloc->presumed_offset = target_offset; + + return 0; +} + +static int +i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj, + struct eb_objects *eb, + struct drm_i915_gem_exec_object2 *entry) +{ + struct drm_i915_gem_relocation_entry __user *user_relocs; + int i, ret; + + user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr; + for (i = 0; i < entry->relocation_count; i++) { + struct drm_i915_gem_relocation_entry reloc; + + if (__copy_from_user_inatomic(&reloc, + user_relocs+i, + sizeof(reloc))) + return -EFAULT; + + ret = i915_gem_execbuffer_relocate_entry(obj, eb, entry, &reloc); + if (ret) + return ret; + + if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset, + &reloc.presumed_offset, + sizeof(reloc.presumed_offset))) + return -EFAULT; + } + + return 0; +} + +static int +i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj, + struct eb_objects *eb, + struct drm_i915_gem_exec_object2 *entry, + struct drm_i915_gem_relocation_entry *relocs) +{ + int i, ret; + + for (i = 0; i < entry->relocation_count; i++) { + ret = i915_gem_execbuffer_relocate_entry(obj, eb, entry, &relocs[i]); + if (ret) + return ret; + } + + return 0; +} + +static int +i915_gem_execbuffer_relocate(struct drm_device *dev, + struct eb_objects *eb, + struct list_head *objects, + struct drm_i915_gem_exec_object2 *exec) +{ + struct drm_i915_gem_object *obj; + int ret; + + list_for_each_entry(obj, objects, exec_list) { + obj->base.pending_read_domains = 0; + obj->base.pending_write_domain = 0; + ret = i915_gem_execbuffer_relocate_object(obj, eb, exec++); + if (ret) + return ret; + } + + return 0; +} + +static int +i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, + struct drm_file *file, + struct list_head *objects, + struct drm_i915_gem_exec_object2 *exec) +{ + struct drm_i915_gem_object *obj; + struct drm_i915_gem_exec_object2 *entry; + int ret, retry; + bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4; + + /* Attempt to pin all of the buffers into the GTT. + * This is done in 3 phases: + * + * 1a. Unbind all objects that do not match the GTT constraints for + * the execbuffer (fenceable, mappable, alignment etc). + * 1b. Increment pin count for already bound objects. + * 2. Bind new objects. + * 3. Decrement pin count. + * + * This avoid unnecessary unbinding of later objects in order to makr + * room for the earlier objects *unless* we need to defragment. + */ + retry = 0; + do { + ret = 0; + + /* Unbind any ill-fitting objects or pin. */ + entry = exec; + list_for_each_entry(obj, objects, exec_list) { + bool need_fence, need_mappable; + + if (!obj->gtt_space) { + entry++; + continue; + } + + need_fence = + has_fenced_gpu_access && + entry->flags & EXEC_OBJECT_NEEDS_FENCE && + obj->tiling_mode != I915_TILING_NONE; + need_mappable = + entry->relocation_count ? 
true : need_fence; + + if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) || + (need_mappable && !obj->map_and_fenceable)) + ret = i915_gem_object_unbind(obj); + else + ret = i915_gem_object_pin(obj, + entry->alignment, + need_mappable); + if (ret) + goto err; + + entry++; + } + + /* Bind fresh objects */ + entry = exec; + list_for_each_entry(obj, objects, exec_list) { + bool need_fence; + + need_fence = + has_fenced_gpu_access && + entry->flags & EXEC_OBJECT_NEEDS_FENCE && + obj->tiling_mode != I915_TILING_NONE; + + if (!obj->gtt_space) { + bool need_mappable = + entry->relocation_count ? true : need_fence; + + ret = i915_gem_object_pin(obj, + entry->alignment, + need_mappable); + if (ret) + break; + } + + if (has_fenced_gpu_access) { + if (need_fence) { + ret = i915_gem_object_get_fence(obj, ring, 1); + if (ret) + break; + } else if (entry->flags & EXEC_OBJECT_NEEDS_FENCE && + obj->tiling_mode == I915_TILING_NONE) { + /* XXX pipelined! */ + ret = i915_gem_object_put_fence(obj); + if (ret) + break; + } + obj->pending_fenced_gpu_access = need_fence; + } + + entry->offset = obj->gtt_offset; + entry++; + } + + /* Decrement pin count for bound objects */ + list_for_each_entry(obj, objects, exec_list) { + if (obj->gtt_space) + i915_gem_object_unpin(obj); + } + + if (ret != -ENOSPC || retry > 1) + return ret; + + /* First attempt, just clear anything that is purgeable. + * Second attempt, clear the entire GTT. + */ + ret = i915_gem_evict_everything(ring->dev, retry == 0); + if (ret) + return ret; + + retry++; + } while (1); + +err: + obj = list_entry(obj->exec_list.prev, + struct drm_i915_gem_object, + exec_list); + while (objects != &obj->exec_list) { + if (obj->gtt_space) + i915_gem_object_unpin(obj); + + obj = list_entry(obj->exec_list.prev, + struct drm_i915_gem_object, + exec_list); + } + + return ret; +} + +static int +i915_gem_execbuffer_relocate_slow(struct drm_device *dev, + struct drm_file *file, + struct intel_ring_buffer *ring, + struct list_head *objects, + struct eb_objects *eb, + struct drm_i915_gem_exec_object2 *exec, + int count) +{ + struct drm_i915_gem_relocation_entry *reloc; + struct drm_i915_gem_object *obj; + int i, total, ret; + + /* We may process another execbuffer during the unlock... 
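The tail of i915_gem_execbuffer_reserve() above amounts to a three-step ENOSPC ladder. The same control flow restated, with a hypothetical try_to_pin_everything() standing in for the two pin passes:

    for (retry = 0; ; retry++) {
            ret = try_to_pin_everything();          /* hypothetical */
            if (ret != -ENOSPC || retry > 1)
                    return ret;     /* success, hard error, or give up */
            /* retry 0: evict only purgeable objects;
             * retry 1: evict the entire GTT.       */
            ret = i915_gem_evict_everything(dev, retry == 0);
            if (ret)
                    return ret;
    }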
*/ + while (!list_empty(objects)) { + obj = list_first_entry(objects, + struct drm_i915_gem_object, + exec_list); + list_del_init(&obj->exec_list); + drm_gem_object_unreference(&obj->base); + } + + mutex_unlock(&dev->struct_mutex); + + total = 0; + for (i = 0; i < count; i++) + total += exec[i].relocation_count; + + reloc = drm_malloc_ab(total, sizeof(*reloc)); + if (reloc == NULL) { + mutex_lock(&dev->struct_mutex); + return -ENOMEM; + } + + total = 0; + for (i = 0; i < count; i++) { + struct drm_i915_gem_relocation_entry __user *user_relocs; + + user_relocs = (void __user *)(uintptr_t)exec[i].relocs_ptr; + + if (copy_from_user(reloc+total, user_relocs, + exec[i].relocation_count * sizeof(*reloc))) { + ret = -EFAULT; + mutex_lock(&dev->struct_mutex); + goto err; + } + + total += exec[i].relocation_count; + } + + ret = i915_mutex_lock_interruptible(dev); + if (ret) { + mutex_lock(&dev->struct_mutex); + goto err; + } + + /* reacquire the objects */ + INIT_LIST_HEAD(objects); + eb_reset(eb); + for (i = 0; i < count; i++) { + struct drm_i915_gem_object *obj; + + obj = to_intel_bo(drm_gem_object_lookup(dev, file, + exec[i].handle)); + if (obj == NULL) { + DRM_ERROR("Invalid object handle %d at index %d\n", + exec[i].handle, i); + ret = -ENOENT; + goto err; + } + + list_add_tail(&obj->exec_list, objects); + obj->exec_handle = exec[i].handle; + eb_add_object(eb, obj); + } + + ret = i915_gem_execbuffer_reserve(ring, file, objects, exec); + if (ret) + goto err; + + total = 0; + list_for_each_entry(obj, objects, exec_list) { + obj->base.pending_read_domains = 0; + obj->base.pending_write_domain = 0; + ret = i915_gem_execbuffer_relocate_object_slow(obj, eb, + exec, + reloc + total); + if (ret) + goto err; + + total += exec->relocation_count; + exec++; + } + + /* Leave the user relocations as are, this is the painfully slow path, + * and we want to avoid the complication of dropping the lock whilst + * having buffers reserved in the aperture and so causing spurious + * ENOSPC for random operations.
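Leaving the user's relocation entries stale is safe because presumed_offset is only ever an optimisation: the next execbuffer that sees a mismatch simply rewrites the dword. The short-circuit in i915_gem_execbuffer_relocate_entry() above is the whole contract:

    if (target_offset == reloc->presumed_offset)
            return 0;       /* hit: the batch already points at the object */
    /* miss: patch the batch again; costs one more pass, never corruption */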
+ */ + +err: + drm_free_large(reloc); + return ret; +} + +static void +i915_gem_execbuffer_flush(struct drm_device *dev, + uint32_t invalidate_domains, + uint32_t flush_domains, + uint32_t flush_rings) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + int i; + + if (flush_domains & I915_GEM_DOMAIN_CPU) + intel_gtt_chipset_flush(); + + if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) { + for (i = 0; i < I915_NUM_RINGS; i++) + if (flush_rings & (1 << i)) + i915_gem_flush_ring(dev, &dev_priv->ring[i], + invalidate_domains, + flush_domains); + } +} + +static int +i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj, + struct intel_ring_buffer *to) +{ + struct intel_ring_buffer *from = obj->ring; + u32 seqno; + int ret, idx; + + if (from == NULL || to == from) + return 0; + + if (INTEL_INFO(obj->base.dev)->gen < 6) + return i915_gem_object_wait_rendering(obj, true); + + idx = intel_ring_sync_index(from, to); + + seqno = obj->last_rendering_seqno; + if (seqno <= from->sync_seqno[idx]) + return 0; + + if (seqno == from->outstanding_lazy_request) { + struct drm_i915_gem_request *request; + + request = kzalloc(sizeof(*request), GFP_KERNEL); + if (request == NULL) + return -ENOMEM; + + ret = i915_add_request(obj->base.dev, NULL, request, from); + if (ret) { + kfree(request); + return ret; + } + + seqno = request->seqno; + } + + from->sync_seqno[idx] = seqno; + return intel_ring_sync(to, from, seqno - 1); +} + +static int +i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring, + struct list_head *objects) +{ + struct drm_i915_gem_object *obj; + struct change_domains cd; + int ret; + + cd.invalidate_domains = 0; + cd.flush_domains = 0; + cd.flush_rings = 0; + list_for_each_entry(obj, objects, exec_list) + i915_gem_object_set_to_gpu_domain(obj, ring, &cd); + + if (cd.invalidate_domains | cd.flush_domains) { +#if WATCH_EXEC + DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n", + __func__, + cd.invalidate_domains, + cd.flush_domains); +#endif + i915_gem_execbuffer_flush(ring->dev, + cd.invalidate_domains, + cd.flush_domains, + cd.flush_rings); + } + + list_for_each_entry(obj, objects, exec_list) { + ret = i915_gem_execbuffer_sync_rings(obj, ring); + if (ret) + return ret; + } + + return 0; +} + +static bool +i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec) +{ + return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0; +} + +static int +validate_exec_list(struct drm_i915_gem_exec_object2 *exec, + int count) +{ + int i; + + for (i = 0; i < count; i++) { + char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr; + int length; /* limited by fault_in_pages_readable() */ + + /* First check for malicious input causing overflow */ + if (exec[i].relocation_count > + INT_MAX / sizeof(struct drm_i915_gem_relocation_entry)) + return -EINVAL; + + length = exec[i].relocation_count * + sizeof(struct drm_i915_gem_relocation_entry); + if (!access_ok(VERIFY_READ, ptr, length)) + return -EFAULT; + + /* we may also need to update the presumed offsets */ + if (!access_ok(VERIFY_WRITE, ptr, length)) + return -EFAULT; + + if (fault_in_pages_readable(ptr, length)) + return -EFAULT; + } + + return 0; +} + +static int +i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, + struct list_head *objects) +{ + struct drm_i915_gem_object *obj; + int flips; + + /* Check for any pending flips. 
As we only maintain a flip queue depth + * of 1, we can simply insert a WAIT for the next display flip prior + * to executing the batch and avoid stalling the CPU. + */ + flips = 0; + list_for_each_entry(obj, objects, exec_list) { + if (obj->base.write_domain) + flips |= atomic_read(&obj->pending_flip); + } + if (flips) { + int plane, flip_mask, ret; + + for (plane = 0; flips >> plane; plane++) { + if (((flips >> plane) & 1) == 0) + continue; + + if (plane) + flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; + else + flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; + + ret = intel_ring_begin(ring, 2); + if (ret) + return ret; + + intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); + } + } + + return 0; +} + +static void +i915_gem_execbuffer_move_to_active(struct list_head *objects, + struct intel_ring_buffer *ring, + u32 seqno) +{ + struct drm_i915_gem_object *obj; + + list_for_each_entry(obj, objects, exec_list) { + obj->base.read_domains = obj->base.pending_read_domains; + obj->base.write_domain = obj->base.pending_write_domain; + obj->fenced_gpu_access = obj->pending_fenced_gpu_access; + + i915_gem_object_move_to_active(obj, ring, seqno); + if (obj->base.write_domain) { + obj->dirty = 1; + obj->pending_gpu_write = true; + list_move_tail(&obj->gpu_write_list, + &ring->gpu_write_list); + intel_mark_busy(ring->dev, obj); + } + + trace_i915_gem_object_change_domain(obj, + obj->base.read_domains, + obj->base.write_domain); + } +} + +static void +i915_gem_execbuffer_retire_commands(struct drm_device *dev, + struct drm_file *file, + struct intel_ring_buffer *ring) +{ + struct drm_i915_gem_request *request; + u32 flush_domains; + + /* + * Ensure that the commands in the batch buffer are + * finished before the interrupt fires. + * + * The sampler always gets flushed on i965 (sigh). 
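The kzalloc-or-add_request fallback a few lines below also deserves a note: if no request ("breadcrumb") can be queued after the batch, the error is swallowed rather than propagated. A reading of that fallback (the comments are interpretation, not from the patch):

    request = kzalloc(sizeof(*request), GFP_KERNEL);
    if (request == NULL || i915_add_request(dev, file, request, ring)) {
            /* keep the lazy-request bookkeeping consistent so a later
             * request can cover this work; retirement is merely delayed */
            i915_gem_next_request_seqno(dev, ring);
            kfree(request);         /* kfree(NULL) is a no-op */
    }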
+ */ + flush_domains = 0; + if (INTEL_INFO(dev)->gen >= 4) + flush_domains |= I915_GEM_DOMAIN_SAMPLER; + + ring->flush(ring, I915_GEM_DOMAIN_COMMAND, flush_domains); + + /* Add a breadcrumb for the completion of the batch buffer */ + request = kzalloc(sizeof(*request), GFP_KERNEL); + if (request == NULL || i915_add_request(dev, file, request, ring)) { + i915_gem_next_request_seqno(dev, ring); + kfree(request); + } +} + +static int +i915_gem_do_execbuffer(struct drm_device *dev, void *data, + struct drm_file *file, + struct drm_i915_gem_execbuffer2 *args, + struct drm_i915_gem_exec_object2 *exec) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct list_head objects; + struct eb_objects *eb; + struct drm_i915_gem_object *batch_obj; + struct drm_clip_rect *cliprects = NULL; + struct intel_ring_buffer *ring; + u32 exec_start, exec_len; + u32 seqno; + int ret, mode, i; + + if (!i915_gem_check_execbuffer(args)) { + DRM_ERROR("execbuf with invalid offset/length\n"); + return -EINVAL; + } + + ret = validate_exec_list(exec, args->buffer_count); + if (ret) + return ret; + +#if WATCH_EXEC + DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", + (int) args->buffers_ptr, args->buffer_count, args->batch_len); +#endif + switch (args->flags & I915_EXEC_RING_MASK) { + case I915_EXEC_DEFAULT: + case I915_EXEC_RENDER: + ring = &dev_priv->ring[RCS]; + break; + case I915_EXEC_BSD: + if (!HAS_BSD(dev)) { + DRM_ERROR("execbuf with invalid ring (BSD)\n"); + return -EINVAL; + } + ring = &dev_priv->ring[VCS]; + break; + case I915_EXEC_BLT: + if (!HAS_BLT(dev)) { + DRM_ERROR("execbuf with invalid ring (BLT)\n"); + return -EINVAL; + } + ring = &dev_priv->ring[BCS]; + break; + default: + DRM_ERROR("execbuf with unknown ring: %d\n", + (int)(args->flags & I915_EXEC_RING_MASK)); + return -EINVAL; + } + + mode = args->flags & I915_EXEC_CONSTANTS_MASK; + switch (mode) { + case I915_EXEC_CONSTANTS_REL_GENERAL: + case I915_EXEC_CONSTANTS_ABSOLUTE: + case I915_EXEC_CONSTANTS_REL_SURFACE: + if (ring == &dev_priv->ring[RCS] && + mode != dev_priv->relative_constants_mode) { + if (INTEL_INFO(dev)->gen < 4) + return -EINVAL; + + if (INTEL_INFO(dev)->gen > 5 && + mode == I915_EXEC_CONSTANTS_REL_SURFACE) + return -EINVAL; + + ret = intel_ring_begin(ring, 4); + if (ret) + return ret; + + intel_ring_emit(ring, MI_NOOP); + intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); + intel_ring_emit(ring, INSTPM); + intel_ring_emit(ring, + I915_EXEC_CONSTANTS_MASK << 16 | mode); + intel_ring_advance(ring); + + dev_priv->relative_constants_mode = mode; + } + break; + default: + DRM_ERROR("execbuf with unknown constants: %d\n", mode); + return -EINVAL; + } + + if (args->buffer_count < 1) { + DRM_ERROR("execbuf with %d buffers\n", args->buffer_count); + return -EINVAL; + } + + if (args->num_cliprects != 0) { + if (ring != &dev_priv->ring[RCS]) { + DRM_ERROR("clip rectangles are only valid with the render ring\n"); + return -EINVAL; + } + + cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects), + GFP_KERNEL); + if (cliprects == NULL) { + ret = -ENOMEM; + goto pre_mutex_err; + } + + if (copy_from_user(cliprects, + (struct drm_clip_rect __user *)(uintptr_t) + args->cliprects_ptr, + sizeof(*cliprects)*args->num_cliprects)) { + ret = -EFAULT; + goto pre_mutex_err; + } + } + + ret = i915_mutex_lock_interruptible(dev); + if (ret) + goto pre_mutex_err; + + if (dev_priv->mm.suspended) { + mutex_unlock(&dev->struct_mutex); + ret = -EBUSY; + goto pre_mutex_err; + } + + eb = eb_create(args->buffer_count); + if (eb == NULL) { + 
mutex_unlock(&dev->struct_mutex); + ret = -ENOMEM; + goto pre_mutex_err; + } + + /* Look up object handles */ + INIT_LIST_HEAD(&objects); + for (i = 0; i < args->buffer_count; i++) { + struct drm_i915_gem_object *obj; + + obj = to_intel_bo(drm_gem_object_lookup(dev, file, + exec[i].handle)); + if (obj == NULL) { + DRM_ERROR("Invalid object handle %d at index %d\n", + exec[i].handle, i); + /* prevent error path from reading uninitialized data */ + ret = -ENOENT; + goto err; + } + + if (!list_empty(&obj->exec_list)) { + DRM_ERROR("Object %p [handle %d, index %d] appears more than once in object list\n", + obj, exec[i].handle, i); + ret = -EINVAL; + goto err; + } + + list_add_tail(&obj->exec_list, &objects); + obj->exec_handle = exec[i].handle; + eb_add_object(eb, obj); + } + + /* Move the objects en-masse into the GTT, evicting if necessary. */ + ret = i915_gem_execbuffer_reserve(ring, file, &objects, exec); + if (ret) + goto err; + + /* The objects are in their final locations, apply the relocations. */ + ret = i915_gem_execbuffer_relocate(dev, eb, &objects, exec); + if (ret) { + if (ret == -EFAULT) { + ret = i915_gem_execbuffer_relocate_slow(dev, file, ring, + &objects, eb, + exec, + args->buffer_count); + BUG_ON(!mutex_is_locked(&dev->struct_mutex)); + } + if (ret) + goto err; + } + + /* Set the pending read domains for the batch buffer to COMMAND */ + batch_obj = list_entry(objects.prev, + struct drm_i915_gem_object, + exec_list); + if (batch_obj->base.pending_write_domain) { + DRM_ERROR("Attempting to use self-modifying batch buffer\n"); + ret = -EINVAL; + goto err; + } + batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND; + + ret = i915_gem_execbuffer_move_to_gpu(ring, &objects); + if (ret) + goto err; + + ret = i915_gem_execbuffer_wait_for_flips(ring, &objects); + if (ret) + goto err; + + seqno = i915_gem_next_request_seqno(dev, ring); + for (i = 0; i < I915_NUM_RINGS-1; i++) { + if (seqno < ring->sync_seqno[i]) { + /* The GPU can not handle its semaphore value wrapping, + * so every billion or so execbuffers, we need to stall + * the GPU in order to reset the counters. + */ + ret = i915_gpu_idle(dev); + if (ret) + goto err; + + BUG_ON(ring->sync_seqno[i]); + } + } + + exec_start = batch_obj->gtt_offset + args->batch_start_offset; + exec_len = args->batch_len; + if (cliprects) { + for (i = 0; i < args->num_cliprects; i++) { + ret = i915_emit_box(dev, &cliprects[i], + args->DR1, args->DR4); + if (ret) + goto err; + + ret = ring->dispatch_execbuffer(ring, + exec_start, exec_len); + if (ret) + goto err; + } + } else { + ret = ring->dispatch_execbuffer(ring, exec_start, exec_len); + if (ret) + goto err; + } + + i915_gem_execbuffer_move_to_active(&objects, ring, seqno); + i915_gem_execbuffer_retire_commands(dev, file, ring); + +err: + eb_destroy(eb); + while (!list_empty(&objects)) { + struct drm_i915_gem_object *obj; + + obj = list_first_entry(&objects, + struct drm_i915_gem_object, + exec_list); + list_del_init(&obj->exec_list); + drm_gem_object_unreference(&obj->base); + } + + mutex_unlock(&dev->struct_mutex); + +pre_mutex_err: + kfree(cliprects); + return ret; +} + +/* + * Legacy execbuffer just creates an exec2 list from the original exec object + * list array and passes it to the real function. 
+ */ +int +i915_gem_execbuffer(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_i915_gem_execbuffer *args = data; + struct drm_i915_gem_execbuffer2 exec2; + struct drm_i915_gem_exec_object *exec_list = NULL; + struct drm_i915_gem_exec_object2 *exec2_list = NULL; + int ret, i; + +#if WATCH_EXEC + DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", + (int) args->buffers_ptr, args->buffer_count, args->batch_len); +#endif + + if (args->buffer_count < 1) { + DRM_ERROR("execbuf with %d buffers\n", args->buffer_count); + return -EINVAL; + } + + /* Copy in the exec list from userland */ + exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count); + exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count); + if (exec_list == NULL || exec2_list == NULL) { + DRM_ERROR("Failed to allocate exec list for %d buffers\n", + args->buffer_count); + drm_free_large(exec_list); + drm_free_large(exec2_list); + return -ENOMEM; + } + ret = copy_from_user(exec_list, + (struct drm_i915_relocation_entry __user *) + (uintptr_t) args->buffers_ptr, + sizeof(*exec_list) * args->buffer_count); + if (ret != 0) { + DRM_ERROR("copy %d exec entries failed %d\n", + args->buffer_count, ret); + drm_free_large(exec_list); + drm_free_large(exec2_list); + return -EFAULT; + } + + for (i = 0; i < args->buffer_count; i++) { + exec2_list[i].handle = exec_list[i].handle; + exec2_list[i].relocation_count = exec_list[i].relocation_count; + exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr; + exec2_list[i].alignment = exec_list[i].alignment; + exec2_list[i].offset = exec_list[i].offset; + if (INTEL_INFO(dev)->gen < 4) + exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE; + else + exec2_list[i].flags = 0; + } + + exec2.buffers_ptr = args->buffers_ptr; + exec2.buffer_count = args->buffer_count; + exec2.batch_start_offset = args->batch_start_offset; + exec2.batch_len = args->batch_len; + exec2.DR1 = args->DR1; + exec2.DR4 = args->DR4; + exec2.num_cliprects = args->num_cliprects; + exec2.cliprects_ptr = args->cliprects_ptr; + exec2.flags = I915_EXEC_RENDER; + + ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list); + if (!ret) { + /* Copy the new buffer offsets back to the user's exec list. */ + for (i = 0; i < args->buffer_count; i++) + exec_list[i].offset = exec2_list[i].offset; + /* ... 
and back out to userspace */ + ret = copy_to_user((struct drm_i915_relocation_entry __user *) + (uintptr_t) args->buffers_ptr, + exec_list, + sizeof(*exec_list) * args->buffer_count); + if (ret) { + ret = -EFAULT; + DRM_ERROR("failed to copy %d exec entries " + "back to user (%d)\n", + args->buffer_count, ret); + } + } + + drm_free_large(exec_list); + drm_free_large(exec2_list); + return ret; +} + +int +i915_gem_execbuffer2(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_i915_gem_execbuffer2 *args = data; + struct drm_i915_gem_exec_object2 *exec2_list = NULL; + int ret; + +#if WATCH_EXEC + DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", + (int) args->buffers_ptr, args->buffer_count, args->batch_len); +#endif + + if (args->buffer_count < 1) { + DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count); + return -EINVAL; + } + + exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count); + if (exec2_list == NULL) { + DRM_ERROR("Failed to allocate exec list for %d buffers\n", + args->buffer_count); + return -ENOMEM; + } + ret = copy_from_user(exec2_list, + (struct drm_i915_relocation_entry __user *) + (uintptr_t) args->buffers_ptr, + sizeof(*exec2_list) * args->buffer_count); + if (ret != 0) { + DRM_ERROR("copy %d exec entries failed %d\n", + args->buffer_count, ret); + drm_free_large(exec2_list); + return -EFAULT; + } + + ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list); + if (!ret) { + /* Copy the new buffer offsets back to the user's exec list. */ + ret = copy_to_user((struct drm_i915_relocation_entry __user *) + (uintptr_t) args->buffers_ptr, + exec2_list, + sizeof(*exec2_list) * args->buffer_count); + if (ret) { + ret = -EFAULT; + DRM_ERROR("failed to copy %d exec entries " + "back to user (%d)\n", + args->buffer_count, ret); + } + } + + drm_free_large(exec2_list); + return ret; +} + diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c new file mode 100644 index 000000000000..86673e77d7cb --- /dev/null +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -0,0 +1,99 @@ +/* + * Copyright © 2010 Daniel Vetter + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + */ + +#include "drmP.h" +#include "drm.h" +#include "i915_drm.h" +#include "i915_drv.h" +#include "i915_trace.h" +#include "intel_drv.h" + +void i915_gem_restore_gtt_mappings(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_gem_object *obj; + + list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { + i915_gem_clflush_object(obj); + + if (dev_priv->mm.gtt->needs_dmar) { + BUG_ON(!obj->sg_list); + + intel_gtt_insert_sg_entries(obj->sg_list, + obj->num_sg, + obj->gtt_space->start + >> PAGE_SHIFT, + obj->agp_type); + } else + intel_gtt_insert_pages(obj->gtt_space->start + >> PAGE_SHIFT, + obj->base.size >> PAGE_SHIFT, + obj->pages, + obj->agp_type); + } + + intel_gtt_chipset_flush(); +} + +int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj) +{ + struct drm_device *dev = obj->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + int ret; + + if (dev_priv->mm.gtt->needs_dmar) { + ret = intel_gtt_map_memory(obj->pages, + obj->base.size >> PAGE_SHIFT, + &obj->sg_list, + &obj->num_sg); + if (ret != 0) + return ret; + + intel_gtt_insert_sg_entries(obj->sg_list, + obj->num_sg, + obj->gtt_space->start >> PAGE_SHIFT, + obj->agp_type); + } else + intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT, + obj->base.size >> PAGE_SHIFT, + obj->pages, + obj->agp_type); + + return 0; +} + +void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj) +{ + struct drm_device *dev = obj->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + + if (dev_priv->mm.gtt->needs_dmar) { + intel_gtt_unmap_memory(obj->sg_list, obj->num_sg); + obj->sg_list = NULL; + obj->num_sg = 0; + } + + intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT, + obj->base.size >> PAGE_SHIFT); +} diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index af352de70be1..22a32b9932c5 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -181,7 +181,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) } /* Check pitch constriants for all chips & tiling formats */ -bool +static bool i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) { int tile_width; @@ -232,32 +232,44 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) return true; } -bool -i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode) +/* Is the current GTT allocation valid for the change in tiling? */ +static bool +i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode) { - struct drm_device *dev = obj->dev; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - - if (obj_priv->gtt_space == NULL) - return true; + u32 size; if (tiling_mode == I915_TILING_NONE) return true; - if (INTEL_INFO(dev)->gen >= 4) + if (INTEL_INFO(obj->base.dev)->gen >= 4) return true; - if (obj_priv->gtt_offset & (obj->size - 1)) - return false; - - if (IS_GEN3(dev)) { - if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK) + if (INTEL_INFO(obj->base.dev)->gen == 3) { + if (obj->gtt_offset & ~I915_FENCE_START_MASK) return false; } else { - if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK) + if (obj->gtt_offset & ~I830_FENCE_START_MASK) return false; } + /* + * Previous chips need to be aligned to the size of the smallest + * fence register that can contain the object. 
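A concrete instance of that sizing rule, assuming gen3 (1 MiB minimum fence; gen2 uses 512 KiB) and a 1.5 MiB tiled object; the variable names are illustrative:

    u32 size = 1024*1024;           /* gen3 minimum fence size */
    while (size < obj_size)         /* obj_size = 1.5 MiB      */
            size <<= 1;             /* -> 2 MiB                */

    /* the object stays fenceable only if its GTT node is exactly that
     * size and sits on a matching boundary: */
    fence_ok = gtt_size == size && (gtt_offset & (size - 1)) == 0;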
+ */ + if (INTEL_INFO(obj->base.dev)->gen == 3) + size = 1024*1024; + else + size = 512*1024; + + while (size < obj->base.size) + size <<= 1; + + if (obj->gtt_space->size != size) + return false; + + if (obj->gtt_offset & (size - 1)) + return false; + return true; } @@ -267,30 +279,29 @@ i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode) */ int i915_gem_set_tiling(struct drm_device *dev, void *data, - struct drm_file *file_priv) + struct drm_file *file) { struct drm_i915_gem_set_tiling *args = data; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_gem_object *obj; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; int ret; ret = i915_gem_check_is_wedged(dev); if (ret) return ret; - obj = drm_gem_object_lookup(dev, file_priv, args->handle); + obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); if (obj == NULL) return -ENOENT; - obj_priv = to_intel_bo(obj); - if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) { - drm_gem_object_unreference_unlocked(obj); + if (!i915_tiling_ok(dev, + args->stride, obj->base.size, args->tiling_mode)) { + drm_gem_object_unreference_unlocked(&obj->base); return -EINVAL; } - if (obj_priv->pin_count) { - drm_gem_object_unreference_unlocked(obj); + if (obj->pin_count) { + drm_gem_object_unreference_unlocked(&obj->base); return -EBUSY; } @@ -324,34 +335,28 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, } mutex_lock(&dev->struct_mutex); - if (args->tiling_mode != obj_priv->tiling_mode || - args->stride != obj_priv->stride) { + if (args->tiling_mode != obj->tiling_mode || + args->stride != obj->stride) { /* We need to rebind the object if its current allocation * no longer meets the alignment restrictions for its new * tiling mode. Otherwise we can just leave it alone, but * need to ensure that any fence register is cleared. 
*/ - if (!i915_gem_object_fence_offset_ok(obj, args->tiling_mode)) - ret = i915_gem_object_unbind(obj); - else if (obj_priv->fence_reg != I915_FENCE_REG_NONE) - ret = i915_gem_object_put_fence_reg(obj, true); - else - i915_gem_release_mmap(obj); + i915_gem_release_mmap(obj); - if (ret != 0) { - args->tiling_mode = obj_priv->tiling_mode; - args->stride = obj_priv->stride; - goto err; - } + obj->map_and_fenceable = + obj->gtt_space == NULL || + (obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end && + i915_gem_object_fence_ok(obj, args->tiling_mode)); - obj_priv->tiling_mode = args->tiling_mode; - obj_priv->stride = args->stride; + obj->tiling_changed = true; + obj->tiling_mode = args->tiling_mode; + obj->stride = args->stride; } -err: - drm_gem_object_unreference(obj); + drm_gem_object_unreference(&obj->base); mutex_unlock(&dev->struct_mutex); - return ret; + return 0; } /** @@ -359,22 +364,20 @@ err: */ int i915_gem_get_tiling(struct drm_device *dev, void *data, - struct drm_file *file_priv) + struct drm_file *file) { struct drm_i915_gem_get_tiling *args = data; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_gem_object *obj; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; - obj = drm_gem_object_lookup(dev, file_priv, args->handle); + obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); if (obj == NULL) return -ENOENT; - obj_priv = to_intel_bo(obj); mutex_lock(&dev->struct_mutex); - args->tiling_mode = obj_priv->tiling_mode; - switch (obj_priv->tiling_mode) { + args->tiling_mode = obj->tiling_mode; + switch (obj->tiling_mode) { case I915_TILING_X: args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x; break; @@ -394,7 +397,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data, if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17) args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10; - drm_gem_object_unreference(obj); + drm_gem_object_unreference(&obj->base); mutex_unlock(&dev->struct_mutex); return 0; @@ -424,46 +427,44 @@ i915_gem_swizzle_page(struct page *page) } void -i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj) +i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj->dev; + struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - int page_count = obj->size >> PAGE_SHIFT; + int page_count = obj->base.size >> PAGE_SHIFT; int i; if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17) return; - if (obj_priv->bit_17 == NULL) + if (obj->bit_17 == NULL) return; for (i = 0; i < page_count; i++) { - char new_bit_17 = page_to_phys(obj_priv->pages[i]) >> 17; + char new_bit_17 = page_to_phys(obj->pages[i]) >> 17; if ((new_bit_17 & 0x1) != - (test_bit(i, obj_priv->bit_17) != 0)) { - i915_gem_swizzle_page(obj_priv->pages[i]); - set_page_dirty(obj_priv->pages[i]); + (test_bit(i, obj->bit_17) != 0)) { + i915_gem_swizzle_page(obj->pages[i]); + set_page_dirty(obj->pages[i]); } } } void -i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj) +i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj->dev; + struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - int page_count = obj->size >> PAGE_SHIFT; + int page_count = obj->base.size >> PAGE_SHIFT; int i; if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17) return; - if 
(obj_priv->bit_17 == NULL) { - obj_priv->bit_17 = kmalloc(BITS_TO_LONGS(page_count) * + if (obj->bit_17 == NULL) { + obj->bit_17 = kmalloc(BITS_TO_LONGS(page_count) * sizeof(long), GFP_KERNEL); - if (obj_priv->bit_17 == NULL) { + if (obj->bit_17 == NULL) { DRM_ERROR("Failed to allocate memory for bit 17 " "record\n"); return; @@ -471,9 +472,9 @@ i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj) } for (i = 0; i < page_count; i++) { - if (page_to_phys(obj_priv->pages[i]) & (1 << 17)) - __set_bit(i, obj_priv->bit_17); + if (page_to_phys(obj->pages[i]) & (1 << 17)) + __set_bit(i, obj->bit_17); else - __clear_bit(i, obj_priv->bit_17); + __clear_bit(i, obj->bit_17); } } diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 729fd0c91d7b..0dadc025b77b 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -67,20 +67,20 @@ void ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask) { - if ((dev_priv->gt_irq_mask_reg & mask) != 0) { - dev_priv->gt_irq_mask_reg &= ~mask; - I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg); - (void) I915_READ(GTIMR); + if ((dev_priv->gt_irq_mask & mask) != 0) { + dev_priv->gt_irq_mask &= ~mask; + I915_WRITE(GTIMR, dev_priv->gt_irq_mask); + POSTING_READ(GTIMR); } } void ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask) { - if ((dev_priv->gt_irq_mask_reg & mask) != mask) { - dev_priv->gt_irq_mask_reg |= mask; - I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg); - (void) I915_READ(GTIMR); + if ((dev_priv->gt_irq_mask & mask) != mask) { + dev_priv->gt_irq_mask |= mask; + I915_WRITE(GTIMR, dev_priv->gt_irq_mask); + POSTING_READ(GTIMR); } } @@ -88,40 +88,40 @@ ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask) static void ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask) { - if ((dev_priv->irq_mask_reg & mask) != 0) { - dev_priv->irq_mask_reg &= ~mask; - I915_WRITE(DEIMR, dev_priv->irq_mask_reg); - (void) I915_READ(DEIMR); + if ((dev_priv->irq_mask & mask) != 0) { + dev_priv->irq_mask &= ~mask; + I915_WRITE(DEIMR, dev_priv->irq_mask); + POSTING_READ(DEIMR); } } static inline void ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask) { - if ((dev_priv->irq_mask_reg & mask) != mask) { - dev_priv->irq_mask_reg |= mask; - I915_WRITE(DEIMR, dev_priv->irq_mask_reg); - (void) I915_READ(DEIMR); + if ((dev_priv->irq_mask & mask) != mask) { + dev_priv->irq_mask |= mask; + I915_WRITE(DEIMR, dev_priv->irq_mask); + POSTING_READ(DEIMR); } } void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask) { - if ((dev_priv->irq_mask_reg & mask) != 0) { - dev_priv->irq_mask_reg &= ~mask; - I915_WRITE(IMR, dev_priv->irq_mask_reg); - (void) I915_READ(IMR); + if ((dev_priv->irq_mask & mask) != 0) { + dev_priv->irq_mask &= ~mask; + I915_WRITE(IMR, dev_priv->irq_mask); + POSTING_READ(IMR); } } void i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask) { - if ((dev_priv->irq_mask_reg & mask) != mask) { - dev_priv->irq_mask_reg |= mask; - I915_WRITE(IMR, dev_priv->irq_mask_reg); - (void) I915_READ(IMR); + if ((dev_priv->irq_mask & mask) != mask) { + dev_priv->irq_mask |= mask; + I915_WRITE(IMR, dev_priv->irq_mask); + POSTING_READ(IMR); } } @@ -144,7 +144,7 @@ i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) dev_priv->pipestat[pipe] |= mask; /* Enable the interrupt, clear any pending status */ I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16)); - (void) I915_READ(reg); + POSTING_READ(reg); } } @@ -156,16 +156,19 @@ 
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) dev_priv->pipestat[pipe] &= ~mask; I915_WRITE(reg, dev_priv->pipestat[pipe]); - (void) I915_READ(reg); + POSTING_READ(reg); } } /** * intel_enable_asle - enable ASLE interrupt for OpRegion */ -void intel_enable_asle (struct drm_device *dev) +void intel_enable_asle(struct drm_device *dev) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + drm_i915_private_t *dev_priv = dev->dev_private; + unsigned long irqflags; + + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); if (HAS_PCH_SPLIT(dev)) ironlake_enable_display_irq(dev_priv, DE_GSE); @@ -176,6 +179,8 @@ void intel_enable_asle (struct drm_device *dev) i915_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE); } + + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } /** @@ -243,6 +248,92 @@ u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe) return I915_READ(reg); } +int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, + int *vpos, int *hpos) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + u32 vbl = 0, position = 0; + int vbl_start, vbl_end, htotal, vtotal; + bool in_vbl = true; + int ret = 0; + + if (!i915_pipe_enabled(dev, pipe)) { + DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled " + "pipe %d\n", pipe); + return 0; + } + + /* Get vtotal. */ + vtotal = 1 + ((I915_READ(VTOTAL(pipe)) >> 16) & 0x1fff); + + if (INTEL_INFO(dev)->gen >= 4) { + /* No obvious pixelcount register. Only query vertical + * scanout position from Display scan line register. + */ + position = I915_READ(PIPEDSL(pipe)); + + /* Decode into vertical scanout position. Don't have + * horizontal scanout position. + */ + *vpos = position & 0x1fff; + *hpos = 0; + } else { + /* Have access to pixelcount since start of frame. + * We can split this into vertical and horizontal + * scanout position. + */ + position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT; + + htotal = 1 + ((I915_READ(HTOTAL(pipe)) >> 16) & 0x1fff); + *vpos = position / htotal; + *hpos = position - (*vpos * htotal); + } + + /* Query vblank area. */ + vbl = I915_READ(VBLANK(pipe)); + + /* Test position against vblank region. */ + vbl_start = vbl & 0x1fff; + vbl_end = (vbl >> 16) & 0x1fff; + + if ((*vpos < vbl_start) || (*vpos > vbl_end)) + in_vbl = false; + + /* Inside "upper part" of vblank area? Apply corrective offset: */ + if (in_vbl && (*vpos >= vbl_start)) + *vpos = *vpos - vtotal; + + /* Readouts valid? */ + if (vbl > 0) + ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE; + + /* In vblank? */ + if (in_vbl) + ret |= DRM_SCANOUTPOS_INVBL; + + return ret; +} + +int i915_get_vblank_timestamp(struct drm_device *dev, int crtc, + int *max_error, + struct timeval *vblank_time, + unsigned flags) +{ + struct drm_crtc *drmcrtc; + + if (crtc < 0 || crtc >= dev->num_crtcs) { + DRM_ERROR("Invalid crtc %d\n", crtc); + return -EINVAL; + } + + /* Get drm_crtc to timestamp: */ + drmcrtc = intel_get_crtc_for_pipe(dev, crtc); + + /* Helper routine in DRM core does all the work: */ + return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error, + vblank_time, flags, drmcrtc); +} + /* * Handle hotplug events outside the interrupt handler proper. 
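On i915_get_crtc_scanoutpos() above: pre-gen4 hardware exposes only a frame-relative pixel counter, so the vertical/horizontal split is plain division by the horizontal total. A worked example with made-up mode numbers:

    u32 position = 330123;                  /* PIPEFRAMEPIXEL counter */
    int htotal   = 2200;                    /* pixels per scanline    */

    int vpos = position / htotal;           /* 150: current scanline  */
    int hpos = position - vpos * htotal;    /* 123: pixel within it   */

    /* If the mode's vblank spans, say, lines 1080..1124 and vpos lands
     * inside it, the corrective "vpos -= vtotal" makes callers see a
     * small negative value: lines until the next frame starts. */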
*/ @@ -297,8 +388,8 @@ static void notify_ring(struct drm_device *dev, struct intel_ring_buffer *ring) { struct drm_i915_private *dev_priv = dev->dev_private; - u32 seqno = ring->get_seqno(dev, ring); - ring->irq_gem_seqno = seqno; + u32 seqno = ring->get_seqno(ring); + ring->irq_seqno = seqno; trace_i915_gem_request_complete(dev, seqno); wake_up_all(&ring->irq_queue); dev_priv->hangcheck_count = 0; @@ -306,11 +397,49 @@ static void notify_ring(struct drm_device *dev, jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); } +static void gen6_pm_irq_handler(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + u8 new_delay = dev_priv->cur_delay; + u32 pm_iir; + + pm_iir = I915_READ(GEN6_PMIIR); + if (!pm_iir) + return; + + if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { + if (dev_priv->cur_delay != dev_priv->max_delay) + new_delay = dev_priv->cur_delay + 1; + if (new_delay > dev_priv->max_delay) + new_delay = dev_priv->max_delay; + } else if (pm_iir & (GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT)) { + if (dev_priv->cur_delay != dev_priv->min_delay) + new_delay = dev_priv->cur_delay - 1; + if (new_delay < dev_priv->min_delay) { + new_delay = dev_priv->min_delay; + I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, + I915_READ(GEN6_RP_INTERRUPT_LIMITS) | + ((new_delay << 16) & 0x3f0000)); + } else { + /* Make sure we continue to get down interrupts + * until we hit the minimum frequency */ + I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, + I915_READ(GEN6_RP_INTERRUPT_LIMITS) & ~0x3f0000); + } + + } + + gen6_set_rps(dev, new_delay); + dev_priv->cur_delay = new_delay; + + I915_WRITE(GEN6_PMIIR, pm_iir); +} + static irqreturn_t ironlake_irq_handler(struct drm_device *dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; int ret = IRQ_NONE; - u32 de_iir, gt_iir, de_ier, pch_iir; + u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir; u32 hotplug_mask; struct drm_i915_master_private *master_priv; u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT; @@ -321,13 +450,15 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev) /* disable master interrupt before clearing iir */ de_ier = I915_READ(DEIER); I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); - (void)I915_READ(DEIER); + POSTING_READ(DEIER); de_iir = I915_READ(DEIIR); gt_iir = I915_READ(GTIIR); pch_iir = I915_READ(SDEIIR); + pm_iir = I915_READ(GEN6_PMIIR); - if (de_iir == 0 && gt_iir == 0 && pch_iir == 0) + if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 && + (!IS_GEN6(dev) || pm_iir == 0)) goto done; if (HAS_PCH_CPT(dev)) @@ -344,12 +475,12 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev) READ_BREADCRUMB(dev_priv); } - if (gt_iir & GT_PIPE_NOTIFY) - notify_ring(dev, &dev_priv->render_ring); + if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY)) + notify_ring(dev, &dev_priv->ring[RCS]); if (gt_iir & bsd_usr_interrupt) - notify_ring(dev, &dev_priv->bsd_ring); - if (HAS_BLT(dev) && gt_iir & GT_BLT_USER_INTERRUPT) - notify_ring(dev, &dev_priv->blt_ring); + notify_ring(dev, &dev_priv->ring[VCS]); + if (gt_iir & GT_BLT_USER_INTERRUPT) + notify_ring(dev, &dev_priv->ring[BCS]); if (de_iir & DE_GSE) intel_opregion_gse_intr(dev); @@ -379,6 +510,9 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev) i915_handle_rps_change(dev); } + if (IS_GEN6(dev)) + gen6_pm_irq_handler(dev); + /* should clear PCH hotplug event before clear CPU irq */ I915_WRITE(SDEIIR, pch_iir); I915_WRITE(GTIIR, gt_iir); @@ -386,7 +520,7 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev) 
done: I915_WRITE(DEIER, de_ier); - (void)I915_READ(DEIER); + POSTING_READ(DEIER); return ret; } @@ -423,28 +557,23 @@ static void i915_error_work_func(struct work_struct *work) #ifdef CONFIG_DEBUG_FS static struct drm_i915_error_object * i915_error_object_create(struct drm_device *dev, - struct drm_gem_object *src) + struct drm_i915_gem_object *src) { drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_error_object *dst; - struct drm_i915_gem_object *src_priv; int page, page_count; u32 reloc_offset; - if (src == NULL) - return NULL; - - src_priv = to_intel_bo(src); - if (src_priv->pages == NULL) + if (src == NULL || src->pages == NULL) return NULL; - page_count = src->size / PAGE_SIZE; + page_count = src->base.size / PAGE_SIZE; dst = kmalloc(sizeof(*dst) + page_count * sizeof (u32 *), GFP_ATOMIC); if (dst == NULL) return NULL; - reloc_offset = src_priv->gtt_offset; + reloc_offset = src->gtt_offset; for (page = 0; page < page_count; page++) { unsigned long flags; void __iomem *s; @@ -466,7 +595,7 @@ i915_error_object_create(struct drm_device *dev, reloc_offset += PAGE_SIZE; } dst->page_count = page_count; - dst->gtt_offset = src_priv->gtt_offset; + dst->gtt_offset = src->gtt_offset; return dst; @@ -520,36 +649,96 @@ i915_get_bbaddr(struct drm_device *dev, u32 *ring) } static u32 -i915_ringbuffer_last_batch(struct drm_device *dev) +i915_ringbuffer_last_batch(struct drm_device *dev, + struct intel_ring_buffer *ring) { struct drm_i915_private *dev_priv = dev->dev_private; u32 head, bbaddr; - u32 *ring; + u32 *val; /* Locate the current position in the ringbuffer and walk back * to find the most recently dispatched batch buffer. */ - bbaddr = 0; - head = I915_READ(PRB0_HEAD) & HEAD_ADDR; - ring = (u32 *)(dev_priv->render_ring.virtual_start + head); + head = I915_READ_HEAD(ring) & HEAD_ADDR; - while (--ring >= (u32 *)dev_priv->render_ring.virtual_start) { - bbaddr = i915_get_bbaddr(dev, ring); + val = (u32 *)(ring->virtual_start + head); + while (--val >= (u32 *)ring->virtual_start) { + bbaddr = i915_get_bbaddr(dev, val); if (bbaddr) - break; + return bbaddr; } - if (bbaddr == 0) { - ring = (u32 *)(dev_priv->render_ring.virtual_start - + dev_priv->render_ring.size); - while (--ring >= (u32 *)dev_priv->render_ring.virtual_start) { - bbaddr = i915_get_bbaddr(dev, ring); - if (bbaddr) - break; - } + val = (u32 *)(ring->virtual_start + ring->size); + while (--val >= (u32 *)ring->virtual_start) { + bbaddr = i915_get_bbaddr(dev, val); + if (bbaddr) + return bbaddr; } - return bbaddr; + return 0; +} + +static u32 capture_bo_list(struct drm_i915_error_buffer *err, + int count, + struct list_head *head) +{ + struct drm_i915_gem_object *obj; + int i = 0; + + list_for_each_entry(obj, head, mm_list) { + err->size = obj->base.size; + err->name = obj->base.name; + err->seqno = obj->last_rendering_seqno; + err->gtt_offset = obj->gtt_offset; + err->read_domains = obj->base.read_domains; + err->write_domain = obj->base.write_domain; + err->fence_reg = obj->fence_reg; + err->pinned = 0; + if (obj->pin_count > 0) + err->pinned = 1; + if (obj->user_pin_count > 0) + err->pinned = -1; + err->tiling = obj->tiling_mode; + err->dirty = obj->dirty; + err->purgeable = obj->madv != I915_MADV_WILLNEED; + err->ring = obj->ring ? 
obj->ring->id : 0; + + if (++i == count) + break; + + err++; + } + + return i; +} + +static void i915_gem_record_fences(struct drm_device *dev, + struct drm_i915_error_state *error) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int i; + + /* Fences */ + switch (INTEL_INFO(dev)->gen) { + case 6: + for (i = 0; i < 16; i++) + error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); + break; + case 5: + case 4: + for (i = 0; i < 16; i++) + error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); + break; + case 3: + if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) + for (i = 0; i < 8; i++) + error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); + case 2: + for (i = 0; i < 8; i++) + error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); + break; + + } } /** @@ -564,9 +753,9 @@ i915_ringbuffer_last_batch(struct drm_device *dev) static void i915_capture_error_state(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; struct drm_i915_error_state *error; - struct drm_gem_object *batchbuffer[2]; + struct drm_i915_gem_object *batchbuffer[2]; unsigned long flags; u32 bbaddr; int count; @@ -585,20 +774,33 @@ static void i915_capture_error_state(struct drm_device *dev) DRM_DEBUG_DRIVER("generating error event\n"); - error->seqno = - dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring); + error->seqno = dev_priv->ring[RCS].get_seqno(&dev_priv->ring[RCS]); error->eir = I915_READ(EIR); error->pgtbl_er = I915_READ(PGTBL_ER); error->pipeastat = I915_READ(PIPEASTAT); error->pipebstat = I915_READ(PIPEBSTAT); error->instpm = I915_READ(INSTPM); - if (INTEL_INFO(dev)->gen < 4) { - error->ipeir = I915_READ(IPEIR); - error->ipehr = I915_READ(IPEHR); - error->instdone = I915_READ(INSTDONE); - error->acthd = I915_READ(ACTHD); - error->bbaddr = 0; - } else { + error->error = 0; + if (INTEL_INFO(dev)->gen >= 6) { + error->error = I915_READ(ERROR_GEN6); + + error->bcs_acthd = I915_READ(BCS_ACTHD); + error->bcs_ipehr = I915_READ(BCS_IPEHR); + error->bcs_ipeir = I915_READ(BCS_IPEIR); + error->bcs_instdone = I915_READ(BCS_INSTDONE); + error->bcs_seqno = 0; + if (dev_priv->ring[BCS].get_seqno) + error->bcs_seqno = dev_priv->ring[BCS].get_seqno(&dev_priv->ring[BCS]); + + error->vcs_acthd = I915_READ(VCS_ACTHD); + error->vcs_ipehr = I915_READ(VCS_IPEHR); + error->vcs_ipeir = I915_READ(VCS_IPEIR); + error->vcs_instdone = I915_READ(VCS_INSTDONE); + error->vcs_seqno = 0; + if (dev_priv->ring[VCS].get_seqno) + error->vcs_seqno = dev_priv->ring[VCS].get_seqno(&dev_priv->ring[VCS]); + } + if (INTEL_INFO(dev)->gen >= 4) { error->ipeir = I915_READ(IPEIR_I965); error->ipehr = I915_READ(IPEHR_I965); error->instdone = I915_READ(INSTDONE_I965); @@ -606,42 +808,45 @@ static void i915_capture_error_state(struct drm_device *dev) error->instdone1 = I915_READ(INSTDONE1); error->acthd = I915_READ(ACTHD_I965); error->bbaddr = I915_READ64(BB_ADDR); + } else { + error->ipeir = I915_READ(IPEIR); + error->ipehr = I915_READ(IPEHR); + error->instdone = I915_READ(INSTDONE); + error->acthd = I915_READ(ACTHD); + error->bbaddr = 0; } + i915_gem_record_fences(dev, error); - bbaddr = i915_ringbuffer_last_batch(dev); + bbaddr = i915_ringbuffer_last_batch(dev, &dev_priv->ring[RCS]); /* Grab the current batchbuffer, most likely to have crashed. 
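i915_gem_record_fences above (and the matching save/restore movement later in this merge) encodes the per-generation fence register layout, including the deliberate fallthrough from case 3 into case 2. In summary:

/* gen 6:   16 x 64-bit fences at FENCE_REG_SANDYBRIDGE_0
 * gen 4/5: 16 x 64-bit fences at FENCE_REG_965_0
 * gen 3:    8 x 32-bit fences at FENCE_REG_830_0, plus 8 more at
 *           FENCE_REG_945_8 on 945G/945GM/G33
 * gen 2:    8 x 32-bit fences at FENCE_REG_830_0
 */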
*/ batchbuffer[0] = NULL; batchbuffer[1] = NULL; count = 0; - list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) { - struct drm_gem_object *obj = &obj_priv->base; - + list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { if (batchbuffer[0] == NULL && - bbaddr >= obj_priv->gtt_offset && - bbaddr < obj_priv->gtt_offset + obj->size) + bbaddr >= obj->gtt_offset && + bbaddr < obj->gtt_offset + obj->base.size) batchbuffer[0] = obj; if (batchbuffer[1] == NULL && - error->acthd >= obj_priv->gtt_offset && - error->acthd < obj_priv->gtt_offset + obj->size) + error->acthd >= obj->gtt_offset && + error->acthd < obj->gtt_offset + obj->base.size) batchbuffer[1] = obj; count++; } /* Scan the other lists for completeness for those bizarre errors. */ if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) { - list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) { - struct drm_gem_object *obj = &obj_priv->base; - + list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) { if (batchbuffer[0] == NULL && - bbaddr >= obj_priv->gtt_offset && - bbaddr < obj_priv->gtt_offset + obj->size) + bbaddr >= obj->gtt_offset && + bbaddr < obj->gtt_offset + obj->base.size) batchbuffer[0] = obj; if (batchbuffer[1] == NULL && - error->acthd >= obj_priv->gtt_offset && - error->acthd < obj_priv->gtt_offset + obj->size) + error->acthd >= obj->gtt_offset && + error->acthd < obj->gtt_offset + obj->base.size) batchbuffer[1] = obj; if (batchbuffer[0] && batchbuffer[1]) @@ -649,17 +854,15 @@ static void i915_capture_error_state(struct drm_device *dev) } } if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) { - list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) { - struct drm_gem_object *obj = &obj_priv->base; - + list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) { if (batchbuffer[0] == NULL && - bbaddr >= obj_priv->gtt_offset && - bbaddr < obj_priv->gtt_offset + obj->size) + bbaddr >= obj->gtt_offset && + bbaddr < obj->gtt_offset + obj->base.size) batchbuffer[0] = obj; if (batchbuffer[1] == NULL && - error->acthd >= obj_priv->gtt_offset && - error->acthd < obj_priv->gtt_offset + obj->size) + error->acthd >= obj->gtt_offset && + error->acthd < obj->gtt_offset + obj->base.size) batchbuffer[1] = obj; if (batchbuffer[0] && batchbuffer[1]) @@ -678,46 +881,41 @@ static void i915_capture_error_state(struct drm_device *dev) /* Record the ringbuffer */ error->ringbuffer = i915_error_object_create(dev, - dev_priv->render_ring.gem_object); + dev_priv->ring[RCS].obj); - /* Record buffers on the active list. */ + /* Record buffers on the active and pinned lists. 
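All three list scans above apply the same containment test: a buffer object is a candidate batchbuffer if the address of interest (the last dispatched batch address, or ACTHD) falls inside its GTT mapping. The test in isolation, with an invented helper name:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Does addr fall inside the object's GTT mapping [offset, offset + size)? */
static bool gtt_range_contains(uint32_t addr, uint32_t gtt_offset, size_t size)
{
	return addr >= gtt_offset && addr < gtt_offset + size;
}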
*/ error->active_bo = NULL; - error->active_bo_count = 0; + error->pinned_bo = NULL; - if (count) + error->active_bo_count = count; + list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list) + count++; + error->pinned_bo_count = count - error->active_bo_count; + + if (count) { error->active_bo = kmalloc(sizeof(*error->active_bo)*count, GFP_ATOMIC); - - if (error->active_bo) { - int i = 0; - list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) { - struct drm_gem_object *obj = &obj_priv->base; - - error->active_bo[i].size = obj->size; - error->active_bo[i].name = obj->name; - error->active_bo[i].seqno = obj_priv->last_rendering_seqno; - error->active_bo[i].gtt_offset = obj_priv->gtt_offset; - error->active_bo[i].read_domains = obj->read_domains; - error->active_bo[i].write_domain = obj->write_domain; - error->active_bo[i].fence_reg = obj_priv->fence_reg; - error->active_bo[i].pinned = 0; - if (obj_priv->pin_count > 0) - error->active_bo[i].pinned = 1; - if (obj_priv->user_pin_count > 0) - error->active_bo[i].pinned = -1; - error->active_bo[i].tiling = obj_priv->tiling_mode; - error->active_bo[i].dirty = obj_priv->dirty; - error->active_bo[i].purgeable = obj_priv->madv != I915_MADV_WILLNEED; - - if (++i == count) - break; - } - error->active_bo_count = i; + if (error->active_bo) + error->pinned_bo = + error->active_bo + error->active_bo_count; } + if (error->active_bo) + error->active_bo_count = + capture_bo_list(error->active_bo, + error->active_bo_count, + &dev_priv->mm.active_list); + + if (error->pinned_bo) + error->pinned_bo_count = + capture_bo_list(error->pinned_bo, + error->pinned_bo_count, + &dev_priv->mm.pinned_list); + do_gettimeofday(&error->time); error->overlay = intel_overlay_capture_error_state(dev); + error->display = intel_display_capture_error_state(dev); spin_lock_irqsave(&dev_priv->error_lock, flags); if (dev_priv->first_error == NULL) { @@ -775,7 +973,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev) printk(KERN_ERR " ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); I915_WRITE(IPEIR_I965, ipeir); - (void)I915_READ(IPEIR_I965); + POSTING_READ(IPEIR_I965); } if (eir & GM45_ERROR_PAGE_TABLE) { u32 pgtbl_err = I915_READ(PGTBL_ER); @@ -783,7 +981,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev) printk(KERN_ERR " PGTBL_ER: 0x%08x\n", pgtbl_err); I915_WRITE(PGTBL_ER, pgtbl_err); - (void)I915_READ(PGTBL_ER); + POSTING_READ(PGTBL_ER); } } @@ -794,7 +992,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev) printk(KERN_ERR " PGTBL_ER: 0x%08x\n", pgtbl_err); I915_WRITE(PGTBL_ER, pgtbl_err); - (void)I915_READ(PGTBL_ER); + POSTING_READ(PGTBL_ER); } } @@ -825,7 +1023,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev) printk(KERN_ERR " ACTHD: 0x%08x\n", I915_READ(ACTHD)); I915_WRITE(IPEIR, ipeir); - (void)I915_READ(IPEIR); + POSTING_READ(IPEIR); } else { u32 ipeir = I915_READ(IPEIR_I965); @@ -842,12 +1040,12 @@ static void i915_report_and_clear_eir(struct drm_device *dev) printk(KERN_ERR " ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); I915_WRITE(IPEIR_I965, ipeir); - (void)I915_READ(IPEIR_I965); + POSTING_READ(IPEIR_I965); } } I915_WRITE(EIR, eir); - (void)I915_READ(EIR); + POSTING_READ(EIR); eir = I915_READ(EIR); if (eir) { /* @@ -870,7 +1068,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev) * so userspace knows something bad happened (should trigger collection * of a ring dump etc.). 
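Note the allocation scheme above: a single GFP_ATOMIC kmalloc holds both captures, with pinned_bo aliasing the tail of the active_bo array. A sketch of the layout (the real entries are struct drm_i915_error_buffer):

/* One allocation, two logical arrays:
 *
 *   active_bo -> entries [0 .. active_bo_count - 1]     (mm.active_list)
 *   pinned_bo -> entries [active_bo_count .. count - 1] (mm.pinned_list)
 */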
*/ -static void i915_handle_error(struct drm_device *dev, bool wedged) +void i915_handle_error(struct drm_device *dev, bool wedged) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -884,11 +1082,11 @@ static void i915_handle_error(struct drm_device *dev, bool wedged) /* * Wakeup waiting processes so they don't hang */ - wake_up_all(&dev_priv->render_ring.irq_queue); + wake_up_all(&dev_priv->ring[RCS].irq_queue); if (HAS_BSD(dev)) - wake_up_all(&dev_priv->bsd_ring.irq_queue); + wake_up_all(&dev_priv->ring[VCS].irq_queue); if (HAS_BLT(dev)) - wake_up_all(&dev_priv->blt_ring.irq_queue); + wake_up_all(&dev_priv->ring[BCS].irq_queue); } queue_work(dev_priv->wq, &dev_priv->error_work); @@ -899,7 +1097,7 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe) drm_i915_private_t *dev_priv = dev->dev_private; struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; struct intel_unpin_work *work; unsigned long flags; bool stall_detected; @@ -918,13 +1116,13 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe) } /* Potential stall - if we see that the flip has happened, assume a missed interrupt */ - obj_priv = to_intel_bo(work->pending_flip_obj); + obj = work->pending_flip_obj; if (INTEL_INFO(dev)->gen >= 4) { int dspsurf = intel_crtc->plane == 0 ? DSPASURF : DSPBSURF; - stall_detected = I915_READ(dspsurf) == obj_priv->gtt_offset; + stall_detected = I915_READ(dspsurf) == obj->gtt_offset; } else { int dspaddr = intel_crtc->plane == 0 ? DSPAADDR : DSPBADDR; - stall_detected = I915_READ(dspaddr) == (obj_priv->gtt_offset + + stall_detected = I915_READ(dspaddr) == (obj->gtt_offset + crtc->y * crtc->fb->pitch + crtc->x * crtc->fb->bits_per_pixel/8); } @@ -970,7 +1168,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) * It doesn't set the bit in iir again, but it still produces * interrupts (for non-MSI). 
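The page-flip stall check above decides that a flip has silently completed when the live scanout register already points at the pending object. On gen4+ it compares DSPASURF/DSPBSURF against the object's GTT offset directly; pre-gen4 the panning offset is folded into DSPAADDR/DSPBADDR, so the expected value must include it. A sketch of the expected-address computation (pitch in bytes, bpp in bits, as in the hunk):

#include <stdint.h>

/* Expected pre-gen4 scanout address for obj panned to (x, y). */
static uint32_t expected_scanout_addr(uint32_t gtt_offset, int x, int y,
				      int pitch, int bits_per_pixel)
{
	return gtt_offset + y * pitch + x * bits_per_pixel / 8;
}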
*/ - spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); pipea_stats = I915_READ(PIPEASTAT); pipeb_stats = I915_READ(PIPEBSTAT); @@ -993,7 +1191,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) I915_WRITE(PIPEBSTAT, pipeb_stats); irq_received = 1; } - spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); if (!irq_received) break; @@ -1026,9 +1224,9 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) } if (iir & I915_USER_INTERRUPT) - notify_ring(dev, &dev_priv->render_ring); - if (HAS_BSD(dev) && (iir & I915_BSD_USER_INTERRUPT)) - notify_ring(dev, &dev_priv->bsd_ring); + notify_ring(dev, &dev_priv->ring[RCS]); + if (iir & I915_BSD_USER_INTERRUPT) + notify_ring(dev, &dev_priv->ring[VCS]); if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) { intel_prepare_page_flip(dev, 0); @@ -1101,12 +1299,13 @@ static int i915_emit_irq(struct drm_device * dev) if (master_priv->sarea_priv) master_priv->sarea_priv->last_enqueue = dev_priv->counter; - BEGIN_LP_RING(4); - OUT_RING(MI_STORE_DWORD_INDEX); - OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); - OUT_RING(dev_priv->counter); - OUT_RING(MI_USER_INTERRUPT); - ADVANCE_LP_RING(); + if (BEGIN_LP_RING(4) == 0) { + OUT_RING(MI_STORE_DWORD_INDEX); + OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); + OUT_RING(dev_priv->counter); + OUT_RING(MI_USER_INTERRUPT); + ADVANCE_LP_RING(); + } return dev_priv->counter; } @@ -1114,12 +1313,11 @@ static int i915_emit_irq(struct drm_device * dev) void i915_trace_irq_get(struct drm_device *dev, u32 seqno) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - struct intel_ring_buffer *render_ring = &dev_priv->render_ring; + struct intel_ring_buffer *ring = LP_RING(dev_priv); - if (dev_priv->trace_irq_seqno == 0) - render_ring->user_irq_get(dev, render_ring); - - dev_priv->trace_irq_seqno = seqno; + if (dev_priv->trace_irq_seqno == 0 && + ring->irq_get(ring)) + dev_priv->trace_irq_seqno = seqno; } static int i915_wait_irq(struct drm_device * dev, int irq_nr) @@ -1127,7 +1325,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr) drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; int ret = 0; - struct intel_ring_buffer *render_ring = &dev_priv->render_ring; + struct intel_ring_buffer *ring = LP_RING(dev_priv); DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr, READ_BREADCRUMB(dev_priv)); @@ -1141,10 +1339,12 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr) if (master_priv->sarea_priv) master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; - render_ring->user_irq_get(dev, render_ring); - DRM_WAIT_ON(ret, dev_priv->render_ring.irq_queue, 3 * DRM_HZ, - READ_BREADCRUMB(dev_priv) >= irq_nr); - render_ring->user_irq_put(dev, render_ring); + ret = -ENODEV; + if (ring->irq_get(ring)) { + DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ, + READ_BREADCRUMB(dev_priv) >= irq_nr); + ring->irq_put(ring); + } if (ret == -EBUSY) { DRM_ERROR("EBUSY -- rec: %d emitted: %d\n", @@ -1163,7 +1363,7 @@ int i915_irq_emit(struct drm_device *dev, void *data, drm_i915_irq_emit_t *emit = data; int result; - if (!dev_priv || !dev_priv->render_ring.virtual_start) { + if (!dev_priv || !LP_RING(dev_priv)->virtual_start) { DRM_ERROR("called with no initialization\n"); return -EINVAL; } @@ -1209,9 +1409,9 @@ int i915_enable_vblank(struct drm_device *dev, int 
pipe) if (!i915_pipe_enabled(dev, pipe)) return -EINVAL; - spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); if (HAS_PCH_SPLIT(dev)) - ironlake_enable_display_irq(dev_priv, (pipe == 0) ? + ironlake_enable_display_irq(dev_priv, (pipe == 0) ? DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); else if (INTEL_INFO(dev)->gen >= 4) i915_enable_pipestat(dev_priv, pipe, @@ -1219,7 +1419,7 @@ int i915_enable_vblank(struct drm_device *dev, int pipe) else i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_ENABLE); - spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); return 0; } @@ -1231,15 +1431,15 @@ void i915_disable_vblank(struct drm_device *dev, int pipe) drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; unsigned long irqflags; - spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); if (HAS_PCH_SPLIT(dev)) - ironlake_disable_display_irq(dev_priv, (pipe == 0) ? + ironlake_disable_display_irq(dev_priv, (pipe == 0) ? DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); else i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_ENABLE | PIPE_START_VBLANK_INTERRUPT_ENABLE); - spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } void i915_enable_interrupt (struct drm_device *dev) @@ -1306,12 +1506,50 @@ int i915_vblank_swap(struct drm_device *dev, void *data, return -EINVAL; } -static struct drm_i915_gem_request * -i915_get_tail_request(struct drm_device *dev) +static u32 +ring_last_seqno(struct intel_ring_buffer *ring) { - drm_i915_private_t *dev_priv = dev->dev_private; - return list_entry(dev_priv->render_ring.request_list.prev, - struct drm_i915_gem_request, list); + return list_entry(ring->request_list.prev, + struct drm_i915_gem_request, list)->seqno; +} + +static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err) +{ + if (list_empty(&ring->request_list) || + i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) { + /* Issue a wake-up to catch stuck h/w. */ + if (ring->waiting_seqno && waitqueue_active(&ring->irq_queue)) { + DRM_ERROR("Hangcheck timer elapsed... %s idle [waiting on %d, at %d], missed IRQ?\n", + ring->name, + ring->waiting_seqno, + ring->get_seqno(ring)); + wake_up_all(&ring->irq_queue); + *err = true; + } + return true; + } + return false; +} + +static bool kick_ring(struct intel_ring_buffer *ring) +{ + struct drm_device *dev = ring->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + u32 tmp = I915_READ_CTL(ring); + if (tmp & RING_WAIT) { + DRM_ERROR("Kicking stuck wait on %s\n", + ring->name); + I915_WRITE_CTL(ring, tmp); + return true; + } + if (IS_GEN6(dev) && + (tmp & RING_WAIT_SEMAPHORE)) { + DRM_ERROR("Kicking stuck semaphore on %s\n", + ring->name); + I915_WRITE_CTL(ring, tmp); + return true; + } + return false; } /** @@ -1325,6 +1563,17 @@ void i915_hangcheck_elapsed(unsigned long data) struct drm_device *dev = (struct drm_device *)data; drm_i915_private_t *dev_priv = dev->dev_private; uint32_t acthd, instdone, instdone1; + bool err = false; + + /* If all work is done then ACTHD clearly hasn't advanced. 
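i915_hangcheck_ring_idle above treats a ring as idle when its request list is empty or when the newest queued seqno has already been reported complete; if a waiter is still parked on the queue in that state, an interrupt was probably lost, so it wakes the queue and flags *err for the caller. A condensed sketch of the decision, reduced to the inputs that matter:

#include <stdbool.h>
#include <stdint.h>

/* Seqno comparison that survives wrap-around (same trick as the
 * driver's i915_seqno_passed): true if a is at or beyond b. */
static bool seqno_passed(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) >= 0;
}

/* A ring is idle if nothing is queued, or the newest queued seqno has
 * already been reported back by the hardware. */
static bool ring_is_idle(bool request_list_empty,
			 uint32_t hw_seqno, uint32_t last_queued_seqno)
{
	return request_list_empty || seqno_passed(hw_seqno, last_queued_seqno);
}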
*/ + if (i915_hangcheck_ring_idle(&dev_priv->ring[RCS], &err) && + i915_hangcheck_ring_idle(&dev_priv->ring[VCS], &err) && + i915_hangcheck_ring_idle(&dev_priv->ring[BCS], &err)) { + dev_priv->hangcheck_count = 0; + if (err) + goto repeat; + return; + } if (INTEL_INFO(dev)->gen < 4) { acthd = I915_READ(ACTHD); @@ -1336,38 +1585,6 @@ void i915_hangcheck_elapsed(unsigned long data) instdone1 = I915_READ(INSTDONE1); } - /* If all work is done then ACTHD clearly hasn't advanced. */ - if (list_empty(&dev_priv->render_ring.request_list) || - i915_seqno_passed(dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring), - i915_get_tail_request(dev)->seqno)) { - bool missed_wakeup = false; - - dev_priv->hangcheck_count = 0; - - /* Issue a wake-up to catch stuck h/w. */ - if (dev_priv->render_ring.waiting_gem_seqno && - waitqueue_active(&dev_priv->render_ring.irq_queue)) { - wake_up_all(&dev_priv->render_ring.irq_queue); - missed_wakeup = true; - } - - if (dev_priv->bsd_ring.waiting_gem_seqno && - waitqueue_active(&dev_priv->bsd_ring.irq_queue)) { - wake_up_all(&dev_priv->bsd_ring.irq_queue); - missed_wakeup = true; - } - - if (dev_priv->blt_ring.waiting_gem_seqno && - waitqueue_active(&dev_priv->blt_ring.irq_queue)) { - wake_up_all(&dev_priv->blt_ring.irq_queue); - missed_wakeup = true; - } - - if (missed_wakeup) - DRM_ERROR("Hangcheck timer elapsed... GPU idle, missed IRQ.\n"); - return; - } - if (dev_priv->last_acthd == acthd && dev_priv->last_instdone == instdone && dev_priv->last_instdone1 == instdone1) { @@ -1380,12 +1597,17 @@ void i915_hangcheck_elapsed(unsigned long data) * and break the hang. This should work on * all but the second generation chipsets. */ - u32 tmp = I915_READ(PRB0_CTL); - if (tmp & RING_WAIT) { - I915_WRITE(PRB0_CTL, tmp); - POSTING_READ(PRB0_CTL); - goto out; - } + + if (kick_ring(&dev_priv->ring[RCS])) + goto repeat; + + if (HAS_BSD(dev) && + kick_ring(&dev_priv->ring[VCS])) + goto repeat; + + if (HAS_BLT(dev) && + kick_ring(&dev_priv->ring[BCS])) + goto repeat; } i915_handle_error(dev, true); @@ -1399,7 +1621,7 @@ void i915_hangcheck_elapsed(unsigned long data) dev_priv->last_instdone1 = instdone1; } -out: +repeat: /* Reset timer in case chip hangs without another request being added */ mod_timer(&dev_priv->hangcheck_timer, jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); @@ -1417,17 +1639,17 @@ static void ironlake_irq_preinstall(struct drm_device *dev) I915_WRITE(DEIMR, 0xffffffff); I915_WRITE(DEIER, 0x0); - (void) I915_READ(DEIER); + POSTING_READ(DEIER); /* and GT */ I915_WRITE(GTIMR, 0xffffffff); I915_WRITE(GTIER, 0x0); - (void) I915_READ(GTIER); + POSTING_READ(GTIER); /* south display irq */ I915_WRITE(SDEIMR, 0xffffffff); I915_WRITE(SDEIER, 0x0); - (void) I915_READ(SDEIER); + POSTING_READ(SDEIER); } static int ironlake_irq_postinstall(struct drm_device *dev) @@ -1436,38 +1658,39 @@ static int ironlake_irq_postinstall(struct drm_device *dev) /* enable the kinds of interrupts that are always enabled */ u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE; - u32 render_mask = GT_PIPE_NOTIFY | GT_BSD_USER_INTERRUPT; + u32 render_irqs; u32 hotplug_mask; - dev_priv->irq_mask_reg = ~display_mask; - dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK; + dev_priv->irq_mask = ~display_mask; /* should always be able to generate an irq */ I915_WRITE(DEIIR, I915_READ(DEIIR)); - I915_WRITE(DEIMR, dev_priv->irq_mask_reg); - I915_WRITE(DEIER, dev_priv->de_irq_enable_reg); - (void) I915_READ(DEIER); +
I915_WRITE(DEIMR, dev_priv->irq_mask); + I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK); + POSTING_READ(DEIER); - if (IS_GEN6(dev)) { - render_mask = - GT_PIPE_NOTIFY | - GT_GEN6_BSD_USER_INTERRUPT | - GT_BLT_USER_INTERRUPT; - } - - dev_priv->gt_irq_mask_reg = ~render_mask; - dev_priv->gt_irq_enable_reg = render_mask; + dev_priv->gt_irq_mask = ~0; I915_WRITE(GTIIR, I915_READ(GTIIR)); - I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg); + I915_WRITE(GTIMR, dev_priv->gt_irq_mask); if (IS_GEN6(dev)) { - I915_WRITE(GEN6_RENDER_IMR, ~GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT); - I915_WRITE(GEN6_BSD_IMR, ~GEN6_BSD_IMR_USER_INTERRUPT); + I915_WRITE(GEN6_RENDER_IMR, ~GEN6_RENDER_USER_INTERRUPT); + I915_WRITE(GEN6_BSD_IMR, ~GEN6_BSD_USER_INTERRUPT); I915_WRITE(GEN6_BLITTER_IMR, ~GEN6_BLITTER_USER_INTERRUPT); } - I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg); - (void) I915_READ(GTIER); + if (IS_GEN6(dev)) + render_irqs = + GT_USER_INTERRUPT | + GT_GEN6_BSD_USER_INTERRUPT | + GT_BLT_USER_INTERRUPT; + else + render_irqs = + GT_USER_INTERRUPT | + GT_PIPE_NOTIFY | + GT_BSD_USER_INTERRUPT; + I915_WRITE(GTIER, render_irqs); + POSTING_READ(GTIER); if (HAS_PCH_CPT(dev)) { hotplug_mask = SDE_CRT_HOTPLUG_CPT | SDE_PORTB_HOTPLUG_CPT | @@ -1477,13 +1700,12 @@ static int ironlake_irq_postinstall(struct drm_device *dev) SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; } - dev_priv->pch_irq_mask_reg = ~hotplug_mask; - dev_priv->pch_irq_enable_reg = hotplug_mask; + dev_priv->pch_irq_mask = ~hotplug_mask; I915_WRITE(SDEIIR, I915_READ(SDEIIR)); - I915_WRITE(SDEIMR, dev_priv->pch_irq_mask_reg); - I915_WRITE(SDEIER, dev_priv->pch_irq_enable_reg); - (void) I915_READ(SDEIER); + I915_WRITE(SDEIMR, dev_priv->pch_irq_mask); + I915_WRITE(SDEIER, hotplug_mask); + POSTING_READ(SDEIER); if (IS_IRONLAKE_M(dev)) { /* Clear & enable PCU event interrupts */ @@ -1519,7 +1741,7 @@ void i915_driver_irq_preinstall(struct drm_device * dev) I915_WRITE(PIPEBSTAT, 0); I915_WRITE(IMR, 0xffffffff); I915_WRITE(IER, 0x0); - (void) I915_READ(IER); + POSTING_READ(IER); } /* @@ -1532,11 +1754,11 @@ int i915_driver_irq_postinstall(struct drm_device *dev) u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR; u32 error_mask; - DRM_INIT_WAITQUEUE(&dev_priv->render_ring.irq_queue); + DRM_INIT_WAITQUEUE(&dev_priv->ring[RCS].irq_queue); if (HAS_BSD(dev)) - DRM_INIT_WAITQUEUE(&dev_priv->bsd_ring.irq_queue); + DRM_INIT_WAITQUEUE(&dev_priv->ring[VCS].irq_queue); if (HAS_BLT(dev)) - DRM_INIT_WAITQUEUE(&dev_priv->blt_ring.irq_queue); + DRM_INIT_WAITQUEUE(&dev_priv->ring[BCS].irq_queue); dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; @@ -1544,7 +1766,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev) return ironlake_irq_postinstall(dev); /* Unmask the interrupts that we always want on. */ - dev_priv->irq_mask_reg = ~I915_INTERRUPT_ENABLE_FIX; + dev_priv->irq_mask = ~I915_INTERRUPT_ENABLE_FIX; dev_priv->pipestat[0] = 0; dev_priv->pipestat[1] = 0; @@ -1553,7 +1775,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev) /* Enable in IER... 
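Each interrupt block in the postinstall paths above is brought up with the same four-step sequence: ack stale bits by writing IIR back to itself, program the mask (IMR), program the enables (IER), then flush with a posting read. As a pattern, in kernel context (I915_READ/I915_WRITE/POSTING_READ are the driver's MMIO accessors; the register-triplet parameters are illustrative):

static void irq_block_postinstall(u32 iir, u32 imr, u32 ier,
				  u32 handled, u32 enable)
{
	I915_WRITE(iir, I915_READ(iir));	/* ack anything stale */
	I915_WRITE(imr, ~handled);		/* unmask only handled bits */
	I915_WRITE(ier, enable);		/* allow generation */
	POSTING_READ(ier);			/* flush before irqs can fire */
}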
*/ enable_mask |= I915_DISPLAY_PORT_INTERRUPT; /* and unmask in IMR */ - dev_priv->irq_mask_reg &= ~I915_DISPLAY_PORT_INTERRUPT; + dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; } /* @@ -1571,9 +1793,9 @@ int i915_driver_irq_postinstall(struct drm_device *dev) } I915_WRITE(EMR, error_mask); - I915_WRITE(IMR, dev_priv->irq_mask_reg); + I915_WRITE(IMR, dev_priv->irq_mask); I915_WRITE(IER, enable_mask); - (void) I915_READ(IER); + POSTING_READ(IER); if (I915_HAS_HOTPLUG(dev)) { u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 25ed911a3112..d60860ec8cf4 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -78,6 +78,12 @@ #define GRDOM_RENDER (1<<2) #define GRDOM_MEDIA (3<<2) +#define GEN6_GDRST 0x941c +#define GEN6_GRDOM_FULL (1 << 0) +#define GEN6_GRDOM_RENDER (1 << 1) +#define GEN6_GRDOM_MEDIA (1 << 2) +#define GEN6_GRDOM_BLT (1 << 3) + /* VGA stuff */ #define VGA_ST01_MDA 0x3ba @@ -158,12 +164,23 @@ #define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */ #define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1) #define MI_STORE_DWORD_INDEX_SHIFT 2 -#define MI_LOAD_REGISTER_IMM MI_INSTR(0x22, 1) +/* Official Intel docs are somewhat sloppy concerning MI_LOAD_REGISTER_IMM: + * - Always issue an MI_NOOP _before_ the MI_LOAD_REGISTER_IMM - otherwise hw + * simply ignores the register load under certain conditions. + * - One can actually load arbitrarily many arbitrary registers: simply issue x + * address/value pairs. Don't overdo it, though: x <= 2^4 must hold! + */ +#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1) #define MI_FLUSH_DW MI_INSTR(0x26, 2) /* for GEN6 */ #define MI_BATCH_BUFFER MI_INSTR(0x30, 1) #define MI_BATCH_NON_SECURE (1) #define MI_BATCH_NON_SECURE_I965 (1<<8) #define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0) +#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */ +#define MI_SEMAPHORE_GLOBAL_GTT (1<<22) +#define MI_SEMAPHORE_UPDATE (1<<21) +#define MI_SEMAPHORE_COMPARE (1<<20) +#define MI_SEMAPHORE_REGISTER (1<<18) /* * 3D instructions used by the kernel */ @@ -256,10 +273,6 @@ * Instruction and interrupt control regs */ #define PGTBL_ER 0x02024 -#define PRB0_TAIL 0x02030 -#define PRB0_HEAD 0x02034 -#define PRB0_START 0x02038 -#define PRB0_CTL 0x0203c #define RENDER_RING_BASE 0x02000 #define BSD_RING_BASE 0x04000 #define GEN6_BSD_RING_BASE 0x12000 @@ -268,9 +281,13 @@ #define RING_HEAD(base) ((base)+0x34) #define RING_START(base) ((base)+0x38) #define RING_CTL(base) ((base)+0x3c) +#define RING_SYNC_0(base) ((base)+0x40) +#define RING_SYNC_1(base) ((base)+0x44) +#define RING_MAX_IDLE(base) ((base)+0x54) #define RING_HWS_PGA(base) ((base)+0x80) #define RING_HWS_PGA_GEN6(base) ((base)+0x2080) #define RING_ACTHD(base) ((base)+0x74) +#define RING_NOPID(base) ((base)+0x94) #define TAIL_ADDR 0x001FFFF8 #define HEAD_WRAP_COUNT 0xFFE00000 #define HEAD_WRAP_ONE 0x00200000 @@ -285,10 +302,17 @@ #define RING_INVALID 0x00000000 #define RING_WAIT_I8XX (1<<0) /* gen2, PRBx_HEAD */ #define RING_WAIT (1<<11) /* gen3+, PRBx_CTL */ +#define RING_WAIT_SEMAPHORE (1<<10) /* gen6+ */ +#if 0 +#define PRB0_TAIL 0x02030 +#define PRB0_HEAD 0x02034 +#define PRB0_START 0x02038 +#define PRB0_CTL 0x0203c #define PRB1_TAIL 0x02040 /* 915+ only */ #define PRB1_HEAD 0x02044 /* 915+ only */ #define PRB1_START 0x02048 /* 915+ only */ #define PRB1_CTL 0x0204c /* 915+ only */ +#endif #define IPEIR_I965 0x02064 #define IPEHR_I965 0x02068 #define INSTDONE_I965 0x0206c @@ -305,11 +329,42 @@ #define INSTDONE 0x02090
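The new MI_LOAD_REGISTER_IMM(x) macro above encodes the command length for x register/value pairs (2*x-1, the payload dword count minus one, per MI convention). Per the comment, an MI_NOOP must precede it and x may not exceed 2^4. A hedged sketch of emitting a single register load on the LP ring, in kernel context with error handling elided (reg and val are placeholders):

/* Load one MMIO register from the ring; the leading MI_NOOP is
 * required or the hardware may drop the load. */
if (BEGIN_LP_RING(4) == 0) {
	OUT_RING(MI_NOOP);
	OUT_RING(MI_LOAD_REGISTER_IMM(1));	/* one addr/value pair */
	OUT_RING(reg);				/* register offset */
	OUT_RING(val);				/* value to load */
	ADVANCE_LP_RING();
}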
#define NOPID 0x02094 #define HWSTAM 0x02098 +#define VCS_INSTDONE 0x1206C +#define VCS_IPEIR 0x12064 +#define VCS_IPEHR 0x12068 +#define VCS_ACTHD 0x12074 +#define BCS_INSTDONE 0x2206C +#define BCS_IPEIR 0x22064 +#define BCS_IPEHR 0x22068 +#define BCS_ACTHD 0x22074 + +#define ERROR_GEN6 0x040a0 + +/* GM45+ chicken bits -- debug workaround bits that may be required + * for various sorts of correct behavior. The top 16 bits of each are + * the enables for writing to the corresponding low bit. + */ +#define _3D_CHICKEN 0x02084 +#define _3D_CHICKEN2 0x0208c +/* Disables pipelining of read flushes past the SF-WIZ interface. + * Required on all Ironlake steppings according to the B-Spec, but the + * particular danger of not doing so is not specified. + */ +# define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14) +#define _3D_CHICKEN3 0x02090 #define MI_MODE 0x0209c # define VS_TIMER_DISPATCH (1 << 6) # define MI_FLUSH_ENABLE (1 << 11) +#define GFX_MODE 0x02520 +#define GFX_RUN_LIST_ENABLE (1<<15) +#define GFX_TLB_INVALIDATE_ALWAYS (1<<13) +#define GFX_SURFACE_FAULT_ENABLE (1<<12) +#define GFX_REPLAY_MODE (1<<11) +#define GFX_PSMI_GRANULARITY (1<<10) +#define GFX_PPGTT_ENABLE (1<<9) + #define SCPD0 0x0209c /* 915+ only */ #define IER 0x020a0 #define IIR 0x020a4 @@ -461,7 +516,7 @@ #define GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR (1 << 3) #define GEN6_BSD_IMR 0x120a8 -#define GEN6_BSD_IMR_USER_INTERRUPT (1 << 12) +#define GEN6_BSD_USER_INTERRUPT (1 << 12) #define GEN6_BSD_RNCID 0x12198 @@ -541,6 +596,18 @@ #define ILK_DISPLAY_CHICKEN1 0x42000 #define ILK_FBCQ_DIS (1<<22) +#define ILK_PABSTRETCH_DIS (1<<21) + + +/* + * Framebuffer compression for Sandybridge + * + * The following two registers are of type GTTMMADR + */ +#define SNB_DPFC_CTL_SA 0x100100 +#define SNB_CPU_FENCE_ENABLE (1<<29) +#define DPFC_CPU_FENCE_OFFSET 0x100104 + /* * GPIO regs @@ -900,6 +967,8 @@ */ #define MCHBAR_MIRROR_BASE 0x10000 +#define MCHBAR_MIRROR_BASE_SNB 0x140000 + /** 915-945 and GM965 MCH register controlling DRAM channel access */ #define DCC 0x10200 #define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0) @@ -1119,6 +1188,10 @@ #define DDRMPLL1 0X12c20 #define PEG_BAND_GAP_DATA 0x14d68 +#define GEN6_GT_PERF_STATUS 0x145948 +#define GEN6_RP_STATE_LIMITS 0x145994 +#define GEN6_RP_STATE_CAP 0x145998 + /* * Logical Context regs */ @@ -1168,7 +1241,6 @@ #define VTOTAL(pipe) _PIPE(pipe, VTOTAL_A, VTOTAL_B) #define VBLANK(pipe) _PIPE(pipe, VBLANK_A, VBLANK_B) #define VSYNC(pipe) _PIPE(pipe, VSYNC_A, VSYNC_B) -#define PIPESRC(pipe) _PIPE(pipe, PIPEASRC, PIPEBSRC) #define BCLRPAT(pipe) _PIPE(pipe, BCLRPAT_A, BCLRPAT_B) /* VGA port control */ @@ -2182,8 +2254,10 @@ #define PIPE_6BPC (2 << 5) #define PIPE_12BPC (3 << 5) +#define PIPESRC(pipe) _PIPE(pipe, PIPEASRC, PIPEBSRC) #define PIPECONF(pipe) _PIPE(pipe, PIPEACONF, PIPEBCONF) #define PIPEDSL(pipe) _PIPE(pipe, PIPEADSL, PIPEBDSL) +#define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, PIPEAFRAMEPIXEL, PIPEBFRAMEPIXEL) #define DSPARB 0x70030 #define DSPARB_CSTART_MASK (0x7f << 7) @@ -2291,6 +2365,40 @@ #define ILK_FIFO_LINE_SIZE 64 +/* define the WM info on Sandybridge */ +#define SNB_DISPLAY_FIFO 128 +#define SNB_DISPLAY_MAXWM 0x7f /* bit 16:22 */ +#define SNB_DISPLAY_DFTWM 8 +#define SNB_CURSOR_FIFO 32 +#define SNB_CURSOR_MAXWM 0x1f /* bit 4:0 */ +#define SNB_CURSOR_DFTWM 8 + +#define SNB_DISPLAY_SR_FIFO 512 +#define SNB_DISPLAY_MAX_SRWM 0x1ff /* bit 16:8 */ +#define SNB_DISPLAY_DFT_SRWM 0x3f +#define SNB_CURSOR_SR_FIFO 64 +#define SNB_CURSOR_MAX_SRWM 0x3f /* bit 5:0 */ +#define 
SNB_CURSOR_DFT_SRWM 8 + +#define SNB_FBC_MAX_SRWM 0xf /* bit 23:20 */ + +#define SNB_FIFO_LINE_SIZE 64 + + +/* the address where we get all kinds of latency value */ +#define SSKPD 0x5d10 +#define SSKPD_WM_MASK 0x3f +#define SSKPD_WM0_SHIFT 0 +#define SSKPD_WM1_SHIFT 8 +#define SSKPD_WM2_SHIFT 16 +#define SSKPD_WM3_SHIFT 24 + +#define SNB_LATENCY(shift) (I915_READ(MCHBAR_MIRROR_BASE_SNB + SSKPD) >> (shift) & SSKPD_WM_MASK) +#define SNB_READ_WM0_LATENCY() SNB_LATENCY(SSKPD_WM0_SHIFT) +#define SNB_READ_WM1_LATENCY() SNB_LATENCY(SSKPD_WM1_SHIFT) +#define SNB_READ_WM2_LATENCY() SNB_LATENCY(SSKPD_WM2_SHIFT) +#define SNB_READ_WM3_LATENCY() SNB_LATENCY(SSKPD_WM3_SHIFT) + /* * The two pipe frame counter registers are not synchronized, so * reading a stable value is somewhat tricky. The following code @@ -2351,6 +2459,10 @@ #define CURBBASE 0x700c4 #define CURBPOS 0x700c8 +#define CURCNTR(pipe) _PIPE(pipe, CURACNTR, CURBCNTR) +#define CURBASE(pipe) _PIPE(pipe, CURABASE, CURBBASE) +#define CURPOS(pipe) _PIPE(pipe, CURAPOS, CURBPOS) + /* Display A control */ #define DSPACNTR 0x70180 #define DISPLAY_PLANE_ENABLE (1<<31) @@ -2586,10 +2698,14 @@ #define GTIER 0x4401c #define ILK_DISPLAY_CHICKEN2 0x42004 +/* Required on all Ironlake and Sandybridge according to the B-Spec. */ +#define ILK_ELPIN_409_SELECT (1 << 25) #define ILK_DPARB_GATE (1<<22) #define ILK_VSDPFD_FULL (1<<21) #define ILK_DSPCLK_GATE 0x42020 #define ILK_DPARB_CLK_GATE (1<<5) +#define ILK_DPFD_CLK_GATE (1<<7) + /* According to spec this bit 7/8/9 of 0x42020 should be set to enable FBC */ #define ILK_CLK_FBC (1<<7) #define ILK_DPFC_DIS1 (1<<8) @@ -2669,6 +2785,7 @@ #define PCH_DPLL(pipe) _PIPE(pipe, PCH_DPLL_A, PCH_DPLL_B) #define PCH_FPA0 0xc6040 +#define FP_CB_TUNE (0x3<<22) #define PCH_FPA1 0xc6044 #define PCH_FPB0 0xc6048 #define PCH_FPB1 0xc604c @@ -3033,6 +3150,7 @@ #define TRANS_DP_10BPC (1<<9) #define TRANS_DP_6BPC (2<<9) #define TRANS_DP_12BPC (3<<9) +#define TRANS_DP_BPC_MASK (3<<9) #define TRANS_DP_VSYNC_ACTIVE_HIGH (1<<4) #define TRANS_DP_VSYNC_ACTIVE_LOW 0 #define TRANS_DP_HSYNC_ACTIVE_HIGH (1<<3) @@ -3052,4 +3170,66 @@ #define EDP_LINK_TRAIN_800MV_0DB_SNB_B (0x38<<22) #define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f<<22) +#define FORCEWAKE 0xA18C +#define FORCEWAKE_ACK 0x130090 + +#define GEN6_RPNSWREQ 0xA008 +#define GEN6_TURBO_DISABLE (1<<31) +#define GEN6_FREQUENCY(x) ((x)<<25) +#define GEN6_OFFSET(x) ((x)<<19) +#define GEN6_AGGRESSIVE_TURBO (0<<15) +#define GEN6_RC_VIDEO_FREQ 0xA00C +#define GEN6_RC_CONTROL 0xA090 +#define GEN6_RC_CTL_RC6pp_ENABLE (1<<16) +#define GEN6_RC_CTL_RC6p_ENABLE (1<<17) +#define GEN6_RC_CTL_RC6_ENABLE (1<<18) +#define GEN6_RC_CTL_RC1e_ENABLE (1<<20) +#define GEN6_RC_CTL_RC7_ENABLE (1<<22) +#define GEN6_RC_CTL_EI_MODE(x) ((x)<<27) +#define GEN6_RC_CTL_HW_ENABLE (1<<31) +#define GEN6_RP_DOWN_TIMEOUT 0xA010 +#define GEN6_RP_INTERRUPT_LIMITS 0xA014 +#define GEN6_RPSTAT1 0xA01C +#define GEN6_RP_CONTROL 0xA024 +#define GEN6_RP_MEDIA_TURBO (1<<11) +#define GEN6_RP_USE_NORMAL_FREQ (1<<9) +#define GEN6_RP_MEDIA_IS_GFX (1<<8) +#define GEN6_RP_ENABLE (1<<7) +#define GEN6_RP_UP_BUSY_MAX (0x2<<3) +#define GEN6_RP_DOWN_BUSY_MIN (0x2<<0) +#define GEN6_RP_UP_THRESHOLD 0xA02C +#define GEN6_RP_DOWN_THRESHOLD 0xA030 +#define GEN6_RP_UP_EI 0xA068 +#define GEN6_RP_DOWN_EI 0xA06C +#define GEN6_RP_IDLE_HYSTERSIS 0xA070 +#define GEN6_RC_STATE 0xA094 +#define GEN6_RC1_WAKE_RATE_LIMIT 0xA098 +#define GEN6_RC6_WAKE_RATE_LIMIT 0xA09C +#define GEN6_RC6pp_WAKE_RATE_LIMIT 0xA0A0 +#define GEN6_RC_EVALUATION_INTERVAL 0xA0A8 
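The SNB_LATENCY helpers above decode four packed 6-bit memory latency fields out of the single SSKPD word in the Sandybridge MCHBAR mirror. A worked example of the unpacking, using a hypothetical readback value of 0x20181008:

#include <stdint.h>

#define SSKPD_WM_MASK	0x3f

/* Extract one 6-bit watermark latency field from the SSKPD word. */
static uint32_t sskpd_latency(uint32_t sskpd, int shift)
{
	return (sskpd >> shift) & SSKPD_WM_MASK;
}

/* For sskpd == 0x20181008:
 * WM0 (shift 0)  = 0x08, WM1 (shift 8)  = 0x10,
 * WM2 (shift 16) = 0x18, WM3 (shift 24) = 0x20. */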
+#define GEN6_RC_IDLE_HYSTERSIS 0xA0AC +#define GEN6_RC_SLEEP 0xA0B0 +#define GEN6_RC1e_THRESHOLD 0xA0B4 +#define GEN6_RC6_THRESHOLD 0xA0B8 +#define GEN6_RC6p_THRESHOLD 0xA0BC +#define GEN6_RC6pp_THRESHOLD 0xA0C0 +#define GEN6_PMINTRMSK 0xA168 + +#define GEN6_PMISR 0x44020 +#define GEN6_PMIMR 0x44024 +#define GEN6_PMIIR 0x44028 +#define GEN6_PMIER 0x4402C +#define GEN6_PM_MBOX_EVENT (1<<25) +#define GEN6_PM_THERMAL_EVENT (1<<24) +#define GEN6_PM_RP_DOWN_TIMEOUT (1<<6) +#define GEN6_PM_RP_UP_THRESHOLD (1<<5) +#define GEN6_PM_RP_DOWN_THRESHOLD (1<<4) +#define GEN6_PM_RP_UP_EI_EXPIRED (1<<2) +#define GEN6_PM_RP_DOWN_EI_EXPIRED (1<<1) + +#define GEN6_PCODE_MAILBOX 0x138124 +#define GEN6_PCODE_READY (1<<31) +#define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x9 +#define GEN6_PCODE_DATA 0x138128 + #endif /* _I915_REG_H_ */ diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index 454c064f8ef7..410772466fa7 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c @@ -235,10 +235,21 @@ static void i915_restore_vga(struct drm_device *dev) static void i915_save_modeset_reg(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; + int i; if (drm_core_check_feature(dev, DRIVER_MODESET)) return; + /* Cursor state */ + dev_priv->saveCURACNTR = I915_READ(CURACNTR); + dev_priv->saveCURAPOS = I915_READ(CURAPOS); + dev_priv->saveCURABASE = I915_READ(CURABASE); + dev_priv->saveCURBCNTR = I915_READ(CURBCNTR); + dev_priv->saveCURBPOS = I915_READ(CURBPOS); + dev_priv->saveCURBBASE = I915_READ(CURBBASE); + if (IS_GEN2(dev)) + dev_priv->saveCURSIZE = I915_READ(CURSIZE); + if (HAS_PCH_SPLIT(dev)) { dev_priv->savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL); dev_priv->saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL); @@ -357,6 +368,28 @@ static void i915_save_modeset_reg(struct drm_device *dev) } i915_save_palette(dev, PIPE_B); dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT); + + /* Fences */ + switch (INTEL_INFO(dev)->gen) { + case 6: + for (i = 0; i < 16; i++) + dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); + break; + case 5: + case 4: + for (i = 0; i < 16; i++) + dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); + break; + case 3: + if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) + for (i = 0; i < 8; i++) + dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); + case 2: + for (i = 0; i < 8; i++) + dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); + break; + } + return; } @@ -365,10 +398,33 @@ static void i915_restore_modeset_reg(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; int dpll_a_reg, fpa0_reg, fpa1_reg; int dpll_b_reg, fpb0_reg, fpb1_reg; + int i; if (drm_core_check_feature(dev, DRIVER_MODESET)) return; + /* Fences */ + switch (INTEL_INFO(dev)->gen) { + case 6: + for (i = 0; i < 16; i++) + I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]); + break; + case 5: + case 4: + for (i = 0; i < 16; i++) + I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]); + break; + case 3: + case 2: + if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) + for (i = 0; i < 8; i++) + I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]); + for (i = 0; i < 8; i++) + I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]); + break; + } + + if (HAS_PCH_SPLIT(dev)) { dpll_a_reg = PCH_DPLL_A; dpll_b_reg = PCH_DPLL_B; @@ -529,6 +585,16 @@ static void i915_restore_modeset_reg(struct drm_device *dev) 
I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR); I915_WRITE(DSPBADDR, I915_READ(DSPBADDR)); + /* Cursor state */ + I915_WRITE(CURAPOS, dev_priv->saveCURAPOS); + I915_WRITE(CURACNTR, dev_priv->saveCURACNTR); + I915_WRITE(CURABASE, dev_priv->saveCURABASE); + I915_WRITE(CURBPOS, dev_priv->saveCURBPOS); + I915_WRITE(CURBCNTR, dev_priv->saveCURBCNTR); + I915_WRITE(CURBBASE, dev_priv->saveCURBBASE); + if (IS_GEN2(dev)) + I915_WRITE(CURSIZE, dev_priv->saveCURSIZE); + return; } @@ -543,16 +609,6 @@ void i915_save_display(struct drm_device *dev) /* Don't save them in KMS mode */ i915_save_modeset_reg(dev); - /* Cursor state */ - dev_priv->saveCURACNTR = I915_READ(CURACNTR); - dev_priv->saveCURAPOS = I915_READ(CURAPOS); - dev_priv->saveCURABASE = I915_READ(CURABASE); - dev_priv->saveCURBCNTR = I915_READ(CURBCNTR); - dev_priv->saveCURBPOS = I915_READ(CURBPOS); - dev_priv->saveCURBBASE = I915_READ(CURBBASE); - if (IS_GEN2(dev)) - dev_priv->saveCURSIZE = I915_READ(CURSIZE); - /* CRT state */ if (HAS_PCH_SPLIT(dev)) { dev_priv->saveADPA = I915_READ(PCH_ADPA); @@ -657,16 +713,6 @@ void i915_restore_display(struct drm_device *dev) /* Don't restore them in KMS mode */ i915_restore_modeset_reg(dev); - /* Cursor state */ - I915_WRITE(CURAPOS, dev_priv->saveCURAPOS); - I915_WRITE(CURACNTR, dev_priv->saveCURACNTR); - I915_WRITE(CURABASE, dev_priv->saveCURABASE); - I915_WRITE(CURBPOS, dev_priv->saveCURBPOS); - I915_WRITE(CURBCNTR, dev_priv->saveCURBCNTR); - I915_WRITE(CURBBASE, dev_priv->saveCURBBASE); - if (IS_GEN2(dev)) - I915_WRITE(CURSIZE, dev_priv->saveCURSIZE); - /* CRT state */ if (HAS_PCH_SPLIT(dev)) I915_WRITE(PCH_ADPA, dev_priv->saveADPA); @@ -771,8 +817,14 @@ int i915_save_state(struct drm_device *dev) dev_priv->saveIMR = I915_READ(IMR); } - if (HAS_PCH_SPLIT(dev)) + if (IS_IRONLAKE_M(dev)) ironlake_disable_drps(dev); + if (IS_GEN6(dev)) + gen6_disable_rps(dev); + + /* XXX disabling the clock gating breaks suspend on gm45 + intel_disable_clock_gating(dev); + */ /* Cache mode state */ dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0); @@ -788,28 +840,6 @@ int i915_save_state(struct drm_device *dev) for (i = 0; i < 3; i++) dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2)); - /* Fences */ - switch (INTEL_INFO(dev)->gen) { - case 6: - for (i = 0; i < 16; i++) - dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); - break; - case 5: - case 4: - for (i = 0; i < 16; i++) - dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); - break; - case 3: - if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) - for (i = 0; i < 8; i++) - dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); - case 2: - for (i = 0; i < 8; i++) - dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); - break; - - } - return 0; } @@ -823,27 +853,6 @@ int i915_restore_state(struct drm_device *dev) /* Hardware status page */ I915_WRITE(HWS_PGA, dev_priv->saveHWS); - /* Fences */ - switch (INTEL_INFO(dev)->gen) { - case 6: - for (i = 0; i < 16; i++) - I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]); - break; - case 5: - case 4: - for (i = 0; i < 16; i++) - I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]); - break; - case 3: - case 2: - if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) - for (i = 0; i < 8; i++) - I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]); - for (i = 0; i < 8; i++) - I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]); - break; - } - i915_restore_display(dev); /* Interrupt state */ @@ -860,13 +869,16 @@ 
int i915_restore_state(struct drm_device *dev) } /* Clock gating state */ - intel_init_clock_gating(dev); + intel_enable_clock_gating(dev); - if (HAS_PCH_SPLIT(dev)) { + if (IS_IRONLAKE_M(dev)) { ironlake_enable_drps(dev); intel_init_emon(dev); } + if (IS_GEN6(dev)) + gen6_enable_rps(dev_priv); + /* Cache mode state */ I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000); diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index fea97a21cc14..7f0fc3ed61aa 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -6,6 +6,7 @@ #include <linux/tracepoint.h> #include <drm/drmP.h> +#include "i915_drv.h" #undef TRACE_SYSTEM #define TRACE_SYSTEM i915 @@ -16,18 +17,18 @@ TRACE_EVENT(i915_gem_object_create, - TP_PROTO(struct drm_gem_object *obj), + TP_PROTO(struct drm_i915_gem_object *obj), TP_ARGS(obj), TP_STRUCT__entry( - __field(struct drm_gem_object *, obj) + __field(struct drm_i915_gem_object *, obj) __field(u32, size) ), TP_fast_assign( __entry->obj = obj; - __entry->size = obj->size; + __entry->size = obj->base.size; ), TP_printk("obj=%p, size=%u", __entry->obj, __entry->size) @@ -35,40 +36,43 @@ TRACE_EVENT(i915_gem_object_create, TRACE_EVENT(i915_gem_object_bind, - TP_PROTO(struct drm_gem_object *obj, u32 gtt_offset), + TP_PROTO(struct drm_i915_gem_object *obj, u32 gtt_offset, bool mappable), - TP_ARGS(obj, gtt_offset), + TP_ARGS(obj, gtt_offset, mappable), TP_STRUCT__entry( - __field(struct drm_gem_object *, obj) + __field(struct drm_i915_gem_object *, obj) __field(u32, gtt_offset) + __field(bool, mappable) ), TP_fast_assign( __entry->obj = obj; __entry->gtt_offset = gtt_offset; + __entry->mappable = mappable; ), - TP_printk("obj=%p, gtt_offset=%08x", - __entry->obj, __entry->gtt_offset) + TP_printk("obj=%p, gtt_offset=%08x%s", + __entry->obj, __entry->gtt_offset, + __entry->mappable ? 
", mappable" : "") ); TRACE_EVENT(i915_gem_object_change_domain, - TP_PROTO(struct drm_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain), + TP_PROTO(struct drm_i915_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain), TP_ARGS(obj, old_read_domains, old_write_domain), TP_STRUCT__entry( - __field(struct drm_gem_object *, obj) + __field(struct drm_i915_gem_object *, obj) __field(u32, read_domains) __field(u32, write_domain) ), TP_fast_assign( __entry->obj = obj; - __entry->read_domains = obj->read_domains | (old_read_domains << 16); - __entry->write_domain = obj->write_domain | (old_write_domain << 16); + __entry->read_domains = obj->base.read_domains | (old_read_domains << 16); + __entry->write_domain = obj->base.write_domain | (old_write_domain << 16); ), TP_printk("obj=%p, read=%04x, write=%04x", @@ -76,36 +80,14 @@ TRACE_EVENT(i915_gem_object_change_domain, __entry->read_domains, __entry->write_domain) ); -TRACE_EVENT(i915_gem_object_get_fence, - - TP_PROTO(struct drm_gem_object *obj, int fence, int tiling_mode), - - TP_ARGS(obj, fence, tiling_mode), - - TP_STRUCT__entry( - __field(struct drm_gem_object *, obj) - __field(int, fence) - __field(int, tiling_mode) - ), - - TP_fast_assign( - __entry->obj = obj; - __entry->fence = fence; - __entry->tiling_mode = tiling_mode; - ), - - TP_printk("obj=%p, fence=%d, tiling=%d", - __entry->obj, __entry->fence, __entry->tiling_mode) -); - DECLARE_EVENT_CLASS(i915_gem_object, - TP_PROTO(struct drm_gem_object *obj), + TP_PROTO(struct drm_i915_gem_object *obj), TP_ARGS(obj), TP_STRUCT__entry( - __field(struct drm_gem_object *, obj) + __field(struct drm_i915_gem_object *, obj) ), TP_fast_assign( @@ -117,21 +99,21 @@ DECLARE_EVENT_CLASS(i915_gem_object, DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush, - TP_PROTO(struct drm_gem_object *obj), + TP_PROTO(struct drm_i915_gem_object *obj), TP_ARGS(obj) ); DEFINE_EVENT(i915_gem_object, i915_gem_object_unbind, - TP_PROTO(struct drm_gem_object *obj), + TP_PROTO(struct drm_i915_gem_object *obj), TP_ARGS(obj) ); DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy, - TP_PROTO(struct drm_gem_object *obj), + TP_PROTO(struct drm_i915_gem_object *obj), TP_ARGS(obj) ); @@ -263,13 +245,13 @@ DEFINE_EVENT(i915_ring, i915_ring_wait_end, ); TRACE_EVENT(i915_flip_request, - TP_PROTO(int plane, struct drm_gem_object *obj), + TP_PROTO(int plane, struct drm_i915_gem_object *obj), TP_ARGS(plane, obj), TP_STRUCT__entry( __field(int, plane) - __field(struct drm_gem_object *, obj) + __field(struct drm_i915_gem_object *, obj) ), TP_fast_assign( @@ -281,13 +263,13 @@ TRACE_EVENT(i915_flip_request, ); TRACE_EVENT(i915_flip_complete, - TP_PROTO(int plane, struct drm_gem_object *obj), + TP_PROTO(int plane, struct drm_i915_gem_object *obj), TP_ARGS(plane, obj), TP_STRUCT__entry( __field(int, plane) - __field(struct drm_gem_object *, obj) + __field(struct drm_i915_gem_object *, obj) ), TP_fast_assign( @@ -298,6 +280,29 @@ TRACE_EVENT(i915_flip_complete, TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj) ); +TRACE_EVENT(i915_reg_rw, + TP_PROTO(int cmd, uint32_t reg, uint64_t val, int len), + + TP_ARGS(cmd, reg, val, len), + + TP_STRUCT__entry( + __field(int, cmd) + __field(uint32_t, reg) + __field(uint64_t, val) + __field(int, len) + ), + + TP_fast_assign( + __entry->cmd = cmd; + __entry->reg = reg; + __entry->val = (uint64_t)val; + __entry->len = len; + ), + + TP_printk("cmd=%c, reg=0x%x, val=0x%llx, len=%d", + __entry->cmd, __entry->reg, __entry->val, __entry->len) +); + #endif /* 
_I915_TRACE_H_ */ /* This part must be outside protection */ diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c index 65c88f9ba12c..2cb8e0b9f1ee 100644 --- a/drivers/gpu/drm/i915/intel_acpi.c +++ b/drivers/gpu/drm/i915/intel_acpi.c @@ -190,37 +190,6 @@ out: kfree(output.pointer); } -static int intel_dsm_switchto(enum vga_switcheroo_client_id id) -{ - return 0; -} - -static int intel_dsm_power_state(enum vga_switcheroo_client_id id, - enum vga_switcheroo_state state) -{ - return 0; -} - -static int intel_dsm_init(void) -{ - return 0; -} - -static int intel_dsm_get_client_id(struct pci_dev *pdev) -{ - if (intel_dsm_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev)) - return VGA_SWITCHEROO_IGD; - else - return VGA_SWITCHEROO_DIS; -} - -static struct vga_switcheroo_handler intel_dsm_handler = { - .switchto = intel_dsm_switchto, - .power_state = intel_dsm_power_state, - .init = intel_dsm_init, - .get_client_id = intel_dsm_get_client_id, -}; - static bool intel_dsm_pci_probe(struct pci_dev *pdev) { acpi_handle dhandle, intel_handle; @@ -276,11 +245,8 @@ void intel_register_dsm_handler(void) { if (!intel_dsm_detect()) return; - - vga_switcheroo_register_handler(&intel_dsm_handler); } void intel_unregister_dsm_handler(void) { - vga_switcheroo_unregister_handler(); } diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index bee24b1a58e8..880659680d0a 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -642,26 +642,23 @@ static const intel_limit_t intel_limits_ironlake_display_port = { .find_pll = intel_find_pll_ironlake_dp, }; -static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc) +static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc, + int refclk) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; const intel_limit_t *limit; - int refclk = 120; if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { - if (dev_priv->lvds_use_ssc && dev_priv->lvds_ssc_freq == 100) - refclk = 100; - if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP) { /* LVDS dual channel */ - if (refclk == 100) + if (refclk == 100000) limit = &intel_limits_ironlake_dual_lvds_100m; else limit = &intel_limits_ironlake_dual_lvds; } else { - if (refclk == 100) + if (refclk == 100000) limit = &intel_limits_ironlake_single_lvds_100m; else limit = &intel_limits_ironlake_single_lvds; @@ -702,13 +699,13 @@ static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc) return limit; } -static const intel_limit_t *intel_limit(struct drm_crtc *crtc) +static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk) { struct drm_device *dev = crtc->dev; const intel_limit_t *limit; if (HAS_PCH_SPLIT(dev)) - limit = intel_ironlake_limit(crtc); + limit = intel_ironlake_limit(crtc, refclk); else if (IS_G4X(dev)) { limit = intel_g4x_limit(crtc); } else if (IS_PINEVIEW(dev)) { @@ -773,11 +770,10 @@ bool intel_pipe_has_type(struct drm_crtc *crtc, int type) * the given connectors. 
*/ -static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock) +static bool intel_PLL_is_valid(struct drm_device *dev, + const intel_limit_t *limit, + const intel_clock_t *clock) { - const intel_limit_t *limit = intel_limit (crtc); - struct drm_device *dev = crtc->dev; - if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1) INTELPllInvalid ("p1 out of range\n"); if (clock->p < limit->p.min || limit->p.max < clock->p) @@ -849,8 +845,8 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, int this_err; intel_clock(dev, refclk, &clock); - - if (!intel_PLL_is_valid(crtc, &clock)) + if (!intel_PLL_is_valid(dev, limit, + &clock)) continue; this_err = abs(clock.dot - target); @@ -912,9 +908,11 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, int this_err; intel_clock(dev, refclk, &clock); - if (!intel_PLL_is_valid(crtc, &clock)) + if (!intel_PLL_is_valid(dev, limit, + &clock)) continue; - this_err = abs(clock.dot - target) ; + + this_err = abs(clock.dot - target); if (this_err < err_most) { *best_clock = clock; err_most = this_err; @@ -1066,13 +1064,13 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) struct drm_i915_private *dev_priv = dev->dev_private; struct drm_framebuffer *fb = crtc->fb; struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); - struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj); + struct drm_i915_gem_object *obj = intel_fb->obj; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int plane, i; u32 fbc_ctl, fbc_ctl2; if (fb->pitch == dev_priv->cfb_pitch && - obj_priv->fence_reg == dev_priv->cfb_fence && + obj->fence_reg == dev_priv->cfb_fence && intel_crtc->plane == dev_priv->cfb_plane && I915_READ(FBC_CONTROL) & FBC_CTL_EN) return; @@ -1086,7 +1084,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) /* FBC_CTL wants 64B units */ dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; - dev_priv->cfb_fence = obj_priv->fence_reg; + dev_priv->cfb_fence = obj->fence_reg; dev_priv->cfb_plane = intel_crtc->plane; plane = dev_priv->cfb_plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB; @@ -1096,7 +1094,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) /* Set it up... */ fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | plane; - if (obj_priv->tiling_mode != I915_TILING_NONE) + if (obj->tiling_mode != I915_TILING_NONE) fbc_ctl2 |= FBC_CTL_CPU_FENCE; I915_WRITE(FBC_CONTROL2, fbc_ctl2); I915_WRITE(FBC_FENCE_OFF, crtc->y); @@ -1107,7 +1105,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */ fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT; - if (obj_priv->tiling_mode != I915_TILING_NONE) + if (obj->tiling_mode != I915_TILING_NONE) fbc_ctl |= dev_priv->cfb_fence; I915_WRITE(FBC_CONTROL, fbc_ctl); @@ -1150,7 +1148,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval) struct drm_i915_private *dev_priv = dev->dev_private; struct drm_framebuffer *fb = crtc->fb; struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); - struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj); + struct drm_i915_gem_object *obj = intel_fb->obj; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int plane = intel_crtc->plane == 0 ? 
DPFC_CTL_PLANEA : DPFC_CTL_PLANEB; unsigned long stall_watermark = 200; @@ -1159,7 +1157,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval) dpfc_ctl = I915_READ(DPFC_CONTROL); if (dpfc_ctl & DPFC_CTL_EN) { if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 && - dev_priv->cfb_fence == obj_priv->fence_reg && + dev_priv->cfb_fence == obj->fence_reg && dev_priv->cfb_plane == intel_crtc->plane && dev_priv->cfb_y == crtc->y) return; @@ -1170,12 +1168,12 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval) } dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; - dev_priv->cfb_fence = obj_priv->fence_reg; + dev_priv->cfb_fence = obj->fence_reg; dev_priv->cfb_plane = intel_crtc->plane; dev_priv->cfb_y = crtc->y; dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X; - if (obj_priv->tiling_mode != I915_TILING_NONE) { + if (obj->tiling_mode != I915_TILING_NONE) { dpfc_ctl |= DPFC_CTL_FENCE_EN | dev_priv->cfb_fence; I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY); } else { @@ -1221,7 +1219,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) struct drm_i915_private *dev_priv = dev->dev_private; struct drm_framebuffer *fb = crtc->fb; struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); - struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj); + struct drm_i915_gem_object *obj = intel_fb->obj; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB; unsigned long stall_watermark = 200; @@ -1230,9 +1228,9 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); if (dpfc_ctl & DPFC_CTL_EN) { if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 && - dev_priv->cfb_fence == obj_priv->fence_reg && + dev_priv->cfb_fence == obj->fence_reg && dev_priv->cfb_plane == intel_crtc->plane && - dev_priv->cfb_offset == obj_priv->gtt_offset && + dev_priv->cfb_offset == obj->gtt_offset && dev_priv->cfb_y == crtc->y) return; @@ -1242,14 +1240,14 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) } dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; - dev_priv->cfb_fence = obj_priv->fence_reg; + dev_priv->cfb_fence = obj->fence_reg; dev_priv->cfb_plane = intel_crtc->plane; - dev_priv->cfb_offset = obj_priv->gtt_offset; + dev_priv->cfb_offset = obj->gtt_offset; dev_priv->cfb_y = crtc->y; dpfc_ctl &= DPFC_RESERVED; dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X); - if (obj_priv->tiling_mode != I915_TILING_NONE) { + if (obj->tiling_mode != I915_TILING_NONE) { dpfc_ctl |= (DPFC_CTL_FENCE_EN | dev_priv->cfb_fence); I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY); } else { @@ -1260,10 +1258,16 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT)); I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y); - I915_WRITE(ILK_FBC_RT_BASE, obj_priv->gtt_offset | ILK_FBC_RT_VALID); + I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID); /* enable it... 
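Throughout the i8xx/g4x/ironlake enable_fbc paths above, the driver caches the pitch, fence, plane and scanout offset it last programmed (dev_priv->cfb_*) and returns early when nothing changed. A rough standalone illustration of that compare-before-reprogram pattern; the struct and field names here are illustrative, not the driver's:

#include <stdio.h>
#include <string.h>

struct fbc_state {
	unsigned int pitch, fence, plane, y;
};

static struct fbc_state cached;	/* mirrors the dev_priv->cfb_* cache */

static void program_fbc(const struct fbc_state *want)
{
	/* Skip the expensive register writes if nothing changed. */
	if (memcmp(want, &cached, sizeof(cached)) == 0) {
		puts("FBC already configured, skipping reprogram");
		return;
	}
	cached = *want;
	puts("writing FBC control registers");	/* stands in for I915_WRITE() */
}

int main(void)
{
	struct fbc_state s = { .pitch = 31, .fence = 2, .plane = 0, .y = 0 };
	program_fbc(&s);	/* programs */
	program_fbc(&s);	/* skips */
	return 0;
}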
*/ I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); + if (IS_GEN6(dev)) { + I915_WRITE(SNB_DPFC_CTL_SA, + SNB_CPU_FENCE_ENABLE | dev_priv->cfb_fence); + I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y); + } + DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane); } @@ -1345,7 +1349,7 @@ static void intel_update_fbc(struct drm_device *dev) struct intel_crtc *intel_crtc; struct drm_framebuffer *fb; struct intel_framebuffer *intel_fb; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; DRM_DEBUG_KMS("\n"); @@ -1384,9 +1388,9 @@ static void intel_update_fbc(struct drm_device *dev) intel_crtc = to_intel_crtc(crtc); fb = crtc->fb; intel_fb = to_intel_framebuffer(fb); - obj_priv = to_intel_bo(intel_fb->obj); + obj = intel_fb->obj; - if (intel_fb->obj->size > dev_priv->cfb_size) { + if (intel_fb->obj->base.size > dev_priv->cfb_size) { DRM_DEBUG_KMS("framebuffer too large, disabling " "compression\n"); dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; @@ -1410,7 +1414,7 @@ static void intel_update_fbc(struct drm_device *dev) dev_priv->no_fbc_reason = FBC_BAD_PLANE; goto out_disable; } - if (obj_priv->tiling_mode != I915_TILING_X) { + if (obj->tiling_mode != I915_TILING_X) { DRM_DEBUG_KMS("framebuffer not tiled, disabling compression\n"); dev_priv->no_fbc_reason = FBC_NOT_TILED; goto out_disable; @@ -1433,14 +1437,13 @@ out_disable: int intel_pin_and_fence_fb_obj(struct drm_device *dev, - struct drm_gem_object *obj, - bool pipelined) + struct drm_i915_gem_object *obj, + struct intel_ring_buffer *pipelined) { - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); u32 alignment; int ret; - switch (obj_priv->tiling_mode) { + switch (obj->tiling_mode) { case I915_TILING_NONE: if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) alignment = 128 * 1024; @@ -1461,7 +1464,7 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, BUG(); } - ret = i915_gem_object_pin(obj, alignment); + ret = i915_gem_object_pin(obj, alignment, true); if (ret) return ret; @@ -1474,9 +1477,8 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, * framebuffer compression. For simplicity, we always install * a fence as the cost is not that onerous. 
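intel_pin_and_fence_fb_obj() above pins the scanout buffer at a tiling-dependent alignment and then (just below) installs a fence for any tiled buffer. A simplified sketch of that pin-then-fence flow and its unwind path; the helpers are hypothetical stand-ins for i915_gem_object_pin() and i915_gem_object_get_fence():

#include <stdio.h>

enum tiling { TILING_NONE, TILING_X, TILING_Y };

static int pin(void)        { puts("pinned");          return 0; }
static void unpin(void)     { puts("unpinned"); }
static int get_fence(void)  { puts("fence installed"); return 0; }

static int pin_and_fence(enum tiling mode)
{
	int ret = pin();
	if (ret)
		return ret;

	/* Tiled scanout always gets a fence now (needed e.g. for FBC). */
	if (mode != TILING_NONE) {
		ret = get_fence();
		if (ret) {
			unpin();	/* unwind the pin on failure */
			return ret;
		}
	}
	return 0;
}

int main(void) { return pin_and_fence(TILING_X); }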
*/ - if (obj_priv->fence_reg == I915_FENCE_REG_NONE && - obj_priv->tiling_mode != I915_TILING_NONE) { - ret = i915_gem_object_get_fence_reg(obj, false); + if (obj->tiling_mode != I915_TILING_NONE) { + ret = i915_gem_object_get_fence(obj, pipelined, false); if (ret) goto err_unpin; } @@ -1497,8 +1499,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_framebuffer *intel_fb; - struct drm_i915_gem_object *obj_priv; - struct drm_gem_object *obj; + struct drm_i915_gem_object *obj; int plane = intel_crtc->plane; unsigned long Start, Offset; u32 dspcntr; @@ -1515,7 +1516,6 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, intel_fb = to_intel_framebuffer(fb); obj = intel_fb->obj; - obj_priv = to_intel_bo(obj); reg = DSPCNTR(plane); dspcntr = I915_READ(reg); @@ -1540,7 +1540,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, return -EINVAL; } if (INTEL_INFO(dev)->gen >= 4) { - if (obj_priv->tiling_mode != I915_TILING_NONE) + if (obj->tiling_mode != I915_TILING_NONE) dspcntr |= DISPPLANE_TILED; else dspcntr &= ~DISPPLANE_TILED; @@ -1552,7 +1552,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, I915_WRITE(reg, dspcntr); - Start = obj_priv->gtt_offset; + Start = obj->gtt_offset; Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8); DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", @@ -1598,7 +1598,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, mutex_lock(&dev->struct_mutex); ret = intel_pin_and_fence_fb_obj(dev, to_intel_framebuffer(crtc->fb)->obj, - false); + NULL); if (ret != 0) { mutex_unlock(&dev->struct_mutex); return ret; @@ -1606,18 +1606,17 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, if (old_fb) { struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_gem_object *obj = to_intel_framebuffer(old_fb)->obj; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj; wait_event(dev_priv->pending_flip_queue, - atomic_read(&obj_priv->pending_flip) == 0); + atomic_read(&obj->pending_flip) == 0); /* Big Hammer, we also need to ensure that any pending * MI_WAIT_FOR_EVENT inside a user batch buffer on the * current scanout is retired before unpinning the old * framebuffer. 
*/ - ret = i915_gem_object_flush_gpu(obj_priv, false); + ret = i915_gem_object_flush_gpu(obj, false); if (ret) { i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj); mutex_unlock(&dev->struct_mutex); @@ -1633,8 +1632,10 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, return ret; } - if (old_fb) + if (old_fb) { + intel_wait_for_vblank(dev, intel_crtc->pipe); i915_gem_object_unpin(to_intel_framebuffer(old_fb)->obj); + } mutex_unlock(&dev->struct_mutex); @@ -1996,31 +1997,31 @@ static void intel_flush_display_plane(struct drm_device *dev, static void intel_clear_scanline_wait(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring; u32 tmp; if (IS_GEN2(dev)) /* Can't break the hang on i8xx */ return; - tmp = I915_READ(PRB0_CTL); - if (tmp & RING_WAIT) { - I915_WRITE(PRB0_CTL, tmp); - POSTING_READ(PRB0_CTL); - } + ring = LP_RING(dev_priv); + tmp = I915_READ_CTL(ring); + if (tmp & RING_WAIT) + I915_WRITE_CTL(ring, tmp); } static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) { - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; struct drm_i915_private *dev_priv; if (crtc->fb == NULL) return; - obj_priv = to_intel_bo(to_intel_framebuffer(crtc->fb)->obj); + obj = to_intel_framebuffer(crtc->fb)->obj; dev_priv = crtc->dev->dev_private; wait_event(dev_priv->pending_flip_queue, - atomic_read(&obj_priv->pending_flip) == 0); + atomic_read(&obj->pending_flip) == 0); } static void ironlake_crtc_enable(struct drm_crtc *crtc) @@ -2120,9 +2121,11 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) reg = TRANS_DP_CTL(pipe); temp = I915_READ(reg); temp &= ~(TRANS_DP_PORT_SEL_MASK | - TRANS_DP_SYNC_MASK); + TRANS_DP_SYNC_MASK | + TRANS_DP_BPC_MASK); temp |= (TRANS_DP_OUTPUT_ENABLE | TRANS_DP_ENH_FRAMING); + temp |= TRANS_DP_8BPC; if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC) temp |= TRANS_DP_HSYNC_ACTIVE_HIGH; @@ -2712,27 +2715,19 @@ fdi_reduce_ratio(u32 *num, u32 *den) } } -#define DATA_N 0x800000 -#define LINK_N 0x80000 - static void ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock, int link_clock, struct fdi_m_n *m_n) { - u64 temp; - m_n->tu = 64; /* default size */ - temp = (u64) DATA_N * pixel_clock; - temp = div_u64(temp, link_clock); - m_n->gmch_m = div_u64(temp * bits_per_pixel, nlanes); - m_n->gmch_m >>= 3; /* convert to bytes_per_pixel */ - m_n->gmch_n = DATA_N; + /* BUG_ON(pixel_clock > INT_MAX / 36); */ + m_n->gmch_m = bits_per_pixel * pixel_clock; + m_n->gmch_n = link_clock * nlanes * 8; fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n); - temp = (u64) LINK_N * pixel_clock; - m_n->link_m = div_u64(temp, link_clock); - m_n->link_n = LINK_N; + m_n->link_m = pixel_clock; + m_n->link_n = link_clock; fdi_reduce_ratio(&m_n->link_m, &m_n->link_n); } @@ -2856,6 +2851,39 @@ static struct intel_watermark_params ironlake_cursor_srwm_info = { ILK_FIFO_LINE_SIZE }; +static struct intel_watermark_params sandybridge_display_wm_info = { + SNB_DISPLAY_FIFO, + SNB_DISPLAY_MAXWM, + SNB_DISPLAY_DFTWM, + 2, + SNB_FIFO_LINE_SIZE +}; + +static struct intel_watermark_params sandybridge_cursor_wm_info = { + SNB_CURSOR_FIFO, + SNB_CURSOR_MAXWM, + SNB_CURSOR_DFTWM, + 2, + SNB_FIFO_LINE_SIZE +}; + +static struct intel_watermark_params sandybridge_display_srwm_info = { + SNB_DISPLAY_SR_FIFO, + SNB_DISPLAY_MAX_SRWM, + SNB_DISPLAY_DFT_SRWM, + 2, + SNB_FIFO_LINE_SIZE +}; + +static struct intel_watermark_params sandybridge_cursor_srwm_info = { + SNB_CURSOR_SR_FIFO, + SNB_CURSOR_MAX_SRWM, + 
SNB_CURSOR_DFT_SRWM, + 2, + SNB_FIFO_LINE_SIZE +}; + + /** * intel_calculate_wm - calculate watermark level * @clock_in_khz: pixel clock @@ -3389,6 +3417,10 @@ static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused, static bool ironlake_compute_wm0(struct drm_device *dev, int pipe, + const struct intel_watermark_params *display, + int display_latency, + const struct intel_watermark_params *cursor, + int cursor_latency, int *plane_wm, int *cursor_wm) { @@ -3406,22 +3438,20 @@ static bool ironlake_compute_wm0(struct drm_device *dev, pixel_size = crtc->fb->bits_per_pixel / 8; /* Use the small buffer method to calculate plane watermark */ - entries = ((clock * pixel_size / 1000) * ILK_LP0_PLANE_LATENCY) / 1000; - entries = DIV_ROUND_UP(entries, - ironlake_display_wm_info.cacheline_size); - *plane_wm = entries + ironlake_display_wm_info.guard_size; - if (*plane_wm > (int)ironlake_display_wm_info.max_wm) - *plane_wm = ironlake_display_wm_info.max_wm; + entries = ((clock * pixel_size / 1000) * display_latency * 100) / 1000; + entries = DIV_ROUND_UP(entries, display->cacheline_size); + *plane_wm = entries + display->guard_size; + if (*plane_wm > (int)display->max_wm) + *plane_wm = display->max_wm; /* Use the large buffer method to calculate cursor watermark */ line_time_us = ((htotal * 1000) / clock); - line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000; + line_count = (cursor_latency * 100 / line_time_us + 1000) / 1000; entries = line_count * 64 * pixel_size; - entries = DIV_ROUND_UP(entries, - ironlake_cursor_wm_info.cacheline_size); - *cursor_wm = entries + ironlake_cursor_wm_info.guard_size; - if (*cursor_wm > ironlake_cursor_wm_info.max_wm) - *cursor_wm = ironlake_cursor_wm_info.max_wm; + entries = DIV_ROUND_UP(entries, cursor->cacheline_size); + *cursor_wm = entries + cursor->guard_size; + if (*cursor_wm > (int)cursor->max_wm) + *cursor_wm = (int)cursor->max_wm; return true; } @@ -3436,7 +3466,12 @@ static void ironlake_update_wm(struct drm_device *dev, int tmp; enabled = 0; - if (ironlake_compute_wm0(dev, 0, &plane_wm, &cursor_wm)) { + if (ironlake_compute_wm0(dev, 0, + &ironlake_display_wm_info, + ILK_LP0_PLANE_LATENCY, + &ironlake_cursor_wm_info, + ILK_LP0_CURSOR_LATENCY, + &plane_wm, &cursor_wm)) { I915_WRITE(WM0_PIPEA_ILK, (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); DRM_DEBUG_KMS("FIFO watermarks For pipe A -" @@ -3445,7 +3480,12 @@ static void ironlake_update_wm(struct drm_device *dev, enabled++; } - if (ironlake_compute_wm0(dev, 1, &plane_wm, &cursor_wm)) { + if (ironlake_compute_wm0(dev, 1, + &ironlake_display_wm_info, + ILK_LP0_PLANE_LATENCY, + &ironlake_cursor_wm_info, + ILK_LP0_CURSOR_LATENCY, + &plane_wm, &cursor_wm)) { I915_WRITE(WM0_PIPEB_ILK, (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); DRM_DEBUG_KMS("FIFO watermarks For pipe B -" @@ -3459,7 +3499,7 @@ static void ironlake_update_wm(struct drm_device *dev, * display plane is used. */ tmp = 0; - if (enabled == 1 && /* XXX disabled due to buggy implmentation? */ 0) { + if (enabled == 1) { unsigned long line_time_us; int small, large, plane_fbc; int sr_clock, entries; @@ -3511,6 +3551,197 @@ static void ironlake_update_wm(struct drm_device *dev, /* XXX setup WM2 and WM3 */ } +/* + * Check the wm result. + * + * If any calculated watermark values is larger than the maximum value that + * can be programmed into the associated watermark register, that watermark + * must be disabled. 
+ * + * Also returns false if all of those watermark values are 0, which is set by + * sandybridge_compute_srwm to indicate the latency is ZERO. + */ +static bool sandybridge_check_srwm(struct drm_device *dev, int level, + int fbc_wm, int display_wm, int cursor_wm) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d," + " cursor %d\n", level, display_wm, fbc_wm, cursor_wm); + + if (fbc_wm > SNB_FBC_MAX_SRWM) { + DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n", + fbc_wm, SNB_FBC_MAX_SRWM, level); + + /* fbc has its own way to disable FBC WM */ + I915_WRITE(DISP_ARB_CTL, + I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS); + return false; + } + + if (display_wm > SNB_DISPLAY_MAX_SRWM) { + DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n", + display_wm, SNB_DISPLAY_MAX_SRWM, level); + return false; + } + + if (cursor_wm > SNB_CURSOR_MAX_SRWM) { + DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n", + cursor_wm, SNB_CURSOR_MAX_SRWM, level); + return false; + } + + if (!(fbc_wm || display_wm || cursor_wm)) { + DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level); + return false; + } + + return true; +} + +/* + * Compute watermark values of WM[1-3]. + */ +static bool sandybridge_compute_srwm(struct drm_device *dev, int level, + int hdisplay, int htotal, int pixel_size, + int clock, int latency_ns, int *fbc_wm, + int *display_wm, int *cursor_wm) +{ + + unsigned long line_time_us; + int small, large; + int entries; + int line_count, line_size; + + if (!latency_ns) { + *fbc_wm = *display_wm = *cursor_wm = 0; + return false; + } + + line_time_us = (htotal * 1000) / clock; + line_count = (latency_ns / line_time_us + 1000) / 1000; + line_size = hdisplay * pixel_size; + + /* Use the minimum of the small and large buffer method for primary */ + small = ((clock * pixel_size / 1000) * latency_ns) / 1000; + large = line_count * line_size; + + entries = DIV_ROUND_UP(min(small, large), + sandybridge_display_srwm_info.cacheline_size); + *display_wm = entries + sandybridge_display_srwm_info.guard_size; + + /* + * Spec said: + * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2 + */ + *fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2; + + /* calculate the self-refresh watermark for display cursor */ + entries = line_count * pixel_size * 64; + entries = DIV_ROUND_UP(entries, + sandybridge_cursor_srwm_info.cacheline_size); + *cursor_wm = entries + sandybridge_cursor_srwm_info.guard_size; + + return sandybridge_check_srwm(dev, level, + *fbc_wm, *display_wm, *cursor_wm); +} + +static void sandybridge_update_wm(struct drm_device *dev, + int planea_clock, int planeb_clock, + int hdisplay, int htotal, + int pixel_size) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int latency = SNB_READ_WM0_LATENCY(); + int fbc_wm, plane_wm, cursor_wm, enabled; + int clock; + + enabled = 0; + if (ironlake_compute_wm0(dev, 0, + &sandybridge_display_wm_info, latency, + &sandybridge_cursor_wm_info, latency, + &plane_wm, &cursor_wm)) { + I915_WRITE(WM0_PIPEA_ILK, + (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); + DRM_DEBUG_KMS("FIFO watermarks for pipe A -" + " plane %d, " "cursor: %d\n", + plane_wm, cursor_wm); + enabled++; + } + + if (ironlake_compute_wm0(dev, 1, + &sandybridge_display_wm_info, latency, + &sandybridge_cursor_wm_info, latency, + &plane_wm, &cursor_wm)) { + I915_WRITE(WM0_PIPEB_ILK, + (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); + 
DRM_DEBUG_KMS("FIFO watermarks For pipe B -" + " plane %d, cursor: %d\n", + plane_wm, cursor_wm); + enabled++; + } + + /* + * Calculate and update the self-refresh watermark only when one + * display plane is used. + * + * SNB support 3 levels of watermark. + * + * WM1/WM2/WM2 watermarks have to be enabled in the ascending order, + * and disabled in the descending order + * + */ + I915_WRITE(WM3_LP_ILK, 0); + I915_WRITE(WM2_LP_ILK, 0); + I915_WRITE(WM1_LP_ILK, 0); + + if (enabled != 1) + return; + + clock = planea_clock ? planea_clock : planeb_clock; + + /* WM1 */ + if (!sandybridge_compute_srwm(dev, 1, hdisplay, htotal, pixel_size, + clock, SNB_READ_WM1_LATENCY() * 500, + &fbc_wm, &plane_wm, &cursor_wm)) + return; + + I915_WRITE(WM1_LP_ILK, + WM1_LP_SR_EN | + (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) | + (fbc_wm << WM1_LP_FBC_SHIFT) | + (plane_wm << WM1_LP_SR_SHIFT) | + cursor_wm); + + /* WM2 */ + if (!sandybridge_compute_srwm(dev, 2, + hdisplay, htotal, pixel_size, + clock, SNB_READ_WM2_LATENCY() * 500, + &fbc_wm, &plane_wm, &cursor_wm)) + return; + + I915_WRITE(WM2_LP_ILK, + WM2_LP_EN | + (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) | + (fbc_wm << WM1_LP_FBC_SHIFT) | + (plane_wm << WM1_LP_SR_SHIFT) | + cursor_wm); + + /* WM3 */ + if (!sandybridge_compute_srwm(dev, 3, + hdisplay, htotal, pixel_size, + clock, SNB_READ_WM3_LATENCY() * 500, + &fbc_wm, &plane_wm, &cursor_wm)) + return; + + I915_WRITE(WM3_LP_ILK, + WM3_LP_EN | + (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) | + (fbc_wm << WM1_LP_FBC_SHIFT) | + (plane_wm << WM1_LP_SR_SHIFT) | + cursor_wm); +} + /** * intel_update_watermarks - update FIFO watermark values based on current modes * @@ -3666,7 +3897,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, * refclk, or FALSE. The returned values represent the clock equation: * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. */ - limit = intel_limit(crtc); + limit = intel_limit(crtc, refclk); ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock); if (!ok) { DRM_ERROR("Couldn't find PLL settings for mode!\n"); @@ -3716,6 +3947,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, /* FDI link */ if (HAS_PCH_SPLIT(dev)) { + int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); int lane = 0, link_bw, bpp; /* CPU eDP doesn't require FDI link, so just set DP M/N according to current link config */ @@ -3799,6 +4031,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, intel_crtc->fdi_lanes = lane; + if (pixel_multiplier > 1) + link_bw *= pixel_multiplier; ironlake_compute_m_n(bpp, lane, target_clock, link_bw, &m_n); } @@ -3860,6 +4094,22 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, reduced_clock.m2; } + /* Enable autotuning of the PLL clock (if permissible) */ + if (HAS_PCH_SPLIT(dev)) { + int factor = 21; + + if (is_lvds) { + if ((dev_priv->lvds_use_ssc && + dev_priv->lvds_ssc_freq == 100) || + (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP) + factor = 25; + } else if (is_sdvo && is_tv) + factor = 20; + + if (clock.m1 < factor * clock.n) + fp |= FP_CB_TUNE; + } + dpll = 0; if (!HAS_PCH_SPLIT(dev)) dpll = DPLL_VGA_MODE_DIS; @@ -4074,7 +4324,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, } if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) { - I915_WRITE(fp_reg, fp); I915_WRITE(dpll_reg, dpll); /* Wait for the clocks to stabilize. 
*/ @@ -4092,13 +4341,13 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, } I915_WRITE(DPLL_MD(pipe), temp); } else { - /* write it again -- the BIOS does, after all */ + /* The pixel multiplier can only be updated once the + * DPLL is enabled and the clocks are stable. + * + * So write it again. + */ I915_WRITE(dpll_reg, dpll); } - - /* Wait for the clocks to stabilize. */ - POSTING_READ(dpll_reg); - udelay(150); } intel_crtc->lowfreq_avail = false; @@ -4334,15 +4583,14 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc, } static int intel_crtc_cursor_set(struct drm_crtc *crtc, - struct drm_file *file_priv, + struct drm_file *file, uint32_t handle, uint32_t width, uint32_t height) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct drm_gem_object *bo; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; uint32_t addr; int ret; @@ -4352,7 +4600,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, if (!handle) { DRM_DEBUG_KMS("cursor off\n"); addr = 0; - bo = NULL; + obj = NULL; mutex_lock(&dev->struct_mutex); goto finish; } @@ -4363,13 +4611,11 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, return -EINVAL; } - bo = drm_gem_object_lookup(dev, file_priv, handle); - if (!bo) + obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle)); + if (!obj) return -ENOENT; - obj_priv = to_intel_bo(bo); - - if (bo->size < width * height * 4) { + if (obj->base.size < width * height * 4) { DRM_ERROR("buffer is too small"); ret = -ENOMEM; goto fail; @@ -4378,29 +4624,41 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, /* we only need to pin inside GTT if cursor is non-phy */ mutex_lock(&dev->struct_mutex); if (!dev_priv->info->cursor_needs_physical) { - ret = i915_gem_object_pin(bo, PAGE_SIZE); + if (obj->tiling_mode) { + DRM_ERROR("cursor cannot be tiled\n"); + ret = -EINVAL; + goto fail_locked; + } + + ret = i915_gem_object_pin(obj, PAGE_SIZE, true); if (ret) { + DRM_ERROR("failed to pin cursor bo\n"); goto fail_locked; } - ret = i915_gem_object_set_to_gtt_domain(bo, 0); + ret = i915_gem_object_set_to_gtt_domain(obj, 0); if (ret) { DRM_ERROR("failed to move cursor bo into the GTT\n"); goto fail_unpin; } - addr = obj_priv->gtt_offset; + ret = i915_gem_object_put_fence(obj); + if (ret) { + DRM_ERROR("failed to move cursor bo into the GTT\n"); + goto fail_unpin; + } + + addr = obj->gtt_offset; } else { int align = IS_I830(dev) ? 16 * 1024 : 256; - ret = i915_gem_attach_phys_object(dev, bo, + ret = i915_gem_attach_phys_object(dev, obj, (intel_crtc->pipe == 0) ? 
I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1, align); if (ret) { DRM_ERROR("failed to attach phys object\n"); goto fail_locked; } - addr = obj_priv->phys_obj->handle->busaddr; + addr = obj->phys_obj->handle->busaddr; } if (IS_GEN2(dev)) @@ -4409,17 +4667,17 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, finish: if (intel_crtc->cursor_bo) { if (dev_priv->info->cursor_needs_physical) { - if (intel_crtc->cursor_bo != bo) + if (intel_crtc->cursor_bo != obj) i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo); } else i915_gem_object_unpin(intel_crtc->cursor_bo); - drm_gem_object_unreference(intel_crtc->cursor_bo); + drm_gem_object_unreference(&intel_crtc->cursor_bo->base); } mutex_unlock(&dev->struct_mutex); intel_crtc->cursor_addr = addr; - intel_crtc->cursor_bo = bo; + intel_crtc->cursor_bo = obj; intel_crtc->cursor_width = width; intel_crtc->cursor_height = height; @@ -4427,11 +4685,11 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, return 0; fail_unpin: - i915_gem_object_unpin(bo); + i915_gem_object_unpin(obj); fail_locked: mutex_unlock(&dev->struct_mutex); fail: - drm_gem_object_unreference_unlocked(bo); + drm_gem_object_unreference_unlocked(&obj->base); return ret; } @@ -4742,8 +5000,14 @@ static void intel_gpu_idle_timer(unsigned long arg) struct drm_device *dev = (struct drm_device *)arg; drm_i915_private_t *dev_priv = dev->dev_private; - dev_priv->busy = false; + if (!list_empty(&dev_priv->mm.active_list)) { + /* Still processing requests, so just re-arm the timer. */ + mod_timer(&dev_priv->idle_timer, jiffies + + msecs_to_jiffies(GPU_IDLE_TIMEOUT)); + return; + } + dev_priv->busy = false; queue_work(dev_priv->wq, &dev_priv->idle_work); } @@ -4754,9 +5018,17 @@ static void intel_crtc_idle_timer(unsigned long arg) struct intel_crtc *intel_crtc = (struct intel_crtc *)arg; struct drm_crtc *crtc = &intel_crtc->base; drm_i915_private_t *dev_priv = crtc->dev->dev_private; + struct intel_framebuffer *intel_fb; - intel_crtc->busy = false; + intel_fb = to_intel_framebuffer(crtc->fb); + if (intel_fb && intel_fb->obj->active) { + /* The framebuffer is still being accessed by the GPU. */ + mod_timer(&intel_crtc->idle_timer, jiffies + + msecs_to_jiffies(CRTC_IDLE_TIMEOUT)); + return; + } + intel_crtc->busy = false; queue_work(dev_priv->wq, &dev_priv->idle_work); } @@ -4891,7 +5163,7 @@ static void intel_idle_update(struct work_struct *work) * buffer), we'll also mark the display as busy, so we know to increase its * clock frequency. 
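Both idle timers above now re-arm themselves instead of declaring idle while the GPU (or the framebuffer) is still in use. The pattern, reduced to a runnable skeleton (the driver uses mod_timer() and a workqueue rather than these stubs):

#include <stdio.h>
#include <stdbool.h>

static bool still_active = true;

/* Called when the idle timer fires. */
static void idle_timer_fn(void)
{
	if (still_active) {
		puts("still busy: re-arm the timer and check again later");
		return;		/* not idle yet */
	}
	puts("marking idle and queueing the idle work");
}

int main(void)
{
	idle_timer_fn();	/* re-arms */
	still_active = false;
	idle_timer_fn();	/* goes idle */
	return 0;
}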
*/ -void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj) +void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj) { drm_i915_private_t *dev_priv = dev->dev_private; struct drm_crtc *crtc = NULL; @@ -4972,8 +5244,9 @@ static void intel_unpin_work_fn(struct work_struct *__work) mutex_lock(&work->dev->struct_mutex); i915_gem_object_unpin(work->old_fb_obj); - drm_gem_object_unreference(work->pending_flip_obj); - drm_gem_object_unreference(work->old_fb_obj); + drm_gem_object_unreference(&work->pending_flip_obj->base); + drm_gem_object_unreference(&work->old_fb_obj->base); + mutex_unlock(&work->dev->struct_mutex); kfree(work); } @@ -4984,15 +5257,17 @@ static void do_intel_finish_page_flip(struct drm_device *dev, drm_i915_private_t *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_unpin_work *work; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; struct drm_pending_vblank_event *e; - struct timeval now; + struct timeval tnow, tvbl; unsigned long flags; /* Ignore early vblank irqs */ if (intel_crtc == NULL) return; + do_gettimeofday(&tnow); + spin_lock_irqsave(&dev->event_lock, flags); work = intel_crtc->unpin_work; if (work == NULL || !work->pending) { @@ -5001,26 +5276,49 @@ static void do_intel_finish_page_flip(struct drm_device *dev, } intel_crtc->unpin_work = NULL; - drm_vblank_put(dev, intel_crtc->pipe); if (work->event) { e = work->event; - do_gettimeofday(&now); - e->event.sequence = drm_vblank_count(dev, intel_crtc->pipe); - e->event.tv_sec = now.tv_sec; - e->event.tv_usec = now.tv_usec; + e->event.sequence = drm_vblank_count_and_time(dev, intel_crtc->pipe, &tvbl); + + /* Called before vblank count and timestamps have + * been updated for the vblank interval of flip + * completion? Need to increment vblank count and + * add one videorefresh duration to returned timestamp + * to account for this. We assume this happened if we + * get called over 0.9 frame durations after the last + * timestamped vblank. + * + * This calculation cannot be used with vrefresh rates + * below 5Hz (10Hz to be on the safe side) without + * promoting to 64-bit integers. 
+ */ + if (10 * (timeval_to_ns(&tnow) - timeval_to_ns(&tvbl)) > + 9 * crtc->framedur_ns) { + e->event.sequence++; + tvbl = ns_to_timeval(timeval_to_ns(&tvbl) + + crtc->framedur_ns); + } + + e->event.tv_sec = tvbl.tv_sec; + e->event.tv_usec = tvbl.tv_usec; + list_add_tail(&e->base.link, &e->base.file_priv->event_list); wake_up_interruptible(&e->base.file_priv->event_wait); } + drm_vblank_put(dev, intel_crtc->pipe); + spin_unlock_irqrestore(&dev->event_lock, flags); - obj_priv = to_intel_bo(work->old_fb_obj); + obj = work->old_fb_obj; + atomic_clear_mask(1 << intel_crtc->plane, - &obj_priv->pending_flip.counter); - if (atomic_read(&obj_priv->pending_flip) == 0) + &obj->pending_flip.counter); + if (atomic_read(&obj->pending_flip) == 0) wake_up(&dev_priv->pending_flip_queue); + schedule_work(&work->work); trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj); @@ -5066,8 +5364,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_framebuffer *intel_fb; - struct drm_i915_gem_object *obj_priv; - struct drm_gem_object *obj; + struct drm_i915_gem_object *obj; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_unpin_work *work; unsigned long flags, offset; @@ -5101,13 +5398,13 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, obj = intel_fb->obj; mutex_lock(&dev->struct_mutex); - ret = intel_pin_and_fence_fb_obj(dev, obj, true); + ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv)); if (ret) goto cleanup_work; /* Reference the objects for the scheduled work. */ - drm_gem_object_reference(work->old_fb_obj); - drm_gem_object_reference(obj); + drm_gem_object_reference(&work->old_fb_obj->base); + drm_gem_object_reference(&obj->base); crtc->fb = fb; @@ -5115,22 +5412,16 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, if (ret) goto cleanup_objs; - /* Block clients from rendering to the new back buffer until - * the flip occurs and the object is no longer visible. - */ - atomic_add(1 << intel_crtc->plane, - &to_intel_bo(work->old_fb_obj)->pending_flip); - - work->pending_flip_obj = obj; - obj_priv = to_intel_bo(obj); - if (IS_GEN3(dev) || IS_GEN2(dev)) { u32 flip_mask; /* Can't queue multiple flips, so wait for the previous * one to finish before executing the next. */ - BEGIN_LP_RING(2); + ret = BEGIN_LP_RING(2); + if (ret) + goto cleanup_objs; + if (intel_crtc->plane) flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; else @@ -5140,18 +5431,28 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, ADVANCE_LP_RING(); } + work->pending_flip_obj = obj; + work->enable_stall_check = true; /* Offset into the new buffer for cases of shared fbs between CRTCs */ offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8; - BEGIN_LP_RING(4); - switch(INTEL_INFO(dev)->gen) { + ret = BEGIN_LP_RING(4); + if (ret) + goto cleanup_objs; + + /* Block clients from rendering to the new back buffer until + * the flip occurs and the object is no longer visible. 
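The 0.9-frame heuristic above decides whether the flip completion ran before the vblank bookkeeping was updated: if the notification arrives more than 0.9 frame durations after the last timestamped vblank, the event is pushed forward one full frame. In round numbers, assuming a 60 Hz frame of about 16.67 ms:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t framedur_ns = 16666667;		/* ~60 Hz frame duration */
	int64_t tvbl_ns = 1000000000;		/* last timestamped vblank */
	int64_t tnow_ns = tvbl_ns + 16000000;	/* flip completes 16 ms later */
	uint32_t sequence = 1234;

	/* Same test as the driver: now - vblank > 0.9 * frame duration,
	 * written as 10 * delta > 9 * framedur to stay in integers. */
	if (10 * (tnow_ns - tvbl_ns) > 9 * framedur_ns) {
		sequence++;			/* credit the next vblank */
		tvbl_ns += framedur_ns;		/* and stamp it one frame later */
	}

	printf("sequence=%u stamp=%lld ns\n", sequence, (long long)tvbl_ns);
	return 0;
}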
+ */ + atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip); + + switch (INTEL_INFO(dev)->gen) { case 2: OUT_RING(MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); OUT_RING(fb->pitch); - OUT_RING(obj_priv->gtt_offset + offset); + OUT_RING(obj->gtt_offset + offset); OUT_RING(MI_NOOP); break; @@ -5159,7 +5460,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, OUT_RING(MI_DISPLAY_FLIP_I915 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); OUT_RING(fb->pitch); - OUT_RING(obj_priv->gtt_offset + offset); + OUT_RING(obj->gtt_offset + offset); OUT_RING(MI_NOOP); break; @@ -5172,7 +5473,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, OUT_RING(MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); OUT_RING(fb->pitch); - OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode); + OUT_RING(obj->gtt_offset | obj->tiling_mode); /* XXX Enabling the panel-fitter across page-flip is so far * untested on non-native modes, so ignore it for now. @@ -5186,8 +5487,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, case 6: OUT_RING(MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); - OUT_RING(fb->pitch | obj_priv->tiling_mode); - OUT_RING(obj_priv->gtt_offset); + OUT_RING(fb->pitch | obj->tiling_mode); + OUT_RING(obj->gtt_offset); pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE; pipesrc = I915_READ(pipe == 0 ? PIPEASRC : PIPEBSRC) & 0x0fff0fff; @@ -5203,8 +5504,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, return 0; cleanup_objs: - drm_gem_object_unreference(work->old_fb_obj); - drm_gem_object_unreference(obj); + drm_gem_object_unreference(&work->old_fb_obj->base); + drm_gem_object_unreference(&obj->base); cleanup_work: mutex_unlock(&dev->struct_mutex); @@ -5236,6 +5537,55 @@ static const struct drm_crtc_funcs intel_crtc_funcs = { .page_flip = intel_crtc_page_flip, }; +static void intel_sanitize_modesetting(struct drm_device *dev, + int pipe, int plane) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 reg, val; + + if (HAS_PCH_SPLIT(dev)) + return; + + /* Who knows what state these registers were left in by the BIOS or + * grub? + * + * If we leave the registers in a conflicting state (e.g. with the + * display plane reading from the other pipe than the one we intend + * to use) then when we attempt to teardown the active mode, we will + * not disable the pipes and planes in the correct order -- leaving + * a plane reading from a disabled pipe and possibly leading to + * undefined behaviour. + */ + + reg = DSPCNTR(plane); + val = I915_READ(reg); + + if ((val & DISPLAY_PLANE_ENABLE) == 0) + return; + if (!!(val & DISPPLANE_SEL_PIPE_MASK) == pipe) + return; + + /* This display plane is active and attached to the other CPU pipe. */ + pipe = !pipe; + + /* Disable the plane and wait for it to stop reading from the pipe. */ + I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE); + intel_flush_display_plane(dev, plane); + + if (IS_GEN2(dev)) + intel_wait_for_vblank(dev, pipe); + + if (pipe == 0 && (dev_priv->quirks & QUIRK_PIPEA_FORCE)) + return; + + /* Switch off the pipe. 
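intel_sanitize_modesetting() above is about teardown order: a display plane must stop scanning out (and be flushed) before the pipe feeding it is switched off, otherwise the plane is left reading from a disabled pipe. A toy model of that ordering with stubbed register state in place of MMIO:

#include <stdio.h>

#define PLANE_ENABLE (1u << 31)
#define PIPE_ENABLE  (1u << 31)

static unsigned int dspcntr  = PLANE_ENABLE;	/* fake DSPCNTR */
static unsigned int pipeconf = PIPE_ENABLE;	/* fake PIPECONF */

static void sanitize(void)
{
	if (dspcntr & PLANE_ENABLE) {
		dspcntr &= ~PLANE_ENABLE;	/* disable the plane first */
		puts("plane disabled and flushed");
	}
	if (pipeconf & PIPE_ENABLE) {
		pipeconf &= ~PIPE_ENABLE;	/* only then the pipe */
		puts("pipe disabled, waiting for pipe off");
	}
}

int main(void)
{
	sanitize();
	return 0;
}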
*/ + reg = PIPECONF(pipe); + val = I915_READ(reg); + if (val & PIPECONF_ENABLE) { + I915_WRITE(reg, val & ~PIPECONF_ENABLE); + intel_wait_for_pipe_off(dev, pipe); + } +} static void intel_crtc_init(struct drm_device *dev, int pipe) { @@ -5287,10 +5637,12 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer, (unsigned long)intel_crtc); + + intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane); } int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, - struct drm_file *file_priv) + struct drm_file *file) { drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data; @@ -5336,9 +5688,14 @@ static void intel_setup_outputs(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; struct intel_encoder *encoder; bool dpd_is_edp = false; + bool has_lvds = false; if (IS_MOBILE(dev) && !IS_I830(dev)) - intel_lvds_init(dev); + has_lvds = intel_lvds_init(dev); + if (!has_lvds && !HAS_PCH_SPLIT(dev)) { + /* disable the panel fitter on everything but LVDS */ + I915_WRITE(PFIT_CONTROL, 0); + } if (HAS_PCH_SPLIT(dev)) { dpd_is_edp = intel_dpd_is_edp(dev); @@ -5435,19 +5792,19 @@ static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); drm_framebuffer_cleanup(fb); - drm_gem_object_unreference_unlocked(intel_fb->obj); + drm_gem_object_unreference_unlocked(&intel_fb->obj->base); kfree(intel_fb); } static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb, - struct drm_file *file_priv, + struct drm_file *file, unsigned int *handle) { struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); - struct drm_gem_object *object = intel_fb->obj; + struct drm_i915_gem_object *obj = intel_fb->obj; - return drm_gem_handle_create(file_priv, object, handle); + return drm_gem_handle_create(file, &obj->base, handle); } static const struct drm_framebuffer_funcs intel_fb_funcs = { @@ -5458,12 +5815,11 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = { int intel_framebuffer_init(struct drm_device *dev, struct intel_framebuffer *intel_fb, struct drm_mode_fb_cmd *mode_cmd, - struct drm_gem_object *obj) + struct drm_i915_gem_object *obj) { - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); int ret; - if (obj_priv->tiling_mode == I915_TILING_Y) + if (obj->tiling_mode == I915_TILING_Y) return -EINVAL; if (mode_cmd->pitch & 63) @@ -5495,11 +5851,11 @@ intel_user_framebuffer_create(struct drm_device *dev, struct drm_file *filp, struct drm_mode_fb_cmd *mode_cmd) { - struct drm_gem_object *obj; + struct drm_i915_gem_object *obj; struct intel_framebuffer *intel_fb; int ret; - obj = drm_gem_object_lookup(dev, filp, mode_cmd->handle); + obj = to_intel_bo(drm_gem_object_lookup(dev, filp, mode_cmd->handle)); if (!obj) return ERR_PTR(-ENOENT); @@ -5507,10 +5863,9 @@ intel_user_framebuffer_create(struct drm_device *dev, if (!intel_fb) return ERR_PTR(-ENOMEM); - ret = intel_framebuffer_init(dev, intel_fb, - mode_cmd, obj); + ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj); if (ret) { - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_unreference_unlocked(&obj->base); kfree(intel_fb); return ERR_PTR(ret); } @@ -5523,10 +5878,10 @@ static const struct drm_mode_config_funcs intel_mode_funcs = { .output_poll_changed = intel_fb_output_poll_changed, }; -static struct drm_gem_object * +static struct drm_i915_gem_object * 
intel_alloc_context_page(struct drm_device *dev) { - struct drm_gem_object *ctx; + struct drm_i915_gem_object *ctx; int ret; ctx = i915_gem_alloc_object(dev, 4096); @@ -5536,7 +5891,7 @@ intel_alloc_context_page(struct drm_device *dev) } mutex_lock(&dev->struct_mutex); - ret = i915_gem_object_pin(ctx, 4096); + ret = i915_gem_object_pin(ctx, 4096, true); if (ret) { DRM_ERROR("failed to pin power context: %d\n", ret); goto err_unref; @@ -5554,7 +5909,7 @@ intel_alloc_context_page(struct drm_device *dev) err_unpin: i915_gem_object_unpin(ctx); err_unref: - drm_gem_object_unreference(ctx); + drm_gem_object_unreference(&ctx->base); mutex_unlock(&dev->struct_mutex); return NULL; } @@ -5666,6 +6021,25 @@ void ironlake_disable_drps(struct drm_device *dev) } +void gen6_set_rps(struct drm_device *dev, u8 val) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 swreq; + + swreq = (val & 0x3ff) << 25; + I915_WRITE(GEN6_RPNSWREQ, swreq); +} + +void gen6_disable_rps(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + I915_WRITE(GEN6_RPNSWREQ, 1 << 31); + I915_WRITE(GEN6_PMINTRMSK, 0xffffffff); + I915_WRITE(GEN6_PMIER, 0); + I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR)); +} + static unsigned long intel_pxfreq(u32 vidfreq) { unsigned long freq; @@ -5752,7 +6126,96 @@ void intel_init_emon(struct drm_device *dev) dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK); } -void intel_init_clock_gating(struct drm_device *dev) +void gen6_enable_rps(struct drm_i915_private *dev_priv) +{ + int i; + + /* Here begins a magic sequence of register writes to enable + * auto-downclocking. + * + * Perhaps there might be some value in exposing these to + * userspace... + */ + I915_WRITE(GEN6_RC_STATE, 0); + __gen6_force_wake_get(dev_priv); + + /* disable the counters and set deterministic thresholds */ + I915_WRITE(GEN6_RC_CONTROL, 0); + + I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16); + I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30); + I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30); + I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); + I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); + + for (i = 0; i < I915_NUM_RINGS; i++) + I915_WRITE(RING_MAX_IDLE(dev_priv->ring[i].mmio_base), 10); + + I915_WRITE(GEN6_RC_SLEEP, 0); + I915_WRITE(GEN6_RC1e_THRESHOLD, 1000); + I915_WRITE(GEN6_RC6_THRESHOLD, 50000); + I915_WRITE(GEN6_RC6p_THRESHOLD, 100000); + I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */ + + I915_WRITE(GEN6_RC_CONTROL, + GEN6_RC_CTL_RC6p_ENABLE | + GEN6_RC_CTL_RC6_ENABLE | + GEN6_RC_CTL_EI_MODE(1) | + GEN6_RC_CTL_HW_ENABLE); + + I915_WRITE(GEN6_RPNSWREQ, + GEN6_FREQUENCY(10) | + GEN6_OFFSET(0) | + GEN6_AGGRESSIVE_TURBO); + I915_WRITE(GEN6_RC_VIDEO_FREQ, + GEN6_FREQUENCY(12)); + + I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000); + I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, + 18 << 24 | + 6 << 16); + I915_WRITE(GEN6_RP_UP_THRESHOLD, 90000); + I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 100000); + I915_WRITE(GEN6_RP_UP_EI, 100000); + I915_WRITE(GEN6_RP_DOWN_EI, 300000); + I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); + I915_WRITE(GEN6_RP_CONTROL, + GEN6_RP_MEDIA_TURBO | + GEN6_RP_USE_NORMAL_FREQ | + GEN6_RP_MEDIA_IS_GFX | + GEN6_RP_ENABLE | + GEN6_RP_UP_BUSY_MAX | + GEN6_RP_DOWN_BUSY_MIN); + + if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, + 500)) + DRM_ERROR("timeout waiting for pcode mailbox to become idle\n"); + + I915_WRITE(GEN6_PCODE_DATA, 0); + I915_WRITE(GEN6_PCODE_MAILBOX, + GEN6_PCODE_READY | + GEN6_PCODE_WRITE_MIN_FREQ_TABLE); + if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & 
GEN6_PCODE_READY) == 0, + 500)) + DRM_ERROR("timeout waiting for pcode mailbox to finish\n"); + + /* requires MSI enabled */ + I915_WRITE(GEN6_PMIER, + GEN6_PM_MBOX_EVENT | + GEN6_PM_THERMAL_EVENT | + GEN6_PM_RP_DOWN_TIMEOUT | + GEN6_PM_RP_UP_THRESHOLD | + GEN6_PM_RP_DOWN_THRESHOLD | + GEN6_PM_RP_UP_EI_EXPIRED | + GEN6_PM_RP_DOWN_EI_EXPIRED); + I915_WRITE(GEN6_PMIMR, 0); + /* enable all PM interrupts */ + I915_WRITE(GEN6_PMINTRMSK, 0); + + __gen6_force_wake_put(dev_priv); +} + +void intel_enable_clock_gating(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -5800,9 +6263,9 @@ void intel_init_clock_gating(struct drm_device *dev) I915_WRITE(DISP_ARB_CTL, (I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS)); - I915_WRITE(WM3_LP_ILK, 0); - I915_WRITE(WM2_LP_ILK, 0); - I915_WRITE(WM1_LP_ILK, 0); + I915_WRITE(WM3_LP_ILK, 0); + I915_WRITE(WM2_LP_ILK, 0); + I915_WRITE(WM1_LP_ILK, 0); } /* * Based on the document from hardware guys the following bits @@ -5824,7 +6287,49 @@ void intel_init_clock_gating(struct drm_device *dev) ILK_DPFC_DIS2 | ILK_CLK_FBC); } - return; + + I915_WRITE(ILK_DISPLAY_CHICKEN2, + I915_READ(ILK_DISPLAY_CHICKEN2) | + ILK_ELPIN_409_SELECT); + + if (IS_GEN5(dev)) { + I915_WRITE(_3D_CHICKEN2, + _3D_CHICKEN2_WM_READ_PIPELINED << 16 | + _3D_CHICKEN2_WM_READ_PIPELINED); + } + + if (IS_GEN6(dev)) { + I915_WRITE(WM3_LP_ILK, 0); + I915_WRITE(WM2_LP_ILK, 0); + I915_WRITE(WM1_LP_ILK, 0); + + /* + * According to the spec the following bits should be + * set in order to enable memory self-refresh and fbc: + * The bit21 and bit22 of 0x42000 + * The bit21 and bit22 of 0x42004 + * The bit5 and bit7 of 0x42020 + * The bit14 of 0x70180 + * The bit14 of 0x71180 + */ + I915_WRITE(ILK_DISPLAY_CHICKEN1, + I915_READ(ILK_DISPLAY_CHICKEN1) | + ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS); + I915_WRITE(ILK_DISPLAY_CHICKEN2, + I915_READ(ILK_DISPLAY_CHICKEN2) | + ILK_DPARB_GATE | ILK_VSDPFD_FULL); + I915_WRITE(ILK_DSPCLK_GATE, + I915_READ(ILK_DSPCLK_GATE) | + ILK_DPARB_CLK_GATE | + ILK_DPFD_CLK_GATE); + + I915_WRITE(DSPACNTR, + I915_READ(DSPACNTR) | + DISPPLANE_TRICKLE_FEED_DISABLE); + I915_WRITE(DSPBCNTR, + I915_READ(DSPBCNTR) | + DISPPLANE_TRICKLE_FEED_DISABLE); + } } else if (IS_G4X(dev)) { uint32_t dspclk_gate; I915_WRITE(RENCLK_GATE_D1, 0); @@ -5867,20 +6372,18 @@ void intel_init_clock_gating(struct drm_device *dev) * GPU can automatically power down the render unit if given a page * to save state. 
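gen6_enable_rps() above gates its pcode writes on wait_for(..., 500), i.e. polling a READY bit with a bounded timeout rather than sleeping unconditionally. A self-contained version of that poll-with-timeout idiom; the fake mailbox here simply reports busy for two polls and then goes idle:

#include <stdio.h>

#define PCODE_READY (1u << 31)

static int polls;
static unsigned int read_mailbox(void)
{
	/* Fake hardware: busy for two polls, then idle. */
	return ++polls < 3 ? PCODE_READY : 0;
}

/* Poll until READY clears, up to timeout_ms tries; 0 on success. */
static int wait_for_idle(int timeout_ms)
{
	for (int i = 0; i < timeout_ms; i++) {
		if ((read_mailbox() & PCODE_READY) == 0)
			return 0;
		/* the driver sleeps roughly 1 ms per iteration here */
	}
	return -1;
}

int main(void)
{
	if (wait_for_idle(500))
		fprintf(stderr, "timeout waiting for pcode mailbox\n");
	else
		puts("mailbox idle");
	return 0;
}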
*/ - if (IS_IRONLAKE_M(dev)) { + if (IS_IRONLAKE_M(dev) && 0) { /* XXX causes a failure during suspend */ if (dev_priv->renderctx == NULL) dev_priv->renderctx = intel_alloc_context_page(dev); if (dev_priv->renderctx) { - struct drm_i915_gem_object *obj_priv; - obj_priv = to_intel_bo(dev_priv->renderctx); - if (obj_priv) { - BEGIN_LP_RING(4); + struct drm_i915_gem_object *obj = dev_priv->renderctx; + if (BEGIN_LP_RING(4) == 0) { OUT_RING(MI_SET_CONTEXT); - OUT_RING(obj_priv->gtt_offset | - MI_MM_SPACE_GTT | - MI_SAVE_EXT_STATE_EN | - MI_RESTORE_EXT_STATE_EN | - MI_RESTORE_INHIBIT); + OUT_RING(obj->gtt_offset | + MI_MM_SPACE_GTT | + MI_SAVE_EXT_STATE_EN | + MI_RESTORE_EXT_STATE_EN | + MI_RESTORE_INHIBIT); OUT_RING(MI_NOOP); OUT_RING(MI_FLUSH); ADVANCE_LP_RING(); @@ -5890,29 +6393,45 @@ void intel_init_clock_gating(struct drm_device *dev) "Disable RC6\n"); } - if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) { - struct drm_i915_gem_object *obj_priv = NULL; - + if (IS_GEN4(dev) && IS_MOBILE(dev)) { + if (dev_priv->pwrctx == NULL) + dev_priv->pwrctx = intel_alloc_context_page(dev); if (dev_priv->pwrctx) { - obj_priv = to_intel_bo(dev_priv->pwrctx); - } else { - struct drm_gem_object *pwrctx; - - pwrctx = intel_alloc_context_page(dev); - if (pwrctx) { - dev_priv->pwrctx = pwrctx; - obj_priv = to_intel_bo(pwrctx); - } - } - - if (obj_priv) { - I915_WRITE(PWRCTXA, obj_priv->gtt_offset | PWRCTX_EN); + struct drm_i915_gem_object *obj = dev_priv->pwrctx; + I915_WRITE(PWRCTXA, obj->gtt_offset | PWRCTX_EN); I915_WRITE(MCHBAR_RENDER_STANDBY, I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT); } } } +void intel_disable_clock_gating(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (dev_priv->renderctx) { + struct drm_i915_gem_object *obj = dev_priv->renderctx; + + I915_WRITE(CCID, 0); + POSTING_READ(CCID); + + i915_gem_object_unpin(obj); + drm_gem_object_unreference(&obj->base); + dev_priv->renderctx = NULL; + } + + if (dev_priv->pwrctx) { + struct drm_i915_gem_object *obj = dev_priv->pwrctx; + + I915_WRITE(PWRCTXA, 0); + POSTING_READ(PWRCTXA); + + i915_gem_object_unpin(obj); + drm_gem_object_unreference(&obj->base); + dev_priv->pwrctx = NULL; + } +} + /* Set up chip specific display functions */ static void intel_init_display(struct drm_device *dev) { @@ -5925,7 +6444,7 @@ static void intel_init_display(struct drm_device *dev) dev_priv->display.dpms = i9xx_crtc_dpms; if (I915_HAS_FBC(dev)) { - if (IS_IRONLAKE_M(dev)) { + if (HAS_PCH_SPLIT(dev)) { dev_priv->display.fbc_enabled = ironlake_fbc_enabled; dev_priv->display.enable_fbc = ironlake_enable_fbc; dev_priv->display.disable_fbc = ironlake_disable_fbc; @@ -5974,6 +6493,14 @@ static void intel_init_display(struct drm_device *dev) "Disable CxSR\n"); dev_priv->display.update_wm = NULL; } + } else if (IS_GEN6(dev)) { + if (SNB_READ_WM0_LATENCY()) { + dev_priv->display.update_wm = sandybridge_update_wm; + } else { + DRM_DEBUG_KMS("Failed to read display plane latency. 
" + "Disable CxSR\n"); + dev_priv->display.update_wm = NULL; + } } else dev_priv->display.update_wm = NULL; } else if (IS_PINEVIEW(dev)) { @@ -6139,7 +6666,7 @@ void intel_modeset_init(struct drm_device *dev) intel_setup_outputs(dev); - intel_init_clock_gating(dev); + intel_enable_clock_gating(dev); /* Just disable it once at startup */ i915_disable_vga(dev); @@ -6149,6 +6676,9 @@ void intel_modeset_init(struct drm_device *dev) intel_init_emon(dev); } + if (IS_GEN6(dev)) + gen6_enable_rps(dev_priv); + INIT_WORK(&dev_priv->idle_work, intel_idle_update); setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer, (unsigned long)dev); @@ -6180,28 +6710,12 @@ void intel_modeset_cleanup(struct drm_device *dev) if (dev_priv->display.disable_fbc) dev_priv->display.disable_fbc(dev); - if (dev_priv->renderctx) { - struct drm_i915_gem_object *obj_priv; - - obj_priv = to_intel_bo(dev_priv->renderctx); - I915_WRITE(CCID, obj_priv->gtt_offset &~ CCID_EN); - I915_READ(CCID); - i915_gem_object_unpin(dev_priv->renderctx); - drm_gem_object_unreference(dev_priv->renderctx); - } - - if (dev_priv->pwrctx) { - struct drm_i915_gem_object *obj_priv; - - obj_priv = to_intel_bo(dev_priv->pwrctx); - I915_WRITE(PWRCTXA, obj_priv->gtt_offset &~ PWRCTX_EN); - I915_READ(PWRCTXA); - i915_gem_object_unpin(dev_priv->pwrctx); - drm_gem_object_unreference(dev_priv->pwrctx); - } - if (IS_IRONLAKE_M(dev)) ironlake_disable_drps(dev); + if (IS_GEN6(dev)) + gen6_disable_rps(dev); + + intel_disable_clock_gating(dev); mutex_unlock(&dev->struct_mutex); @@ -6253,3 +6767,113 @@ int intel_modeset_vga_set_state(struct drm_device *dev, bool state) pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl); return 0; } + +#ifdef CONFIG_DEBUG_FS +#include <linux/seq_file.h> + +struct intel_display_error_state { + struct intel_cursor_error_state { + u32 control; + u32 position; + u32 base; + u32 size; + } cursor[2]; + + struct intel_pipe_error_state { + u32 conf; + u32 source; + + u32 htotal; + u32 hblank; + u32 hsync; + u32 vtotal; + u32 vblank; + u32 vsync; + } pipe[2]; + + struct intel_plane_error_state { + u32 control; + u32 stride; + u32 size; + u32 pos; + u32 addr; + u32 surface; + u32 tile_offset; + } plane[2]; +}; + +struct intel_display_error_state * +intel_display_capture_error_state(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_display_error_state *error; + int i; + + error = kmalloc(sizeof(*error), GFP_ATOMIC); + if (error == NULL) + return NULL; + + for (i = 0; i < 2; i++) { + error->cursor[i].control = I915_READ(CURCNTR(i)); + error->cursor[i].position = I915_READ(CURPOS(i)); + error->cursor[i].base = I915_READ(CURBASE(i)); + + error->plane[i].control = I915_READ(DSPCNTR(i)); + error->plane[i].stride = I915_READ(DSPSTRIDE(i)); + error->plane[i].size = I915_READ(DSPSIZE(i)); + error->plane[i].pos= I915_READ(DSPPOS(i)); + error->plane[i].addr = I915_READ(DSPADDR(i)); + if (INTEL_INFO(dev)->gen >= 4) { + error->plane[i].surface = I915_READ(DSPSURF(i)); + error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i)); + } + + error->pipe[i].conf = I915_READ(PIPECONF(i)); + error->pipe[i].source = I915_READ(PIPESRC(i)); + error->pipe[i].htotal = I915_READ(HTOTAL(i)); + error->pipe[i].hblank = I915_READ(HBLANK(i)); + error->pipe[i].hsync = I915_READ(HSYNC(i)); + error->pipe[i].vtotal = I915_READ(VTOTAL(i)); + error->pipe[i].vblank = I915_READ(VBLANK(i)); + error->pipe[i].vsync = I915_READ(VSYNC(i)); + } + + return error; +} + +void +intel_display_print_error_state(struct seq_file *m, 
+ struct drm_device *dev, + struct intel_display_error_state *error) +{ + int i; + + for (i = 0; i < 2; i++) { + seq_printf(m, "Pipe [%d]:\n", i); + seq_printf(m, " CONF: %08x\n", error->pipe[i].conf); + seq_printf(m, " SRC: %08x\n", error->pipe[i].source); + seq_printf(m, " HTOTAL: %08x\n", error->pipe[i].htotal); + seq_printf(m, " HBLANK: %08x\n", error->pipe[i].hblank); + seq_printf(m, " HSYNC: %08x\n", error->pipe[i].hsync); + seq_printf(m, " VTOTAL: %08x\n", error->pipe[i].vtotal); + seq_printf(m, " VBLANK: %08x\n", error->pipe[i].vblank); + seq_printf(m, " VSYNC: %08x\n", error->pipe[i].vsync); + + seq_printf(m, "Plane [%d]:\n", i); + seq_printf(m, " CNTR: %08x\n", error->plane[i].control); + seq_printf(m, " STRIDE: %08x\n", error->plane[i].stride); + seq_printf(m, " SIZE: %08x\n", error->plane[i].size); + seq_printf(m, " POS: %08x\n", error->plane[i].pos); + seq_printf(m, " ADDR: %08x\n", error->plane[i].addr); + if (INTEL_INFO(dev)->gen >= 4) { + seq_printf(m, " SURF: %08x\n", error->plane[i].surface); + seq_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset); + } + + seq_printf(m, "Cursor [%d]:\n", i); + seq_printf(m, " CNTR: %08x\n", error->cursor[i].control); + seq_printf(m, " POS: %08x\n", error->cursor[i].position); + seq_printf(m, " BASE: %08x\n", error->cursor[i].base); + } +} +#endif diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index c8e005553310..1dc60408d5b8 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -479,6 +479,7 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, uint16_t address = algo_data->address; uint8_t msg[5]; uint8_t reply[2]; + unsigned retry; int msg_bytes; int reply_bytes; int ret; @@ -513,14 +514,33 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, break; } - for (;;) { - ret = intel_dp_aux_ch(intel_dp, - msg, msg_bytes, - reply, reply_bytes); + for (retry = 0; retry < 5; retry++) { + ret = intel_dp_aux_ch(intel_dp, + msg, msg_bytes, + reply, reply_bytes); if (ret < 0) { DRM_DEBUG_KMS("aux_ch failed %d\n", ret); return ret; } + + switch (reply[0] & AUX_NATIVE_REPLY_MASK) { + case AUX_NATIVE_REPLY_ACK: + /* I2C-over-AUX Reply field is only valid + * when paired with AUX ACK. 
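The reworked AUX loop above retries the whole transaction at most five times and keys its behaviour off the native reply field first, before looking at the I2C reply just below: ACK falls through, NACK fails hard, DEFER waits and retries. A condensed standalone model of that classification (the canned reply sequence is an illustrative placeholder, not real hardware behaviour):

#include <stdio.h>

enum native_reply { N_ACK, N_NACK, N_DEFER };

/* Canned replies: two defers, then an ack. */
static enum native_reply replies[] = { N_DEFER, N_DEFER, N_ACK };

static enum native_reply do_aux_transaction(unsigned int attempt)
{
	return replies[attempt];	/* safe: loop returns on the ack */
}

int main(void)
{
	for (unsigned int retry = 0; retry < 5; retry++) {
		switch (do_aux_transaction(retry)) {
		case N_ACK:
			puts("native ack: inspect the I2C reply next");
			return 0;
		case N_NACK:
			fprintf(stderr, "native nack\n");
			return -1;
		case N_DEFER:
			puts("native defer: wait ~100us, retry");
			continue;	/* udelay(100) in the driver */
		}
	}
	fprintf(stderr, "too many retries, giving up\n");
	return -1;
}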
+ */ + break; + case AUX_NATIVE_REPLY_NACK: + DRM_DEBUG_KMS("aux_ch native nack\n"); + return -EREMOTEIO; + case AUX_NATIVE_REPLY_DEFER: + udelay(100); + continue; + default: + DRM_ERROR("aux_ch invalid native reply 0x%02x\n", + reply[0]); + return -EREMOTEIO; + } + switch (reply[0] & AUX_I2C_REPLY_MASK) { case AUX_I2C_REPLY_ACK: if (mode == MODE_I2C_READ) { @@ -528,17 +548,20 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, } return reply_bytes - 1; case AUX_I2C_REPLY_NACK: - DRM_DEBUG_KMS("aux_ch nack\n"); + DRM_DEBUG_KMS("aux_i2c nack\n"); return -EREMOTEIO; case AUX_I2C_REPLY_DEFER: - DRM_DEBUG_KMS("aux_ch defer\n"); + DRM_DEBUG_KMS("aux_i2c defer\n"); udelay(100); break; default: - DRM_ERROR("aux_ch invalid reply 0x%02x\n", reply[0]); + DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]); return -EREMOTEIO; } } + + DRM_ERROR("too many retries, giving up\n"); + return -EREMOTEIO; } static int @@ -584,17 +607,6 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, mode->clock = dev_priv->panel_fixed_mode->clock; } - /* Just use VBT values for eDP */ - if (is_edp(intel_dp)) { - intel_dp->lane_count = dev_priv->edp.lanes; - intel_dp->link_bw = dev_priv->edp.rate; - adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw); - DRM_DEBUG_KMS("eDP link bw %02x lane count %d clock %d\n", - intel_dp->link_bw, intel_dp->lane_count, - adjusted_mode->clock); - return true; - } - for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { for (clock = 0; clock <= max_clock; clock++) { int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count); @@ -613,6 +625,19 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, } } + if (is_edp(intel_dp)) { + /* okay we failed just pick the highest */ + intel_dp->lane_count = max_lane_count; + intel_dp->link_bw = bws[max_clock]; + adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw); + DRM_DEBUG_KMS("Force picking display port link bw %02x lane " + "count %d clock %d\n", + intel_dp->link_bw, intel_dp->lane_count, + adjusted_mode->clock); + + return true; + } + return false; } @@ -1087,21 +1112,11 @@ intel_get_adjust_train(struct intel_dp *intel_dp) } static uint32_t -intel_dp_signal_levels(struct intel_dp *intel_dp) +intel_dp_signal_levels(uint8_t train_set, int lane_count) { - struct drm_device *dev = intel_dp->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t signal_levels = 0; - u8 train_set = intel_dp->train_set[0]; - u32 vswing = train_set & DP_TRAIN_VOLTAGE_SWING_MASK; - u32 preemphasis = train_set & DP_TRAIN_PRE_EMPHASIS_MASK; + uint32_t signal_levels = 0; - if (is_edp(intel_dp)) { - vswing = dev_priv->edp.vswing; - preemphasis = dev_priv->edp.preemphasis; - } - - switch (vswing) { + switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { case DP_TRAIN_VOLTAGE_SWING_400: default: signal_levels |= DP_VOLTAGE_0_4; @@ -1116,7 +1131,7 @@ intel_dp_signal_levels(struct intel_dp *intel_dp) signal_levels |= DP_VOLTAGE_1_2; break; } - switch (preemphasis) { + switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { case DP_TRAIN_PRE_EMPHASIS_0: default: signal_levels |= DP_PRE_EMPHASIS_0; @@ -1203,18 +1218,6 @@ intel_channel_eq_ok(struct intel_dp *intel_dp) } static bool -intel_dp_aux_handshake_required(struct intel_dp *intel_dp) -{ - struct drm_device *dev = intel_dp->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - - if (is_edp(intel_dp) && dev_priv->no_aux_handshake) - return false; - - return 
true; -} - -static bool intel_dp_set_link_train(struct intel_dp *intel_dp, uint32_t dp_reg_value, uint8_t dp_train_pat) @@ -1226,9 +1229,6 @@ intel_dp_set_link_train(struct intel_dp *intel_dp, I915_WRITE(intel_dp->output_reg, dp_reg_value); POSTING_READ(intel_dp->output_reg); - if (!intel_dp_aux_handshake_required(intel_dp)) - return true; - intel_dp_aux_native_write_1(intel_dp, DP_TRAINING_PATTERN_SET, dp_train_pat); @@ -1261,11 +1261,10 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) POSTING_READ(intel_dp->output_reg); intel_wait_for_vblank(dev, intel_crtc->pipe); - if (intel_dp_aux_handshake_required(intel_dp)) - /* Write the link configuration data */ - intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, - intel_dp->link_configuration, - DP_LINK_CONFIGURATION_SIZE); + /* Write the link configuration data */ + intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, + intel_dp->link_configuration, + DP_LINK_CONFIGURATION_SIZE); DP |= DP_PORT_EN; if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) @@ -1283,7 +1282,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; } else { - signal_levels = intel_dp_signal_levels(intel_dp); + signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count); DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; } @@ -1297,37 +1296,33 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) break; /* Set training pattern 1 */ - udelay(500); - if (intel_dp_aux_handshake_required(intel_dp)) { + udelay(100); + if (!intel_dp_get_link_status(intel_dp)) break; - } else { - if (!intel_dp_get_link_status(intel_dp)) - break; - if (intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) { - clock_recovery = true; - break; - } + if (intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) { + clock_recovery = true; + break; + } - /* Check to see if we've tried the max voltage */ - for (i = 0; i < intel_dp->lane_count; i++) - if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) - break; - if (i == intel_dp->lane_count) + /* Check to see if we've tried the max voltage */ + for (i = 0; i < intel_dp->lane_count; i++) + if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) break; + if (i == intel_dp->lane_count) + break; - /* Check to see if we've tried the same voltage 5 times */ - if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { - ++tries; - if (tries == 5) - break; - } else - tries = 0; - voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; + /* Check to see if we've tried the same voltage 5 times */ + if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { + ++tries; + if (tries == 5) + break; + } else + tries = 0; + voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; - /* Compute new intel_dp->train_set as requested by target */ - intel_get_adjust_train(intel_dp); - } + /* Compute new intel_dp->train_set as requested by target */ + intel_get_adjust_train(intel_dp); } intel_dp->DP = DP; @@ -1354,7 +1349,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp) signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; } else { - signal_levels = intel_dp_signal_levels(intel_dp); + signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count); DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | 
signal_levels; } @@ -1368,28 +1363,24 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp) DP_TRAINING_PATTERN_2)) break; - udelay(500); - - if (!intel_dp_aux_handshake_required(intel_dp)) { + udelay(400); + if (!intel_dp_get_link_status(intel_dp)) break; - } else { - if (!intel_dp_get_link_status(intel_dp)) - break; - if (intel_channel_eq_ok(intel_dp)) { - channel_eq = true; - break; - } + if (intel_channel_eq_ok(intel_dp)) { + channel_eq = true; + break; + } - /* Try 5 times */ - if (tries > 5) - break; + /* Try 5 times */ + if (tries > 5) + break; - /* Compute new intel_dp->train_set as requested by target */ - intel_get_adjust_train(intel_dp); - ++tries; - } + /* Compute new intel_dp->train_set as requested by target */ + intel_get_adjust_train(intel_dp); + ++tries; } + if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) reg = DP | DP_LINK_TRAIN_OFF_CPT; else @@ -1408,6 +1399,9 @@ intel_dp_link_down(struct intel_dp *intel_dp) struct drm_i915_private *dev_priv = dev->dev_private; uint32_t DP = intel_dp->DP; + if ((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0) + return; + DRM_DEBUG_KMS("\n"); if (is_edp(intel_dp)) { @@ -1430,6 +1424,27 @@ intel_dp_link_down(struct intel_dp *intel_dp) if (is_edp(intel_dp)) DP |= DP_LINK_TRAIN_OFF; + + if (!HAS_PCH_CPT(dev) && + I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) { + struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc); + /* Hardware workaround: leaving our transcoder select + * set to transcoder B while it's off will prevent the + * corresponding HDMI output on transcoder A. + * + * Combine this with another hardware workaround: + * transcoder select bit can only be cleared while the + * port is enabled. + */ + DP &= ~DP_PIPEB_SELECT; + I915_WRITE(intel_dp->output_reg, DP); + + /* Changes to enable or select take place the vblank + * after being written. 
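+ * The vblank wait below therefore ensures the cleared
+ * select bit has actually latched before the port is
+ * disabled.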
+ */ + intel_wait_for_vblank(dev, intel_crtc->pipe); + } + I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN); POSTING_READ(intel_dp->output_reg); } diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 21551fe74541..d782ad9fd6db 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -127,7 +127,7 @@ intel_mode_get_pixel_multiplier(const struct drm_display_mode *mode) struct intel_framebuffer { struct drm_framebuffer base; - struct drm_gem_object *obj; + struct drm_i915_gem_object *obj; }; struct intel_fbdev { @@ -166,7 +166,7 @@ struct intel_crtc { struct intel_unpin_work *unpin_work; int fdi_lanes; - struct drm_gem_object *cursor_bo; + struct drm_i915_gem_object *cursor_bo; uint32_t cursor_addr; int16_t cursor_x, cursor_y; int16_t cursor_width, cursor_height; @@ -220,8 +220,8 @@ intel_get_crtc_for_pipe(struct drm_device *dev, int pipe) struct intel_unpin_work { struct work_struct work; struct drm_device *dev; - struct drm_gem_object *old_fb_obj; - struct drm_gem_object *pending_flip_obj; + struct drm_i915_gem_object *old_fb_obj; + struct drm_i915_gem_object *pending_flip_obj; struct drm_pending_vblank_event *event; int pending; bool enable_stall_check; @@ -236,8 +236,9 @@ void intel_dip_infoframe_csum(struct dip_infoframe *avi_if); extern bool intel_sdvo_init(struct drm_device *dev, int output_device); extern void intel_dvo_init(struct drm_device *dev); extern void intel_tv_init(struct drm_device *dev); -extern void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj); -extern void intel_lvds_init(struct drm_device *dev); +extern void intel_mark_busy(struct drm_device *dev, + struct drm_i915_gem_object *obj); +extern bool intel_lvds_init(struct drm_device *dev); extern void intel_dp_init(struct drm_device *dev, int dp_reg); void intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, @@ -293,19 +294,22 @@ extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, u16 blue, int regno); extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue, int regno); -extern void intel_init_clock_gating(struct drm_device *dev); +extern void intel_enable_clock_gating(struct drm_device *dev); +extern void intel_disable_clock_gating(struct drm_device *dev); extern void ironlake_enable_drps(struct drm_device *dev); extern void ironlake_disable_drps(struct drm_device *dev); +extern void gen6_enable_rps(struct drm_i915_private *dev_priv); +extern void gen6_disable_rps(struct drm_device *dev); extern void intel_init_emon(struct drm_device *dev); extern int intel_pin_and_fence_fb_obj(struct drm_device *dev, - struct drm_gem_object *obj, - bool pipelined); + struct drm_i915_gem_object *obj, + struct intel_ring_buffer *pipelined); extern int intel_framebuffer_init(struct drm_device *dev, struct intel_framebuffer *ifb, struct drm_mode_fb_cmd *mode_cmd, - struct drm_gem_object *obj); + struct drm_i915_gem_object *obj); extern int intel_fbdev_init(struct drm_device *dev); extern void intel_fbdev_fini(struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index ced3eef8da07..67738f32dfd4 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c @@ -65,8 +65,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev, struct fb_info *info; struct drm_framebuffer *fb; struct drm_mode_fb_cmd mode_cmd; - struct drm_gem_object *fbo = NULL; - struct drm_i915_gem_object *obj_priv; + struct 
drm_i915_gem_object *obj; struct device *device = &dev->pdev->dev; int size, ret; @@ -83,18 +82,17 @@ static int intelfb_create(struct intel_fbdev *ifbdev, size = mode_cmd.pitch * mode_cmd.height; size = ALIGN(size, PAGE_SIZE); - fbo = i915_gem_alloc_object(dev, size); - if (!fbo) { + obj = i915_gem_alloc_object(dev, size); + if (!obj) { DRM_ERROR("failed to allocate framebuffer\n"); ret = -ENOMEM; goto out; } - obj_priv = to_intel_bo(fbo); mutex_lock(&dev->struct_mutex); /* Flush everything out, we'll be doing GTT only from now on */ - ret = intel_pin_and_fence_fb_obj(dev, fbo, false); + ret = intel_pin_and_fence_fb_obj(dev, obj, false); if (ret) { DRM_ERROR("failed to pin fb: %d\n", ret); goto out_unref; @@ -108,7 +106,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev, info->par = ifbdev; - ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, fbo); + ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj); if (ret) goto out_unpin; @@ -134,11 +132,10 @@ static int intelfb_create(struct intel_fbdev *ifbdev, else info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0); - info->fix.smem_start = dev->mode_config.fb_base + obj_priv->gtt_offset; + info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset; info->fix.smem_len = size; - info->screen_base = ioremap_wc(dev->agp->base + obj_priv->gtt_offset, - size); + info->screen_base = ioremap_wc(dev->agp->base + obj->gtt_offset, size); if (!info->screen_base) { ret = -ENOSPC; goto out_unpin; @@ -164,7 +161,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev, DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n", fb->width, fb->height, - obj_priv->gtt_offset, fbo); + obj->gtt_offset, obj); mutex_unlock(&dev->struct_mutex); @@ -172,9 +169,9 @@ static int intelfb_create(struct intel_fbdev *ifbdev, return 0; out_unpin: - i915_gem_object_unpin(fbo); + i915_gem_object_unpin(obj); out_unref: - drm_gem_object_unreference(fbo); + drm_gem_object_unreference(&obj->base); mutex_unlock(&dev->struct_mutex); out: return ret; @@ -221,7 +218,7 @@ static void intel_fbdev_destroy(struct drm_device *dev, drm_framebuffer_cleanup(&ifb->base); if (ifb->obj) { - drm_gem_object_unreference_unlocked(ifb->obj); + drm_gem_object_unreference_unlocked(&ifb->obj->base); ifb->obj = NULL; } } diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index 3dba086e7eea..58040f68ed7a 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -85,8 +85,9 @@ static u32 get_reserved(struct intel_gpio *gpio) /* On most chips, these bits must be preserved in software. 
*/ if (!IS_I830(dev) && !IS_845G(dev)) - reserved = I915_READ(gpio->reg) & (GPIO_DATA_PULLUP_DISABLE | - GPIO_CLOCK_PULLUP_DISABLE); + reserved = I915_READ_NOTRACE(gpio->reg) & + (GPIO_DATA_PULLUP_DISABLE | + GPIO_CLOCK_PULLUP_DISABLE); return reserved; } @@ -96,9 +97,9 @@ static int get_clock(void *data) struct intel_gpio *gpio = data; struct drm_i915_private *dev_priv = gpio->dev_priv; u32 reserved = get_reserved(gpio); - I915_WRITE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK); - I915_WRITE(gpio->reg, reserved); - return (I915_READ(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0; + I915_WRITE_NOTRACE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK); + I915_WRITE_NOTRACE(gpio->reg, reserved); + return (I915_READ_NOTRACE(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0; } static int get_data(void *data) @@ -106,9 +107,9 @@ static int get_data(void *data) struct intel_gpio *gpio = data; struct drm_i915_private *dev_priv = gpio->dev_priv; u32 reserved = get_reserved(gpio); - I915_WRITE(gpio->reg, reserved | GPIO_DATA_DIR_MASK); - I915_WRITE(gpio->reg, reserved); - return (I915_READ(gpio->reg) & GPIO_DATA_VAL_IN) != 0; + I915_WRITE_NOTRACE(gpio->reg, reserved | GPIO_DATA_DIR_MASK); + I915_WRITE_NOTRACE(gpio->reg, reserved); + return (I915_READ_NOTRACE(gpio->reg) & GPIO_DATA_VAL_IN) != 0; } static void set_clock(void *data, int state_high) @@ -124,7 +125,7 @@ static void set_clock(void *data, int state_high) clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK | GPIO_CLOCK_VAL_MASK; - I915_WRITE(gpio->reg, reserved | clock_bits); + I915_WRITE_NOTRACE(gpio->reg, reserved | clock_bits); POSTING_READ(gpio->reg); } @@ -141,7 +142,7 @@ static void set_data(void *data, int state_high) data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK | GPIO_DATA_VAL_MASK; - I915_WRITE(gpio->reg, reserved | data_bits); + I915_WRITE_NOTRACE(gpio->reg, reserved | data_bits); POSTING_READ(gpio->reg); } diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 4324a326f98e..aa2307080be2 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -68,7 +68,7 @@ static struct intel_lvds *intel_attached_lvds(struct drm_connector *connector) /** * Sets the power state for the panel. */ -static void intel_lvds_set_power(struct intel_lvds *intel_lvds, bool on) +static void intel_lvds_enable(struct intel_lvds *intel_lvds) { struct drm_device *dev = intel_lvds->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -82,26 +82,61 @@ static void intel_lvds_set_power(struct intel_lvds *intel_lvds, bool on) lvds_reg = LVDS; } - if (on) { - I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN); - I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON); - intel_panel_set_backlight(dev, dev_priv->backlight_level); - } else { - dev_priv->backlight_level = intel_panel_get_backlight(dev); - - intel_panel_set_backlight(dev, 0); - I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON); + I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN); - if (intel_lvds->pfit_control) { - if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000)) - DRM_ERROR("timed out waiting for panel to power off\n"); - I915_WRITE(PFIT_CONTROL, 0); - intel_lvds->pfit_control = 0; + if (intel_lvds->pfit_dirty) { + /* + * Enable automatic panel scaling so that non-native modes + * fill the screen. The panel fitter should only be + * adjusted whilst the pipe is disabled, according to + * register description and PRM. 
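+ * Hence the wait below for PP_ON to clear: the panel must
+ * have powered off before the new ratios are written.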
+ */ + DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n", + intel_lvds->pfit_control, + intel_lvds->pfit_pgm_ratios); + if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000)) { + DRM_ERROR("timed out waiting for panel to power off\n"); + } else { + I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios); + I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control); intel_lvds->pfit_dirty = false; } + } + + I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON); + POSTING_READ(lvds_reg); - I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN); + intel_panel_set_backlight(dev, dev_priv->backlight_level); +} + +static void intel_lvds_disable(struct intel_lvds *intel_lvds) +{ + struct drm_device *dev = intel_lvds->base.base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + u32 ctl_reg, lvds_reg; + + if (HAS_PCH_SPLIT(dev)) { + ctl_reg = PCH_PP_CONTROL; + lvds_reg = PCH_LVDS; + } else { + ctl_reg = PP_CONTROL; + lvds_reg = LVDS; } + + dev_priv->backlight_level = intel_panel_get_backlight(dev); + intel_panel_set_backlight(dev, 0); + + I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON); + + if (intel_lvds->pfit_control) { + if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000)) + DRM_ERROR("timed out waiting for panel to power off\n"); + + I915_WRITE(PFIT_CONTROL, 0); + intel_lvds->pfit_dirty = true; + } + + I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN); POSTING_READ(lvds_reg); } @@ -110,9 +145,9 @@ static void intel_lvds_dpms(struct drm_encoder *encoder, int mode) struct intel_lvds *intel_lvds = to_intel_lvds(encoder); if (mode == DRM_MODE_DPMS_ON) - intel_lvds_set_power(intel_lvds, true); + intel_lvds_enable(intel_lvds); else - intel_lvds_set_power(intel_lvds, false); + intel_lvds_disable(intel_lvds); /* XXX: We never power down the LVDS pairs. */ } @@ -269,14 +304,13 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay; u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay; - pfit_control |= PFIT_ENABLE; /* 965+ is easy, it does everything in hw */ if (scaled_width > scaled_height) - pfit_control |= PFIT_SCALING_PILLAR; + pfit_control |= PFIT_ENABLE | PFIT_SCALING_PILLAR; else if (scaled_width < scaled_height) - pfit_control |= PFIT_SCALING_LETTER; - else - pfit_control |= PFIT_SCALING_AUTO; + pfit_control |= PFIT_ENABLE | PFIT_SCALING_LETTER; + else if (adjusted_mode->hdisplay != mode->hdisplay) + pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO; } else { u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay; u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay; @@ -323,13 +357,17 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, * Full scaling, even if it changes the aspect ratio. * Fortunately this is all done for us in hw. */ - pfit_control |= PFIT_ENABLE; - if (INTEL_INFO(dev)->gen >= 4) - pfit_control |= PFIT_SCALING_AUTO; - else - pfit_control |= (VERT_AUTO_SCALE | HORIZ_AUTO_SCALE | - VERT_INTERP_BILINEAR | - HORIZ_INTERP_BILINEAR); + if (mode->vdisplay != adjusted_mode->vdisplay || + mode->hdisplay != adjusted_mode->hdisplay) { + pfit_control |= PFIT_ENABLE; + if (INTEL_INFO(dev)->gen >= 4) + pfit_control |= PFIT_SCALING_AUTO; + else + pfit_control |= (VERT_AUTO_SCALE | + VERT_INTERP_BILINEAR | + HORIZ_AUTO_SCALE | + HORIZ_INTERP_BILINEAR); + } break; default: @@ -411,43 +449,18 @@ static void intel_lvds_commit(struct drm_encoder *encoder) /* Always do a full power on as we do not know what state * we were left in. 
*/ - intel_lvds_set_power(intel_lvds, true); + intel_lvds_enable(intel_lvds); } static void intel_lvds_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { - struct drm_device *dev = encoder->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_lvds *intel_lvds = to_intel_lvds(encoder); - /* * The LVDS pin pair will already have been turned on in the * intel_crtc_mode_set since it has a large impact on the DPLL * settings. */ - - if (HAS_PCH_SPLIT(dev)) - return; - - if (!intel_lvds->pfit_dirty) - return; - - /* - * Enable automatic panel scaling so that non-native modes fill the - * screen. Should be enabled before the pipe is enabled, according to - * register description and PRM. - */ - DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n", - intel_lvds->pfit_control, - intel_lvds->pfit_pgm_ratios); - if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000)) - DRM_ERROR("timed out waiting for panel to power off\n"); - - I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios); - I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control); - intel_lvds->pfit_dirty = false; } /** @@ -837,7 +850,7 @@ static bool intel_lvds_ddc_probe(struct drm_device *dev, u8 pin) * Create the connector, register the LVDS DDC bus, and try to figure out what * modes we can display on the LVDS panel (if present). */ -void intel_lvds_init(struct drm_device *dev) +bool intel_lvds_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_lvds *intel_lvds; @@ -853,37 +866,37 @@ void intel_lvds_init(struct drm_device *dev) /* Skip init on machines we know falsely report LVDS */ if (dmi_check_system(intel_no_lvds)) - return; + return false; pin = GMBUS_PORT_PANEL; if (!lvds_is_present_in_vbt(dev, &pin)) { DRM_DEBUG_KMS("LVDS is not present in VBT\n"); - return; + return false; } if (HAS_PCH_SPLIT(dev)) { if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0) - return; + return false; if (dev_priv->edp.support) { DRM_DEBUG_KMS("disable LVDS for eDP support\n"); - return; + return false; } } if (!intel_lvds_ddc_probe(dev, pin)) { DRM_DEBUG_KMS("LVDS did not respond to DDC probe\n"); - return; + return false; } intel_lvds = kzalloc(sizeof(struct intel_lvds), GFP_KERNEL); if (!intel_lvds) { - return; + return false; } intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); if (!intel_connector) { kfree(intel_lvds); - return; + return false; } if (!HAS_PCH_SPLIT(dev)) { @@ -904,6 +917,8 @@ void intel_lvds_init(struct drm_device *dev) intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT); intel_encoder->crtc_mask = (1 << 1); + if (INTEL_INFO(dev)->gen >= 5) + intel_encoder->crtc_mask |= (1 << 0); drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs); drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs); connector->display_info.subpixel_order = SubPixelHorizontalRGB; @@ -1009,10 +1024,18 @@ void intel_lvds_init(struct drm_device *dev) out: if (HAS_PCH_SPLIT(dev)) { u32 pwm; - /* make sure PWM is enabled */ + + pipe = (I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT) ? 
1 : 0; + + /* make sure PWM is enabled and locked to the LVDS pipe */ pwm = I915_READ(BLC_PWM_CPU_CTL2); - pwm |= (PWM_ENABLE | PWM_PIPE_B); - I915_WRITE(BLC_PWM_CPU_CTL2, pwm); + if (pipe == 0 && (pwm & PWM_PIPE_B)) + I915_WRITE(BLC_PWM_CPU_CTL2, pwm & ~PWM_ENABLE); + if (pipe) + pwm |= PWM_PIPE_B; + else + pwm &= ~PWM_PIPE_B; + I915_WRITE(BLC_PWM_CPU_CTL2, pwm | PWM_ENABLE); pwm = I915_READ(BLC_PWM_PCH_CTL1); pwm |= PWM_PCH_ENABLE; @@ -1026,7 +1049,7 @@ out: /* keep the LVDS connector */ dev_priv->int_lvds_connector = connector; drm_sysfs_connector_add(connector); - return; + return true; failed: DRM_DEBUG_KMS("No LVDS modes found, disabling.\n"); @@ -1034,4 +1057,5 @@ failed: drm_encoder_cleanup(encoder); kfree(intel_lvds); kfree(intel_connector); + return false; } diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index 9b0d9a867aea..f295a7aaadf9 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c @@ -273,14 +273,8 @@ void intel_opregion_enable_asle(struct drm_device *dev) struct opregion_asle *asle = dev_priv->opregion.asle; if (asle) { - if (IS_MOBILE(dev)) { - unsigned long irqflags; - - spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); + if (IS_MOBILE(dev)) intel_enable_asle(dev); - spin_unlock_irqrestore(&dev_priv->user_irq_lock, - irqflags); - } asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN | ASLE_PFMB_EN; diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 02ff0a481f47..3fbb98b948d6 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -221,15 +221,16 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay, int ret; BUG_ON(overlay->last_flip_req); - overlay->last_flip_req = - i915_add_request(dev, NULL, request, &dev_priv->render_ring); - if (overlay->last_flip_req == 0) - return -ENOMEM; - + ret = i915_add_request(dev, NULL, request, LP_RING(dev_priv)); + if (ret) { + kfree(request); + return ret; + } + overlay->last_flip_req = request->seqno; overlay->flip_tail = tail; ret = i915_do_wait_request(dev, overlay->last_flip_req, true, - &dev_priv->render_ring); + LP_RING(dev_priv)); if (ret) return ret; @@ -289,6 +290,7 @@ i830_deactivate_pipe_a(struct drm_device *dev) static int intel_overlay_on(struct intel_overlay *overlay) { struct drm_device *dev = overlay->dev; + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_request *request; int pipe_a_quirk = 0; int ret; @@ -308,7 +310,12 @@ static int intel_overlay_on(struct intel_overlay *overlay) goto out; } - BEGIN_LP_RING(4); + ret = BEGIN_LP_RING(4); + if (ret) { + kfree(request); + goto out; + } + OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON); OUT_RING(overlay->flip_addr | OFC_UPDATE); OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); @@ -332,6 +339,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay, struct drm_i915_gem_request *request; u32 flip_addr = overlay->flip_addr; u32 tmp; + int ret; BUG_ON(!overlay->active); @@ -347,36 +355,44 @@ static int intel_overlay_continue(struct intel_overlay *overlay, if (tmp & (1 << 17)) DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp); - BEGIN_LP_RING(2); + ret = BEGIN_LP_RING(2); + if (ret) { + kfree(request); + return ret; + } OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); OUT_RING(flip_addr); ADVANCE_LP_RING(); - overlay->last_flip_req = - i915_add_request(dev, NULL, request, &dev_priv->render_ring); + ret = i915_add_request(dev, NULL, request, 
LP_RING(dev_priv)); + if (ret) { + kfree(request); + return ret; + } + + overlay->last_flip_req = request->seqno; return 0; } static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay) { - struct drm_gem_object *obj = &overlay->old_vid_bo->base; + struct drm_i915_gem_object *obj = overlay->old_vid_bo; i915_gem_object_unpin(obj); - drm_gem_object_unreference(obj); + drm_gem_object_unreference(&obj->base); overlay->old_vid_bo = NULL; } static void intel_overlay_off_tail(struct intel_overlay *overlay) { - struct drm_gem_object *obj; + struct drm_i915_gem_object *obj = overlay->vid_bo; /* never have the overlay hw on without showing a frame */ BUG_ON(!overlay->vid_bo); - obj = &overlay->vid_bo->base; i915_gem_object_unpin(obj); - drm_gem_object_unreference(obj); + drm_gem_object_unreference(&obj->base); overlay->vid_bo = NULL; overlay->crtc->overlay = NULL; @@ -389,8 +405,10 @@ static int intel_overlay_off(struct intel_overlay *overlay, bool interruptible) { struct drm_device *dev = overlay->dev; + struct drm_i915_private *dev_priv = dev->dev_private; u32 flip_addr = overlay->flip_addr; struct drm_i915_gem_request *request; + int ret; BUG_ON(!overlay->active); @@ -404,7 +422,11 @@ static int intel_overlay_off(struct intel_overlay *overlay, * of the hw. Do it in both cases */ flip_addr |= OFC_UPDATE; - BEGIN_LP_RING(6); + ret = BEGIN_LP_RING(6); + if (ret) { + kfree(request); + return ret; + } /* wait for overlay to go idle */ OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); OUT_RING(flip_addr); @@ -432,7 +454,7 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay, return 0; ret = i915_do_wait_request(dev, overlay->last_flip_req, - interruptible, &dev_priv->render_ring); + interruptible, LP_RING(dev_priv)); if (ret) return ret; @@ -467,7 +489,12 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay) if (request == NULL) return -ENOMEM; - BEGIN_LP_RING(2); + ret = BEGIN_LP_RING(2); + if (ret) { + kfree(request); + return ret; + } + OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); OUT_RING(MI_NOOP); ADVANCE_LP_RING(); @@ -736,13 +763,12 @@ static u32 overlay_cmd_reg(struct put_image_params *params) } static int intel_overlay_do_put_image(struct intel_overlay *overlay, - struct drm_gem_object *new_bo, + struct drm_i915_gem_object *new_bo, struct put_image_params *params) { int ret, tmp_width; struct overlay_registers *regs; bool scale_changed = false; - struct drm_i915_gem_object *bo_priv = to_intel_bo(new_bo); struct drm_device *dev = overlay->dev; BUG_ON(!mutex_is_locked(&dev->struct_mutex)); @@ -753,7 +779,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, if (ret != 0) return ret; - ret = i915_gem_object_pin(new_bo, PAGE_SIZE); + ret = i915_gem_object_pin(new_bo, PAGE_SIZE, true); if (ret != 0) return ret; @@ -761,6 +787,10 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, if (ret != 0) goto out_unpin; + ret = i915_gem_object_put_fence(new_bo); + if (ret) + goto out_unpin; + if (!overlay->active) { regs = intel_overlay_map_regs(overlay); if (!regs) { @@ -797,7 +827,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, regs->SWIDTHSW = calc_swidthsw(overlay->dev, params->offset_Y, tmp_width); regs->SHEIGHT = params->src_h; - regs->OBUF_0Y = bo_priv->gtt_offset + params-> offset_Y; + regs->OBUF_0Y = new_bo->gtt_offset + params-> offset_Y; regs->OSTRIDE = params->stride_Y; if (params->format & I915_OVERLAY_YUV_PLANAR) { @@ -811,8 +841,8 @@ static int 
intel_overlay_do_put_image(struct intel_overlay *overlay, params->src_w/uv_hscale); regs->SWIDTHSW |= max_t(u32, tmp_U, tmp_V) << 16; regs->SHEIGHT |= (params->src_h/uv_vscale) << 16; - regs->OBUF_0U = bo_priv->gtt_offset + params->offset_U; - regs->OBUF_0V = bo_priv->gtt_offset + params->offset_V; + regs->OBUF_0U = new_bo->gtt_offset + params->offset_U; + regs->OBUF_0V = new_bo->gtt_offset + params->offset_V; regs->OSTRIDE |= params->stride_UV << 16; } @@ -829,7 +859,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, goto out_unpin; overlay->old_vid_bo = overlay->vid_bo; - overlay->vid_bo = to_intel_bo(new_bo); + overlay->vid_bo = new_bo; return 0; @@ -942,7 +972,7 @@ static int check_overlay_scaling(struct put_image_params *rec) static int check_overlay_src(struct drm_device *dev, struct drm_intel_overlay_put_image *rec, - struct drm_gem_object *new_bo) + struct drm_i915_gem_object *new_bo) { int uv_hscale = uv_hsubsampling(rec->flags); int uv_vscale = uv_vsubsampling(rec->flags); @@ -1027,7 +1057,7 @@ static int check_overlay_src(struct drm_device *dev, return -EINVAL; tmp = rec->stride_Y*rec->src_height; - if (rec->offset_Y + tmp > new_bo->size) + if (rec->offset_Y + tmp > new_bo->base.size) return -EINVAL; break; @@ -1038,12 +1068,12 @@ static int check_overlay_src(struct drm_device *dev, return -EINVAL; tmp = rec->stride_Y * rec->src_height; - if (rec->offset_Y + tmp > new_bo->size) + if (rec->offset_Y + tmp > new_bo->base.size) return -EINVAL; tmp = rec->stride_UV * (rec->src_height / uv_vscale); - if (rec->offset_U + tmp > new_bo->size || - rec->offset_V + tmp > new_bo->size) + if (rec->offset_U + tmp > new_bo->base.size || + rec->offset_V + tmp > new_bo->base.size) return -EINVAL; break; } @@ -1086,7 +1116,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, struct intel_overlay *overlay; struct drm_mode_object *drmmode_obj; struct intel_crtc *crtc; - struct drm_gem_object *new_bo; + struct drm_i915_gem_object *new_bo; struct put_image_params *params; int ret; @@ -1125,8 +1155,8 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, } crtc = to_intel_crtc(obj_to_crtc(drmmode_obj)); - new_bo = drm_gem_object_lookup(dev, file_priv, - put_image_rec->bo_handle); + new_bo = to_intel_bo(drm_gem_object_lookup(dev, file_priv, + put_image_rec->bo_handle)); if (!new_bo) { ret = -ENOENT; goto out_free; @@ -1135,6 +1165,12 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, mutex_lock(&dev->mode_config.mutex); mutex_lock(&dev->struct_mutex); + if (new_bo->tiling_mode) { + DRM_ERROR("buffer used for overlay image can not be tiled\n"); + ret = -EINVAL; + goto out_unlock; + } + ret = intel_overlay_recover_from_interrupt(overlay, true); if (ret != 0) goto out_unlock; @@ -1217,7 +1253,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, out_unlock: mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->mode_config.mutex); - drm_gem_object_unreference_unlocked(new_bo); + drm_gem_object_unreference_unlocked(&new_bo->base); out_free: kfree(params); @@ -1370,7 +1406,7 @@ void intel_setup_overlay(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; struct intel_overlay *overlay; - struct drm_gem_object *reg_bo; + struct drm_i915_gem_object *reg_bo; struct overlay_registers *regs; int ret; @@ -1385,7 +1421,7 @@ void intel_setup_overlay(struct drm_device *dev) reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE); if (!reg_bo) goto out_free; - overlay->reg_bo = to_intel_bo(reg_bo); + overlay->reg_bo = reg_bo; 
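/* Depending on OVERLAY_NEEDS_PHYSICAL below, flip_addr is either
 * the bus address of an attached phys object or the bo's GTT
 * offset once pinned.
 */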
if (OVERLAY_NEEDS_PHYSICAL(dev)) { ret = i915_gem_attach_phys_object(dev, reg_bo, @@ -1395,14 +1431,14 @@ void intel_setup_overlay(struct drm_device *dev) DRM_ERROR("failed to attach phys overlay regs\n"); goto out_free_bo; } - overlay->flip_addr = overlay->reg_bo->phys_obj->handle->busaddr; + overlay->flip_addr = reg_bo->phys_obj->handle->busaddr; } else { - ret = i915_gem_object_pin(reg_bo, PAGE_SIZE); + ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true); if (ret) { DRM_ERROR("failed to pin overlay register bo\n"); goto out_free_bo; } - overlay->flip_addr = overlay->reg_bo->gtt_offset; + overlay->flip_addr = reg_bo->gtt_offset; ret = i915_gem_object_set_to_gtt_domain(reg_bo, true); if (ret) { @@ -1434,7 +1470,7 @@ void intel_setup_overlay(struct drm_device *dev) out_unpin_bo: i915_gem_object_unpin(reg_bo); out_free_bo: - drm_gem_object_unreference(reg_bo); + drm_gem_object_unreference(®_bo->base); out_free: kfree(overlay); return; diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 92ff8f385278..7350ec2515c6 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -125,15 +125,55 @@ static int is_backlight_combination_mode(struct drm_device *dev) return 0; } +static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv) +{ + u32 val; + + /* Restore the CTL value if it lost, e.g. GPU reset */ + + if (HAS_PCH_SPLIT(dev_priv->dev)) { + val = I915_READ(BLC_PWM_PCH_CTL2); + if (dev_priv->saveBLC_PWM_CTL2 == 0) { + dev_priv->saveBLC_PWM_CTL2 = val; + } else if (val == 0) { + I915_WRITE(BLC_PWM_PCH_CTL2, + dev_priv->saveBLC_PWM_CTL); + val = dev_priv->saveBLC_PWM_CTL; + } + } else { + val = I915_READ(BLC_PWM_CTL); + if (dev_priv->saveBLC_PWM_CTL == 0) { + dev_priv->saveBLC_PWM_CTL = val; + dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2); + } else if (val == 0) { + I915_WRITE(BLC_PWM_CTL, + dev_priv->saveBLC_PWM_CTL); + I915_WRITE(BLC_PWM_CTL2, + dev_priv->saveBLC_PWM_CTL2); + val = dev_priv->saveBLC_PWM_CTL; + } + } + + return val; +} + u32 intel_panel_get_max_backlight(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; u32 max; + max = i915_read_blc_pwm_ctl(dev_priv); + if (max == 0) { + /* XXX add code here to query mode clock or hardware clock + * and program max PWM appropriately. + */ + printk_once(KERN_WARNING "fixme: max PWM is zero.\n"); + return 1; + } + if (HAS_PCH_SPLIT(dev)) { - max = I915_READ(BLC_PWM_PCH_CTL2) >> 16; + max >>= 16; } else { - max = I915_READ(BLC_PWM_CTL); if (IS_PINEVIEW(dev)) { max >>= 17; } else { @@ -146,14 +186,6 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev) max *= 0xff; } - if (max == 0) { - /* XXX add code here to query mode clock or hardware clock - * and program max PWM appropriately. 
- */ - DRM_ERROR("fixme: max PWM is zero.\n"); - max = 1; - } - DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max); return max; } diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index b83306f9244b..56bc95c056dd 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -49,11 +49,11 @@ static u32 i915_gem_get_seqno(struct drm_device *dev) } static void -render_ring_flush(struct drm_device *dev, - struct intel_ring_buffer *ring, +render_ring_flush(struct intel_ring_buffer *ring, u32 invalidate_domains, u32 flush_domains) { + struct drm_device *dev = ring->dev; drm_i915_private_t *dev_priv = dev->dev_private; u32 cmd; @@ -109,79 +109,83 @@ render_ring_flush(struct drm_device *dev, if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION) cmd |= MI_EXE_FLUSH; + if (invalidate_domains & I915_GEM_DOMAIN_COMMAND && + (IS_G4X(dev) || IS_GEN5(dev))) + cmd |= MI_INVALIDATE_ISP; + #if WATCH_EXEC DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd); #endif - intel_ring_begin(dev, ring, 2); - intel_ring_emit(dev, ring, cmd); - intel_ring_emit(dev, ring, MI_NOOP); - intel_ring_advance(dev, ring); + if (intel_ring_begin(ring, 2) == 0) { + intel_ring_emit(ring, cmd); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); + } } } -static void ring_write_tail(struct drm_device *dev, - struct intel_ring_buffer *ring, +static void ring_write_tail(struct intel_ring_buffer *ring, u32 value) { - drm_i915_private_t *dev_priv = dev->dev_private; + drm_i915_private_t *dev_priv = ring->dev->dev_private; I915_WRITE_TAIL(ring, value); } -u32 intel_ring_get_active_head(struct drm_device *dev, - struct intel_ring_buffer *ring) +u32 intel_ring_get_active_head(struct intel_ring_buffer *ring) { - drm_i915_private_t *dev_priv = dev->dev_private; - u32 acthd_reg = INTEL_INFO(dev)->gen >= 4 ? + drm_i915_private_t *dev_priv = ring->dev->dev_private; + u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ? RING_ACTHD(ring->mmio_base) : ACTHD; return I915_READ(acthd_reg); } -static int init_ring_common(struct drm_device *dev, - struct intel_ring_buffer *ring) +static int init_ring_common(struct intel_ring_buffer *ring) { + drm_i915_private_t *dev_priv = ring->dev->dev_private; + struct drm_i915_gem_object *obj = ring->obj; u32 head; - drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv; - obj_priv = to_intel_bo(ring->gem_object); /* Stop the ring if it's running. */ I915_WRITE_CTL(ring, 0); I915_WRITE_HEAD(ring, 0); - ring->write_tail(dev, ring, 0); + ring->write_tail(ring, 0); /* Initialize the ring. 
*/ - I915_WRITE_START(ring, obj_priv->gtt_offset); + I915_WRITE_START(ring, obj->gtt_offset); head = I915_READ_HEAD(ring) & HEAD_ADDR; /* G45 ring initialization fails to reset head to zero */ if (head != 0) { - DRM_ERROR("%s head not reset to zero " - "ctl %08x head %08x tail %08x start %08x\n", - ring->name, - I915_READ_CTL(ring), - I915_READ_HEAD(ring), - I915_READ_TAIL(ring), - I915_READ_START(ring)); + DRM_DEBUG_KMS("%s head not reset to zero " + "ctl %08x head %08x tail %08x start %08x\n", + ring->name, + I915_READ_CTL(ring), + I915_READ_HEAD(ring), + I915_READ_TAIL(ring), + I915_READ_START(ring)); I915_WRITE_HEAD(ring, 0); - DRM_ERROR("%s head forced to zero " - "ctl %08x head %08x tail %08x start %08x\n", - ring->name, - I915_READ_CTL(ring), - I915_READ_HEAD(ring), - I915_READ_TAIL(ring), - I915_READ_START(ring)); + if (I915_READ_HEAD(ring) & HEAD_ADDR) { + DRM_ERROR("failed to set %s head to zero " + "ctl %08x head %08x tail %08x start %08x\n", + ring->name, + I915_READ_CTL(ring), + I915_READ_HEAD(ring), + I915_READ_TAIL(ring), + I915_READ_START(ring)); + } } I915_WRITE_CTL(ring, - ((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES) + ((ring->size - PAGE_SIZE) & RING_NR_PAGES) | RING_REPORT_64K | RING_VALID); - head = I915_READ_HEAD(ring) & HEAD_ADDR; /* If the head is still not zero, the ring is dead */ - if (head != 0) { + if ((I915_READ_CTL(ring) & RING_VALID) == 0 || + I915_READ_START(ring) != obj->gtt_offset || + (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) { DRM_ERROR("%s initialization failed " "ctl %08x head %08x tail %08x start %08x\n", ring->name, @@ -192,8 +196,8 @@ static int init_ring_common(struct drm_device *dev, return -EIO; } - if (!drm_core_check_feature(dev, DRIVER_MODESET)) - i915_kernel_lost_context(dev); + if (!drm_core_check_feature(ring->dev, DRIVER_MODESET)) + i915_kernel_lost_context(ring->dev); else { ring->head = I915_READ_HEAD(ring) & HEAD_ADDR; ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR; @@ -201,335 +205,500 @@ static int init_ring_common(struct drm_device *dev, if (ring->space < 0) ring->space += ring->size; } + return 0; } -static int init_render_ring(struct drm_device *dev, - struct intel_ring_buffer *ring) +/* + * 965+ support PIPE_CONTROL commands, which provide finer grained control + * over cache flushing. 
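+ * The pipe_control structure below holds a pinned scratch page:
+ * the GPU writes seqnos and flush data to its GTT offset, and the
+ * CPU reads them back through the kmapped cpu_page.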
+ */ +struct pipe_control { + struct drm_i915_gem_object *obj; + volatile u32 *cpu_page; + u32 gtt_offset; +}; + +static int +init_pipe_control(struct intel_ring_buffer *ring) { - drm_i915_private_t *dev_priv = dev->dev_private; - int ret = init_ring_common(dev, ring); - int mode; + struct pipe_control *pc; + struct drm_i915_gem_object *obj; + int ret; + + if (ring->private) + return 0; + + pc = kmalloc(sizeof(*pc), GFP_KERNEL); + if (!pc) + return -ENOMEM; + + obj = i915_gem_alloc_object(ring->dev, 4096); + if (obj == NULL) { + DRM_ERROR("Failed to allocate seqno page\n"); + ret = -ENOMEM; + goto err; + } + obj->agp_type = AGP_USER_CACHED_MEMORY; + + ret = i915_gem_object_pin(obj, 4096, true); + if (ret) + goto err_unref; + + pc->gtt_offset = obj->gtt_offset; + pc->cpu_page = kmap(obj->pages[0]); + if (pc->cpu_page == NULL) + goto err_unpin; + + pc->obj = obj; + ring->private = pc; + return 0; + +err_unpin: + i915_gem_object_unpin(obj); +err_unref: + drm_gem_object_unreference(&obj->base); +err: + kfree(pc); + return ret; +} + +static void +cleanup_pipe_control(struct intel_ring_buffer *ring) +{ + struct pipe_control *pc = ring->private; + struct drm_i915_gem_object *obj; + + if (!ring->private) + return; + + obj = pc->obj; + kunmap(obj->pages[0]); + i915_gem_object_unpin(obj); + drm_gem_object_unreference(&obj->base); + + kfree(pc); + ring->private = NULL; +} + +static int init_render_ring(struct intel_ring_buffer *ring) +{ + struct drm_device *dev = ring->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + int ret = init_ring_common(ring); if (INTEL_INFO(dev)->gen > 3) { - mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH; + int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH; if (IS_GEN6(dev)) mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE; I915_WRITE(MI_MODE, mode); } + + if (INTEL_INFO(dev)->gen >= 6) { + } else if (IS_GEN5(dev)) { + ret = init_pipe_control(ring); + if (ret) + return ret; + } + return ret; } -#define PIPE_CONTROL_FLUSH(addr) \ +static void render_ring_cleanup(struct intel_ring_buffer *ring) +{ + if (!ring->private) + return; + + cleanup_pipe_control(ring); +} + +static void +update_semaphore(struct intel_ring_buffer *ring, int i, u32 seqno) +{ + struct drm_device *dev = ring->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + int id; + + /* + * cs -> 1 = vcs, 0 = bcs + * vcs -> 1 = bcs, 0 = cs, + * bcs -> 1 = cs, 0 = vcs. 
+ */ + id = ring - dev_priv->ring; + id += 2 - i; + id %= 3; + + intel_ring_emit(ring, + MI_SEMAPHORE_MBOX | + MI_SEMAPHORE_REGISTER | + MI_SEMAPHORE_UPDATE); + intel_ring_emit(ring, seqno); + intel_ring_emit(ring, + RING_SYNC_0(dev_priv->ring[id].mmio_base) + 4*i); +} + +static int +gen6_add_request(struct intel_ring_buffer *ring, + u32 *result) +{ + u32 seqno; + int ret; + + ret = intel_ring_begin(ring, 10); + if (ret) + return ret; + + seqno = i915_gem_get_seqno(ring->dev); + update_semaphore(ring, 0, seqno); + update_semaphore(ring, 1, seqno); + + intel_ring_emit(ring, MI_STORE_DWORD_INDEX); + intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); + intel_ring_emit(ring, seqno); + intel_ring_emit(ring, MI_USER_INTERRUPT); + intel_ring_advance(ring); + + *result = seqno; + return 0; +} + +int +intel_ring_sync(struct intel_ring_buffer *ring, + struct intel_ring_buffer *to, + u32 seqno) +{ + int ret; + + ret = intel_ring_begin(ring, 4); + if (ret) + return ret; + + intel_ring_emit(ring, + MI_SEMAPHORE_MBOX | + MI_SEMAPHORE_REGISTER | + intel_ring_sync_index(ring, to) << 17 | + MI_SEMAPHORE_COMPARE); + intel_ring_emit(ring, seqno); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); + + return 0; +} + +#define PIPE_CONTROL_FLUSH(ring__, addr__) \ do { \ - OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \ + intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \ PIPE_CONTROL_DEPTH_STALL | 2); \ - OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT); \ - OUT_RING(0); \ - OUT_RING(0); \ + intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT); \ + intel_ring_emit(ring__, 0); \ + intel_ring_emit(ring__, 0); \ } while (0) -/** - * Creates a new sequence number, emitting a write of it to the status page - * plus an interrupt, which will trigger i915_user_interrupt_handler. - * - * Must be called with struct_lock held. - * - * Returned sequence numbers are nonzero on success. - */ -static u32 -render_ring_add_request(struct drm_device *dev, - struct intel_ring_buffer *ring, - u32 flush_domains) +static int +pc_render_add_request(struct intel_ring_buffer *ring, + u32 *result) { - drm_i915_private_t *dev_priv = dev->dev_private; - u32 seqno; + struct drm_device *dev = ring->dev; + u32 seqno = i915_gem_get_seqno(dev); + struct pipe_control *pc = ring->private; + u32 scratch_addr = pc->gtt_offset + 128; + int ret; - seqno = i915_gem_get_seqno(dev); - - if (IS_GEN6(dev)) { - BEGIN_LP_RING(6); - OUT_RING(GFX_OP_PIPE_CONTROL | 3); - OUT_RING(PIPE_CONTROL_QW_WRITE | - PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_IS_FLUSH | - PIPE_CONTROL_NOTIFY); - OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); - OUT_RING(seqno); - OUT_RING(0); - OUT_RING(0); - ADVANCE_LP_RING(); - } else if (HAS_PIPE_CONTROL(dev)) { - u32 scratch_addr = dev_priv->seqno_gfx_addr + 128; + /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently + * incoherent with writes to memory, i.e. completely fubar, + * so we need to use PIPE_NOTIFY instead. + * + * However, we also need to workaround the qword write + * incoherence by flushing the 6 PIPE_NOTIFY buffers out to + * memory before requesting an interrupt. 
+ */ + ret = intel_ring_begin(ring, 32); + if (ret) + return ret; + + intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | + PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH); + intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); + intel_ring_emit(ring, seqno); + intel_ring_emit(ring, 0); + PIPE_CONTROL_FLUSH(ring, scratch_addr); + scratch_addr += 128; /* write to separate cachelines */ + PIPE_CONTROL_FLUSH(ring, scratch_addr); + scratch_addr += 128; + PIPE_CONTROL_FLUSH(ring, scratch_addr); + scratch_addr += 128; + PIPE_CONTROL_FLUSH(ring, scratch_addr); + scratch_addr += 128; + PIPE_CONTROL_FLUSH(ring, scratch_addr); + scratch_addr += 128; + PIPE_CONTROL_FLUSH(ring, scratch_addr); + intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | + PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH | + PIPE_CONTROL_NOTIFY); + intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); + intel_ring_emit(ring, seqno); + intel_ring_emit(ring, 0); + intel_ring_advance(ring); + + *result = seqno; + return 0; +} - /* - * Workaround qword write incoherence by flushing the - * PIPE_NOTIFY buffers out to memory before requesting - * an interrupt. - */ - BEGIN_LP_RING(32); - OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | - PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH); - OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); - OUT_RING(seqno); - OUT_RING(0); - PIPE_CONTROL_FLUSH(scratch_addr); - scratch_addr += 128; /* write to separate cachelines */ - PIPE_CONTROL_FLUSH(scratch_addr); - scratch_addr += 128; - PIPE_CONTROL_FLUSH(scratch_addr); - scratch_addr += 128; - PIPE_CONTROL_FLUSH(scratch_addr); - scratch_addr += 128; - PIPE_CONTROL_FLUSH(scratch_addr); - scratch_addr += 128; - PIPE_CONTROL_FLUSH(scratch_addr); - OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | - PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH | - PIPE_CONTROL_NOTIFY); - OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); - OUT_RING(seqno); - OUT_RING(0); - ADVANCE_LP_RING(); - } else { - BEGIN_LP_RING(4); - OUT_RING(MI_STORE_DWORD_INDEX); - OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); - OUT_RING(seqno); +static int +render_ring_add_request(struct intel_ring_buffer *ring, + u32 *result) +{ + struct drm_device *dev = ring->dev; + u32 seqno = i915_gem_get_seqno(dev); + int ret; - OUT_RING(MI_USER_INTERRUPT); - ADVANCE_LP_RING(); - } - return seqno; + ret = intel_ring_begin(ring, 4); + if (ret) + return ret; + + intel_ring_emit(ring, MI_STORE_DWORD_INDEX); + intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); + intel_ring_emit(ring, seqno); + intel_ring_emit(ring, MI_USER_INTERRUPT); + intel_ring_advance(ring); + + *result = seqno; + return 0; } static u32 -render_ring_get_seqno(struct drm_device *dev, - struct intel_ring_buffer *ring) +ring_get_seqno(struct intel_ring_buffer *ring) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - if (HAS_PIPE_CONTROL(dev)) - return ((volatile u32 *)(dev_priv->seqno_page))[0]; - else - return intel_read_status_page(ring, I915_GEM_HWS_INDEX); + return intel_read_status_page(ring, I915_GEM_HWS_INDEX); } -static void -render_ring_get_user_irq(struct drm_device *dev, - struct intel_ring_buffer *ring) +static u32 +pc_render_get_seqno(struct intel_ring_buffer *ring) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - unsigned long irqflags; + struct pipe_control *pc = ring->private; + return pc->cpu_page[0]; +} + +static bool +render_ring_get_irq(struct 
intel_ring_buffer *ring) +{ + struct drm_device *dev = ring->dev; + + if (!dev->irq_enabled) + return false; - spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); - if (dev->irq_enabled && (++ring->user_irq_refcount == 1)) { + if (atomic_inc_return(&ring->irq_refcount) == 1) { + drm_i915_private_t *dev_priv = dev->dev_private; + unsigned long irqflags; + + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); if (HAS_PCH_SPLIT(dev)) - ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY); + ironlake_enable_graphics_irq(dev_priv, + GT_PIPE_NOTIFY | GT_USER_INTERRUPT); else i915_enable_irq(dev_priv, I915_USER_INTERRUPT); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } - spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); + + return true; } static void -render_ring_put_user_irq(struct drm_device *dev, - struct intel_ring_buffer *ring) +render_ring_put_irq(struct intel_ring_buffer *ring) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - unsigned long irqflags; + struct drm_device *dev = ring->dev; - spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); - BUG_ON(dev->irq_enabled && ring->user_irq_refcount <= 0); - if (dev->irq_enabled && (--ring->user_irq_refcount == 0)) { + if (atomic_dec_and_test(&ring->irq_refcount)) { + drm_i915_private_t *dev_priv = dev->dev_private; + unsigned long irqflags; + + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); if (HAS_PCH_SPLIT(dev)) - ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY); + ironlake_disable_graphics_irq(dev_priv, + GT_USER_INTERRUPT | + GT_PIPE_NOTIFY); else i915_disable_irq(dev_priv, I915_USER_INTERRUPT); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } - spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); } -void intel_ring_setup_status_page(struct drm_device *dev, - struct intel_ring_buffer *ring) +void intel_ring_setup_status_page(struct intel_ring_buffer *ring) { - drm_i915_private_t *dev_priv = dev->dev_private; - if (IS_GEN6(dev)) { - I915_WRITE(RING_HWS_PGA_GEN6(ring->mmio_base), - ring->status_page.gfx_addr); - I915_READ(RING_HWS_PGA_GEN6(ring->mmio_base)); /* posting read */ - } else { - I915_WRITE(RING_HWS_PGA(ring->mmio_base), - ring->status_page.gfx_addr); - I915_READ(RING_HWS_PGA(ring->mmio_base)); /* posting read */ - } - + drm_i915_private_t *dev_priv = ring->dev->dev_private; + u32 mmio = IS_GEN6(ring->dev) ? 
+ RING_HWS_PGA_GEN6(ring->mmio_base) : + RING_HWS_PGA(ring->mmio_base); + I915_WRITE(mmio, (u32)ring->status_page.gfx_addr); + POSTING_READ(mmio); } static void -bsd_ring_flush(struct drm_device *dev, - struct intel_ring_buffer *ring, - u32 invalidate_domains, - u32 flush_domains) +bsd_ring_flush(struct intel_ring_buffer *ring, + u32 invalidate_domains, + u32 flush_domains) { - intel_ring_begin(dev, ring, 2); - intel_ring_emit(dev, ring, MI_FLUSH); - intel_ring_emit(dev, ring, MI_NOOP); - intel_ring_advance(dev, ring); -} + if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0) + return; -static int init_bsd_ring(struct drm_device *dev, - struct intel_ring_buffer *ring) -{ - return init_ring_common(dev, ring); + if (intel_ring_begin(ring, 2) == 0) { + intel_ring_emit(ring, MI_FLUSH); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); + } } -static u32 -ring_add_request(struct drm_device *dev, - struct intel_ring_buffer *ring, - u32 flush_domains) +static int +ring_add_request(struct intel_ring_buffer *ring, + u32 *result) { u32 seqno; + int ret; - seqno = i915_gem_get_seqno(dev); + ret = intel_ring_begin(ring, 4); + if (ret) + return ret; - intel_ring_begin(dev, ring, 4); - intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX); - intel_ring_emit(dev, ring, - I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); - intel_ring_emit(dev, ring, seqno); - intel_ring_emit(dev, ring, MI_USER_INTERRUPT); - intel_ring_advance(dev, ring); + seqno = i915_gem_get_seqno(ring->dev); - DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno); + intel_ring_emit(ring, MI_STORE_DWORD_INDEX); + intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); + intel_ring_emit(ring, seqno); + intel_ring_emit(ring, MI_USER_INTERRUPT); + intel_ring_advance(ring); - return seqno; + DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno); + *result = seqno; + return 0; } -static void -bsd_ring_get_user_irq(struct drm_device *dev, - struct intel_ring_buffer *ring) +static bool +ring_get_irq(struct intel_ring_buffer *ring, u32 flag) { - /* do nothing */ + struct drm_device *dev = ring->dev; + + if (!dev->irq_enabled) + return false; + + if (atomic_inc_return(&ring->irq_refcount) == 1) { + drm_i915_private_t *dev_priv = dev->dev_private; + unsigned long irqflags; + + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + ironlake_enable_graphics_irq(dev_priv, flag); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + } + + return true; } + static void -bsd_ring_put_user_irq(struct drm_device *dev, - struct intel_ring_buffer *ring) +ring_put_irq(struct intel_ring_buffer *ring, u32 flag) { - /* do nothing */ + struct drm_device *dev = ring->dev; + + if (atomic_dec_and_test(&ring->irq_refcount)) { + drm_i915_private_t *dev_priv = dev->dev_private; + unsigned long irqflags; + + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + ironlake_disable_graphics_irq(dev_priv, flag); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + } } -static u32 -ring_status_page_get_seqno(struct drm_device *dev, - struct intel_ring_buffer *ring) +static bool +bsd_ring_get_irq(struct intel_ring_buffer *ring) { - return intel_read_status_page(ring, I915_GEM_HWS_INDEX); + return ring_get_irq(ring, GT_BSD_USER_INTERRUPT); +} +static void +bsd_ring_put_irq(struct intel_ring_buffer *ring) +{ + ring_put_irq(ring, GT_BSD_USER_INTERRUPT); } static int -ring_dispatch_gem_execbuffer(struct drm_device *dev, - struct intel_ring_buffer *ring, - struct drm_i915_gem_execbuffer2 *exec, - struct drm_clip_rect *cliprects, - uint64_t exec_offset) 
+ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length) { - uint32_t exec_start; - exec_start = (uint32_t) exec_offset + exec->batch_start_offset; - intel_ring_begin(dev, ring, 2); - intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START | - (2 << 6) | MI_BATCH_NON_SECURE_I965); - intel_ring_emit(dev, ring, exec_start); - intel_ring_advance(dev, ring); + int ret; + + ret = intel_ring_begin(ring, 2); + if (ret) + return ret; + + intel_ring_emit(ring, + MI_BATCH_BUFFER_START | (2 << 6) | + MI_BATCH_NON_SECURE_I965); + intel_ring_emit(ring, offset); + intel_ring_advance(ring); + return 0; } static int -render_ring_dispatch_gem_execbuffer(struct drm_device *dev, - struct intel_ring_buffer *ring, - struct drm_i915_gem_execbuffer2 *exec, - struct drm_clip_rect *cliprects, - uint64_t exec_offset) +render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, + u32 offset, u32 len) { + struct drm_device *dev = ring->dev; drm_i915_private_t *dev_priv = dev->dev_private; - int nbox = exec->num_cliprects; - int i = 0, count; - uint32_t exec_start, exec_len; - exec_start = (uint32_t) exec_offset + exec->batch_start_offset; - exec_len = (uint32_t) exec->batch_len; + int ret; trace_i915_gem_request_submit(dev, dev_priv->next_seqno + 1); - count = nbox ? nbox : 1; + if (IS_I830(dev) || IS_845G(dev)) { + ret = intel_ring_begin(ring, 4); + if (ret) + return ret; - for (i = 0; i < count; i++) { - if (i < nbox) { - int ret = i915_emit_box(dev, cliprects, i, - exec->DR1, exec->DR4); - if (ret) - return ret; - } + intel_ring_emit(ring, MI_BATCH_BUFFER); + intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE); + intel_ring_emit(ring, offset + len - 8); + intel_ring_emit(ring, 0); + } else { + ret = intel_ring_begin(ring, 2); + if (ret) + return ret; - if (IS_I830(dev) || IS_845G(dev)) { - intel_ring_begin(dev, ring, 4); - intel_ring_emit(dev, ring, MI_BATCH_BUFFER); - intel_ring_emit(dev, ring, - exec_start | MI_BATCH_NON_SECURE); - intel_ring_emit(dev, ring, exec_start + exec_len - 4); - intel_ring_emit(dev, ring, 0); + if (INTEL_INFO(dev)->gen >= 4) { + intel_ring_emit(ring, + MI_BATCH_BUFFER_START | (2 << 6) | + MI_BATCH_NON_SECURE_I965); + intel_ring_emit(ring, offset); } else { - intel_ring_begin(dev, ring, 2); - if (INTEL_INFO(dev)->gen >= 4) { - intel_ring_emit(dev, ring, - MI_BATCH_BUFFER_START | (2 << 6) - | MI_BATCH_NON_SECURE_I965); - intel_ring_emit(dev, ring, exec_start); - } else { - intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START - | (2 << 6)); - intel_ring_emit(dev, ring, exec_start | - MI_BATCH_NON_SECURE); - } + intel_ring_emit(ring, + MI_BATCH_BUFFER_START | (2 << 6)); + intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE); } - intel_ring_advance(dev, ring); } - - if (IS_G4X(dev) || IS_GEN5(dev)) { - intel_ring_begin(dev, ring, 2); - intel_ring_emit(dev, ring, MI_FLUSH | - MI_NO_WRITE_FLUSH | - MI_INVALIDATE_ISP ); - intel_ring_emit(dev, ring, MI_NOOP); - intel_ring_advance(dev, ring); - } - /* XXX breadcrumb */ + intel_ring_advance(ring); return 0; } -static void cleanup_status_page(struct drm_device *dev, - struct intel_ring_buffer *ring) +static void cleanup_status_page(struct intel_ring_buffer *ring) { - drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_gem_object *obj; - struct drm_i915_gem_object *obj_priv; + drm_i915_private_t *dev_priv = ring->dev->dev_private; + struct drm_i915_gem_object *obj; obj = ring->status_page.obj; if (obj == NULL) return; - obj_priv = to_intel_bo(obj); - kunmap(obj_priv->pages[0]); + kunmap(obj->pages[0]); 
i915_gem_object_unpin(obj); - drm_gem_object_unreference(obj); + drm_gem_object_unreference(&obj->base); ring->status_page.obj = NULL; memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); } -static int init_status_page(struct drm_device *dev, - struct intel_ring_buffer *ring) +static int init_status_page(struct intel_ring_buffer *ring) { + struct drm_device *dev = ring->dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_gem_object *obj; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; int ret; obj = i915_gem_alloc_object(dev, 4096); @@ -538,16 +707,15 @@ static int init_status_page(struct drm_device *dev, ret = -ENOMEM; goto err; } - obj_priv = to_intel_bo(obj); - obj_priv->agp_type = AGP_USER_CACHED_MEMORY; + obj->agp_type = AGP_USER_CACHED_MEMORY; - ret = i915_gem_object_pin(obj, 4096); + ret = i915_gem_object_pin(obj, 4096, true); if (ret != 0) { goto err_unref; } - ring->status_page.gfx_addr = obj_priv->gtt_offset; - ring->status_page.page_addr = kmap(obj_priv->pages[0]); + ring->status_page.gfx_addr = obj->gtt_offset; + ring->status_page.page_addr = kmap(obj->pages[0]); if (ring->status_page.page_addr == NULL) { memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); goto err_unpin; @@ -555,7 +723,7 @@ static int init_status_page(struct drm_device *dev, ring->status_page.obj = obj; memset(ring->status_page.page_addr, 0, PAGE_SIZE); - intel_ring_setup_status_page(dev, ring); + intel_ring_setup_status_page(ring); DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n", ring->name, ring->status_page.gfx_addr); @@ -564,7 +732,7 @@ static int init_status_page(struct drm_device *dev, err_unpin: i915_gem_object_unpin(obj); err_unref: - drm_gem_object_unreference(obj); + drm_gem_object_unreference(&obj->base); err: return ret; } @@ -572,9 +740,7 @@ err: int intel_init_ring_buffer(struct drm_device *dev, struct intel_ring_buffer *ring) { - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv; - struct drm_gem_object *obj; + struct drm_i915_gem_object *obj; int ret; ring->dev = dev; @@ -583,7 +749,7 @@ int intel_init_ring_buffer(struct drm_device *dev, INIT_LIST_HEAD(&ring->gpu_write_list); if (I915_NEED_GFX_HWS(dev)) { - ret = init_status_page(dev, ring); + ret = init_status_page(ring); if (ret) return ret; } @@ -595,15 +761,14 @@ int intel_init_ring_buffer(struct drm_device *dev, goto err_hws; } - ring->gem_object = obj; + ring->obj = obj; - ret = i915_gem_object_pin(obj, PAGE_SIZE); + ret = i915_gem_object_pin(obj, PAGE_SIZE, true); if (ret) goto err_unref; - obj_priv = to_intel_bo(obj); ring->map.size = ring->size; - ring->map.offset = dev->agp->base + obj_priv->gtt_offset; + ring->map.offset = dev->agp->base + obj->gtt_offset; ring->map.type = 0; ring->map.flags = 0; ring->map.mtrr = 0; @@ -616,60 +781,57 @@ int intel_init_ring_buffer(struct drm_device *dev, } ring->virtual_start = ring->map.handle; - ret = ring->init(dev, ring); + ret = ring->init(ring); if (ret) goto err_unmap; - if (!drm_core_check_feature(dev, DRIVER_MODESET)) - i915_kernel_lost_context(dev); - else { - ring->head = I915_READ_HEAD(ring) & HEAD_ADDR; - ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR; - ring->space = ring->head - (ring->tail + 8); - if (ring->space < 0) - ring->space += ring->size; - } - return ret; + return 0; err_unmap: drm_core_ioremapfree(&ring->map, dev); err_unpin: i915_gem_object_unpin(obj); err_unref: - drm_gem_object_unreference(obj); - ring->gem_object = NULL; + drm_gem_object_unreference(&obj->base); + ring->obj = NULL; 
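
/*
 * Editor's note: illustrative sketch, not part of the patch. The
 * init_status_page() and intel_init_ring_buffer() paths above use the
 * kernel's goto-unwind idiom: on failure, jump to a label that releases
 * everything acquired so far, in reverse order. Generic shape of the
 * idiom, with hypothetical acquire()/release() helpers:
 */
#include <stdlib.h>

struct res { int dummy; };

static struct res *acquire(void)   { return malloc(sizeof(struct res)); }
static void release(struct res *r) { free(r); }

static int init_two(struct res **a_out, struct res **b_out)
{
        struct res *a, *b;

        a = acquire();
        if (!a)
                return -1;

        b = acquire();
        if (!b)
                goto err_a;     /* undo only what already succeeded */

        *a_out = a;
        *b_out = b;
        return 0;

err_a:
        release(a);
        return -1;
}

int main(void)
{
        struct res *a, *b;
        return init_two(&a, &b);
}
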
err_hws: - cleanup_status_page(dev, ring); + cleanup_status_page(ring); return ret; } -void intel_cleanup_ring_buffer(struct drm_device *dev, - struct intel_ring_buffer *ring) +void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring) { - if (ring->gem_object == NULL) + struct drm_i915_private *dev_priv; + int ret; + + if (ring->obj == NULL) return; - drm_core_ioremapfree(&ring->map, dev); + /* Disable the ring buffer. The ring must be idle at this point */ + dev_priv = ring->dev->dev_private; + ret = intel_wait_ring_buffer(ring, ring->size - 8); + I915_WRITE_CTL(ring, 0); - i915_gem_object_unpin(ring->gem_object); - drm_gem_object_unreference(ring->gem_object); - ring->gem_object = NULL; + drm_core_ioremapfree(&ring->map, ring->dev); + + i915_gem_object_unpin(ring->obj); + drm_gem_object_unreference(&ring->obj->base); + ring->obj = NULL; if (ring->cleanup) ring->cleanup(ring); - cleanup_status_page(dev, ring); + cleanup_status_page(ring); } -static int intel_wrap_ring_buffer(struct drm_device *dev, - struct intel_ring_buffer *ring) +static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring) { unsigned int *virt; int rem; rem = ring->size - ring->tail; if (ring->space < rem) { - int ret = intel_wait_ring_buffer(dev, ring, rem); + int ret = intel_wait_ring_buffer(ring, rem); if (ret) return ret; } @@ -687,32 +849,29 @@ static int intel_wrap_ring_buffer(struct drm_device *dev, return 0; } -int intel_wait_ring_buffer(struct drm_device *dev, - struct intel_ring_buffer *ring, int n) +int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n) { + struct drm_device *dev = ring->dev; + struct drm_i915_private *dev_priv = dev->dev_private; unsigned long end; - drm_i915_private_t *dev_priv = dev->dev_private; u32 head; - head = intel_read_status_page(ring, 4); - if (head) { - ring->head = head & HEAD_ADDR; - ring->space = ring->head - (ring->tail + 8); - if (ring->space < 0) - ring->space += ring->size; - if (ring->space >= n) - return 0; - } - trace_i915_ring_wait_begin (dev); end = jiffies + 3 * HZ; do { - ring->head = I915_READ_HEAD(ring) & HEAD_ADDR; + /* If the reported head position has wrapped or hasn't advanced, + * fallback to the slow and accurate path. 
+ */ + head = intel_read_status_page(ring, 4); + if (head < ring->actual_head) + head = I915_READ_HEAD(ring); + ring->actual_head = head; + ring->head = head & HEAD_ADDR; ring->space = ring->head - (ring->tail + 8); if (ring->space < 0) ring->space += ring->size; if (ring->space >= n) { - trace_i915_ring_wait_end (dev); + trace_i915_ring_wait_end(dev); return 0; } @@ -723,29 +882,39 @@ int intel_wait_ring_buffer(struct drm_device *dev, } msleep(1); + if (atomic_read(&dev_priv->mm.wedged)) + return -EAGAIN; } while (!time_after(jiffies, end)); trace_i915_ring_wait_end (dev); return -EBUSY; } -void intel_ring_begin(struct drm_device *dev, - struct intel_ring_buffer *ring, - int num_dwords) +int intel_ring_begin(struct intel_ring_buffer *ring, + int num_dwords) { int n = 4*num_dwords; - if (unlikely(ring->tail + n > ring->size)) - intel_wrap_ring_buffer(dev, ring); - if (unlikely(ring->space < n)) - intel_wait_ring_buffer(dev, ring, n); + int ret; + + if (unlikely(ring->tail + n > ring->size)) { + ret = intel_wrap_ring_buffer(ring); + if (unlikely(ret)) + return ret; + } + + if (unlikely(ring->space < n)) { + ret = intel_wait_ring_buffer(ring, n); + if (unlikely(ret)) + return ret; + } ring->space -= n; + return 0; } -void intel_ring_advance(struct drm_device *dev, - struct intel_ring_buffer *ring) +void intel_ring_advance(struct intel_ring_buffer *ring) { ring->tail &= ring->size - 1; - ring->write_tail(dev, ring, ring->tail); + ring->write_tail(ring, ring->tail); } static const struct intel_ring_buffer render_ring = { @@ -757,10 +926,11 @@ static const struct intel_ring_buffer render_ring = { .write_tail = ring_write_tail, .flush = render_ring_flush, .add_request = render_ring_add_request, - .get_seqno = render_ring_get_seqno, - .user_irq_get = render_ring_get_user_irq, - .user_irq_put = render_ring_put_user_irq, - .dispatch_gem_execbuffer = render_ring_dispatch_gem_execbuffer, + .get_seqno = ring_get_seqno, + .irq_get = render_ring_get_irq, + .irq_put = render_ring_put_irq, + .dispatch_execbuffer = render_ring_dispatch_execbuffer, + .cleanup = render_ring_cleanup, }; /* ring buffer for bit-stream decoder */ @@ -770,22 +940,21 @@ static const struct intel_ring_buffer bsd_ring = { .id = RING_BSD, .mmio_base = BSD_RING_BASE, .size = 32 * PAGE_SIZE, - .init = init_bsd_ring, + .init = init_ring_common, .write_tail = ring_write_tail, .flush = bsd_ring_flush, .add_request = ring_add_request, - .get_seqno = ring_status_page_get_seqno, - .user_irq_get = bsd_ring_get_user_irq, - .user_irq_put = bsd_ring_put_user_irq, - .dispatch_gem_execbuffer = ring_dispatch_gem_execbuffer, + .get_seqno = ring_get_seqno, + .irq_get = bsd_ring_get_irq, + .irq_put = bsd_ring_put_irq, + .dispatch_execbuffer = ring_dispatch_execbuffer, }; -static void gen6_bsd_ring_write_tail(struct drm_device *dev, - struct intel_ring_buffer *ring, +static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring, u32 value) { - drm_i915_private_t *dev_priv = dev->dev_private; + drm_i915_private_t *dev_priv = ring->dev->dev_private; /* Every tail move must follow the sequence below */ I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL, @@ -804,69 +973,80 @@ static void gen6_bsd_ring_write_tail(struct drm_device *dev, GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE); } -static void gen6_ring_flush(struct drm_device *dev, - struct intel_ring_buffer *ring, +static void gen6_ring_flush(struct intel_ring_buffer *ring, u32 invalidate_domains, u32 flush_domains) { - intel_ring_begin(dev, ring, 4); - intel_ring_emit(dev, ring, MI_FLUSH_DW); - 
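
/*
 * Editor's note: illustrative sketch, not part of the patch. The space
 * arithmetic above treats the ring as a circular buffer: the hardware
 * consumes at 'head', the driver produces at 'tail', and 8 bytes are kept
 * in reserve so head == tail always means "empty" rather than "full".
 */
#include <assert.h>

static int ring_space(unsigned int head, unsigned int tail, unsigned int size)
{
        int space = (int)head - (int)(tail + 8);

        if (space < 0)
                space += (int)size;     /* producer has wrapped past the end */
        return space;
}

int main(void)
{
        assert(ring_space(0, 0, 4096) == 4096 - 8); /* empty ring */
        assert(ring_space(128, 0, 4096) == 120);    /* consumer ahead */
        assert(ring_space(0, 4000, 4096) == 88);    /* wrapped producer */
        return 0;
}
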
intel_ring_emit(dev, ring, 0); - intel_ring_emit(dev, ring, 0); - intel_ring_emit(dev, ring, 0); - intel_ring_advance(dev, ring); + if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0) + return; + + if (intel_ring_begin(ring, 4) == 0) { + intel_ring_emit(ring, MI_FLUSH_DW); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, 0); + intel_ring_advance(ring); + } } static int -gen6_ring_dispatch_gem_execbuffer(struct drm_device *dev, - struct intel_ring_buffer *ring, - struct drm_i915_gem_execbuffer2 *exec, - struct drm_clip_rect *cliprects, - uint64_t exec_offset) +gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, + u32 offset, u32 len) { - uint32_t exec_start; + int ret; - exec_start = (uint32_t) exec_offset + exec->batch_start_offset; + ret = intel_ring_begin(ring, 2); + if (ret) + return ret; - intel_ring_begin(dev, ring, 2); - intel_ring_emit(dev, ring, - MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965); + intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965); /* bit0-7 is the length on GEN6+ */ - intel_ring_emit(dev, ring, exec_start); - intel_ring_advance(dev, ring); + intel_ring_emit(ring, offset); + intel_ring_advance(ring); return 0; } +static bool +gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring) +{ + return ring_get_irq(ring, GT_GEN6_BSD_USER_INTERRUPT); +} + +static void +gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring) +{ + ring_put_irq(ring, GT_GEN6_BSD_USER_INTERRUPT); +} + /* ring buffer for Video Codec for Gen6+ */ static const struct intel_ring_buffer gen6_bsd_ring = { - .name = "gen6 bsd ring", - .id = RING_BSD, - .mmio_base = GEN6_BSD_RING_BASE, - .size = 32 * PAGE_SIZE, - .init = init_bsd_ring, - .write_tail = gen6_bsd_ring_write_tail, - .flush = gen6_ring_flush, - .add_request = ring_add_request, - .get_seqno = ring_status_page_get_seqno, - .user_irq_get = bsd_ring_get_user_irq, - .user_irq_put = bsd_ring_put_user_irq, - .dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer, + .name = "gen6 bsd ring", + .id = RING_BSD, + .mmio_base = GEN6_BSD_RING_BASE, + .size = 32 * PAGE_SIZE, + .init = init_ring_common, + .write_tail = gen6_bsd_ring_write_tail, + .flush = gen6_ring_flush, + .add_request = gen6_add_request, + .get_seqno = ring_get_seqno, + .irq_get = gen6_bsd_ring_get_irq, + .irq_put = gen6_bsd_ring_put_irq, + .dispatch_execbuffer = gen6_ring_dispatch_execbuffer, }; /* Blitter support (SandyBridge+) */ -static void -blt_ring_get_user_irq(struct drm_device *dev, - struct intel_ring_buffer *ring) +static bool +blt_ring_get_irq(struct intel_ring_buffer *ring) { - /* do nothing */ + return ring_get_irq(ring, GT_BLT_USER_INTERRUPT); } + static void -blt_ring_put_user_irq(struct drm_device *dev, - struct intel_ring_buffer *ring) +blt_ring_put_irq(struct intel_ring_buffer *ring) { - /* do nothing */ + ring_put_irq(ring, GT_BLT_USER_INTERRUPT); } @@ -884,32 +1064,31 @@ to_blt_workaround(struct intel_ring_buffer *ring) return ring->private; } -static int blt_ring_init(struct drm_device *dev, - struct intel_ring_buffer *ring) +static int blt_ring_init(struct intel_ring_buffer *ring) { - if (NEED_BLT_WORKAROUND(dev)) { + if (NEED_BLT_WORKAROUND(ring->dev)) { struct drm_i915_gem_object *obj; - u32 __iomem *ptr; + u32 *ptr; int ret; - obj = to_intel_bo(i915_gem_alloc_object(dev, 4096)); + obj = i915_gem_alloc_object(ring->dev, 4096); if (obj == NULL) return -ENOMEM; - ret = i915_gem_object_pin(&obj->base, 4096); + ret = i915_gem_object_pin(obj, 4096, true); if (ret) { drm_gem_object_unreference(&obj->base); 
return ret; } ptr = kmap(obj->pages[0]); - iowrite32(MI_BATCH_BUFFER_END, ptr); - iowrite32(MI_NOOP, ptr+1); + *ptr++ = MI_BATCH_BUFFER_END; + *ptr++ = MI_NOOP; kunmap(obj->pages[0]); - ret = i915_gem_object_set_to_gtt_domain(&obj->base, false); + ret = i915_gem_object_set_to_gtt_domain(obj, false); if (ret) { - i915_gem_object_unpin(&obj->base); + i915_gem_object_unpin(obj); drm_gem_object_unreference(&obj->base); return ret; } @@ -917,51 +1096,39 @@ static int blt_ring_init(struct drm_device *dev, ring->private = obj; } - return init_ring_common(dev, ring); + return init_ring_common(ring); } -static void blt_ring_begin(struct drm_device *dev, - struct intel_ring_buffer *ring, +static int blt_ring_begin(struct intel_ring_buffer *ring, int num_dwords) { if (ring->private) { - intel_ring_begin(dev, ring, num_dwords+2); - intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START); - intel_ring_emit(dev, ring, to_blt_workaround(ring)->gtt_offset); + int ret = intel_ring_begin(ring, num_dwords+2); + if (ret) + return ret; + + intel_ring_emit(ring, MI_BATCH_BUFFER_START); + intel_ring_emit(ring, to_blt_workaround(ring)->gtt_offset); + + return 0; } else - intel_ring_begin(dev, ring, 4); + return intel_ring_begin(ring, 4); } -static void blt_ring_flush(struct drm_device *dev, - struct intel_ring_buffer *ring, +static void blt_ring_flush(struct intel_ring_buffer *ring, u32 invalidate_domains, u32 flush_domains) { - blt_ring_begin(dev, ring, 4); - intel_ring_emit(dev, ring, MI_FLUSH_DW); - intel_ring_emit(dev, ring, 0); - intel_ring_emit(dev, ring, 0); - intel_ring_emit(dev, ring, 0); - intel_ring_advance(dev, ring); -} - -static u32 -blt_ring_add_request(struct drm_device *dev, - struct intel_ring_buffer *ring, - u32 flush_domains) -{ - u32 seqno = i915_gem_get_seqno(dev); - - blt_ring_begin(dev, ring, 4); - intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX); - intel_ring_emit(dev, ring, - I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); - intel_ring_emit(dev, ring, seqno); - intel_ring_emit(dev, ring, MI_USER_INTERRUPT); - intel_ring_advance(dev, ring); + if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0) + return; - DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno); - return seqno; + if (blt_ring_begin(ring, 4) == 0) { + intel_ring_emit(ring, MI_FLUSH_DW); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, 0); + intel_ring_advance(ring); + } } static void blt_ring_cleanup(struct intel_ring_buffer *ring) @@ -982,47 +1149,54 @@ static const struct intel_ring_buffer gen6_blt_ring = { .init = blt_ring_init, .write_tail = ring_write_tail, .flush = blt_ring_flush, - .add_request = blt_ring_add_request, - .get_seqno = ring_status_page_get_seqno, - .user_irq_get = blt_ring_get_user_irq, - .user_irq_put = blt_ring_put_user_irq, - .dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer, + .add_request = gen6_add_request, + .get_seqno = ring_get_seqno, + .irq_get = blt_ring_get_irq, + .irq_put = blt_ring_put_irq, + .dispatch_execbuffer = gen6_ring_dispatch_execbuffer, .cleanup = blt_ring_cleanup, }; int intel_init_render_ring_buffer(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; - - dev_priv->render_ring = render_ring; + struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; + + *ring = render_ring; + if (INTEL_INFO(dev)->gen >= 6) { + ring->add_request = gen6_add_request; + } else if (IS_GEN5(dev)) { + ring->add_request = pc_render_add_request; + ring->get_seqno = pc_render_get_seqno; + } if (!I915_NEED_GFX_HWS(dev)) { - 
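
/*
 * Editor's note: illustrative sketch, not part of the patch. The
 * intel_init_render_ring_buffer() change above starts from a const template
 * struct and then patches individual function pointers per GPU generation.
 * The pattern in isolation, with hypothetical ops and generation numbers:
 */
#include <stdio.h>

struct ring_ops {
        const char *name;
        void (*add_request)(void);
};

static void legacy_add_request(void) { puts("legacy add_request"); }
static void gen6_add_request(void)   { puts("gen6 add_request");   }

static const struct ring_ops render_template = {
        .name        = "render ring",
        .add_request = legacy_add_request,
};

static void init_render_ring(struct ring_ops *ring, int gen)
{
        *ring = render_template;        /* copy the template... */
        if (gen >= 6)                   /* ...then specialize per generation */
                ring->add_request = gen6_add_request;
}

int main(void)
{
        struct ring_ops ring;

        init_render_ring(&ring, 6);
        ring.add_request();             /* prints "gen6 add_request" */
        return 0;
}
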
dev_priv->render_ring.status_page.page_addr - = dev_priv->status_page_dmah->vaddr; - memset(dev_priv->render_ring.status_page.page_addr, - 0, PAGE_SIZE); + ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr; + memset(ring->status_page.page_addr, 0, PAGE_SIZE); } - return intel_init_ring_buffer(dev, &dev_priv->render_ring); + return intel_init_ring_buffer(dev, ring); } int intel_init_bsd_ring_buffer(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring = &dev_priv->ring[VCS]; if (IS_GEN6(dev)) - dev_priv->bsd_ring = gen6_bsd_ring; + *ring = gen6_bsd_ring; else - dev_priv->bsd_ring = bsd_ring; + *ring = bsd_ring; - return intel_init_ring_buffer(dev, &dev_priv->bsd_ring); + return intel_init_ring_buffer(dev, ring); } int intel_init_blt_ring_buffer(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring = &dev_priv->ring[BCS]; - dev_priv->blt_ring = gen6_blt_ring; + *ring = gen6_blt_ring; - return intel_init_ring_buffer(dev, &dev_priv->blt_ring); + return intel_init_ring_buffer(dev, ring); } diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 3126c2681983..8e2e357ad6ee 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -1,22 +1,37 @@ #ifndef _INTEL_RINGBUFFER_H_ #define _INTEL_RINGBUFFER_H_ +enum { + RCS = 0x0, + VCS, + BCS, + I915_NUM_RINGS, +}; + struct intel_hw_status_page { - void *page_addr; + u32 __iomem *page_addr; unsigned int gfx_addr; - struct drm_gem_object *obj; + struct drm_i915_gem_object *obj; }; -#define I915_READ_TAIL(ring) I915_READ(RING_TAIL(ring->mmio_base)) +#define I915_RING_READ(reg) i915_safe_read(dev_priv, reg) + +#define I915_READ_TAIL(ring) I915_RING_READ(RING_TAIL(ring->mmio_base)) #define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL(ring->mmio_base), val) -#define I915_READ_START(ring) I915_READ(RING_START(ring->mmio_base)) + +#define I915_READ_START(ring) I915_RING_READ(RING_START(ring->mmio_base)) #define I915_WRITE_START(ring, val) I915_WRITE(RING_START(ring->mmio_base), val) -#define I915_READ_HEAD(ring) I915_READ(RING_HEAD(ring->mmio_base)) + +#define I915_READ_HEAD(ring) I915_RING_READ(RING_HEAD(ring->mmio_base)) #define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD(ring->mmio_base), val) -#define I915_READ_CTL(ring) I915_READ(RING_CTL(ring->mmio_base)) + +#define I915_READ_CTL(ring) I915_RING_READ(RING_CTL(ring->mmio_base)) #define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL(ring->mmio_base), val) -struct drm_i915_gem_execbuffer2; +#define I915_READ_NOPID(ring) I915_RING_READ(RING_NOPID(ring->mmio_base)) +#define I915_READ_SYNC_0(ring) I915_RING_READ(RING_SYNC_0(ring->mmio_base)) +#define I915_READ_SYNC_1(ring) I915_RING_READ(RING_SYNC_1(ring->mmio_base)) + struct intel_ring_buffer { const char *name; enum intel_ring_id { @@ -25,44 +40,36 @@ struct intel_ring_buffer { RING_BLT = 0x4, } id; u32 mmio_base; - unsigned long size; void *virtual_start; struct drm_device *dev; - struct drm_gem_object *gem_object; + struct drm_i915_gem_object *obj; - unsigned int head; - unsigned int tail; + u32 actual_head; + u32 head; + u32 tail; int space; + int size; struct intel_hw_status_page status_page; - u32 irq_gem_seqno; /* last seq seem at irq time */ - u32 waiting_gem_seqno; - int user_irq_refcount; - void (*user_irq_get)(struct drm_device *dev, - struct intel_ring_buffer *ring); - void (*user_irq_put)(struct drm_device *dev, - struct 
intel_ring_buffer *ring); + u32 irq_seqno; /* last seq seem at irq time */ + u32 waiting_seqno; + u32 sync_seqno[I915_NUM_RINGS-1]; + atomic_t irq_refcount; + bool __must_check (*irq_get)(struct intel_ring_buffer *ring); + void (*irq_put)(struct intel_ring_buffer *ring); - int (*init)(struct drm_device *dev, - struct intel_ring_buffer *ring); + int (*init)(struct intel_ring_buffer *ring); - void (*write_tail)(struct drm_device *dev, - struct intel_ring_buffer *ring, + void (*write_tail)(struct intel_ring_buffer *ring, u32 value); - void (*flush)(struct drm_device *dev, - struct intel_ring_buffer *ring, - u32 invalidate_domains, - u32 flush_domains); - u32 (*add_request)(struct drm_device *dev, - struct intel_ring_buffer *ring, - u32 flush_domains); - u32 (*get_seqno)(struct drm_device *dev, - struct intel_ring_buffer *ring); - int (*dispatch_gem_execbuffer)(struct drm_device *dev, - struct intel_ring_buffer *ring, - struct drm_i915_gem_execbuffer2 *exec, - struct drm_clip_rect *cliprects, - uint64_t exec_offset); + void (*flush)(struct intel_ring_buffer *ring, + u32 invalidate_domains, + u32 flush_domains); + int (*add_request)(struct intel_ring_buffer *ring, + u32 *seqno); + u32 (*get_seqno)(struct intel_ring_buffer *ring); + int (*dispatch_execbuffer)(struct intel_ring_buffer *ring, + u32 offset, u32 length); void (*cleanup)(struct intel_ring_buffer *ring); /** @@ -95,7 +102,7 @@ struct intel_ring_buffer { /** * Do we have some not yet emitted requests outstanding? */ - bool outstanding_lazy_request; + u32 outstanding_lazy_request; wait_queue_head_t irq_queue; drm_local_map_t map; @@ -104,44 +111,54 @@ struct intel_ring_buffer { }; static inline u32 +intel_ring_sync_index(struct intel_ring_buffer *ring, + struct intel_ring_buffer *other) +{ + int idx; + + /* + * cs -> 0 = vcs, 1 = bcs + * vcs -> 0 = bcs, 1 = cs, + * bcs -> 0 = cs, 1 = vcs. 
+ */ + + idx = (other - ring) - 1; + if (idx < 0) + idx += I915_NUM_RINGS; + + return idx; +} + +static inline u32 intel_read_status_page(struct intel_ring_buffer *ring, - int reg) + int reg) { - u32 *regs = ring->status_page.page_addr; - return regs[reg]; + return ioread32(ring->status_page.page_addr + reg); } -int intel_init_ring_buffer(struct drm_device *dev, - struct intel_ring_buffer *ring); -void intel_cleanup_ring_buffer(struct drm_device *dev, - struct intel_ring_buffer *ring); -int intel_wait_ring_buffer(struct drm_device *dev, - struct intel_ring_buffer *ring, int n); -void intel_ring_begin(struct drm_device *dev, - struct intel_ring_buffer *ring, int n); - -static inline void intel_ring_emit(struct drm_device *dev, - struct intel_ring_buffer *ring, - unsigned int data) +void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring); +int __must_check intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n); +int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n); + +static inline void intel_ring_emit(struct intel_ring_buffer *ring, + u32 data) { - unsigned int *virt = ring->virtual_start + ring->tail; - *virt = data; + iowrite32(data, ring->virtual_start + ring->tail); ring->tail += 4; } -void intel_ring_advance(struct drm_device *dev, - struct intel_ring_buffer *ring); +void intel_ring_advance(struct intel_ring_buffer *ring); -u32 intel_ring_get_seqno(struct drm_device *dev, - struct intel_ring_buffer *ring); +u32 intel_ring_get_seqno(struct intel_ring_buffer *ring); +int intel_ring_sync(struct intel_ring_buffer *ring, + struct intel_ring_buffer *to, + u32 seqno); int intel_init_render_ring_buffer(struct drm_device *dev); int intel_init_bsd_ring_buffer(struct drm_device *dev); int intel_init_blt_ring_buffer(struct drm_device *dev); -u32 intel_ring_get_active_head(struct drm_device *dev, - struct intel_ring_buffer *ring); -void intel_ring_setup_status_page(struct drm_device *dev, - struct intel_ring_buffer *ring); +u32 intel_ring_get_active_head(struct intel_ring_buffer *ring); +void intel_ring_setup_status_page(struct intel_ring_buffer *ring); #endif /* _INTEL_RINGBUFFER_H_ */ diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index de158b76bcd5..6c0bb18a26e8 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -107,7 +107,8 @@ struct intel_sdvo { * This is set if we treat the device as HDMI, instead of DVI. */ bool is_hdmi; - bool has_audio; + bool has_hdmi_monitor; + bool has_hdmi_audio; /** * This is set if we detect output of sdvo device as LVDS and @@ -1023,7 +1024,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, if (!intel_sdvo_set_target_input(intel_sdvo)) return; - if (intel_sdvo->is_hdmi && + if (intel_sdvo->has_hdmi_monitor && !intel_sdvo_set_avi_infoframe(intel_sdvo)) return; @@ -1044,7 +1045,9 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, /* Set the SDVO control regs. 
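
/*
 * Editor's note: illustrative sketch, not part of the patch. The new
 * intel_ring_sync_index() helper above maps a (this ring, other ring) pair
 * to a slot in the 0..N-2 range by pointer arithmetic over the rings array,
 * i.e. index = (other - ring - 1) mod N. Modeled with plain ints:
 */
#include <assert.h>

#define NUM_RINGS 3     /* RCS = 0, VCS = 1, BCS = 2 */

static int sync_index(int ring, int other)
{
        int idx = other - ring - 1;

        if (idx < 0)
                idx += NUM_RINGS;
        return idx;
}

int main(void)
{
        /* matches the table in the comment above */
        assert(sync_index(0, 1) == 0 && sync_index(0, 2) == 1); /* cs  */
        assert(sync_index(1, 2) == 0 && sync_index(1, 0) == 1); /* vcs */
        assert(sync_index(2, 0) == 0 && sync_index(2, 1) == 1); /* bcs */
        return 0;
}
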
*/ if (INTEL_INFO(dev)->gen >= 4) { - sdvox = SDVO_BORDER_ENABLE; + sdvox = 0; + if (INTEL_INFO(dev)->gen < 5) + sdvox |= SDVO_BORDER_ENABLE; if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) sdvox |= SDVO_VSYNC_ACTIVE_HIGH; if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) @@ -1063,7 +1066,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, } if (intel_crtc->pipe == 1) sdvox |= SDVO_PIPE_B_SELECT; - if (intel_sdvo->has_audio) + if (intel_sdvo->has_hdmi_audio) sdvox |= SDVO_AUDIO_ENABLE; if (INTEL_INFO(dev)->gen >= 4) { @@ -1074,7 +1077,8 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, sdvox |= (pixel_multiplier - 1) << SDVO_PORT_MULTIPLY_SHIFT; } - if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL) + if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL && + INTEL_INFO(dev)->gen < 5) sdvox |= SDVO_STALL_SELECT; intel_sdvo_write_sdvox(intel_sdvo, sdvox); } @@ -1295,55 +1299,14 @@ intel_sdvo_get_edid(struct drm_connector *connector) return drm_get_edid(connector, &sdvo->ddc); } -static struct drm_connector * -intel_find_analog_connector(struct drm_device *dev) -{ - struct drm_connector *connector; - struct intel_sdvo *encoder; - - list_for_each_entry(encoder, - &dev->mode_config.encoder_list, - base.base.head) { - if (encoder->base.type == INTEL_OUTPUT_ANALOG) { - list_for_each_entry(connector, - &dev->mode_config.connector_list, - head) { - if (&encoder->base == - intel_attached_encoder(connector)) - return connector; - } - } - } - - return NULL; -} - -static int -intel_analog_is_connected(struct drm_device *dev) -{ - struct drm_connector *analog_connector; - - analog_connector = intel_find_analog_connector(dev); - if (!analog_connector) - return false; - - if (analog_connector->funcs->detect(analog_connector, false) == - connector_status_disconnected) - return false; - - return true; -} - /* Mac mini hack -- use the same DDC as the analog connector */ static struct edid * intel_sdvo_get_analog_edid(struct drm_connector *connector) { struct drm_i915_private *dev_priv = connector->dev->dev_private; - if (!intel_analog_is_connected(connector->dev)) - return NULL; - - return drm_get_edid(connector, &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter); + return drm_get_edid(connector, + &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter); } enum drm_connector_status @@ -1388,8 +1351,10 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector) /* DDC bus is shared, match EDID to connector type */ if (edid->input & DRM_EDID_INPUT_DIGITAL) { status = connector_status_connected; - intel_sdvo->is_hdmi = drm_detect_hdmi_monitor(edid); - intel_sdvo->has_audio = drm_detect_monitor_audio(edid); + if (intel_sdvo->is_hdmi) { + intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid); + intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid); + } } connector->display_info.raw_edid = NULL; kfree(edid); @@ -1398,7 +1363,7 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector) if (status == connector_status_connected) { struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); if (intel_sdvo_connector->force_audio) - intel_sdvo->has_audio = intel_sdvo_connector->force_audio > 0; + intel_sdvo->has_hdmi_audio = intel_sdvo_connector->force_audio > 0; } return status; @@ -1415,10 +1380,12 @@ intel_sdvo_detect(struct drm_connector *connector, bool force) if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0)) return connector_status_unknown; - if (intel_sdvo->is_tv) { - /* add 30ms delay when the output type is 
SDVO-TV */ + + /* add 30ms delay when the output type might be TV */ + if (intel_sdvo->caps.output_flags & + (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_CVBS0)) mdelay(30); - } + if (!intel_sdvo_read_response(intel_sdvo, &response, 2)) return connector_status_unknown; @@ -1472,8 +1439,10 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) edid = intel_sdvo_get_analog_edid(connector); if (edid != NULL) { - drm_mode_connector_update_edid_property(connector, edid); - drm_add_edid_modes(connector, edid); + if (edid->input & DRM_EDID_INPUT_DIGITAL) { + drm_mode_connector_update_edid_property(connector, edid); + drm_add_edid_modes(connector, edid); + } connector->display_info.raw_edid = NULL; kfree(edid); } @@ -1713,12 +1682,12 @@ intel_sdvo_set_property(struct drm_connector *connector, intel_sdvo_connector->force_audio = val; - if (val > 0 && intel_sdvo->has_audio) + if (val > 0 && intel_sdvo->has_hdmi_audio) return 0; - if (val < 0 && !intel_sdvo->has_audio) + if (val < 0 && !intel_sdvo->has_hdmi_audio) return 0; - intel_sdvo->has_audio = val > 0; + intel_sdvo->has_hdmi_audio = val > 0; goto done; } @@ -1942,9 +1911,12 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv, speed = mapping->i2c_speed; } - sdvo->i2c = &dev_priv->gmbus[pin].adapter; - intel_gmbus_set_speed(sdvo->i2c, speed); - intel_gmbus_force_bit(sdvo->i2c, true); + if (pin < GMBUS_NUM_PORTS) { + sdvo->i2c = &dev_priv->gmbus[pin].adapter; + intel_gmbus_set_speed(sdvo->i2c, speed); + intel_gmbus_force_bit(sdvo->i2c, true); + } else + sdvo->i2c = &dev_priv->gmbus[GMBUS_PORT_DPB].adapter; } static bool @@ -2070,6 +2042,8 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device) intel_sdvo_set_colorimetry(intel_sdvo, SDVO_COLORIMETRY_RGB256); connector->connector_type = DRM_MODE_CONNECTOR_HDMIA; + + intel_sdvo_add_hdmi_properties(intel_sdvo_connector); intel_sdvo->is_hdmi = true; } intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) | @@ -2077,8 +2051,6 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device) intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); - intel_sdvo_add_hdmi_properties(intel_sdvo_connector); - return true; } diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 2f7681989316..93206e4eaa6f 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -1245,10 +1245,11 @@ intel_tv_detect_type (struct intel_tv *intel_tv) int type; /* Disable TV interrupts around load detect or we'll recurse */ - spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); - i915_disable_pipestat(dev_priv, 0, PIPE_HOTPLUG_INTERRUPT_ENABLE | + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + i915_disable_pipestat(dev_priv, 0, + PIPE_HOTPLUG_INTERRUPT_ENABLE | PIPE_HOTPLUG_TV_INTERRUPT_ENABLE); - spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); save_tv_dac = tv_dac = I915_READ(TV_DAC); save_tv_ctl = tv_ctl = I915_READ(TV_CTL); @@ -1301,10 +1302,11 @@ intel_tv_detect_type (struct intel_tv *intel_tv) I915_WRITE(TV_CTL, save_tv_ctl); /* Restore interrupt config */ - spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); - i915_enable_pipestat(dev_priv, 0, PIPE_HOTPLUG_INTERRUPT_ENABLE | + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + i915_enable_pipestat(dev_priv, 0, + PIPE_HOTPLUG_INTERRUPT_ENABLE | PIPE_HOTPLUG_TV_INTERRUPT_ENABLE); - spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 
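
/*
 * Editor's note: illustrative sketch, not part of the patch. The SDVO
 * i2c-bus selection above now range-checks the BIOS-provided pin before
 * indexing the gmbus table and falls back to a known-good port otherwise.
 * Generic shape of the guard; the port names and fallback index here are
 * invented for the example:
 */
#include <stdio.h>

#define NUM_PORTS    6
#define DEFAULT_PORT 4          /* stand-in for the known-good fallback */

static const char *ports[NUM_PORTS] = {
        "port0", "port1", "port2", "port3", "port4", "port5",
};

static const char *select_port(unsigned int pin)
{
        /* never trust firmware-sourced indices */
        if (pin < NUM_PORTS)
                return ports[pin];
        return ports[DEFAULT_PORT];
}

int main(void)
{
        printf("%s\n", select_port(2));  /* valid pin */
        printf("%s\n", select_port(99)); /* bogus BIOS value -> fallback */
        return 0;
}
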
return type; } diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c index 9b0773b75e86..258fa5e7a2d9 100644 --- a/drivers/gpu/drm/radeon/atom.c +++ b/drivers/gpu/drm/radeon/atom.c @@ -112,6 +112,7 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base, base += 3; break; case ATOM_IIO_WRITE: + (void)ctx->card->ioreg_read(ctx->card, CU16(base + 1)); ctx->card->ioreg_write(ctx->card, CU16(base + 1), temp); base += 3; break; diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 53bfe3afb0fa..c6a37e036f11 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -1198,8 +1198,10 @@ static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc mc->vram_end, mc->real_vram_size >> 20); } else { u64 base = 0; - if (rdev->flags & RADEON_IS_IGP) - base = (RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24; + if (rdev->flags & RADEON_IS_IGP) { + base = RREG32(MC_VM_FB_LOCATION) & 0xFFFF; + base <<= 24; + } radeon_vram_location(rdev, &rdev->mc, base); rdev->mc.gtt_base_align = 0; radeon_gtt_location(rdev, mc); diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index 9bebac1ec006..0f90fc3482ce 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c @@ -315,7 +315,7 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) if (array_mode == V_0280A0_ARRAY_LINEAR_GENERAL) { /* the initial DDX does bad things with the CB size occasionally */ /* it rounds up height too far for slice tile max but the BO is smaller */ - tmp = (height - 7) * pitch * bpe; + tmp = (height - 7) * 8 * bpe; if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) { dev_warn(p->dev, "%s offset[%d] %d %d %lu too big\n", __func__, i, track->cb_color_bo_offset[i], tmp, radeon_bo_size(track->cb_color_bo[i])); return -EINVAL; diff --git a/drivers/gpu/drm/radeon/r600_reg.h b/drivers/gpu/drm/radeon/r600_reg.h index d84612ae47e0..33cda016b083 100644 --- a/drivers/gpu/drm/radeon/r600_reg.h +++ b/drivers/gpu/drm/radeon/r600_reg.h @@ -86,6 +86,7 @@ #define R600_HDP_NONSURFACE_BASE 0x2c04 #define R600_BUS_CNTL 0x5420 +# define R600_BIOS_ROM_DIS (1 << 1) #define R600_CONFIG_CNTL 0x5424 #define R600_CONFIG_MEMSIZE 0x5428 #define R600_CONFIG_F0_BASE 0x542C diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index d6c611eee204..45bc750e9ae2 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -98,6 +98,14 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev } } + /* some DCE3 boards have bad data for this entry */ + if (ASIC_IS_DCE3(rdev)) { + if ((i == 4) && + (gpio->usClkMaskRegisterIndex == 0x1fda) && + (gpio->sucI2cId.ucAccess == 0x94)) + gpio->sucI2cId.ucAccess = 0x14; + } + if (gpio->sucI2cId.ucAccess == id) { i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4; @@ -174,6 +182,14 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev) } } + /* some DCE3 boards have bad data for this entry */ + if (ASIC_IS_DCE3(rdev)) { + if ((i == 4) && + (gpio->usClkMaskRegisterIndex == 0x1fda) && + (gpio->sucI2cId.ucAccess == 0x94)) + gpio->sucI2cId.ucAccess = 0x14; + } + i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4; i2c.en_clk_reg = 
le16_to_cpu(gpio->usClkEnRegisterIndex) * 4; diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c index 654787ec43f4..8f2c7b50dcf5 100644 --- a/drivers/gpu/drm/radeon/radeon_bios.c +++ b/drivers/gpu/drm/radeon/radeon_bios.c @@ -130,6 +130,7 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev) } return true; } + static bool r700_read_disabled_bios(struct radeon_device *rdev) { uint32_t viph_control; @@ -143,7 +144,7 @@ static bool r700_read_disabled_bios(struct radeon_device *rdev) bool r; viph_control = RREG32(RADEON_VIPH_CONTROL); - bus_cntl = RREG32(RADEON_BUS_CNTL); + bus_cntl = RREG32(R600_BUS_CNTL); d1vga_control = RREG32(AVIVO_D1VGA_CONTROL); d2vga_control = RREG32(AVIVO_D2VGA_CONTROL); vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL); @@ -152,7 +153,7 @@ static bool r700_read_disabled_bios(struct radeon_device *rdev) /* disable VIP */ WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN)); /* enable the rom */ - WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM)); + WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS)); /* Disable VGA mode */ WREG32(AVIVO_D1VGA_CONTROL, (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE | @@ -191,7 +192,7 @@ static bool r700_read_disabled_bios(struct radeon_device *rdev) cg_spll_status = RREG32(R600_CG_SPLL_STATUS); } WREG32(RADEON_VIPH_CONTROL, viph_control); - WREG32(RADEON_BUS_CNTL, bus_cntl); + WREG32(R600_BUS_CNTL, bus_cntl); WREG32(AVIVO_D1VGA_CONTROL, d1vga_control); WREG32(AVIVO_D2VGA_CONTROL, d2vga_control); WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control); @@ -216,7 +217,7 @@ static bool r600_read_disabled_bios(struct radeon_device *rdev) bool r; viph_control = RREG32(RADEON_VIPH_CONTROL); - bus_cntl = RREG32(RADEON_BUS_CNTL); + bus_cntl = RREG32(R600_BUS_CNTL); d1vga_control = RREG32(AVIVO_D1VGA_CONTROL); d2vga_control = RREG32(AVIVO_D2VGA_CONTROL); vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL); @@ -231,7 +232,7 @@ static bool r600_read_disabled_bios(struct radeon_device *rdev) /* disable VIP */ WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN)); /* enable the rom */ - WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM)); + WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS)); /* Disable VGA mode */ WREG32(AVIVO_D1VGA_CONTROL, (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE | @@ -262,7 +263,7 @@ static bool r600_read_disabled_bios(struct radeon_device *rdev) /* restore regs */ WREG32(RADEON_VIPH_CONTROL, viph_control); - WREG32(RADEON_BUS_CNTL, bus_cntl); + WREG32(R600_BUS_CNTL, bus_cntl); WREG32(AVIVO_D1VGA_CONTROL, d1vga_control); WREG32(AVIVO_D2VGA_CONTROL, d2vga_control); WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control); diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 111a844c1ecb..591fcae8f224 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c @@ -730,7 +730,7 @@ void radeon_combios_i2c_init(struct radeon_device *rdev) clk = RBIOS8(offset + 3 + (i * 5) + 3); data = RBIOS8(offset + 3 + (i * 5) + 4); i2c = combios_setup_i2c_bus(rdev, DDC_MONID, - clk, data); + (1 << clk), (1 << data)); rdev->i2c_bus[4] = radeon_i2c_create(dev, &i2c, "GPIOPAD_MASK"); break; } diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index f3ba066ded20..5b00f92a50a2 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -1178,6 +1178,8 @@ 
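
/*
 * Editor's note: illustrative sketch, not part of the patch. The
 * r600_vram_gtt_location() change above dodges a classic C trap: shifting
 * a 32-bit register value left by 24 overflows *before* the result is
 * widened to 64 bits. Widening first preserves the high bits:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t reg = 0x8000;  /* example 16-bit framebuffer-location field */

        /* broken: the shift happens in 32 bits, high bits are already gone */
        uint64_t bad = (uint64_t)((reg & 0xFFFF) << 24);

        /* fixed: widen to 64 bits first, then shift */
        uint64_t base = reg & 0xFFFF;
        base <<= 24;

        printf("bad  = 0x%llx\n", (unsigned long long)bad);  /* 0x0 */
        printf("good = 0x%llx\n", (unsigned long long)base); /* 0x8000000000 */
        return 0;
}
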
radeon_add_atom_connector(struct drm_device *dev, /* no HPD on analog connectors */ radeon_connector->hpd.hpd = RADEON_HPD_NONE; connector->polled = DRM_CONNECTOR_POLL_CONNECT; + connector->interlace_allowed = true; + connector->doublescan_allowed = true; break; case DRM_MODE_CONNECTOR_DVIA: drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); @@ -1193,6 +1195,8 @@ radeon_add_atom_connector(struct drm_device *dev, 1); /* no HPD on analog connectors */ radeon_connector->hpd.hpd = RADEON_HPD_NONE; + connector->interlace_allowed = true; + connector->doublescan_allowed = true; break; case DRM_MODE_CONNECTOR_DVII: case DRM_MODE_CONNECTOR_DVID: @@ -1229,6 +1233,11 @@ radeon_add_atom_connector(struct drm_device *dev, rdev->mode_info.load_detect_property, 1); } + connector->interlace_allowed = true; + if (connector_type == DRM_MODE_CONNECTOR_DVII) + connector->doublescan_allowed = true; + else + connector->doublescan_allowed = false; break; case DRM_MODE_CONNECTOR_HDMIA: case DRM_MODE_CONNECTOR_HDMIB: @@ -1259,6 +1268,11 @@ radeon_add_atom_connector(struct drm_device *dev, 0); } subpixel_order = SubPixelHorizontalRGB; + connector->interlace_allowed = true; + if (connector_type == DRM_MODE_CONNECTOR_HDMIB) + connector->doublescan_allowed = true; + else + connector->doublescan_allowed = false; break; case DRM_MODE_CONNECTOR_DisplayPort: case DRM_MODE_CONNECTOR_eDP: @@ -1296,6 +1310,9 @@ radeon_add_atom_connector(struct drm_device *dev, rdev->mode_info.underscan_vborder_property, 0); } + connector->interlace_allowed = true; + /* in theory with a DP to VGA converter... */ + connector->doublescan_allowed = false; break; case DRM_MODE_CONNECTOR_SVIDEO: case DRM_MODE_CONNECTOR_Composite: @@ -1311,6 +1328,8 @@ radeon_add_atom_connector(struct drm_device *dev, radeon_atombios_get_tv_info(rdev)); /* no HPD on analog connectors */ radeon_connector->hpd.hpd = RADEON_HPD_NONE; + connector->interlace_allowed = false; + connector->doublescan_allowed = false; break; case DRM_MODE_CONNECTOR_LVDS: radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); @@ -1329,6 +1348,8 @@ radeon_add_atom_connector(struct drm_device *dev, dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_FULLSCREEN); subpixel_order = SubPixelHorizontalRGB; + connector->interlace_allowed = false; + connector->doublescan_allowed = false; break; } @@ -1406,6 +1427,8 @@ radeon_add_legacy_connector(struct drm_device *dev, /* no HPD on analog connectors */ radeon_connector->hpd.hpd = RADEON_HPD_NONE; connector->polled = DRM_CONNECTOR_POLL_CONNECT; + connector->interlace_allowed = true; + connector->doublescan_allowed = true; break; case DRM_MODE_CONNECTOR_DVIA: drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); @@ -1421,6 +1444,8 @@ radeon_add_legacy_connector(struct drm_device *dev, 1); /* no HPD on analog connectors */ radeon_connector->hpd.hpd = RADEON_HPD_NONE; + connector->interlace_allowed = true; + connector->doublescan_allowed = true; break; case DRM_MODE_CONNECTOR_DVII: case DRM_MODE_CONNECTOR_DVID: @@ -1438,6 +1463,11 @@ radeon_add_legacy_connector(struct drm_device *dev, 1); } subpixel_order = SubPixelHorizontalRGB; + connector->interlace_allowed = true; + if (connector_type == DRM_MODE_CONNECTOR_DVII) + connector->doublescan_allowed = true; + else + connector->doublescan_allowed = false; break; case DRM_MODE_CONNECTOR_SVIDEO: case DRM_MODE_CONNECTOR_Composite: @@ -1460,6 +1490,8 @@ radeon_add_legacy_connector(struct 
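
/*
 * Editor's note: illustrative sketch, not part of the patch. The
 * radeon_combios_i2c_init() change above passes (1 << clk) instead of clk:
 * the BIOS table stores GPIO *bit numbers*, while combios_setup_i2c_bus()
 * expects *bit masks*. Confusing the two silently selects the wrong pins:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint8_t  clk_bit  = 3;                       /* as stored in the table */
        uint32_t clk_mask = UINT32_C(1) << clk_bit;  /* as the caller expects  */

        printf("bit %u vs mask 0x%x\n", clk_bit, clk_mask); /* 3 vs 0x8 */
        return 0;
}
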
drm_device *dev, radeon_combios_get_tv_info(rdev)); /* no HPD on analog connectors */ radeon_connector->hpd.hpd = RADEON_HPD_NONE; + connector->interlace_allowed = false; + connector->doublescan_allowed = false; break; case DRM_MODE_CONNECTOR_LVDS: drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type); @@ -1473,6 +1505,8 @@ radeon_add_legacy_connector(struct drm_device *dev, dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_FULLSCREEN); subpixel_order = SubPixelHorizontalRGB; + connector->interlace_allowed = false; + connector->doublescan_allowed = false; break; } diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 3952cf3d0ee9..86660cb425ab 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -287,7 +287,7 @@ void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 mc->mc_vram_size = mc->aper_size; } mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; - dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n", + dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n", mc->mc_vram_size >> 20, mc->vram_start, mc->vram_end, mc->real_vram_size >> 20); } @@ -324,7 +324,7 @@ void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc) mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align; } mc->gtt_end = mc->gtt_start + mc->gtt_size - 1; - dev_info(rdev->dev, "GTT: %lluM 0x%08llX - 0x%08llX\n", + dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n", mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end); } diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 8bdf0ba2983a..7d6b8e88f746 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -70,7 +70,7 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain) u32 c = 0; rbo->placement.fpfn = 0; - rbo->placement.lpfn = rbo->rdev->mc.active_vram_size >> PAGE_SHIFT; + rbo->placement.lpfn = 0; rbo->placement.placement = rbo->placements; rbo->placement.busy_placement = rbo->placements; if (domain & RADEON_GEM_DOMAIN_VRAM) @@ -92,7 +92,8 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj, { struct radeon_bo *bo; enum ttm_bo_type type; - int page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT; + unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT; + unsigned long max_size = 0; int r; if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) { @@ -105,6 +106,14 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj, } *bo_ptr = NULL; + /* maximun bo size is the minimun btw visible vram and gtt size */ + max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size); + if ((page_align << PAGE_SHIFT) >= max_size) { + printk(KERN_WARNING "%s:%d alloc size %ldM bigger than %ldMb limit\n", + __func__, __LINE__, page_align >> (20 - PAGE_SHIFT), max_size >> 20); + return -ENOMEM; + } + retry: bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL); if (bo == NULL) diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 515345b11ac9..88cb04e7962b 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1386,6 +1386,7 @@ static const struct hid_device_id hid_blacklist[] = { { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb651) }, { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb653) }, { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb654) }, + { 
HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb65a) }, { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) }, { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) }, { HID_USB_DEVICE(USB_VENDOR_ID_TWINHAN, USB_DEVICE_ID_TWINHAN_IR_REMOTE) }, diff --git a/drivers/hid/hid-egalax.c b/drivers/hid/hid-egalax.c index 54b017ad258d..5a1b52e0eb85 100644 --- a/drivers/hid/hid-egalax.c +++ b/drivers/hid/hid-egalax.c @@ -221,7 +221,7 @@ static int egalax_probe(struct hid_device *hdev, const struct hid_device_id *id) struct egalax_data *td; struct hid_report *report; - td = kmalloc(sizeof(struct egalax_data), GFP_KERNEL); + td = kzalloc(sizeof(struct egalax_data), GFP_KERNEL); if (!td) { dev_err(&hdev->dev, "cannot allocate eGalax data\n"); return -ENOMEM; diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index bb0b3659437b..d8d372bae3cc 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -174,7 +174,7 @@ static int hidinput_setkeycode(struct input_dev *dev, clear_bit(*old_keycode, dev->keybit); set_bit(usage->code, dev->keybit); - dbg_hid(KERN_DEBUG "Assigned keycode %d to HID usage code %x\n", + dbg_hid("Assigned keycode %d to HID usage code %x\n", usage->code, usage->hid); /* @@ -203,8 +203,8 @@ static int hidinput_setkeycode(struct input_dev *dev, * * as seen in the HID specification v1.11 6.2.2.7 Global Items. * - * Only exponent 1 length units are processed. Centimeters are converted to - * inches. Degrees are converted to radians. + * Only exponent 1 length units are processed. Centimeters and inches are + * converted to millimeters. Degrees are converted to radians. */ static __s32 hidinput_calc_abs_res(const struct hid_field *field, __u16 code) { @@ -225,13 +225,16 @@ static __s32 hidinput_calc_abs_res(const struct hid_field *field, __u16 code) */ if (code == ABS_X || code == ABS_Y || code == ABS_Z) { if (field->unit == 0x11) { /* If centimeters */ - /* Convert to inches */ - prev = logical_extents; - logical_extents *= 254; - if (logical_extents < prev) + /* Convert to millimeters */ + unit_exponent += 1; + } else if (field->unit == 0x13) { /* If inches */ + /* Convert to millimeters */ + prev = physical_extents; + physical_extents *= 254; + if (physical_extents < prev) return 0; - unit_exponent += 2; - } else if (field->unit != 0x13) { /* If not inches */ + unit_exponent -= 1; + } else { return 0; } } else if (code == ABS_RX || code == ABS_RY || code == ABS_RZ) { diff --git a/drivers/hid/hid-tmff.c b/drivers/hid/hid-tmff.c index 15434c814793..25be4e1461bd 100644 --- a/drivers/hid/hid-tmff.c +++ b/drivers/hid/hid-tmff.c @@ -256,6 +256,8 @@ static const struct hid_device_id tm_devices[] = { .driver_data = (unsigned long)ff_joystick }, { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb654), /* FGT Force Feedback Wheel */ .driver_data = (unsigned long)ff_joystick }, + { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb65a), /* F430 Force Feedback Wheel */ + .driver_data = (unsigned long)ff_joystick }, { } }; MODULE_DEVICE_TABLE(hid, tm_devices); diff --git a/drivers/hwmon/i5k_amb.c b/drivers/hwmon/i5k_amb.c index 937983407e2a..c4c40be0edbf 100644 --- a/drivers/hwmon/i5k_amb.c +++ b/drivers/hwmon/i5k_amb.c @@ -497,12 +497,14 @@ static unsigned long chipset_ids[] = { 0 }; +#ifdef MODULE static struct pci_device_id i5k_amb_ids[] __devinitdata = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5000_ERR) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR) }, { 0, } }; MODULE_DEVICE_TABLE(pci, i5k_amb_ids); 
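
/*
 * Editor's note: illustrative sketch, not part of the patch. The
 * hidinput_calc_abs_res() change above normalizes length units to
 * millimeters: centimeters gain one on the unit exponent (x10), while
 * inches scale the physical extent by 254 and drop the exponent by one
 * (x25.4 overall). Worked example for a hypothetical 6-inch-wide pad
 * reporting logical values 0..6000:
 */
#include <stdio.h>

int main(void)
{
        long logical  = 6000;   /* logical extent, in counts */
        long physical = 6;      /* physical extent, in inches */
        int unit_exponent = 0;

        /* inches -> millimeters: x254 with exponent - 1 == x25.4 */
        physical *= 254;
        unit_exponent -= 1;

        /* resolution in counts per millimeter, folding in the exponent */
        double res = (double)logical / (double)physical;
        while (unit_exponent < 0) {
                res *= 10.0;
                unit_exponent++;
        }

        printf("%.2f counts/mm\n", res);        /* ~39.37 */
        return 0;
}
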
+#endif static int __devinit i5k_amb_probe(struct platform_device *pdev) { diff --git a/drivers/hwmon/lis3lv02d_i2c.c b/drivers/hwmon/lis3lv02d_i2c.c index 9f4bae07f719..8853afce85ce 100644 --- a/drivers/hwmon/lis3lv02d_i2c.c +++ b/drivers/hwmon/lis3lv02d_i2c.c @@ -186,7 +186,7 @@ static int __devexit lis3lv02d_i2c_remove(struct i2c_client *client) return 0; } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int lis3lv02d_i2c_suspend(struct device *dev) { struct i2c_client *client = container_of(dev, struct i2c_client, dev); @@ -213,12 +213,9 @@ static int lis3lv02d_i2c_resume(struct device *dev) return 0; } -#else -#define lis3lv02d_i2c_suspend NULL -#define lis3lv02d_i2c_resume NULL -#define lis3lv02d_i2c_shutdown NULL -#endif +#endif /* CONFIG_PM_SLEEP */ +#ifdef CONFIG_PM_RUNTIME static int lis3_i2c_runtime_suspend(struct device *dev) { struct i2c_client *client = container_of(dev, struct i2c_client, dev); @@ -236,6 +233,7 @@ static int lis3_i2c_runtime_resume(struct device *dev) lis3lv02d_poweron(lis3); return 0; } +#endif /* CONFIG_PM_RUNTIME */ static const struct i2c_device_id lis3lv02d_id[] = { {"lis3lv02d", 0 }, diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig index b923074b2cbe..30f06e956bfb 100644 --- a/drivers/i2c/Kconfig +++ b/drivers/i2c/Kconfig @@ -75,8 +75,7 @@ config I2C_HELPER_AUTO In doubt, say Y. config I2C_SMBUS - tristate - prompt "SMBus-specific protocols" if !I2C_HELPER_AUTO + tristate "SMBus-specific protocols" if !I2C_HELPER_AUTO help Say Y here if you want support for SMBus extensions to the I2C specification. At the moment, the only supported extension is diff --git a/drivers/i2c/algos/Kconfig b/drivers/i2c/algos/Kconfig index 3998dd620a03..f1cfe7e5508b 100644 --- a/drivers/i2c/algos/Kconfig +++ b/drivers/i2c/algos/Kconfig @@ -3,7 +3,7 @@ # menu "I2C Algorithms" - depends on !I2C_HELPER_AUTO + visible if !I2C_HELPER_AUTO config I2C_ALGOBIT tristate "I2C bit-banging interfaces" @@ -15,15 +15,3 @@ config I2C_ALGOPCA tristate "I2C PCA 9564 interfaces" endmenu - -# In automatic configuration mode, we still have to define the -# symbols to avoid unmet dependencies. - -if I2C_HELPER_AUTO -config I2C_ALGOBIT - tristate -config I2C_ALGOPCF - tristate -config I2C_ALGOPCA - tristate -endif diff --git a/drivers/infiniband/core/ud_header.c b/drivers/infiniband/core/ud_header.c index bb7e19280821..9b737ff133e2 100644 --- a/drivers/infiniband/core/ud_header.c +++ b/drivers/infiniband/core/ud_header.c @@ -278,36 +278,6 @@ void ib_ud_header_init(int payload_bytes, EXPORT_SYMBOL(ib_ud_header_init); /** - * ib_lrh_header_pack - Pack LRH header struct into wire format - * @lrh:unpacked LRH header struct - * @buf:Buffer to pack into - * - * ib_lrh_header_pack() packs the LRH header structure @lrh into - * wire format in the buffer @buf. - */ -int ib_lrh_header_pack(struct ib_unpacked_lrh *lrh, void *buf) -{ - ib_pack(lrh_table, ARRAY_SIZE(lrh_table), lrh, buf); - return 0; -} -EXPORT_SYMBOL(ib_lrh_header_pack); - -/** - * ib_lrh_header_unpack - Unpack LRH structure from wire format - * @lrh:unpacked LRH header struct - * @buf:Buffer to pack into - * - * ib_lrh_header_unpack() unpacks the LRH header structure from - * wire format (in buf) into @lrh. 
- */ -int ib_lrh_header_unpack(void *buf, struct ib_unpacked_lrh *lrh) -{ - ib_unpack(lrh_table, ARRAY_SIZE(lrh_table), buf, lrh); - return 0; -} -EXPORT_SYMBOL(ib_lrh_header_unpack); - -/** * ib_ud_header_pack - Pack UD header struct into wire format * @header:UD header struct * @buf:Buffer to pack into diff --git a/drivers/infiniband/core/uverbs_marshall.c b/drivers/infiniband/core/uverbs_marshall.c index 5440da0e59b4..1b1146f87124 100644 --- a/drivers/infiniband/core/uverbs_marshall.c +++ b/drivers/infiniband/core/uverbs_marshall.c @@ -40,18 +40,21 @@ void ib_copy_ah_attr_to_user(struct ib_uverbs_ah_attr *dst, dst->grh.sgid_index = src->grh.sgid_index; dst->grh.hop_limit = src->grh.hop_limit; dst->grh.traffic_class = src->grh.traffic_class; + memset(&dst->grh.reserved, 0, sizeof(dst->grh.reserved)); dst->dlid = src->dlid; dst->sl = src->sl; dst->src_path_bits = src->src_path_bits; dst->static_rate = src->static_rate; dst->is_global = src->ah_flags & IB_AH_GRH ? 1 : 0; dst->port_num = src->port_num; + dst->reserved = 0; } EXPORT_SYMBOL(ib_copy_ah_attr_to_user); void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst, struct ib_qp_attr *src) { + dst->qp_state = src->qp_state; dst->cur_qp_state = src->cur_qp_state; dst->path_mtu = src->path_mtu; dst->path_mig_state = src->path_mig_state; @@ -83,6 +86,7 @@ void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst, dst->rnr_retry = src->rnr_retry; dst->alt_port_num = src->alt_port_num; dst->alt_timeout = src->alt_timeout; + memset(dst->reserved, 0, sizeof(dst->reserved)); } EXPORT_SYMBOL(ib_copy_qp_attr_to_user); diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index bf3e20cd0298..30e09caf0da9 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -219,7 +219,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port, struct net_device *ndev; enum ib_mtu tmp; - props->active_width = IB_WIDTH_4X; + props->active_width = IB_WIDTH_1X; props->active_speed = 4; props->port_cap_flags = IB_PORT_CM_SUP; props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port]; @@ -242,7 +242,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port, tmp = iboe_get_mtu(ndev->mtu); props->active_mtu = tmp ? min(props->max_mtu, tmp) : IB_MTU_256; - props->state = netif_running(ndev) && netif_oper_up(ndev) ? + props->state = (netif_running(ndev) && netif_carrier_ok(ndev)) ? IB_PORT_ACTIVE : IB_PORT_DOWN; props->phys_state = state_to_phys_state(props->state); diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index 9a7794ac34c1..2001f20a4361 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -1816,6 +1816,11 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, ctrl->fence_size = (wr->send_flags & IB_SEND_FENCE ? MLX4_WQE_CTRL_FENCE : 0) | size; + if (be16_to_cpu(vlan) < 0x1000) { + ctrl->ins_vlan = 1 << 6; + ctrl->vlan_tag = vlan; + } + /* * Make sure descriptor is fully written before * setting ownership bit (because HW can start @@ -1831,11 +1836,6 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, ctrl->owner_opcode = mlx4_ib_opcode[wr->opcode] | (ind & qp->sq.wqe_cnt ? 
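
/*
 * Editor's note: illustrative sketch, not part of the patch. The
 * uverbs_marshall changes above zero reserved fields before the structure
 * is copied out to userspace; otherwise stale kernel memory could leak
 * through the untouched bytes. Minimal shape of the fix, with a made-up
 * struct:
 */
#include <stdint.h>
#include <string.h>

struct user_visible {
        uint32_t value;
        uint8_t reserved[4];    /* would otherwise carry stale bytes out */
};

static void fill_for_user(struct user_visible *dst, uint32_t value)
{
        dst->value = value;
        memset(dst->reserved, 0, sizeof(dst->reserved)); /* no info leak */
}

int main(void)
{
        struct user_visible out;

        fill_for_user(&out, 42);
        return out.reserved[0]; /* always 0 */
}
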
cpu_to_be32(1 << 31) : 0) | blh; - if (be16_to_cpu(vlan) < 0x1000) { - ctrl->ins_vlan = 1 << 6; - ctrl->vlan_tag = vlan; - } - stamp = ind + qp->sq_spare_wqes; ind += DIV_ROUND_UP(size * 16, 1U << qp->sq.wqe_shift); diff --git a/drivers/input/joystick/turbografx.c b/drivers/input/joystick/turbografx.c index d53b9e900234..27b6a3ce18ca 100644 --- a/drivers/input/joystick/turbografx.c +++ b/drivers/input/joystick/turbografx.c @@ -245,6 +245,7 @@ static struct tgfx __init *tgfx_probe(int parport, int *n_buttons, int n_devs) goto err_free_tgfx; } + parport_put_port(pp); return tgfx; err_free_dev: diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig index b8c51b9781db..3a87f3ba5f75 100644 --- a/drivers/input/keyboard/Kconfig +++ b/drivers/input/keyboard/Kconfig @@ -179,6 +179,22 @@ config KEYBOARD_GPIO To compile this driver as a module, choose M here: the module will be called gpio_keys. +config KEYBOARD_GPIO_POLLED + tristate "Polled GPIO buttons" + depends on GENERIC_GPIO + select INPUT_POLLDEV + help + This driver implements support for buttons connected + to GPIO pins that are not capable of generating interrupts. + + Say Y here if your device has buttons connected + directly to such GPIO pins. Your board-specific + setup logic must also provide a platform device, + with configuration data saying which GPIOs are used. + + To compile this driver as a module, choose M here: the + module will be called gpio_keys_polled. + config KEYBOARD_TCA6416 tristate "TCA6416 Keypad Support" depends on I2C diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile index a34452e8ebe2..622de73a445d 100644 --- a/drivers/input/keyboard/Makefile +++ b/drivers/input/keyboard/Makefile @@ -14,6 +14,7 @@ obj-$(CONFIG_KEYBOARD_BFIN) += bf54x-keys.o obj-$(CONFIG_KEYBOARD_DAVINCI) += davinci_keyscan.o obj-$(CONFIG_KEYBOARD_EP93XX) += ep93xx_keypad.o obj-$(CONFIG_KEYBOARD_GPIO) += gpio_keys.o +obj-$(CONFIG_KEYBOARD_GPIO_POLLED) += gpio_keys_polled.o obj-$(CONFIG_KEYBOARD_TCA6416) += tca6416-keypad.o obj-$(CONFIG_KEYBOARD_HIL) += hil_kbd.o obj-$(CONFIG_KEYBOARD_HIL_OLD) += hilkbd.o diff --git a/drivers/input/keyboard/gpio_keys_polled.c b/drivers/input/keyboard/gpio_keys_polled.c new file mode 100644 index 000000000000..4c17aff20657 --- /dev/null +++ b/drivers/input/keyboard/gpio_keys_polled.c @@ -0,0 +1,261 @@ +/* + * Driver for buttons on GPIO lines not capable of generating interrupts + * + * Copyright (C) 2007-2010 Gabor Juhos <juhosg@openwrt.org> + * Copyright (C) 2010 Nuno Goncalves <nunojpg@gmail.com> + * + * This file was based on: /drivers/input/misc/cobalt_btns.c + * Copyright (C) 2007 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp> + * + * also was based on: /drivers/input/keyboard/gpio_keys.c + * Copyright 2005 Phil Blundell + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/input.h> +#include <linux/input-polldev.h> +#include <linux/ioport.h> +#include <linux/platform_device.h> +#include <linux/gpio.h> +#include <linux/gpio_keys.h> + +#define DRV_NAME "gpio-keys-polled" + +struct gpio_keys_button_data { + int last_state; + int count; + int threshold; + int can_sleep; +}; + +struct gpio_keys_polled_dev { + struct input_polled_dev *poll_dev; + struct device *dev; + struct gpio_keys_platform_data *pdata; + struct gpio_keys_button_data data[0]; +}; + +static void gpio_keys_polled_check_state(struct input_dev *input, + struct gpio_keys_button *button, + struct gpio_keys_button_data *bdata) +{ + int state; + + if (bdata->can_sleep) + state = !!gpio_get_value_cansleep(button->gpio); + else + state = !!gpio_get_value(button->gpio); + + if (state != bdata->last_state) { + unsigned int type = button->type ?: EV_KEY; + + input_event(input, type, button->code, + !!(state ^ button->active_low)); + input_sync(input); + bdata->count = 0; + bdata->last_state = state; + } +} + +static void gpio_keys_polled_poll(struct input_polled_dev *dev) +{ + struct gpio_keys_polled_dev *bdev = dev->private; + struct gpio_keys_platform_data *pdata = bdev->pdata; + struct input_dev *input = dev->input; + int i; + + for (i = 0; i < bdev->pdata->nbuttons; i++) { + struct gpio_keys_button_data *bdata = &bdev->data[i]; + + if (bdata->count < bdata->threshold) + bdata->count++; + else + gpio_keys_polled_check_state(input, &pdata->buttons[i], + bdata); + } +} + +static void gpio_keys_polled_open(struct input_polled_dev *dev) +{ + struct gpio_keys_polled_dev *bdev = dev->private; + struct gpio_keys_platform_data *pdata = bdev->pdata; + + if (pdata->enable) + pdata->enable(bdev->dev); +} + +static void gpio_keys_polled_close(struct input_polled_dev *dev) +{ + struct gpio_keys_polled_dev *bdev = dev->private; + struct gpio_keys_platform_data *pdata = bdev->pdata; + + if (pdata->disable) + pdata->disable(bdev->dev); +} + +static int __devinit gpio_keys_polled_probe(struct platform_device *pdev) +{ + struct gpio_keys_platform_data *pdata = pdev->dev.platform_data; + struct device *dev = &pdev->dev; + struct gpio_keys_polled_dev *bdev; + struct input_polled_dev *poll_dev; + struct input_dev *input; + int error; + int i; + + if (!pdata || !pdata->poll_interval) + return -EINVAL; + + bdev = kzalloc(sizeof(struct gpio_keys_polled_dev) + + pdata->nbuttons * sizeof(struct gpio_keys_button_data), + GFP_KERNEL); + if (!bdev) { + dev_err(dev, "no memory for private data\n"); + return -ENOMEM; + } + + poll_dev = input_allocate_polled_device(); + if (!poll_dev) { + dev_err(dev, "no memory for polled device\n"); + error = -ENOMEM; + goto err_free_bdev; + } + + poll_dev->private = bdev; + poll_dev->poll = gpio_keys_polled_poll; + poll_dev->poll_interval = pdata->poll_interval; + poll_dev->open = gpio_keys_polled_open; + poll_dev->close = gpio_keys_polled_close; + + input = poll_dev->input; + + input->evbit[0] = BIT(EV_KEY); + input->name = pdev->name; + input->phys = DRV_NAME"/input0"; + input->dev.parent = &pdev->dev; + + input->id.bustype = BUS_HOST; + input->id.vendor = 0x0001; + input->id.product = 0x0001; + input->id.version = 0x0100; + + for (i = 0; i < pdata->nbuttons; i++) { + struct gpio_keys_button *button = &pdata->buttons[i]; + struct gpio_keys_button_data *bdata = &bdev->data[i]; + unsigned int gpio = button->gpio; + unsigned int type = button->type ?: EV_KEY; + + if 
(button->wakeup) { + dev_err(dev, DRV_NAME " does not support wakeup\n"); + error = -EINVAL; + goto err_free_gpio; + } + + error = gpio_request(gpio, + button->desc ? button->desc : DRV_NAME); + if (error) { + dev_err(dev, "unable to claim gpio %u, err=%d\n", + gpio, error); + goto err_free_gpio; + } + + error = gpio_direction_input(gpio); + if (error) { + dev_err(dev, + "unable to set direction on gpio %u, err=%d\n", + gpio, error); + goto err_free_gpio; + } + + bdata->can_sleep = gpio_cansleep(gpio); + bdata->last_state = -1; + bdata->threshold = DIV_ROUND_UP(button->debounce_interval, + pdata->poll_interval); + + input_set_capability(input, type, button->code); + } + + bdev->poll_dev = poll_dev; + bdev->dev = dev; + bdev->pdata = pdata; + platform_set_drvdata(pdev, bdev); + + error = input_register_polled_device(poll_dev); + if (error) { + dev_err(dev, "unable to register polled device, err=%d\n", + error); + goto err_free_gpio; + } + + /* report initial state of the buttons */ + for (i = 0; i < pdata->nbuttons; i++) + gpio_keys_polled_check_state(input, &pdata->buttons[i], + &bdev->data[i]); + + return 0; + +err_free_gpio: + while (--i >= 0) + gpio_free(pdata->buttons[i].gpio); + + input_free_polled_device(poll_dev); + +err_free_bdev: + kfree(bdev); + + platform_set_drvdata(pdev, NULL); + return error; +} + +static int __devexit gpio_keys_polled_remove(struct platform_device *pdev) +{ + struct gpio_keys_polled_dev *bdev = platform_get_drvdata(pdev); + struct gpio_keys_platform_data *pdata = bdev->pdata; + int i; + + input_unregister_polled_device(bdev->poll_dev); + + for (i = 0; i < pdata->nbuttons; i++) + gpio_free(pdata->buttons[i].gpio); + + input_free_polled_device(bdev->poll_dev); + + kfree(bdev); + platform_set_drvdata(pdev, NULL); + + return 0; +} + +static struct platform_driver gpio_keys_polled_driver = { + .probe = gpio_keys_polled_probe, + .remove = __devexit_p(gpio_keys_polled_remove), + .driver = { + .name = DRV_NAME, + .owner = THIS_MODULE, + }, +}; + +static int __init gpio_keys_polled_init(void) +{ + return platform_driver_register(&gpio_keys_polled_driver); +} + +static void __exit gpio_keys_polled_exit(void) +{ + platform_driver_unregister(&gpio_keys_polled_driver); +} + +module_init(gpio_keys_polled_init); +module_exit(gpio_keys_polled_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>"); +MODULE_DESCRIPTION("Polled GPIO Buttons driver"); +MODULE_ALIAS("platform:" DRV_NAME); diff --git a/drivers/input/mouse/synaptics.h b/drivers/input/mouse/synaptics.h index 613a3652f98f..0aefaa885871 100644 --- a/drivers/input/mouse/synaptics.h +++ b/drivers/input/mouse/synaptics.h @@ -51,7 +51,8 @@ #define SYN_EXT_CAP_REQUESTS(c) (((c) & 0x700000) >> 20) #define SYN_CAP_MULTI_BUTTON_NO(ec) (((ec) & 0x00f000) >> 12) #define SYN_CAP_PRODUCT_ID(ec) (((ec) & 0xff0000) >> 16) -#define SYN_CAP_CLICKPAD(ex0c) ((ex0c) & 0x100100) +#define SYN_CAP_CLICKPAD(ex0c) ((ex0c) & 0x100000) /* 1-button ClickPad */ +#define SYN_CAP_CLICKPAD2BTN(ex0c) ((ex0c) & 0x000100) /* 2-button ClickPad */ #define SYN_CAP_MAX_DIMENSIONS(ex0c) ((ex0c) & 0x020000) /* synaptics modes query bits */ diff --git a/drivers/input/serio/gscps2.c b/drivers/input/serio/gscps2.c index 3c287dd879d3..4225f5d6b15f 100644 --- a/drivers/input/serio/gscps2.c +++ b/drivers/input/serio/gscps2.c @@ -358,7 +358,7 @@ static int __devinit gscps2_probe(struct parisc_device *dev) gscps2_reset(ps2port); ps2port->id = readb(ps2port->addr + GSC_ID) & 0x0f; - snprintf(serio->name, sizeof(serio->name), "GSC 
PS/2 %s", + snprintf(serio->name, sizeof(serio->name), "gsc-ps2-%s", (ps2port->id == GSC_ID_KEYBOARD) ? "keyboard" : "mouse"); strlcpy(serio->phys, dev_name(&dev->dev), sizeof(serio->phys)); serio->id.type = SERIO_8042; diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c index b3252ef1e279..4852b440960a 100644 --- a/drivers/input/tablet/wacom_wac.c +++ b/drivers/input/tablet/wacom_wac.c @@ -1436,6 +1436,12 @@ static struct wacom_features wacom_features_0xD2 = { "Wacom Bamboo Craft", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023, 63, BAMBOO_PT }; static struct wacom_features wacom_features_0xD3 = { "Wacom Bamboo 2FG 6x8", WACOM_PKGLEN_BBFUN, 21648, 13530, 1023, 63, BAMBOO_PT }; +static struct wacom_features wacom_features_0xD8 = + { "Wacom Bamboo Comic 2FG", WACOM_PKGLEN_BBFUN, 21648, 13530, 1023, 63, BAMBOO_PT }; +static struct wacom_features wacom_features_0xDA = + { "Wacom Bamboo 2FG 4x5 SE", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023, 63, BAMBOO_PT }; +static struct wacom_features wacom_features_0xDB = + { "Wacom Bamboo 2FG 6x8 SE", WACOM_PKGLEN_BBFUN, 21648, 13530, 1023, 63, BAMBOO_PT }; #define USB_DEVICE_WACOM(prod) \ USB_DEVICE(USB_VENDOR_ID_WACOM, prod), \ @@ -1504,6 +1510,9 @@ const struct usb_device_id wacom_ids[] = { { USB_DEVICE_WACOM(0xD1) }, { USB_DEVICE_WACOM(0xD2) }, { USB_DEVICE_WACOM(0xD3) }, + { USB_DEVICE_WACOM(0xD8) }, + { USB_DEVICE_WACOM(0xDA) }, + { USB_DEVICE_WACOM(0xDB) }, { USB_DEVICE_WACOM(0xF0) }, { USB_DEVICE_WACOM(0xCC) }, { USB_DEVICE_WACOM(0x90) }, diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c index f45f80f6d336..73fd6642b681 100644 --- a/drivers/input/touchscreen/usbtouchscreen.c +++ b/drivers/input/touchscreen/usbtouchscreen.c @@ -178,6 +178,7 @@ static const struct usb_device_id usbtouch_devices[] = { #ifdef CONFIG_TOUCHSCREEN_USB_ITM {USB_DEVICE(0x0403, 0xf9e9), .driver_info = DEVTYPE_ITM}, + {USB_DEVICE(0x16e3, 0xf9e9), .driver_info = DEVTYPE_ITM}, #endif #ifdef CONFIG_TOUCHSCREEN_USB_ETURBO diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c index 2e847a90bad0..f2b5bab5e6a1 100644 --- a/drivers/isdn/icn/icn.c +++ b/drivers/isdn/icn/icn.c @@ -1627,7 +1627,7 @@ __setup("icn=", icn_setup); static int __init icn_init(void) { char *p; - char rev[10]; + char rev[20]; memset(&dev, 0, sizeof(icn_dev)); dev.memaddr = (membase & 0x0ffc000); @@ -1637,9 +1637,10 @@ static int __init icn_init(void) spin_lock_init(&dev.devlock); if ((p = strchr(revision, ':'))) { - strcpy(rev, p + 1); + strncpy(rev, p + 1, 20); p = strchr(rev, '$'); - *p = 0; + if (p) + *p = 0; } else strcpy(rev, " ??? "); printk(KERN_NOTICE "ICN-ISDN-driver Rev%smem=0x%08lx\n", rev, diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig index 77b8fd20cd90..6f190f4cdbc0 100644 --- a/drivers/leds/Kconfig +++ b/drivers/leds/Kconfig @@ -7,20 +7,20 @@ menuconfig NEW_LEDS This is not related to standard keyboard LEDs which are controlled via the input system. -if NEW_LEDS - config LEDS_CLASS bool "LED Class Support" + depends on NEW_LEDS help This option enables the led sysfs class in /sys/class/leds. You'll need this to do anything useful with LEDs. If unsure, say N. 
-if LEDS_CLASS +if NEW_LEDS comment "LED drivers" config LEDS_88PM860X tristate "LED Support for Marvell 88PM860x PMIC" + depends on LEDS_CLASS depends on MFD_88PM860X help This option enables support for on-chip LED drivers found on Marvell @@ -28,6 +28,7 @@ config LEDS_88PM860X config LEDS_ATMEL_PWM tristate "LED Support using Atmel PWM outputs" + depends on LEDS_CLASS depends on ATMEL_PWM help This option enables support for LEDs driven using outputs @@ -35,6 +36,7 @@ config LEDS_ATMEL_PWM config LEDS_LOCOMO tristate "LED Support for Locomo device" + depends on LEDS_CLASS depends on SHARP_LOCOMO help This option enables support for the LEDs on Sharp Locomo. @@ -42,6 +44,7 @@ config LEDS_LOCOMO config LEDS_MIKROTIK_RB532 tristate "LED Support for Mikrotik Routerboard 532" + depends on LEDS_CLASS depends on MIKROTIK_RB532 help This option enables support for the so called "User LED" of @@ -49,6 +52,7 @@ config LEDS_MIKROTIK_RB532 config LEDS_S3C24XX tristate "LED Support for Samsung S3C24XX GPIO LEDs" + depends on LEDS_CLASS depends on ARCH_S3C2410 help This option enables support for LEDs connected to GPIO lines @@ -56,12 +60,14 @@ config LEDS_S3C24XX config LEDS_AMS_DELTA tristate "LED Support for the Amstrad Delta (E3)" + depends on LEDS_CLASS depends on MACH_AMS_DELTA help This option enables support for the LEDs on Amstrad Delta (E3). config LEDS_NET48XX tristate "LED Support for Soekris net48xx series Error LED" + depends on LEDS_CLASS depends on SCx200_GPIO help This option enables support for the Soekris net4801 and net4826 error @@ -79,18 +85,21 @@ config LEDS_NET5501 config LEDS_FSG tristate "LED Support for the Freecom FSG-3" + depends on LEDS_CLASS depends on MACH_FSG help This option enables support for the LEDs on the Freecom FSG-3. config LEDS_WRAP tristate "LED Support for the WRAP series LEDs" + depends on LEDS_CLASS depends on SCx200_GPIO help This option enables support for the PCEngines WRAP programmable LEDs. config LEDS_ALIX2 tristate "LED Support for ALIX.2 and ALIX.3 series" + depends on LEDS_CLASS depends on X86 && !GPIO_CS5535 && !CS5535_GPIO help This option enables support for the PCEngines ALIX.2 and ALIX.3 LEDs. @@ -98,12 +107,14 @@ config LEDS_ALIX2 config LEDS_H1940 tristate "LED Support for iPAQ H1940 device" + depends on LEDS_CLASS depends on ARCH_H1940 help This option enables support for the LEDs on the h1940. config LEDS_COBALT_QUBE tristate "LED Support for the Cobalt Qube series front LED" + depends on LEDS_CLASS depends on MIPS_COBALT help This option enables support for the front LED on Cobalt Qube series @@ -117,6 +128,7 @@ config LEDS_COBALT_RAQ config LEDS_SUNFIRE tristate "LED support for SunFire servers." + depends on LEDS_CLASS depends on SPARC64 select LEDS_TRIGGERS help @@ -125,6 +137,7 @@ config LEDS_SUNFIRE config LEDS_HP6XX tristate "LED Support for the HP Jornada 6xx" + depends on LEDS_CLASS depends on SH_HP6XX help This option enables LED support for the handheld @@ -132,6 +145,7 @@ config LEDS_HP6XX config LEDS_PCA9532 tristate "LED driver for PCA9532 dimmer" + depends on LEDS_CLASS depends on I2C && INPUT && EXPERIMENTAL help This option enables support for NXP pca9532 @@ -140,6 +154,7 @@ config LEDS_PCA9532 config LEDS_GPIO tristate "LED Support for GPIO connected LEDs" + depends on LEDS_CLASS depends on GENERIC_GPIO help This option enables support for the LEDs connected to GPIO @@ -167,6 +182,7 @@ config LEDS_GPIO_OF config LEDS_LP3944 tristate "LED Support for N.S. 
LP3944 (Fun Light) I2C chip" + depends on LEDS_CLASS depends on I2C help This option enables support for LEDs connected to the National @@ -196,6 +212,7 @@ config LEDS_LP5523 config LEDS_CLEVO_MAIL tristate "Mail LED on Clevo notebook" + depends on LEDS_CLASS depends on X86 && SERIO_I8042 && DMI help This driver makes the mail LED accessible from userspace @@ -226,6 +243,7 @@ config LEDS_CLEVO_MAIL config LEDS_PCA955X tristate "LED Support for PCA955x I2C chips" + depends on LEDS_CLASS depends on I2C help This option enables support for LEDs connected to PCA955x @@ -234,6 +252,7 @@ config LEDS_PCA955X config LEDS_WM831X_STATUS tristate "LED support for status LEDs on WM831x PMICs" + depends on LEDS_CLASS depends on MFD_WM831X help This option enables support for the status LEDs of the WM831x @@ -241,6 +260,7 @@ config LEDS_WM831X_STATUS config LEDS_WM8350 tristate "LED Support for WM8350 AudioPlus PMIC" + depends on LEDS_CLASS depends on MFD_WM8350 help This option enables support for LEDs driven by the Wolfson @@ -248,6 +268,7 @@ config LEDS_WM8350 config LEDS_DA903X tristate "LED Support for DA9030/DA9034 PMIC" + depends on LEDS_CLASS depends on PMIC_DA903X help This option enables support for on-chip LED drivers found @@ -255,6 +276,7 @@ config LEDS_DA903X config LEDS_DAC124S085 tristate "LED Support for DAC124S085 SPI DAC" + depends on LEDS_CLASS depends on SPI help This option enables support for DAC124S085 SPI DAC from NatSemi, @@ -262,18 +284,21 @@ config LEDS_DAC124S085 config LEDS_PWM tristate "PWM driven LED Support" + depends on LEDS_CLASS depends on HAVE_PWM help This option enables support for pwm driven LEDs config LEDS_REGULATOR tristate "REGULATOR driven LED support" + depends on LEDS_CLASS depends on REGULATOR help This option enables support for regulator driven LEDs. config LEDS_BD2802 tristate "LED driver for BD2802 RGB LED" + depends on LEDS_CLASS depends on I2C help This option enables support for BD2802GU RGB LED driver chips @@ -281,6 +306,7 @@ config LEDS_BD2802 config LEDS_INTEL_SS4200 tristate "LED driver for Intel NAS SS4200 series" + depends on LEDS_CLASS depends on PCI && DMI help This option enables support for the Intel SS4200 series of @@ -290,6 +316,7 @@ config LEDS_INTEL_SS4200 config LEDS_LT3593 tristate "LED driver for LT3593 controllers" + depends on LEDS_CLASS depends on GENERIC_GPIO help This option enables support for LEDs driven by a Linear Technology @@ -298,6 +325,7 @@ config LEDS_LT3593 config LEDS_ADP5520 tristate "LED Support for ADP5520/ADP5501 PMIC" + depends on LEDS_CLASS depends on PMIC_ADP5520 help This option enables support for on-chip LED drivers found @@ -308,6 +336,7 @@ config LEDS_ADP5520 config LEDS_DELL_NETBOOKS tristate "External LED on Dell Business Netbooks" + depends on LEDS_CLASS depends on X86 && ACPI_WMI help This adds support for the Latitude 2100 and similar @@ -315,6 +344,7 @@ config LEDS_DELL_NETBOOKS config LEDS_MC13783 tristate "LED Support for MC13783 PMIC" + depends on LEDS_CLASS depends on MFD_MC13783 help This option enable support for on-chip LED drivers found @@ -322,6 +352,7 @@ config LEDS_MC13783 config LEDS_NS2 tristate "LED support for Network Space v2 GPIO LEDs" + depends on LEDS_CLASS depends on MACH_NETSPACE_V2 || MACH_INETSPACE_V2 || MACH_NETSPACE_MAX_V2 || D2NET_V2 default y help @@ -340,17 +371,17 @@ config LEDS_NETXBIG config LEDS_TRIGGERS bool "LED Trigger support" + depends on LEDS_CLASS help This option enables trigger support for the leds class. 
These triggers allow kernel events to drive the LEDs and can be configured via sysfs. If unsure, say Y. -if LEDS_TRIGGERS - comment "LED Triggers" config LEDS_TRIGGER_TIMER tristate "LED Timer Trigger" + depends on LEDS_TRIGGERS help This allows LEDs to be controlled by a programmable timer via sysfs. Some LED hardware can be programmed to start @@ -362,12 +393,14 @@ config LEDS_TRIGGER_TIMER config LEDS_TRIGGER_IDE_DISK bool "LED IDE Disk Trigger" depends on IDE_GD_ATA + depends on LEDS_TRIGGERS help This allows LEDs to be controlled by IDE disk activity. If unsure, say Y. config LEDS_TRIGGER_HEARTBEAT tristate "LED Heartbeat Trigger" + depends on LEDS_TRIGGERS help This allows LEDs to be controlled by a CPU load average. The flash frequency is a hyperbolic function of the 1-minute @@ -376,6 +409,7 @@ config LEDS_TRIGGER_HEARTBEAT config LEDS_TRIGGER_BACKLIGHT tristate "LED backlight Trigger" + depends on LEDS_TRIGGERS help This allows LEDs to be controlled as a backlight device: they turn off and on when the display is blanked and unblanked. @@ -384,6 +418,7 @@ config LEDS_TRIGGER_BACKLIGHT config LEDS_TRIGGER_GPIO tristate "LED GPIO Trigger" + depends on LEDS_TRIGGERS depends on GPIOLIB help This allows LEDs to be controlled by gpio events. It's good @@ -396,6 +431,7 @@ config LEDS_TRIGGER_GPIO config LEDS_TRIGGER_DEFAULT_ON tristate "LED Default ON Trigger" + depends on LEDS_TRIGGERS help This allows LEDs to be initialised in the ON state. If unsure, say Y. @@ -403,8 +439,4 @@ config LEDS_TRIGGER_DEFAULT_ON comment "iptables trigger is under Netfilter config (LED target)" depends on LEDS_TRIGGERS -endif # LEDS_TRIGGERS - -endif # LEDS_CLASS - endif # NEW_LEDS diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c index 3782f31f06d2..33facd0c45d1 100644 --- a/drivers/leds/leds-lp5521.c +++ b/drivers/leds/leds-lp5521.c @@ -125,11 +125,22 @@ struct lp5521_chip { u8 num_leds; }; -#define cdev_to_led(c) container_of(c, struct lp5521_led, cdev) -#define engine_to_lp5521(eng) container_of((eng), struct lp5521_chip, \ - engines[(eng)->id - 1]) -#define led_to_lp5521(led) container_of((led), struct lp5521_chip, \ - leds[(led)->id]) +static inline struct lp5521_led *cdev_to_led(struct led_classdev *cdev) +{ + return container_of(cdev, struct lp5521_led, cdev); +} + +static inline struct lp5521_chip *engine_to_lp5521(struct lp5521_engine *engine) +{ + return container_of(engine, struct lp5521_chip, + engines[engine->id - 1]); +} + +static inline struct lp5521_chip *led_to_lp5521(struct lp5521_led *led) +{ + return container_of(led, struct lp5521_chip, + leds[led->id]); +} static void lp5521_led_brightness_work(struct work_struct *work); @@ -185,14 +196,17 @@ static int lp5521_load_program(struct lp5521_engine *eng, const u8 *pattern) /* move current engine to direct mode and remember the state */ ret = lp5521_set_engine_mode(eng, LP5521_CMD_DIRECT); - usleep_range(1000, 10000); + /* Mode change requires min 500 us delay. 1 - 2 ms with margin */ + usleep_range(1000, 2000); ret |= lp5521_read(client, LP5521_REG_OP_MODE, &mode); /* For loading, all the engines to load mode */ lp5521_write(client, LP5521_REG_OP_MODE, LP5521_CMD_DIRECT); - usleep_range(1000, 10000); + /* Mode change requires min 500 us delay. 1 - 2 ms with margin */ + usleep_range(1000, 2000); lp5521_write(client, LP5521_REG_OP_MODE, LP5521_CMD_LOAD); - usleep_range(1000, 10000); + /* Mode change requires min 500 us delay. 
1 - 2 ms with margin */ + usleep_range(1000, 2000); addr = LP5521_PROG_MEM_BASE + eng->prog_page * LP5521_PROG_MEM_SIZE; i2c_smbus_write_i2c_block_data(client, @@ -231,10 +245,6 @@ static int lp5521_configure(struct i2c_client *client, lp5521_init_engine(chip, attr_group); - lp5521_write(client, LP5521_REG_RESET, 0xff); - - usleep_range(10000, 20000); - /* Set all PWMs to direct control mode */ ret = lp5521_write(client, LP5521_REG_OP_MODE, 0x3F); @@ -251,8 +261,8 @@ static int lp5521_configure(struct i2c_client *client, ret |= lp5521_write(client, LP5521_REG_ENABLE, LP5521_MASTER_ENABLE | LP5521_LOGARITHMIC_PWM | LP5521_EXEC_RUN); - /* enable takes 500us */ - usleep_range(500, 20000); + /* enable takes 500us. 1 - 2 ms leaves some margin */ + usleep_range(1000, 2000); return ret; } @@ -305,7 +315,8 @@ static int lp5521_detect(struct i2c_client *client) LP5521_MASTER_ENABLE | LP5521_LOGARITHMIC_PWM); if (ret) return ret; - usleep_range(1000, 10000); + /* enable takes 500us. 1 - 2 ms leaves some margin */ + usleep_range(1000, 2000); ret = lp5521_read(client, LP5521_REG_ENABLE, &buf); if (ret) return ret; @@ -693,11 +704,16 @@ static int lp5521_probe(struct i2c_client *client, if (pdata->enable) { pdata->enable(0); - usleep_range(1000, 10000); + usleep_range(1000, 2000); /* Keep enable down at least 1ms */ pdata->enable(1); - usleep_range(1000, 10000); /* Spec says min 500us */ + usleep_range(1000, 2000); /* 500us abs min. */ } + lp5521_write(client, LP5521_REG_RESET, 0xff); + usleep_range(10000, 20000); /* + * Exact value is not available. 10 - 20ms + * appears to be enough for reset. + */ ret = lp5521_detect(client); if (ret) { diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c index 1e11fcc08b28..0cc4ead2fd8b 100644 --- a/drivers/leds/leds-lp5523.c +++ b/drivers/leds/leds-lp5523.c @@ -134,15 +134,18 @@ struct lp5523_chip { u8 num_leds; }; -#define cdev_to_led(c) container_of(c, struct lp5523_led, cdev) +static inline struct lp5523_led *cdev_to_led(struct led_classdev *cdev) +{ + return container_of(cdev, struct lp5523_led, cdev); +} -static struct lp5523_chip *engine_to_lp5523(struct lp5523_engine *engine) +static inline struct lp5523_chip *engine_to_lp5523(struct lp5523_engine *engine) { return container_of(engine, struct lp5523_chip, engines[engine->id - 1]); } -static struct lp5523_chip *led_to_lp5523(struct lp5523_led *led) +static inline struct lp5523_chip *led_to_lp5523(struct lp5523_led *led) { return container_of(led, struct lp5523_chip, leds[led->id]); @@ -200,13 +203,9 @@ static int lp5523_configure(struct i2c_client *client) { 0x9c, 0x50, 0x9c, 0xd0, 0x9d, 0x80, 0xd8, 0x00, 0}, }; - lp5523_write(client, LP5523_REG_RESET, 0xff); - - usleep_range(10000, 100000); - ret |= lp5523_write(client, LP5523_REG_ENABLE, LP5523_ENABLE); - /* Chip startup time after reset is 500 us */ - usleep_range(1000, 10000); + /* Chip startup time is 500 us, 1 - 2 ms gives some margin */ + usleep_range(1000, 2000); ret |= lp5523_write(client, LP5523_REG_CONFIG, LP5523_AUTO_INC | LP5523_PWR_SAVE | @@ -243,8 +242,8 @@ static int lp5523_configure(struct i2c_client *client) return -1; } - /* Wait 3ms and check the engine status */ - usleep_range(3000, 20000); + /* Let the programs run for couple of ms and check the engine status */ + usleep_range(3000, 6000); lp5523_read(client, LP5523_REG_STATUS, &status); status &= LP5523_ENG_STATUS_MASK; @@ -449,10 +448,10 @@ static ssize_t lp5523_selftest(struct device *dev, /* Measure VDD (i.e. 
VBAT) first (channel 16 corresponds to VDD) */ lp5523_write(chip->client, LP5523_REG_LED_TEST_CTRL, LP5523_EN_LEDTEST | 16); - usleep_range(3000, 10000); + usleep_range(3000, 6000); /* ADC conversion time is typically 2.7 ms */ ret = lp5523_read(chip->client, LP5523_REG_STATUS, &status); if (!(status & LP5523_LEDTEST_DONE)) - usleep_range(3000, 10000); + usleep_range(3000, 6000); /* Was not ready. Wait little bit */ ret |= lp5523_read(chip->client, LP5523_REG_LED_TEST_ADC, &vdd); vdd--; /* There may be some fluctuation in measurement */ @@ -468,16 +467,16 @@ static ssize_t lp5523_selftest(struct device *dev, chip->pdata->led_config[i].led_current); lp5523_write(chip->client, LP5523_REG_LED_PWM_BASE + i, 0xff); - /* let current stabilize 2ms before measurements start */ - usleep_range(2000, 10000); + /* let current stabilize 2 - 4ms before measurements start */ + usleep_range(2000, 4000); lp5523_write(chip->client, LP5523_REG_LED_TEST_CTRL, LP5523_EN_LEDTEST | i); - /* ledtest takes 2.7ms */ - usleep_range(3000, 10000); + /* ADC conversion time is 2.7 ms typically */ + usleep_range(3000, 6000); ret = lp5523_read(chip->client, LP5523_REG_STATUS, &status); if (!(status & LP5523_LEDTEST_DONE)) - usleep_range(3000, 10000); + usleep_range(3000, 6000);/* Was not ready. Wait. */ ret |= lp5523_read(chip->client, LP5523_REG_LED_TEST_ADC, &adc); if (adc >= vdd || adc < LP5523_ADC_SHORTCIRC_LIM) @@ -930,11 +929,16 @@ static int lp5523_probe(struct i2c_client *client, if (pdata->enable) { pdata->enable(0); - usleep_range(1000, 10000); + usleep_range(1000, 2000); /* Keep enable down at least 1ms */ pdata->enable(1); - usleep_range(1000, 10000); /* Spec says min 500us */ + usleep_range(1000, 2000); /* 500us abs min. */ } + lp5523_write(client, LP5523_REG_RESET, 0xff); + usleep_range(10000, 20000); /* + * Exact value is not available. 10 - 20ms + * appears to be enough for reset. 
+ */ ret = lp5523_detect(client); if (ret) goto fail2; diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c index a688293abd0b..614ebebaaa28 100644 --- a/drivers/leds/leds-ss4200.c +++ b/drivers/leds/leds-ss4200.c @@ -102,6 +102,7 @@ static struct dmi_system_id __initdata nas_led_whitelist[] = { DMI_MATCH(DMI_PRODUCT_VERSION, "1.00.00") } }, + {} }; /* diff --git a/drivers/macintosh/Kconfig b/drivers/macintosh/Kconfig index 3d7355ff7308..fa51af11c6f1 100644 --- a/drivers/macintosh/Kconfig +++ b/drivers/macintosh/Kconfig @@ -102,6 +102,7 @@ config ADB_PMU_LED config ADB_PMU_LED_IDE bool "Use front LED as IDE LED by default" depends on ADB_PMU_LED + depends on LEDS_CLASS select LEDS_TRIGGERS select LEDS_TRIGGER_IDE_DISK help diff --git a/drivers/md/md.c b/drivers/md/md.c index 324a3663fcda..84c46a161927 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -1337,7 +1337,7 @@ super_90_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors) md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, rdev->sb_page); md_super_wait(rdev->mddev); - return num_sectors / 2; /* kB for sysfs */ + return num_sectors; } @@ -1704,7 +1704,7 @@ super_1_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors) md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, rdev->sb_page); md_super_wait(rdev->mddev); - return num_sectors / 2; /* kB for sysfs */ + return num_sectors; } static struct super_type super_types[] = { @@ -4338,6 +4338,8 @@ static int md_alloc(dev_t dev, char *name) if (mddev->kobj.sd && sysfs_create_group(&mddev->kobj, &md_bitmap_group)) printk(KERN_DEBUG "pointless warning\n"); + + blk_queue_flush(mddev->queue, REQ_FLUSH | REQ_FUA); abort: mutex_unlock(&disks_mutex); if (!error && mddev->kobj.sd) { diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 45f8324196ec..845cf95b612c 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1161,6 +1161,7 @@ static int raid1_remove_disk(mddev_t *mddev, int number) * is not possible. */ if (!test_bit(Faulty, &rdev->flags) && + !mddev->recovery_disabled && mddev->degraded < conf->raid_disks) { err = -EBUSY; goto abort; diff --git a/drivers/media/common/tuners/Kconfig b/drivers/media/common/tuners/Kconfig index 2385e6cca635..78b089526e02 100644 --- a/drivers/media/common/tuners/Kconfig +++ b/drivers/media/common/tuners/Kconfig @@ -31,7 +31,7 @@ config MEDIA_TUNER select MEDIA_TUNER_TDA9887 if !MEDIA_TUNER_CUSTOMISE select MEDIA_TUNER_MC44S803 if !MEDIA_TUNER_CUSTOMISE -menuconfig MEDIA_TUNER_CUSTOMISE +config MEDIA_TUNER_CUSTOMISE bool "Customize analog and hybrid tuner modules to build" depends on MEDIA_TUNER default y if EMBEDDED @@ -44,7 +44,8 @@ menuconfig MEDIA_TUNER_CUSTOMISE If unsure say N. -if MEDIA_TUNER_CUSTOMISE +menu "Customize TV tuners" + visible if MEDIA_TUNER_CUSTOMISE config MEDIA_TUNER_SIMPLE tristate "Simple tuner support" @@ -185,5 +186,4 @@ config MEDIA_TUNER_TDA18218 default m if MEDIA_TUNER_CUSTOMISE help NXP TDA18218 silicon tuner driver. - -endif # MEDIA_TUNER_CUSTOMISE +endmenu diff --git a/drivers/media/dvb/frontends/Kconfig b/drivers/media/dvb/frontends/Kconfig index e9062b08a485..96b27016670e 100644 --- a/drivers/media/dvb/frontends/Kconfig +++ b/drivers/media/dvb/frontends/Kconfig @@ -12,9 +12,8 @@ config DVB_FE_CUSTOMISE If unsure say N. 
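The tuner hunk just above and the frontend hunk that continues below both switch to the `visible if` menu attribute in place of an `if CUSTOMISE ... endif` wrapper: the menu and its symbols always exist, so `default m if ...` lines (and the `select ... if !...CUSTOMISE` lines in the MEDIA_TUNER block) still take effect, and only the prompts are hidden when customisation is disabled. A rough sketch of the shape, with a hypothetical entry:

    menu "Customise DVB Frontends"
    	visible if DVB_FE_CUSTOMISE

    # illustrative entry, not from this diff
    config DVB_FOO_FE
    	tristate "Foo DVB-S demodulator"
    	depends on DVB_CORE && I2C
    	default m if DVB_FE_CUSTOMISE

    endmenu

With DVB_FE_CUSTOMISE=n the prompts disappear but the defaults still apply — which is exactly what the removed `if`/`endif` pair could not express, since `if` adds a hard dependency to everything inside it.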
-if DVB_FE_CUSTOMISE - menu "Customise DVB Frontends" + visible if DVB_FE_CUSTOMISE comment "Multistandard (satellite) frontends" depends on DVB_CORE @@ -619,5 +618,3 @@ config DVB_DUMMY_FE tristate "Dummy frontend driver" default n endmenu - -endif diff --git a/drivers/media/radio/radio-si4713.c b/drivers/media/radio/radio-si4713.c index 6a435786b63d..03829e6818bd 100644 --- a/drivers/media/radio/radio-si4713.c +++ b/drivers/media/radio/radio-si4713.c @@ -291,7 +291,7 @@ static int radio_si4713_pdriver_probe(struct platform_device *pdev) goto unregister_v4l2_dev; } - sd = v4l2_i2c_new_subdev_board(&rsdev->v4l2_dev, adapter, NULL, + sd = v4l2_i2c_new_subdev_board(&rsdev->v4l2_dev, adapter, pdata->subdev_board_info, NULL); if (!sd) { dev_err(&pdev->dev, "Cannot get v4l2 subdevice\n"); diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig index ac16e815e275..6830d2848bd7 100644 --- a/drivers/media/video/Kconfig +++ b/drivers/media/video/Kconfig @@ -112,7 +112,7 @@ config VIDEO_IR_I2C # menu "Encoders/decoders and other helper chips" - depends on !VIDEO_HELPER_CHIPS_AUTO + visible if !VIDEO_HELPER_CHIPS_AUTO comment "Audio decoders" diff --git a/drivers/media/video/au0828/au0828-cards.c b/drivers/media/video/au0828/au0828-cards.c index 0453816d4ec3..01be89fa5c78 100644 --- a/drivers/media/video/au0828/au0828-cards.c +++ b/drivers/media/video/au0828/au0828-cards.c @@ -212,7 +212,7 @@ void au0828_card_setup(struct au0828_dev *dev) be abstracted out if we ever need to support a different demod) */ sd = v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap, - NULL, "au8522", 0x8e >> 1, NULL); + "au8522", 0x8e >> 1, NULL); if (sd == NULL) printk(KERN_ERR "analog subdev registration failed\n"); } @@ -221,7 +221,7 @@ void au0828_card_setup(struct au0828_dev *dev) if (dev->board.tuner_type != TUNER_ABSENT) { /* Load the tuner module, which does the attach */ sd = v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap, - NULL, "tuner", dev->board.tuner_addr, NULL); + "tuner", dev->board.tuner_addr, NULL); if (sd == NULL) printk(KERN_ERR "tuner subdev registration fail\n"); diff --git a/drivers/media/video/bt8xx/bttv-cards.c b/drivers/media/video/bt8xx/bttv-cards.c index 87d8b006ef77..49efcf660ba6 100644 --- a/drivers/media/video/bt8xx/bttv-cards.c +++ b/drivers/media/video/bt8xx/bttv-cards.c @@ -3529,7 +3529,7 @@ void __devinit bttv_init_card2(struct bttv *btv) struct v4l2_subdev *sd; sd = v4l2_i2c_new_subdev(&btv->c.v4l2_dev, - &btv->c.i2c_adap, NULL, "saa6588", 0, addrs); + &btv->c.i2c_adap, "saa6588", 0, addrs); btv->has_saa6588 = (sd != NULL); } @@ -3554,7 +3554,7 @@ void __devinit bttv_init_card2(struct bttv *btv) }; btv->sd_msp34xx = v4l2_i2c_new_subdev(&btv->c.v4l2_dev, - &btv->c.i2c_adap, NULL, "msp3400", 0, addrs); + &btv->c.i2c_adap, "msp3400", 0, addrs); if (btv->sd_msp34xx) return; goto no_audio; @@ -3568,7 +3568,7 @@ void __devinit bttv_init_card2(struct bttv *btv) }; if (v4l2_i2c_new_subdev(&btv->c.v4l2_dev, - &btv->c.i2c_adap, NULL, "tda7432", 0, addrs)) + &btv->c.i2c_adap, "tda7432", 0, addrs)) return; goto no_audio; } @@ -3576,7 +3576,7 @@ void __devinit bttv_init_card2(struct bttv *btv) case 3: { /* The user specified that we should probe for tvaudio */ btv->sd_tvaudio = v4l2_i2c_new_subdev(&btv->c.v4l2_dev, - &btv->c.i2c_adap, NULL, "tvaudio", 0, tvaudio_addrs()); + &btv->c.i2c_adap, "tvaudio", 0, tvaudio_addrs()); if (btv->sd_tvaudio) return; goto no_audio; @@ -3596,11 +3596,11 @@ void __devinit bttv_init_card2(struct bttv *btv) found is really something else (e.g. 
a tea6300). */ if (!bttv_tvcards[btv->c.type].no_msp34xx) { btv->sd_msp34xx = v4l2_i2c_new_subdev(&btv->c.v4l2_dev, - &btv->c.i2c_adap, NULL, "msp3400", + &btv->c.i2c_adap, "msp3400", 0, I2C_ADDRS(I2C_ADDR_MSP3400 >> 1)); } else if (bttv_tvcards[btv->c.type].msp34xx_alt) { btv->sd_msp34xx = v4l2_i2c_new_subdev(&btv->c.v4l2_dev, - &btv->c.i2c_adap, NULL, "msp3400", + &btv->c.i2c_adap, "msp3400", 0, I2C_ADDRS(I2C_ADDR_MSP3400_ALT >> 1)); } @@ -3616,13 +3616,13 @@ void __devinit bttv_init_card2(struct bttv *btv) }; if (v4l2_i2c_new_subdev(&btv->c.v4l2_dev, - &btv->c.i2c_adap, NULL, "tda7432", 0, addrs)) + &btv->c.i2c_adap, "tda7432", 0, addrs)) return; } /* Now see if we can find one of the tvaudio devices. */ btv->sd_tvaudio = v4l2_i2c_new_subdev(&btv->c.v4l2_dev, - &btv->c.i2c_adap, NULL, "tvaudio", 0, tvaudio_addrs()); + &btv->c.i2c_adap, "tvaudio", 0, tvaudio_addrs()); if (btv->sd_tvaudio) return; @@ -3646,13 +3646,13 @@ void __devinit bttv_init_tuner(struct bttv *btv) /* Load tuner module before issuing tuner config call! */ if (bttv_tvcards[btv->c.type].has_radio) v4l2_i2c_new_subdev(&btv->c.v4l2_dev, - &btv->c.i2c_adap, NULL, "tuner", + &btv->c.i2c_adap, "tuner", 0, v4l2_i2c_tuner_addrs(ADDRS_RADIO)); v4l2_i2c_new_subdev(&btv->c.v4l2_dev, - &btv->c.i2c_adap, NULL, "tuner", + &btv->c.i2c_adap, "tuner", 0, v4l2_i2c_tuner_addrs(ADDRS_DEMOD)); v4l2_i2c_new_subdev(&btv->c.v4l2_dev, - &btv->c.i2c_adap, NULL, "tuner", + &btv->c.i2c_adap, "tuner", 0, v4l2_i2c_tuner_addrs(ADDRS_TV_WITH_DEMOD)); tun_setup.mode_mask = T_ANALOG_TV | T_DIGITAL_TV; diff --git a/drivers/media/video/cafe_ccic.c b/drivers/media/video/cafe_ccic.c index 7bc36670071a..260c666ce931 100644 --- a/drivers/media/video/cafe_ccic.c +++ b/drivers/media/video/cafe_ccic.c @@ -2066,8 +2066,7 @@ static int cafe_pci_probe(struct pci_dev *pdev, cam->sensor_addr = 0x42; cam->sensor = v4l2_i2c_new_subdev_cfg(&cam->v4l2_dev, &cam->i2c_adapter, - "ov7670", "ov7670", 0, &sensor_cfg, cam->sensor_addr, - NULL); + "ov7670", 0, &sensor_cfg, cam->sensor_addr, NULL); if (cam->sensor == NULL) { ret = -ENODEV; goto out_smbus; diff --git a/drivers/media/video/cx18/cx18-i2c.c b/drivers/media/video/cx18/cx18-i2c.c index a09caf883170..e71a026f3419 100644 --- a/drivers/media/video/cx18/cx18-i2c.c +++ b/drivers/media/video/cx18/cx18-i2c.c @@ -122,15 +122,15 @@ int cx18_i2c_register(struct cx18 *cx, unsigned idx) if (hw == CX18_HW_TUNER) { /* special tuner group handling */ sd = v4l2_i2c_new_subdev(&cx->v4l2_dev, - adap, NULL, type, 0, cx->card_i2c->radio); + adap, type, 0, cx->card_i2c->radio); if (sd != NULL) sd->grp_id = hw; sd = v4l2_i2c_new_subdev(&cx->v4l2_dev, - adap, NULL, type, 0, cx->card_i2c->demod); + adap, type, 0, cx->card_i2c->demod); if (sd != NULL) sd->grp_id = hw; sd = v4l2_i2c_new_subdev(&cx->v4l2_dev, - adap, NULL, type, 0, cx->card_i2c->tv); + adap, type, 0, cx->card_i2c->tv); if (sd != NULL) sd->grp_id = hw; return sd != NULL ? 
0 : -1; @@ -144,7 +144,7 @@ int cx18_i2c_register(struct cx18 *cx, unsigned idx) return -1; /* It's an I2C device other than an analog tuner or IR chip */ - sd = v4l2_i2c_new_subdev(&cx->v4l2_dev, adap, NULL, type, hw_addrs[idx], + sd = v4l2_i2c_new_subdev(&cx->v4l2_dev, adap, type, hw_addrs[idx], NULL); if (sd != NULL) sd->grp_id = hw; diff --git a/drivers/media/video/cx231xx/cx231xx-cards.c b/drivers/media/video/cx231xx/cx231xx-cards.c index 56c2d8195ac6..2c78d188bb06 100644 --- a/drivers/media/video/cx231xx/cx231xx-cards.c +++ b/drivers/media/video/cx231xx/cx231xx-cards.c @@ -560,7 +560,7 @@ void cx231xx_card_setup(struct cx231xx *dev) if (dev->board.decoder == CX231XX_AVDECODER) { dev->sd_cx25840 = v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_bus[0].i2c_adap, - NULL, "cx25840", 0x88 >> 1, NULL); + "cx25840", 0x88 >> 1, NULL); if (dev->sd_cx25840 == NULL) cx231xx_info("cx25840 subdev registration failure\n"); cx25840_call(dev, core, load_fw); @@ -571,7 +571,7 @@ void cx231xx_card_setup(struct cx231xx *dev) if (dev->board.tuner_type != TUNER_ABSENT) { dev->sd_tuner = v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_bus[dev->board.tuner_i2c_master].i2c_adap, - NULL, "tuner", + "tuner", dev->tuner_addr, NULL); if (dev->sd_tuner == NULL) cx231xx_info("tuner subdev registration failure\n"); diff --git a/drivers/media/video/cx23885/cx23885-cards.c b/drivers/media/video/cx23885/cx23885-cards.c index db054004e462..8861309268b1 100644 --- a/drivers/media/video/cx23885/cx23885-cards.c +++ b/drivers/media/video/cx23885/cx23885-cards.c @@ -1247,7 +1247,7 @@ void cx23885_card_setup(struct cx23885_dev *dev) case CX23885_BOARD_LEADTEK_WINFAST_PXTV1200: dev->sd_cx25840 = v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_bus[2].i2c_adap, - NULL, "cx25840", 0x88 >> 1, NULL); + "cx25840", 0x88 >> 1, NULL); if (dev->sd_cx25840) { dev->sd_cx25840->grp_id = CX23885_HW_AV_CORE; v4l2_subdev_call(dev->sd_cx25840, core, load_fw); diff --git a/drivers/media/video/cx23885/cx23885-video.c b/drivers/media/video/cx23885/cx23885-video.c index 3cc9f462d08d..8b2fb8a4375c 100644 --- a/drivers/media/video/cx23885/cx23885-video.c +++ b/drivers/media/video/cx23885/cx23885-video.c @@ -1507,10 +1507,10 @@ int cx23885_video_register(struct cx23885_dev *dev) if (dev->tuner_addr) sd = v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_bus[1].i2c_adap, - NULL, "tuner", dev->tuner_addr, NULL); + "tuner", dev->tuner_addr, NULL); else sd = v4l2_i2c_new_subdev(&dev->v4l2_dev, - &dev->i2c_bus[1].i2c_adap, NULL, + &dev->i2c_bus[1].i2c_adap, "tuner", 0, v4l2_i2c_tuner_addrs(ADDRS_TV)); if (sd) { struct tuner_setup tun_setup; diff --git a/drivers/media/video/cx88/cx88-cards.c b/drivers/media/video/cx88/cx88-cards.c index b26fcba8600c..9b9e169cce90 100644 --- a/drivers/media/video/cx88/cx88-cards.c +++ b/drivers/media/video/cx88/cx88-cards.c @@ -3515,19 +3515,18 @@ struct cx88_core *cx88_core_create(struct pci_dev *pci, int nr) later code configures a tea5767. */ v4l2_i2c_new_subdev(&core->v4l2_dev, &core->i2c_adap, - NULL, "tuner", - 0, v4l2_i2c_tuner_addrs(ADDRS_RADIO)); + "tuner", 0, v4l2_i2c_tuner_addrs(ADDRS_RADIO)); if (has_demod) v4l2_i2c_new_subdev(&core->v4l2_dev, - &core->i2c_adap, NULL, "tuner", + &core->i2c_adap, "tuner", 0, v4l2_i2c_tuner_addrs(ADDRS_DEMOD)); if (core->board.tuner_addr == ADDR_UNSET) { v4l2_i2c_new_subdev(&core->v4l2_dev, - &core->i2c_adap, NULL, "tuner", + &core->i2c_adap, "tuner", 0, has_demod ? 
tv_addrs + 4 : tv_addrs); } else { v4l2_i2c_new_subdev(&core->v4l2_dev, &core->i2c_adap, - NULL, "tuner", core->board.tuner_addr, NULL); + "tuner", core->board.tuner_addr, NULL); } } diff --git a/drivers/media/video/cx88/cx88-video.c b/drivers/media/video/cx88/cx88-video.c index 88b51194f917..62cea9549404 100644 --- a/drivers/media/video/cx88/cx88-video.c +++ b/drivers/media/video/cx88/cx88-video.c @@ -1895,14 +1895,13 @@ static int __devinit cx8800_initdev(struct pci_dev *pci_dev, if (core->board.audio_chip == V4L2_IDENT_WM8775) v4l2_i2c_new_subdev(&core->v4l2_dev, &core->i2c_adap, - NULL, "wm8775", 0x36 >> 1, NULL); + "wm8775", 0x36 >> 1, NULL); if (core->board.audio_chip == V4L2_IDENT_TVAUDIO) { /* This probes for a tda9874 as is used on some Pixelview Ultra boards. */ - v4l2_i2c_new_subdev(&core->v4l2_dev, - &core->i2c_adap, - NULL, "tvaudio", 0, I2C_ADDRS(0xb0 >> 1)); + v4l2_i2c_new_subdev(&core->v4l2_dev, &core->i2c_adap, + "tvaudio", 0, I2C_ADDRS(0xb0 >> 1)); } switch (core->boardnr) { diff --git a/drivers/media/video/davinci/vpfe_capture.c b/drivers/media/video/davinci/vpfe_capture.c index d8e38cc4ec40..7333a9bb2549 100644 --- a/drivers/media/video/davinci/vpfe_capture.c +++ b/drivers/media/video/davinci/vpfe_capture.c @@ -1986,7 +1986,6 @@ static __init int vpfe_probe(struct platform_device *pdev) vpfe_dev->sd[i] = v4l2_i2c_new_subdev_board(&vpfe_dev->v4l2_dev, i2c_adap, - NULL, &sdinfo->board_info, NULL); if (vpfe_dev->sd[i]) { diff --git a/drivers/media/video/davinci/vpif_capture.c b/drivers/media/video/davinci/vpif_capture.c index 6ac6acd16352..193abab6b355 100644 --- a/drivers/media/video/davinci/vpif_capture.c +++ b/drivers/media/video/davinci/vpif_capture.c @@ -2013,7 +2013,6 @@ static __init int vpif_probe(struct platform_device *pdev) vpif_obj.sd[i] = v4l2_i2c_new_subdev_board(&vpif_obj.v4l2_dev, i2c_adap, - NULL, &subdevdata->board_info, NULL); diff --git a/drivers/media/video/davinci/vpif_display.c b/drivers/media/video/davinci/vpif_display.c index 685f6a6ee603..412c65d54fe1 100644 --- a/drivers/media/video/davinci/vpif_display.c +++ b/drivers/media/video/davinci/vpif_display.c @@ -1553,7 +1553,7 @@ static __init int vpif_probe(struct platform_device *pdev) for (i = 0; i < subdev_count; i++) { vpif_obj.sd[i] = v4l2_i2c_new_subdev_board(&vpif_obj.v4l2_dev, - i2c_adap, NULL, + i2c_adap, &subdevdata[i].board_info, NULL); if (!vpif_obj.sd[i]) { diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c index 54859233f311..f7e9168157a5 100644 --- a/drivers/media/video/em28xx/em28xx-cards.c +++ b/drivers/media/video/em28xx/em28xx-cards.c @@ -2554,39 +2554,39 @@ void em28xx_card_setup(struct em28xx *dev) /* request some modules */ if (dev->board.has_msp34xx) v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap, - NULL, "msp3400", 0, msp3400_addrs); + "msp3400", 0, msp3400_addrs); if (dev->board.decoder == EM28XX_SAA711X) v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap, - NULL, "saa7115_auto", 0, saa711x_addrs); + "saa7115_auto", 0, saa711x_addrs); if (dev->board.decoder == EM28XX_TVP5150) v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap, - NULL, "tvp5150", 0, tvp5150_addrs); + "tvp5150", 0, tvp5150_addrs); if (dev->em28xx_sensor == EM28XX_MT9V011) { struct v4l2_subdev *sd; sd = v4l2_i2c_new_subdev(&dev->v4l2_dev, - &dev->i2c_adap, NULL, "mt9v011", 0, mt9v011_addrs); + &dev->i2c_adap, "mt9v011", 0, mt9v011_addrs); v4l2_subdev_call(sd, core, s_config, 0, &dev->sensor_xtal); } if (dev->board.adecoder == EM28XX_TVAUDIO) 
v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap, - NULL, "tvaudio", dev->board.tvaudio_addr, NULL); + "tvaudio", dev->board.tvaudio_addr, NULL); if (dev->board.tuner_type != TUNER_ABSENT) { int has_demod = (dev->tda9887_conf & TDA9887_PRESENT); if (dev->board.radio.type) v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap, - NULL, "tuner", dev->board.radio_addr, NULL); + "tuner", dev->board.radio_addr, NULL); if (has_demod) v4l2_i2c_new_subdev(&dev->v4l2_dev, - &dev->i2c_adap, NULL, "tuner", + &dev->i2c_adap, "tuner", 0, v4l2_i2c_tuner_addrs(ADDRS_DEMOD)); if (dev->tuner_addr == 0) { enum v4l2_i2c_tuner_type type = @@ -2594,14 +2594,14 @@ void em28xx_card_setup(struct em28xx *dev) struct v4l2_subdev *sd; sd = v4l2_i2c_new_subdev(&dev->v4l2_dev, - &dev->i2c_adap, NULL, "tuner", + &dev->i2c_adap, "tuner", 0, v4l2_i2c_tuner_addrs(type)); if (sd) dev->tuner_addr = v4l2_i2c_subdev_addr(sd); } else { v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap, - NULL, "tuner", dev->tuner_addr, NULL); + "tuner", dev->tuner_addr, NULL); } } diff --git a/drivers/media/video/fsl-viu.c b/drivers/media/video/fsl-viu.c index 9a075d83dd1f..b8faff2dd711 100644 --- a/drivers/media/video/fsl-viu.c +++ b/drivers/media/video/fsl-viu.c @@ -1486,7 +1486,7 @@ static int __devinit viu_of_probe(struct platform_device *op, ad = i2c_get_adapter(0); viu_dev->decoder = v4l2_i2c_new_subdev(&viu_dev->v4l2_dev, ad, - NULL, "saa7113", VIU_VIDEO_DECODER_ADDR, NULL); + "saa7113", VIU_VIDEO_DECODER_ADDR, NULL); viu_dev->vidq.timeout.function = viu_vid_timeout; viu_dev->vidq.timeout.data = (unsigned long)viu_dev; diff --git a/drivers/media/video/ivtv/ivtv-i2c.c b/drivers/media/video/ivtv/ivtv-i2c.c index 9e8039ac909e..665191c9b407 100644 --- a/drivers/media/video/ivtv/ivtv-i2c.c +++ b/drivers/media/video/ivtv/ivtv-i2c.c @@ -239,19 +239,16 @@ int ivtv_i2c_register(struct ivtv *itv, unsigned idx) return -1; if (hw == IVTV_HW_TUNER) { /* special tuner handling */ - sd = v4l2_i2c_new_subdev(&itv->v4l2_dev, - adap, NULL, type, - 0, itv->card_i2c->radio); + sd = v4l2_i2c_new_subdev(&itv->v4l2_dev, adap, type, 0, + itv->card_i2c->radio); if (sd) sd->grp_id = 1 << idx; - sd = v4l2_i2c_new_subdev(&itv->v4l2_dev, - adap, NULL, type, - 0, itv->card_i2c->demod); + sd = v4l2_i2c_new_subdev(&itv->v4l2_dev, adap, type, 0, + itv->card_i2c->demod); if (sd) sd->grp_id = 1 << idx; - sd = v4l2_i2c_new_subdev(&itv->v4l2_dev, - adap, NULL, type, - 0, itv->card_i2c->tv); + sd = v4l2_i2c_new_subdev(&itv->v4l2_dev, adap, type, 0, + itv->card_i2c->tv); if (sd) sd->grp_id = 1 << idx; return sd ? 
0 : -1; @@ -267,17 +264,16 @@ int ivtv_i2c_register(struct ivtv *itv, unsigned idx) /* It's an I2C device other than an analog tuner or IR chip */ if (hw == IVTV_HW_UPD64031A || hw == IVTV_HW_UPD6408X) { sd = v4l2_i2c_new_subdev(&itv->v4l2_dev, - adap, NULL, type, 0, I2C_ADDRS(hw_addrs[idx])); + adap, type, 0, I2C_ADDRS(hw_addrs[idx])); } else if (hw == IVTV_HW_CX25840) { struct cx25840_platform_data pdata; pdata.pvr150_workaround = itv->pvr150_workaround; sd = v4l2_i2c_new_subdev_cfg(&itv->v4l2_dev, - adap, NULL, type, 0, &pdata, hw_addrs[idx], - NULL); + adap, type, 0, &pdata, hw_addrs[idx], NULL); } else { sd = v4l2_i2c_new_subdev(&itv->v4l2_dev, - adap, NULL, type, hw_addrs[idx], NULL); + adap, type, hw_addrs[idx], NULL); } if (sd) sd->grp_id = 1 << idx; diff --git a/drivers/media/video/mxb.c b/drivers/media/video/mxb.c index 94ba698d0ad4..4e8fd965f151 100644 --- a/drivers/media/video/mxb.c +++ b/drivers/media/video/mxb.c @@ -185,17 +185,17 @@ static int mxb_probe(struct saa7146_dev *dev) } mxb->saa7111a = v4l2_i2c_new_subdev(&dev->v4l2_dev, &mxb->i2c_adapter, - NULL, "saa7111", I2C_SAA7111A, NULL); + "saa7111", I2C_SAA7111A, NULL); mxb->tea6420_1 = v4l2_i2c_new_subdev(&dev->v4l2_dev, &mxb->i2c_adapter, - NULL, "tea6420", I2C_TEA6420_1, NULL); + "tea6420", I2C_TEA6420_1, NULL); mxb->tea6420_2 = v4l2_i2c_new_subdev(&dev->v4l2_dev, &mxb->i2c_adapter, - NULL, "tea6420", I2C_TEA6420_2, NULL); + "tea6420", I2C_TEA6420_2, NULL); mxb->tea6415c = v4l2_i2c_new_subdev(&dev->v4l2_dev, &mxb->i2c_adapter, - NULL, "tea6415c", I2C_TEA6415C, NULL); + "tea6415c", I2C_TEA6415C, NULL); mxb->tda9840 = v4l2_i2c_new_subdev(&dev->v4l2_dev, &mxb->i2c_adapter, - NULL, "tda9840", I2C_TDA9840, NULL); + "tda9840", I2C_TDA9840, NULL); mxb->tuner = v4l2_i2c_new_subdev(&dev->v4l2_dev, &mxb->i2c_adapter, - NULL, "tuner", I2C_TUNER, NULL); + "tuner", I2C_TUNER, NULL); /* check if all devices are present */ if (!mxb->tea6420_1 || !mxb->tea6420_2 || !mxb->tea6415c || diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw.c b/drivers/media/video/pvrusb2/pvrusb2-hdw.c index bef202752cc8..66ad516bdfd9 100644 --- a/drivers/media/video/pvrusb2/pvrusb2-hdw.c +++ b/drivers/media/video/pvrusb2/pvrusb2-hdw.c @@ -2088,16 +2088,14 @@ static int pvr2_hdw_load_subdev(struct pvr2_hdw *hdw, " Setting up with specified i2c address 0x%x", mid, i2caddr[0]); sd = v4l2_i2c_new_subdev(&hdw->v4l2_dev, &hdw->i2c_adap, - NULL, fname, - i2caddr[0], NULL); + fname, i2caddr[0], NULL); } else { pvr2_trace(PVR2_TRACE_INIT, "Module ID %u:" " Setting up with address probe list", mid); sd = v4l2_i2c_new_subdev(&hdw->v4l2_dev, &hdw->i2c_adap, - NULL, fname, - 0, i2caddr); + fname, 0, i2caddr); } if (!sd) { diff --git a/drivers/media/video/s5p-fimc/fimc-capture.c b/drivers/media/video/s5p-fimc/fimc-capture.c index e8f13d3e2df1..1b93207c89e8 100644 --- a/drivers/media/video/s5p-fimc/fimc-capture.c +++ b/drivers/media/video/s5p-fimc/fimc-capture.c @@ -44,7 +44,7 @@ static struct v4l2_subdev *fimc_subdev_register(struct fimc_dev *fimc, return ERR_PTR(-ENOMEM); sd = v4l2_i2c_new_subdev_board(&vid_cap->v4l2_dev, i2c_adap, - MODULE_NAME, isp_info->board_info, NULL); + isp_info->board_info, NULL); if (!sd) { v4l2_err(&vid_cap->v4l2_dev, "failed to acquire subdev\n"); return NULL; diff --git a/drivers/media/video/saa7134/saa7134-cards.c b/drivers/media/video/saa7134/saa7134-cards.c index 0911cb580e18..1d4d0a49ea52 100644 --- a/drivers/media/video/saa7134/saa7134-cards.c +++ b/drivers/media/video/saa7134/saa7134-cards.c @@ -7551,22 +7551,22 @@ int 
saa7134_board_init2(struct saa7134_dev *dev) so we do not need to probe for a radio tuner device. */ if (dev->radio_type != UNSET) v4l2_i2c_new_subdev(&dev->v4l2_dev, - &dev->i2c_adap, NULL, "tuner", + &dev->i2c_adap, "tuner", dev->radio_addr, NULL); if (has_demod) v4l2_i2c_new_subdev(&dev->v4l2_dev, - &dev->i2c_adap, NULL, "tuner", + &dev->i2c_adap, "tuner", 0, v4l2_i2c_tuner_addrs(ADDRS_DEMOD)); if (dev->tuner_addr == ADDR_UNSET) { enum v4l2_i2c_tuner_type type = has_demod ? ADDRS_TV_WITH_DEMOD : ADDRS_TV; v4l2_i2c_new_subdev(&dev->v4l2_dev, - &dev->i2c_adap, NULL, "tuner", + &dev->i2c_adap, "tuner", 0, v4l2_i2c_tuner_addrs(type)); } else { v4l2_i2c_new_subdev(&dev->v4l2_dev, - &dev->i2c_adap, NULL, "tuner", + &dev->i2c_adap, "tuner", dev->tuner_addr, NULL); } } diff --git a/drivers/media/video/saa7134/saa7134-core.c b/drivers/media/video/saa7134/saa7134-core.c index 764d7d219fed..756a27812260 100644 --- a/drivers/media/video/saa7134/saa7134-core.c +++ b/drivers/media/video/saa7134/saa7134-core.c @@ -991,7 +991,7 @@ static int __devinit saa7134_initdev(struct pci_dev *pci_dev, if (card_is_empress(dev)) { struct v4l2_subdev *sd = v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap, - NULL, "saa6752hs", + "saa6752hs", saa7134_boards[dev->board].empress_addr, NULL); if (sd) @@ -1002,7 +1002,7 @@ static int __devinit saa7134_initdev(struct pci_dev *pci_dev, struct v4l2_subdev *sd; sd = v4l2_i2c_new_subdev(&dev->v4l2_dev, - &dev->i2c_adap, NULL, "saa6588", + &dev->i2c_adap, "saa6588", 0, I2C_ADDRS(saa7134_boards[dev->board].rds_addr)); if (sd) { printk(KERN_INFO "%s: found RDS decoder\n", dev->name); diff --git a/drivers/media/video/sh_vou.c b/drivers/media/video/sh_vou.c index 0f4906136b8f..4e5a8cf76ded 100644 --- a/drivers/media/video/sh_vou.c +++ b/drivers/media/video/sh_vou.c @@ -1406,7 +1406,7 @@ static int __devinit sh_vou_probe(struct platform_device *pdev) goto ereset; subdev = v4l2_i2c_new_subdev_board(&vou_dev->v4l2_dev, i2c_adap, - NULL, vou_pdata->board_info, NULL); + vou_pdata->board_info, NULL); if (!subdev) { ret = -ENOMEM; goto ei2cnd; diff --git a/drivers/media/video/soc_camera.c b/drivers/media/video/soc_camera.c index 43848a751d11..335120c2021b 100644 --- a/drivers/media/video/soc_camera.c +++ b/drivers/media/video/soc_camera.c @@ -896,7 +896,7 @@ static int soc_camera_init_i2c(struct soc_camera_device *icd, icl->board_info->platform_data = icd; subdev = v4l2_i2c_new_subdev_board(&ici->v4l2_dev, adap, - NULL, icl->board_info, NULL); + icl->board_info, NULL); if (!subdev) goto ei2cnd; diff --git a/drivers/media/video/usbvision/usbvision-i2c.c b/drivers/media/video/usbvision/usbvision-i2c.c index e3bbae26e3ce..81dd53bb5267 100644 --- a/drivers/media/video/usbvision/usbvision-i2c.c +++ b/drivers/media/video/usbvision/usbvision-i2c.c @@ -251,7 +251,7 @@ int usbvision_i2c_register(struct usb_usbvision *usbvision) hit-and-miss. */ mdelay(10); v4l2_i2c_new_subdev(&usbvision->v4l2_dev, - &usbvision->i2c_adap, NULL, + &usbvision->i2c_adap, "saa7115_auto", 0, saa711x_addrs); break; } @@ -261,14 +261,14 @@ int usbvision_i2c_register(struct usb_usbvision *usbvision) struct tuner_setup tun_setup; sd = v4l2_i2c_new_subdev(&usbvision->v4l2_dev, - &usbvision->i2c_adap, NULL, + &usbvision->i2c_adap, "tuner", 0, v4l2_i2c_tuner_addrs(ADDRS_DEMOD)); /* depending on whether we found a demod or not, select the tuner type. */ type = sd ? 
ADDRS_TV_WITH_DEMOD : ADDRS_TV; sd = v4l2_i2c_new_subdev(&usbvision->v4l2_dev, - &usbvision->i2c_adap, NULL, + &usbvision->i2c_adap, "tuner", 0, v4l2_i2c_tuner_addrs(type)); if (sd == NULL) diff --git a/drivers/media/video/v4l2-common.c b/drivers/media/video/v4l2-common.c index 9294282b5add..b5eb1f3950b1 100644 --- a/drivers/media/video/v4l2-common.c +++ b/drivers/media/video/v4l2-common.c @@ -368,18 +368,15 @@ EXPORT_SYMBOL_GPL(v4l2_i2c_subdev_init); /* Load an i2c sub-device. */ struct v4l2_subdev *v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev, - struct i2c_adapter *adapter, const char *module_name, - struct i2c_board_info *info, const unsigned short *probe_addrs) + struct i2c_adapter *adapter, struct i2c_board_info *info, + const unsigned short *probe_addrs) { struct v4l2_subdev *sd = NULL; struct i2c_client *client; BUG_ON(!v4l2_dev); - if (module_name) - request_module(module_name); - else - request_module(I2C_MODULE_PREFIX "%s", info->type); + request_module(I2C_MODULE_PREFIX "%s", info->type); /* Create the i2c client */ if (info->addr == 0 && probe_addrs) @@ -432,8 +429,7 @@ error: EXPORT_SYMBOL_GPL(v4l2_i2c_new_subdev_board); struct v4l2_subdev *v4l2_i2c_new_subdev_cfg(struct v4l2_device *v4l2_dev, - struct i2c_adapter *adapter, - const char *module_name, const char *client_type, + struct i2c_adapter *adapter, const char *client_type, int irq, void *platform_data, u8 addr, const unsigned short *probe_addrs) { @@ -447,8 +443,7 @@ struct v4l2_subdev *v4l2_i2c_new_subdev_cfg(struct v4l2_device *v4l2_dev, info.irq = irq; info.platform_data = platform_data; - return v4l2_i2c_new_subdev_board(v4l2_dev, adapter, module_name, - &info, probe_addrs); + return v4l2_i2c_new_subdev_board(v4l2_dev, adapter, &info, probe_addrs); } EXPORT_SYMBOL_GPL(v4l2_i2c_new_subdev_cfg); diff --git a/drivers/media/video/via-camera.c b/drivers/media/video/via-camera.c index 02a21bccae18..9eda7cc03121 100644 --- a/drivers/media/video/via-camera.c +++ b/drivers/media/video/via-camera.c @@ -1360,7 +1360,7 @@ static __devinit int viacam_probe(struct platform_device *pdev) */ sensor_adapter = viafb_find_i2c_adapter(VIA_PORT_31); cam->sensor = v4l2_i2c_new_subdev(&cam->v4l2_dev, sensor_adapter, - "ov7670", "ov7670", 0x42 >> 1, NULL); + "ov7670", 0x42 >> 1, NULL); if (cam->sensor == NULL) { dev_err(&pdev->dev, "Unable to find the sensor!\n"); ret = -ENODEV; diff --git a/drivers/media/video/vino.c b/drivers/media/video/vino.c index e5e005dc1554..7e7eec48f8b1 100644 --- a/drivers/media/video/vino.c +++ b/drivers/media/video/vino.c @@ -4334,10 +4334,10 @@ static int __init vino_module_init(void) vino_drvdata->decoder = v4l2_i2c_new_subdev(&vino_drvdata->v4l2_dev, &vino_i2c_adapter, - NULL, "saa7191", 0, I2C_ADDRS(0x45)); + "saa7191", 0, I2C_ADDRS(0x45)); vino_drvdata->camera = v4l2_i2c_new_subdev(&vino_drvdata->v4l2_dev, &vino_i2c_adapter, - NULL, "indycam", 0, I2C_ADDRS(0x2b)); + "indycam", 0, I2C_ADDRS(0x2b)); dprintk("init complete!\n"); diff --git a/drivers/media/video/zoran/zoran_card.c b/drivers/media/video/zoran/zoran_card.c index 7e6d62467eaa..e520abf9f4c3 100644 --- a/drivers/media/video/zoran/zoran_card.c +++ b/drivers/media/video/zoran/zoran_card.c @@ -1343,13 +1343,12 @@ static int __devinit zoran_probe(struct pci_dev *pdev, } zr->decoder = v4l2_i2c_new_subdev(&zr->v4l2_dev, - &zr->i2c_adapter, NULL, zr->card.i2c_decoder, + &zr->i2c_adapter, zr->card.i2c_decoder, 0, zr->card.addrs_decoder); if (zr->card.i2c_encoder) zr->encoder = v4l2_i2c_new_subdev(&zr->v4l2_dev, - &zr->i2c_adapter, - NULL, 
zr->card.i2c_encoder, + &zr->i2c_adapter, zr->card.i2c_encoder, 0, zr->card.addrs_encoder); dprintk(2, diff --git a/drivers/misc/isl29020.c b/drivers/misc/isl29020.c index ca47e6285075..307aada5fffe 100644 --- a/drivers/misc/isl29020.c +++ b/drivers/misc/isl29020.c @@ -183,9 +183,7 @@ static int isl29020_probe(struct i2c_client *client, static int isl29020_remove(struct i2c_client *client) { - struct als_data *data = i2c_get_clientdata(client); sysfs_remove_group(&client->dev.kobj, &m_als_gr); - kfree(data); return 0; } @@ -245,6 +243,6 @@ static void __exit sensor_isl29020_exit(void) module_init(sensor_isl29020_init); module_exit(sensor_isl29020_exit); -MODULE_AUTHOR("Kalhan Trisal <kalhan.trisal@intel.com"); +MODULE_AUTHOR("Kalhan Trisal <kalhan.trisal@intel.com>"); MODULE_DESCRIPTION("Intersil isl29020 ALS Driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/misc/sgi-xp/xpc_partition.c b/drivers/misc/sgi-xp/xpc_partition.c index d551f09ccb79..6956f7e7d439 100644 --- a/drivers/misc/sgi-xp/xpc_partition.c +++ b/drivers/misc/sgi-xp/xpc_partition.c @@ -439,18 +439,23 @@ xpc_discovery(void) * nodes that can comprise an access protection grouping. The access * protection is in regards to memory, IOI and IPI. */ - max_regions = 64; region_size = xp_region_size; - switch (region_size) { - case 128: - max_regions *= 2; - case 64: - max_regions *= 2; - case 32: - max_regions *= 2; - region_size = 16; - DBUG_ON(!is_shub2()); + if (is_uv()) + max_regions = 256; + else { + max_regions = 64; + + switch (region_size) { + case 128: + max_regions *= 2; + case 64: + max_regions *= 2; + case 32: + max_regions *= 2; + region_size = 16; + DBUG_ON(!is_shub2()); + } } for (region = 0; region < max_regions; region++) { diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 8f86d702e46e..31ae07a36576 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -1559,7 +1559,7 @@ void mmc_stop_host(struct mmc_host *host) if (host->caps & MMC_CAP_DISABLE) cancel_delayed_work(&host->disable); - cancel_delayed_work(&host->detect); + cancel_delayed_work_sync(&host->detect); mmc_flush_scheduled_work(); /* clear pm flags now and let card drivers set them as needed */ diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index 995261f7fd70..77f93c3b8808 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -375,7 +375,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, struct mmc_card *oldcard) { struct mmc_card *card; - int err, ddr = MMC_SDR_MODE; + int err, ddr = 0; u32 cid[4]; unsigned int max_dtr; @@ -562,7 +562,11 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, 1 << bus_width, ddr); err = 0; } else { - mmc_card_set_ddr_mode(card); + if (ddr) + mmc_card_set_ddr_mode(card); + else + ddr = MMC_SDR_MODE; + mmc_set_bus_width_ddr(card->host, bus_width, ddr); } } diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c index c3ad1058cd31..efef5f94ac42 100644 --- a/drivers/mmc/core/sdio.c +++ b/drivers/mmc/core/sdio.c @@ -547,9 +547,11 @@ static void mmc_sdio_detect(struct mmc_host *host) BUG_ON(!host->card); /* Make sure card is powered before detecting it */ - err = pm_runtime_get_sync(&host->card->dev); - if (err < 0) - goto out; + if (host->caps & MMC_CAP_POWER_OFF_CARD) { + err = pm_runtime_get_sync(&host->card->dev); + if (err < 0) + goto out; + } mmc_claim_host(host); @@ -560,6 +562,20 @@ static void mmc_sdio_detect(struct mmc_host *host) mmc_release_host(host); + /* + * Tell PM core it's OK to power off the card now. 
+ * + * The _sync variant is used in order to ensure that the card + * is left powered off in case an error occurred, and the card + * is going to be removed. + * + * Since there is no specific reason to believe a new user + * is about to show up at this point, the _sync variant is + * desirable anyway. + */ + if (host->caps & MMC_CAP_POWER_OFF_CARD) + pm_runtime_put_sync(&host->card->dev); + out: if (err) { mmc_sdio_remove(host); @@ -568,9 +584,6 @@ out: mmc_detach_bus(host); mmc_release_host(host); } - - /* Tell PM core that we're done */ - pm_runtime_put(&host->card->dev); } /* @@ -718,16 +731,21 @@ int mmc_attach_sdio(struct mmc_host *host, u32 ocr) card = host->card; /* - * Let runtime PM core know our card is active + * Enable runtime PM only if supported by host+card+board */ - err = pm_runtime_set_active(&card->dev); - if (err) - goto remove; + if (host->caps & MMC_CAP_POWER_OFF_CARD) { + /* + * Let runtime PM core know our card is active + */ + err = pm_runtime_set_active(&card->dev); + if (err) + goto remove; - /* - * Enable runtime PM for this card - */ - pm_runtime_enable(&card->dev); + /* + * Enable runtime PM for this card + */ + pm_runtime_enable(&card->dev); + } /* * The number of functions on the card is encoded inside @@ -745,9 +763,10 @@ int mmc_attach_sdio(struct mmc_host *host, u32 ocr) goto remove; /* - * Enable Runtime PM for this func + * Enable Runtime PM for this func (if supported) */ - pm_runtime_enable(&card->sdio_func[i]->dev); + if (host->caps & MMC_CAP_POWER_OFF_CARD) + pm_runtime_enable(&card->sdio_func[i]->dev); } mmc_release_host(host); diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c index 2716c7ab6bbf..203da443e339 100644 --- a/drivers/mmc/core/sdio_bus.c +++ b/drivers/mmc/core/sdio_bus.c @@ -17,6 +17,7 @@ #include <linux/pm_runtime.h> #include <linux/mmc/card.h> +#include <linux/mmc/host.h> #include <linux/mmc/sdio_func.h> #include "sdio_cis.h" @@ -132,9 +133,11 @@ static int sdio_bus_probe(struct device *dev) * it should call pm_runtime_put_noidle() in its probe routine and * pm_runtime_get_noresume() in its remove routine. */ - ret = pm_runtime_get_sync(dev); - if (ret < 0) - goto out; + if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD) { + ret = pm_runtime_get_sync(dev); + if (ret < 0) + goto out; + } /* Set the default block size so the driver is sure it's something * sensible. 
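The SDIO hunks here (mmc_sdio_detect(), mmc_attach_sdio(), and the sdio_bus_probe()/sdio_bus_remove() changes that follow) all gate runtime-PM calls on MMC_CAP_POWER_OFF_CARD, so hosts that cannot actually cut card power never go through runtime-PM state transitions at all. A minimal sketch of the pairing discipline this establishes; my_card_op() is a hypothetical caller, not part of the patch:

    static int my_card_op(struct mmc_host *host)
    {
            int err;

            /* Power the card up only if the host can power it off. */
            if (host->caps & MMC_CAP_POWER_OFF_CARD) {
                    err = pm_runtime_get_sync(&host->card->dev);
                    if (err < 0)
                            return err;
            }

            /* ... talk to the card here ... */

            /* Balance the get under the same capability test. */
            if (host->caps & MMC_CAP_POWER_OFF_CARD)
                    pm_runtime_put_sync(&host->card->dev);

            return 0;
    }

The key invariant is that the get and the put are taken under the same capability test, so an unbalanced reference count is impossible regardless of whether the host advertises the cap.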
*/ @@ -151,7 +154,8 @@ static int sdio_bus_probe(struct device *dev) return 0; disable_runtimepm: - pm_runtime_put_noidle(dev); + if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD) + pm_runtime_put_noidle(dev); out: return ret; } @@ -160,12 +164,14 @@ static int sdio_bus_remove(struct device *dev) { struct sdio_driver *drv = to_sdio_driver(dev->driver); struct sdio_func *func = dev_to_sdio_func(dev); - int ret; + int ret = 0; /* Make sure card is powered before invoking ->remove() */ - ret = pm_runtime_get_sync(dev); - if (ret < 0) - goto out; + if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD) { + ret = pm_runtime_get_sync(dev); + if (ret < 0) + goto out; + } drv->remove(func); @@ -178,10 +184,12 @@ static int sdio_bus_remove(struct device *dev) } /* First, undo the increment made directly above */ - pm_runtime_put_noidle(dev); + if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD) + pm_runtime_put_noidle(dev); /* Then undo the runtime PM settings in sdio_bus_probe() */ - pm_runtime_put_noidle(dev); + if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD) + pm_runtime_put_noidle(dev); out: return ret; @@ -191,6 +199,8 @@ out: static int sdio_bus_pm_prepare(struct device *dev) { + struct sdio_func *func = dev_to_sdio_func(dev); + /* * Resume an SDIO device which was suspended at run time at this * point, in order to allow standard SDIO suspend/resume paths @@ -212,7 +222,8 @@ static int sdio_bus_pm_prepare(struct device *dev) * since there is little point in failing system suspend if a * device can't be resumed. */ - pm_runtime_resume(dev); + if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD) + pm_runtime_resume(dev); return 0; } diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index 82a1079bbdc7..5d46021cbb57 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c @@ -1002,7 +1002,7 @@ static inline void omap_hsmmc_reset_controller_fsm(struct omap_hsmmc_host *host, * Monitor a 0->1 transition first */ if (mmc_slot(host).features & HSMMC_HAS_UPDATED_RESET) { - while ((!(OMAP_HSMMC_READ(host, SYSCTL) & bit)) + while ((!(OMAP_HSMMC_READ(host->base, SYSCTL) & bit)) && (i++ < limit)) cpu_relax(); } diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index 2e9cca19c90b..9b82910b9dbb 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c @@ -17,6 +17,7 @@ #include <linux/clk.h> #include <linux/mmc/host.h> #include <linux/mmc/sdhci-pltfm.h> +#include <mach/hardware.h> #include "sdhci.h" #include "sdhci-pltfm.h" #include "sdhci-esdhc.h" @@ -112,6 +113,13 @@ static int esdhc_pltfm_init(struct sdhci_host *host, struct sdhci_pltfm_data *pd clk_enable(clk); pltfm_host->clk = clk; + if (cpu_is_mx35() || cpu_is_mx51()) + host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; + + /* Fix errata ENGcm07207 which is present on i.MX25 and i.MX35 */ + if (cpu_is_mx25() || cpu_is_mx35()) + host->quirks |= SDHCI_QUIRK_NO_MULTIBLOCK; + return 0; } @@ -133,10 +141,8 @@ static struct sdhci_ops sdhci_esdhc_ops = { }; struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = { - .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_NO_MULTIBLOCK - | SDHCI_QUIRK_BROKEN_ADMA, + .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_ADMA, /* ADMA has issues. 
Might be fixable */ - /* NO_MULTIBLOCK might be MX35 only (Errata: ENGcm07207) */ .ops = &sdhci_esdhc_ops, .init = esdhc_pltfm_init, .exit = esdhc_pltfm_exit, diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c index 55746bac2f44..3d9c2460d437 100644 --- a/drivers/mmc/host/sdhci-pci.c +++ b/drivers/mmc/host/sdhci-pci.c @@ -149,11 +149,11 @@ static const struct sdhci_pci_fixes sdhci_cafe = { * ADMA operation is disabled for Moorestown platform due to * hardware bugs. */ -static int mrst_hc1_probe(struct sdhci_pci_chip *chip) +static int mrst_hc_probe(struct sdhci_pci_chip *chip) { /* - * slots number is fixed here for MRST as SDIO3 is never used and has - * hardware bugs. + * slots number is fixed here for MRST as SDIO3/5 are never used and + * have hardware bugs. */ chip->num_slots = 1; return 0; @@ -163,9 +163,9 @@ static const struct sdhci_pci_fixes sdhci_intel_mrst_hc0 = { .quirks = SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_NO_HISPD_BIT, }; -static const struct sdhci_pci_fixes sdhci_intel_mrst_hc1 = { +static const struct sdhci_pci_fixes sdhci_intel_mrst_hc1_hc2 = { .quirks = SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_NO_HISPD_BIT, - .probe = mrst_hc1_probe, + .probe = mrst_hc_probe, }; static const struct sdhci_pci_fixes sdhci_intel_mfd_sd = { @@ -538,7 +538,15 @@ static const struct pci_device_id pci_ids[] __devinitdata = { .device = PCI_DEVICE_ID_INTEL_MRST_SD1, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, - .driver_data = (kernel_ulong_t)&sdhci_intel_mrst_hc1, + .driver_data = (kernel_ulong_t)&sdhci_intel_mrst_hc1_hc2, + }, + + { + .vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_MRST_SD2, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (kernel_ulong_t)&sdhci_intel_mrst_hc1_hc2, }, { @@ -637,6 +645,7 @@ static int sdhci_pci_suspend (struct pci_dev *pdev, pm_message_t state) { struct sdhci_pci_chip *chip; struct sdhci_pci_slot *slot; + mmc_pm_flag_t slot_pm_flags; mmc_pm_flag_t pm_flags = 0; int i, ret; @@ -657,7 +666,11 @@ static int sdhci_pci_suspend (struct pci_dev *pdev, pm_message_t state) return ret; } - pm_flags |= slot->host->mmc->pm_flags; + slot_pm_flags = slot->host->mmc->pm_flags; + if (slot_pm_flags & MMC_PM_WAKE_SDIO_IRQ) + sdhci_enable_irq_wakeups(slot->host); + + pm_flags |= slot_pm_flags; } if (chip->fixes && chip->fixes->suspend) { @@ -671,8 +684,10 @@ static int sdhci_pci_suspend (struct pci_dev *pdev, pm_message_t state) pci_save_state(pdev); if (pm_flags & MMC_PM_KEEP_POWER) { - if (pm_flags & MMC_PM_WAKE_SDIO_IRQ) + if (pm_flags & MMC_PM_WAKE_SDIO_IRQ) { + pci_pme_active(pdev, true); pci_enable_wake(pdev, PCI_D3hot, 1); + } pci_set_power_state(pdev, PCI_D3hot); } else { pci_enable_wake(pdev, pci_choose_state(pdev, state), 0); diff --git a/drivers/mmc/host/sdhci-pxa.c b/drivers/mmc/host/sdhci-pxa.c index fc406ac5d193..5a61208cbc66 100644 --- a/drivers/mmc/host/sdhci-pxa.c +++ b/drivers/mmc/host/sdhci-pxa.c @@ -141,6 +141,10 @@ static int __devinit sdhci_pxa_probe(struct platform_device *pdev) if (pdata->quirks) host->quirks |= pdata->quirks; + /* If slot design supports 8 bit data, indicate this to MMC. 
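Stepping back to the sdhci-pci suspend hunk above: when a slot requested MMC_PM_WAKE_SDIO_IRQ, the controller's wake-up control register is now programmed and PME# armed before entering D3hot. A condensed sketch of that sequence; my_suspend_with_sdio_wake() is a hypothetical wrapper, and the register bits come from the sdhci.h hunk later in this diff:

    static void my_suspend_with_sdio_wake(struct sdhci_host *host,
                                          struct pci_dev *pdev)
    {
            u8 val;

            /* Ask the controller to wake on card interrupt... */
            val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
            val |= SDHCI_WAKE_ON_INT;
            sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);

            /* ...and make sure the wake event can leave D3hot. */
            pci_pme_active(pdev, true);
            pci_enable_wake(pdev, PCI_D3hot, 1);
            pci_set_power_state(pdev, PCI_D3hot);
    }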
*/ + if (pdata->flags & PXA_FLAG_SD_8_BIT_CAPABLE_SLOT) + host->mmc->caps |= MMC_CAP_8_BIT_DATA; + ret = sdhci_add_host(host); if (ret) { dev_err(&pdev->dev, "failed to add host\n"); diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 782c0ee3c925..a25db426c910 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -1185,17 +1185,31 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) if (host->ops->platform_send_init_74_clocks) host->ops->platform_send_init_74_clocks(host, ios->power_mode); - ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); - - if (ios->bus_width == MMC_BUS_WIDTH_8) - ctrl |= SDHCI_CTRL_8BITBUS; - else - ctrl &= ~SDHCI_CTRL_8BITBUS; + /* + * If your platform has 8-bit width support but is not a v3 controller, + * or if it requires special setup code, you should implement that in + * platform_8bit_width(). + */ + if (host->ops->platform_8bit_width) + host->ops->platform_8bit_width(host, ios->bus_width); + else { + ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); + if (ios->bus_width == MMC_BUS_WIDTH_8) { + ctrl &= ~SDHCI_CTRL_4BITBUS; + if (host->version >= SDHCI_SPEC_300) + ctrl |= SDHCI_CTRL_8BITBUS; + } else { + if (host->version >= SDHCI_SPEC_300) + ctrl &= ~SDHCI_CTRL_8BITBUS; + if (ios->bus_width == MMC_BUS_WIDTH_4) + ctrl |= SDHCI_CTRL_4BITBUS; + else + ctrl &= ~SDHCI_CTRL_4BITBUS; + } + sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); + } - if (ios->bus_width == MMC_BUS_WIDTH_4) - ctrl |= SDHCI_CTRL_4BITBUS; - else - ctrl &= ~SDHCI_CTRL_4BITBUS; + ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); if ((ios->timing == MMC_TIMING_SD_HS || ios->timing == MMC_TIMING_MMC_HS) @@ -1681,6 +1695,16 @@ int sdhci_resume_host(struct sdhci_host *host) EXPORT_SYMBOL_GPL(sdhci_resume_host); +void sdhci_enable_irq_wakeups(struct sdhci_host *host) +{ + u8 val; + val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL); + val |= SDHCI_WAKE_ON_INT; + sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL); +} + +EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups); + #endif /* CONFIG_PM */ /*****************************************************************************\ @@ -1845,11 +1869,19 @@ int sdhci_add_host(struct sdhci_host *host) mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300; else mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200; + mmc->f_max = host->max_clk; mmc->caps |= MMC_CAP_SDIO_IRQ; + /* + * A controller may support 8-bit width, but the board itself + * might not have the pins brought out. Boards that support + * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in + * their platform code before calling sdhci_add_host(), and we + * won't assume 8-bit width for hosts without that CAP. 
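The sdhci_set_ios() rework above separates two concerns: hosts with non-standard 8-bit wiring supply ops->platform_8bit_width(), while standard hosts only ever see SDHCI_CTRL_8BITBUS on v3 (SDHCI_SPEC_300) controllers, where the bit is architected. The register logic reduces to roughly this helper (a sketch for illustration; my_bus_width_bits() is not a function in the patch):

    static u8 my_bus_width_bits(u8 ctrl, int version, int bus_width)
    {
            if (bus_width == MMC_BUS_WIDTH_8) {
                    ctrl &= ~SDHCI_CTRL_4BITBUS;
                    if (version >= SDHCI_SPEC_300)
                            ctrl |= SDHCI_CTRL_8BITBUS;
            } else {
                    if (version >= SDHCI_SPEC_300)
                            ctrl &= ~SDHCI_CTRL_8BITBUS;
                    if (bus_width == MMC_BUS_WIDTH_4)
                            ctrl |= SDHCI_CTRL_4BITBUS;
                    else
                            ctrl &= ~SDHCI_CTRL_4BITBUS;
            }
            return ctrl;
    }

Note that on pre-v3 silicon the 8-bit bit is simply never touched; such hosts are expected to route the request through the new platform hook instead.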
+ */ if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA)) - mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA; + mmc->caps |= MMC_CAP_4_BIT_DATA; if (caps & SDHCI_CAN_DO_HISPD) mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED; diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index b7b8a3b28b01..e42d7f00c060 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h @@ -76,7 +76,7 @@ #define SDHCI_CTRL_ADMA1 0x08 #define SDHCI_CTRL_ADMA32 0x10 #define SDHCI_CTRL_ADMA64 0x18 -#define SDHCI_CTRL_8BITBUS 0x20 +#define SDHCI_CTRL_8BITBUS 0x20 #define SDHCI_POWER_CONTROL 0x29 #define SDHCI_POWER_ON 0x01 @@ -87,6 +87,9 @@ #define SDHCI_BLOCK_GAP_CONTROL 0x2A #define SDHCI_WAKE_UP_CONTROL 0x2B +#define SDHCI_WAKE_ON_INT 0x01 +#define SDHCI_WAKE_ON_INSERT 0x02 +#define SDHCI_WAKE_ON_REMOVE 0x04 #define SDHCI_CLOCK_CONTROL 0x2C #define SDHCI_DIVIDER_SHIFT 8 @@ -152,6 +155,7 @@ #define SDHCI_CLOCK_BASE_SHIFT 8 #define SDHCI_MAX_BLOCK_MASK 0x00030000 #define SDHCI_MAX_BLOCK_SHIFT 16 +#define SDHCI_CAN_DO_8BIT 0x00040000 #define SDHCI_CAN_DO_ADMA2 0x00080000 #define SDHCI_CAN_DO_ADMA1 0x00100000 #define SDHCI_CAN_DO_HISPD 0x00200000 @@ -212,6 +216,8 @@ struct sdhci_ops { unsigned int (*get_max_clock)(struct sdhci_host *host); unsigned int (*get_min_clock)(struct sdhci_host *host); unsigned int (*get_timeout_clock)(struct sdhci_host *host); + int (*platform_8bit_width)(struct sdhci_host *host, + int width); void (*platform_send_init_74_clocks)(struct sdhci_host *host, u8 power_mode); unsigned int (*get_ro)(struct sdhci_host *host); @@ -317,6 +323,7 @@ extern void sdhci_remove_host(struct sdhci_host *host, int dead); #ifdef CONFIG_PM extern int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state); extern int sdhci_resume_host(struct sdhci_host *host); +extern void sdhci_enable_irq_wakeups(struct sdhci_host *host); #endif #endif /* __SDHCI_HW_H */ diff --git a/drivers/mmc/host/ushc.c b/drivers/mmc/host/ushc.c index b4ead4a13c98..f8f65df9b017 100644 --- a/drivers/mmc/host/ushc.c +++ b/drivers/mmc/host/ushc.c @@ -425,7 +425,7 @@ static int ushc_probe(struct usb_interface *intf, const struct usb_device_id *id struct usb_device *usb_dev = interface_to_usbdev(intf); struct mmc_host *mmc; struct ushc_data *ushc; - int ret = -ENOMEM; + int ret; mmc = mmc_alloc_host(sizeof(struct ushc_data), &intf->dev); if (mmc == NULL) @@ -462,11 +462,15 @@ static int ushc_probe(struct usb_interface *intf, const struct usb_device_id *id mmc->max_blk_count = 511; ushc->int_urb = usb_alloc_urb(0, GFP_KERNEL); - if (ushc->int_urb == NULL) + if (ushc->int_urb == NULL) { + ret = -ENOMEM; goto err; + } ushc->int_data = kzalloc(sizeof(struct ushc_int_data), GFP_KERNEL); - if (ushc->int_data == NULL) + if (ushc->int_data == NULL) { + ret = -ENOMEM; goto err; + } usb_fill_int_urb(ushc->int_urb, ushc->usb_dev, usb_rcvintpipe(usb_dev, intf->cur_altsetting->endpoint[0].desc.bEndpointAddress), @@ -475,11 +479,15 @@ static int ushc_probe(struct usb_interface *intf, const struct usb_device_id *id intf->cur_altsetting->endpoint[0].desc.bInterval); ushc->cbw_urb = usb_alloc_urb(0, GFP_KERNEL); - if (ushc->cbw_urb == NULL) + if (ushc->cbw_urb == NULL) { + ret = -ENOMEM; goto err; + } ushc->cbw = kzalloc(sizeof(struct ushc_cbw), GFP_KERNEL); - if (ushc->cbw == NULL) + if (ushc->cbw == NULL) { + ret = -ENOMEM; goto err; + } ushc->cbw->signature = USHC_CBW_SIGNATURE; usb_fill_bulk_urb(ushc->cbw_urb, ushc->usb_dev, usb_sndbulkpipe(usb_dev, 2), @@ -487,15 +495,21 @@ static int ushc_probe(struct 
usb_interface *intf, const struct usb_device_id *id cbw_callback, ushc); ushc->data_urb = usb_alloc_urb(0, GFP_KERNEL); - if (ushc->data_urb == NULL) + if (ushc->data_urb == NULL) { + ret = -ENOMEM; goto err; + } ushc->csw_urb = usb_alloc_urb(0, GFP_KERNEL); - if (ushc->csw_urb == NULL) + if (ushc->csw_urb == NULL) { + ret = -ENOMEM; goto err; + } ushc->csw = kzalloc(sizeof(struct ushc_cbw), GFP_KERNEL); - if (ushc->csw == NULL) + if (ushc->csw == NULL) { + ret = -ENOMEM; goto err; + } usb_fill_bulk_urb(ushc->csw_urb, ushc->usb_dev, usb_rcvbulkpipe(usb_dev, 6), ushc->csw, sizeof(struct ushc_csw), csw_callback, ushc); diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c index c2960ac9f39c..811775aa8ee8 100644 --- a/drivers/mtd/ubi/io.c +++ b/drivers/mtd/ubi/io.c @@ -482,10 +482,17 @@ static int nor_erase_prepare(struct ubi_device *ubi, int pnum) uint32_t data = 0; struct ubi_vid_hdr vid_hdr; - addr = (loff_t)pnum * ubi->peb_size + ubi->vid_hdr_aloffset; + /* + * It is important to first invalidate the EC header, and then the VID + * header. Otherwise a power cut may lead to valid EC header and + * invalid VID header, in which case UBI will treat this PEB as + * corrupted and will try to preserve it, and print scary warnings (see + * the header comment in scan.c for more information). + */ + addr = (loff_t)pnum * ubi->peb_size; err = ubi->mtd->write(ubi->mtd, addr, 4, &written, (void *)&data); if (!err) { - addr -= ubi->vid_hdr_aloffset; + addr += ubi->vid_hdr_aloffset; err = ubi->mtd->write(ubi->mtd, addr, 4, &written, (void *)&data); if (!err) @@ -494,18 +501,24 @@ static int nor_erase_prepare(struct ubi_device *ubi, int pnum) /* * We failed to write to the media. This was observed with Spansion - * S29GL512N NOR flash. Most probably the eraseblock erasure was - * interrupted at a very inappropriate moment, so it became unwritable. - * In this case we probably anyway have garbage in this PEB. + * S29GL512N NOR flash. Most probably the previously eraseblock erasure + * was interrupted at a very inappropriate moment, so it became + * unwritable. In this case we probably anyway have garbage in this + * PEB. */ err1 = ubi_io_read_vid_hdr(ubi, pnum, &vid_hdr, 0); - if (err1 == UBI_IO_BAD_HDR_EBADMSG || err1 == UBI_IO_BAD_HDR) - /* - * The VID header is corrupted, so we can safely erase this - * PEB and not afraid that it will be treated as a valid PEB in - * case of an unclean reboot. - */ - return 0; + if (err1 == UBI_IO_BAD_HDR_EBADMSG || err1 == UBI_IO_BAD_HDR) { + struct ubi_ec_hdr ec_hdr; + + err1 = ubi_io_read_ec_hdr(ubi, pnum, &ec_hdr, 0); + if (err1 == UBI_IO_BAD_HDR_EBADMSG || err1 == UBI_IO_BAD_HDR) + /* + * Both VID and EC headers are corrupted, so we can + * safely erase this PEB and not afraid that it will be + * treated as a valid PEB in case of an unclean reboot. + */ + return 0; + } /* * The PEB contains a valid VID header, but we cannot invalidate it. diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c index 3c631863bf40..79ca304fc4db 100644 --- a/drivers/mtd/ubi/scan.c +++ b/drivers/mtd/ubi/scan.c @@ -787,16 +787,15 @@ static int check_corruption(struct ubi_device *ubi, struct ubi_vid_hdr *vid_hdr, * erased, so it became unstable and corrupted, and should be * erased. 
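The nor_erase_prepare() change above is all about write ordering under power failure. Zeroing the EC header magic first and the VID header second means a power cut between the two writes can only produce invalid-EC/valid-VID (harmless: the PEB is simply re-erased), never valid-EC/invalid-VID, which scan would treat as corruption worth preserving. A condensed view of the post-patch order, using the same locals as the hunk:

    /* 1) Invalidate the EC header at the start of the PEB... */
    addr = (loff_t)pnum * ubi->peb_size;
    err = ubi->mtd->write(ubi->mtd, addr, 4, &written, (void *)&data);
    if (!err) {
            /* 2) ...only then the VID header further in. */
            addr += ubi->vid_hdr_aloffset;
            err = ubi->mtd->write(ubi->mtd, addr, 4, &written,
                                  (void *)&data);
    }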
*/ - return 0; + err = 0; + goto out_unlock; } if (err) - return err; + goto out_unlock; - if (ubi_check_pattern(ubi->peb_buf1, 0xFF, ubi->leb_size)) { - mutex_unlock(&ubi->buf_mutex); - return 0; - } + if (ubi_check_pattern(ubi->peb_buf1, 0xFF, ubi->leb_size)) + goto out_unlock; ubi_err("PEB %d contains corrupted VID header, and the data does not " "contain all 0xFF, this may be a non-UBI PEB or a severe VID " @@ -806,8 +805,11 @@ static int check_corruption(struct ubi_device *ubi, struct ubi_vid_hdr *vid_hdr, pnum, ubi->leb_start, ubi->leb_size); ubi_dbg_print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, ubi->peb_buf1, ubi->leb_size, 1); + err = 1; + +out_unlock: mutex_unlock(&ubi->buf_mutex); - return 1; + return err; } /** @@ -951,6 +953,10 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si, * impossible to distinguish it from a PEB which just * contains garbage because of a power cut during erase * operation. So we just schedule this PEB for erasure. + * + * Besides, in case of NOR flash, we deliberatly + * corrupt both headers because NOR flash erasure is + * slow and can start from the end. */ err = 0; else diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index f6668cdaac85..4f1755bddf6b 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -2543,10 +2543,10 @@ config PCH_GBE depends on PCI select MII ---help--- - This is a gigabit ethernet driver for Topcliff PCH. - Topcliff PCH is the platform controller hub that is used in Intel's + This is a gigabit ethernet driver for EG20T PCH. + EG20T PCH is the platform controller hub that is used in Intel's general embedded platform. - Topcliff PCH has Gigabit Ethernet interface. + EG20T PCH has Gigabit Ethernet interface. Using this interface, it is able to access system devices connected to Gigabit Ethernet. This driver enables Gigabit Ethernet function. @@ -2945,6 +2945,18 @@ source "drivers/s390/net/Kconfig" source "drivers/net/caif/Kconfig" +config TILE_NET + tristate "Tilera GBE/XGBE network driver support" + depends on TILE + default y + select CRC32 + help + This is a standard Linux network device driver for the + on-chip Tilera Gigabit Ethernet and XAUI interfaces. + + To compile this driver as a module, choose M here: the module + will be called tile_net. 
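Back in the ubi/scan.c hunk above, the check_corruption() rewrite is a good example of collapsing several unlock-and-return sites into one exit label, so buf_mutex can never be leaked by a future early return. Schematically, with my_probe() and my_looks_clean() as hypothetical stand-ins rather than UBI code:

    static int my_check(struct mutex *lock)
    {
            int err;

            mutex_lock(lock);

            err = my_probe();               /* hypothetical: <0 on error */
            if (err < 0)
                    goto out_unlock;

            if (my_looks_clean()) {         /* hypothetical predicate */
                    err = 0;
                    goto out_unlock;
            }

            err = 1;                        /* corruption detected */

    out_unlock:
            mutex_unlock(lock);
            return err;
    }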
+ config XEN_NETDEV_FRONTEND tristate "Xen network device frontend driver" depends on XEN diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 652fc6b98039..b90738d13994 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -301,3 +301,4 @@ obj-$(CONFIG_CAIF) += caif/ obj-$(CONFIG_OCTEON_MGMT_ETHERNET) += octeon/ obj-$(CONFIG_PCH_GBE) += pch_gbe/ +obj-$(CONFIG_TILE_NET) += tile/ diff --git a/drivers/net/atl1c/atl1c_hw.c b/drivers/net/atl1c/atl1c_hw.c index 919080b2c3a5..1bf672009948 100644 --- a/drivers/net/atl1c/atl1c_hw.c +++ b/drivers/net/atl1c/atl1c_hw.c @@ -82,7 +82,7 @@ static int atl1c_get_permanent_address(struct atl1c_hw *hw) addr[0] = addr[1] = 0; AT_READ_REG(hw, REG_OTP_CTRL, &otp_ctrl_data); if (atl1c_check_eeprom_exist(hw)) { - if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c_b) { + if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c) { /* Enable OTP CLK */ if (!(otp_ctrl_data & OTP_CTRL_CLK_EN)) { otp_ctrl_data |= OTP_CTRL_CLK_EN; diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c index 43489f89c142..53eff9ba6e95 100644 --- a/drivers/net/au1000_eth.c +++ b/drivers/net/au1000_eth.c @@ -155,10 +155,10 @@ static void au1000_enable_mac(struct net_device *dev, int force_reset) spin_lock_irqsave(&aup->lock, flags); if (force_reset || (!aup->mac_enabled)) { - writel(MAC_EN_CLOCK_ENABLE, &aup->enable); + writel(MAC_EN_CLOCK_ENABLE, aup->enable); au_sync_delay(2); writel((MAC_EN_RESET0 | MAC_EN_RESET1 | MAC_EN_RESET2 - | MAC_EN_CLOCK_ENABLE), &aup->enable); + | MAC_EN_CLOCK_ENABLE), aup->enable); au_sync_delay(2); aup->mac_enabled = 1; @@ -503,9 +503,9 @@ static void au1000_reset_mac_unlocked(struct net_device *dev) au1000_hard_stop(dev); - writel(MAC_EN_CLOCK_ENABLE, &aup->enable); + writel(MAC_EN_CLOCK_ENABLE, aup->enable); au_sync_delay(2); - writel(0, &aup->enable); + writel(0, aup->enable); au_sync_delay(2); aup->tx_full = 0; @@ -1119,7 +1119,7 @@ static int __devinit au1000_probe(struct platform_device *pdev) /* set a random MAC now in case platform_data doesn't provide one */ random_ether_addr(dev->dev_addr); - writel(0, &aup->enable); + writel(0, aup->enable); aup->mac_enabled = 0; pd = pdev->dev.platform_data; diff --git a/drivers/net/cxgb4vf/cxgb4vf_main.c b/drivers/net/cxgb4vf/cxgb4vf_main.c index c3449bbc585a..d887a76cd39d 100644 --- a/drivers/net/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/cxgb4vf/cxgb4vf_main.c @@ -816,40 +816,48 @@ static struct net_device_stats *cxgb4vf_get_stats(struct net_device *dev) } /* - * Collect up to maxaddrs worth of a netdevice's unicast addresses into an - * array of addrss pointers and return the number collected. + * Collect up to maxaddrs worth of a netdevice's unicast addresses, starting + * at a specified offset within the list, into an array of addrss pointers and + * return the number collected. 
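In the au1000_eth.c hunks above, the fix is a classic C pointer slip. The fix only makes sense if the private struct's enable field already holds the ioremapped register address (a hypothetical reconstruction, implied by the patch rather than shown in it):

    struct my_au1000_private {
            u32 *enable;    /* already the MAC_EN register address */
    };

    /* correct: write through the pointer, hitting the device register */
    writel(MAC_EN_CLOCK_ENABLE, aup->enable);

    /* the old '&aup->enable' took the address of the pointer member
     * itself, so the write landed in ordinary RAM, not the device */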
*/ -static inline int collect_netdev_uc_list_addrs(const struct net_device *dev, - const u8 **addr, - unsigned int maxaddrs) +static inline unsigned int collect_netdev_uc_list_addrs(const struct net_device *dev, + const u8 **addr, + unsigned int offset, + unsigned int maxaddrs) { + unsigned int index = 0; unsigned int naddr = 0; const struct netdev_hw_addr *ha; - for_each_dev_addr(dev, ha) { - addr[naddr++] = ha->addr; - if (naddr >= maxaddrs) - break; - } + for_each_dev_addr(dev, ha) + if (index++ >= offset) { + addr[naddr++] = ha->addr; + if (naddr >= maxaddrs) + break; + } return naddr; } /* - * Collect up to maxaddrs worth of a netdevice's multicast addresses into an - * array of addrss pointers and return the number collected. + * Collect up to maxaddrs worth of a netdevice's multicast addresses, starting + * at a specified offset within the list, into an array of addrss pointers and + * return the number collected. */ -static inline int collect_netdev_mc_list_addrs(const struct net_device *dev, - const u8 **addr, - unsigned int maxaddrs) +static inline unsigned int collect_netdev_mc_list_addrs(const struct net_device *dev, + const u8 **addr, + unsigned int offset, + unsigned int maxaddrs) { + unsigned int index = 0; unsigned int naddr = 0; const struct netdev_hw_addr *ha; - netdev_for_each_mc_addr(ha, dev) { - addr[naddr++] = ha->addr; - if (naddr >= maxaddrs) - break; - } + netdev_for_each_mc_addr(ha, dev) + if (index++ >= offset) { + addr[naddr++] = ha->addr; + if (naddr >= maxaddrs) + break; + } return naddr; } @@ -862,16 +870,20 @@ static int set_addr_filters(const struct net_device *dev, bool sleep) u64 mhash = 0; u64 uhash = 0; bool free = true; - u16 filt_idx[7]; + unsigned int offset, naddr; const u8 *addr[7]; - int ret, naddr = 0; + int ret; const struct port_info *pi = netdev_priv(dev); /* first do the secondary unicast addresses */ - naddr = collect_netdev_uc_list_addrs(dev, addr, ARRAY_SIZE(addr)); - if (naddr > 0) { + for (offset = 0; ; offset += naddr) { + naddr = collect_netdev_uc_list_addrs(dev, addr, offset, + ARRAY_SIZE(addr)); + if (naddr == 0) + break; + ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free, - naddr, addr, filt_idx, &uhash, sleep); + naddr, addr, NULL, &uhash, sleep); if (ret < 0) return ret; @@ -879,12 +891,17 @@ static int set_addr_filters(const struct net_device *dev, bool sleep) } /* next set up the multicast addresses */ - naddr = collect_netdev_mc_list_addrs(dev, addr, ARRAY_SIZE(addr)); - if (naddr > 0) { + for (offset = 0; ; offset += naddr) { + naddr = collect_netdev_mc_list_addrs(dev, addr, offset, + ARRAY_SIZE(addr)); + if (naddr == 0) + break; + ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free, - naddr, addr, filt_idx, &mhash, sleep); + naddr, addr, NULL, &mhash, sleep); if (ret < 0) return ret; + free = false; } return t4vf_set_addr_hash(pi->adapter, pi->viid, uhash != 0, diff --git a/drivers/net/cxgb4vf/t4vf_hw.c b/drivers/net/cxgb4vf/t4vf_hw.c index e306c20dfaee..19520afe1a12 100644 --- a/drivers/net/cxgb4vf/t4vf_hw.c +++ b/drivers/net/cxgb4vf/t4vf_hw.c @@ -1014,48 +1014,72 @@ int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free, unsigned int naddr, const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok) { - int i, ret; + int offset, ret = 0; + unsigned nfilters = 0; + unsigned int rem = naddr; struct fw_vi_mac_cmd cmd, rpl; - struct fw_vi_mac_exact *p; - size_t len16; - if (naddr > ARRAY_SIZE(cmd.u.exact)) + if (naddr > FW_CLS_TCAM_NUM_ENTRIES) return -EINVAL; - len16 = DIV_ROUND_UP(offsetof(struct 
fw_vi_mac_cmd, - u.exact[naddr]), 16); - memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_MAC_CMD) | - FW_CMD_REQUEST | - FW_CMD_WRITE | - (free ? FW_CMD_EXEC : 0) | - FW_VI_MAC_CMD_VIID(viid)); - cmd.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_FREEMACS(free) | - FW_CMD_LEN16(len16)); + for (offset = 0; offset < naddr; /**/) { + unsigned int fw_naddr = (rem < ARRAY_SIZE(cmd.u.exact) + ? rem + : ARRAY_SIZE(cmd.u.exact)); + size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd, + u.exact[fw_naddr]), 16); + struct fw_vi_mac_exact *p; + int i; - for (i = 0, p = cmd.u.exact; i < naddr; i++, p++) { - p->valid_to_idx = - cpu_to_be16(FW_VI_MAC_CMD_VALID | - FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC)); - memcpy(p->macaddr, addr[i], sizeof(p->macaddr)); - } + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_MAC_CMD) | + FW_CMD_REQUEST | + FW_CMD_WRITE | + (free ? FW_CMD_EXEC : 0) | + FW_VI_MAC_CMD_VIID(viid)); + cmd.freemacs_to_len16 = + cpu_to_be32(FW_VI_MAC_CMD_FREEMACS(free) | + FW_CMD_LEN16(len16)); + + for (i = 0, p = cmd.u.exact; i < fw_naddr; i++, p++) { + p->valid_to_idx = cpu_to_be16( + FW_VI_MAC_CMD_VALID | + FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC)); + memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr)); + } + + + ret = t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), &rpl, + sleep_ok); + if (ret && ret != -ENOMEM) + break; - ret = t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), &rpl, sleep_ok); - if (ret) - return ret; - - for (i = 0, p = rpl.u.exact; i < naddr; i++, p++) { - u16 index = FW_VI_MAC_CMD_IDX_GET(be16_to_cpu(p->valid_to_idx)); - - if (idx) - idx[i] = (index >= FW_CLS_TCAM_NUM_ENTRIES - ? 0xffff - : index); - if (index < FW_CLS_TCAM_NUM_ENTRIES) - ret++; - else if (hash) - *hash |= (1 << hash_mac_addr(addr[i])); + for (i = 0, p = rpl.u.exact; i < fw_naddr; i++, p++) { + u16 index = FW_VI_MAC_CMD_IDX_GET( + be16_to_cpu(p->valid_to_idx)); + + if (idx) + idx[offset+i] = + (index >= FW_CLS_TCAM_NUM_ENTRIES + ? 0xffff + : index); + if (index < FW_CLS_TCAM_NUM_ENTRIES) + nfilters++; + else if (hash) + *hash |= (1ULL << hash_mac_addr(addr[offset+i])); + } + + free = false; + offset += fw_naddr; + rem -= fw_naddr; } + + /* + * If there were no errors or we merely ran out of room in our MAC + * address arena, return the number of filters actually written. + */ + if (ret == 0 || ret == -ENOMEM) + ret = nfilters; return ret; } diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index 4686c3983fc3..4d62f7bfa036 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c @@ -31,7 +31,7 @@ char e1000_driver_name[] = "e1000"; static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver"; -#define DRV_VERSION "7.3.21-k6-NAPI" +#define DRV_VERSION "7.3.21-k8-NAPI" const char e1000_driver_version[] = DRV_VERSION; static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation."; @@ -485,9 +485,6 @@ void e1000_down(struct e1000_adapter *adapter) struct net_device *netdev = adapter->netdev; u32 rctl, tctl; - /* signal that we're down so the interrupt handler does not - * reschedule our watchdog timer */ - set_bit(__E1000_DOWN, &adapter->flags); /* disable receives in the hardware */ rctl = er32(RCTL); @@ -508,6 +505,13 @@ void e1000_down(struct e1000_adapter *adapter) e1000_irq_disable(adapter); + /* + * Setting DOWN must be after irq_disable to prevent + * a screaming interrupt. Setting DOWN also prevents + * timers and tasks from rescheduling. 
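Returning to the cxgb4vf changes above: both the netdev address collectors and t4vf_alloc_mac_filt() now walk arbitrarily long lists in fixed-size chunks, since one firmware mailbox command can only carry ARRAY_SIZE(cmd.u.exact) exact-match entries. The caller-side shape, condensed and slightly simplified from set_addr_filters() (its own locals assumed):

    for (offset = 0; ; offset += naddr) {
            naddr = collect_netdev_uc_list_addrs(dev, addr, offset,
                                                 ARRAY_SIZE(addr));
            if (naddr == 0)
                    break;                  /* list exhausted */

            ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free,
                                      naddr, addr, NULL, &uhash, sleep);
            if (ret < 0)
                    return ret;

            free = false;                   /* flush old filters once */
    }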
+ */ + set_bit(__E1000_DOWN, &adapter->flags); + del_timer_sync(&adapter->tx_fifo_stall_timer); del_timer_sync(&adapter->watchdog_timer); del_timer_sync(&adapter->phy_info_timer); diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c index 182b2a7be8dc..3d0af08483a1 100644 --- a/drivers/net/ehea/ehea_main.c +++ b/drivers/net/ehea/ehea_main.c @@ -400,6 +400,7 @@ static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes) skb_arr_rq1[index] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE); if (!skb_arr_rq1[index]) { + ehea_info("Unable to allocate enough skb in the array\n"); pr->rq1_skba.os_skbs = fill_wqes - i; break; } @@ -422,13 +423,20 @@ static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a) struct net_device *dev = pr->port->netdev; int i; - for (i = 0; i < pr->rq1_skba.len; i++) { + if (nr_rq1a > pr->rq1_skba.len) { + ehea_error("NR_RQ1A bigger than skb array len\n"); + return; + } + + for (i = 0; i < nr_rq1a; i++) { skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE); - if (!skb_arr_rq1[i]) + if (!skb_arr_rq1[i]) { + ehea_info("No enough memory to allocate skb array\n"); break; + } } /* Ring doorbell */ - ehea_update_rq1a(pr->qp, nr_rq1a); + ehea_update_rq1a(pr->qp, i); } static int ehea_refill_rq_def(struct ehea_port_res *pr, @@ -735,8 +743,10 @@ static int ehea_proc_rwqes(struct net_device *dev, skb = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE); - if (!skb) + if (!skb) { + ehea_info("Not enough memory to allocate skb\n"); break; + } } skb_copy_to_linear_data(skb, ((char *)cqe) + 64, cqe->num_bytes_transfered - 4); diff --git a/drivers/net/irda/sh_sir.c b/drivers/net/irda/sh_sir.c index 00b38bccd6d0..52a7c86af663 100644 --- a/drivers/net/irda/sh_sir.c +++ b/drivers/net/irda/sh_sir.c @@ -258,7 +258,7 @@ static int sh_sir_set_baudrate(struct sh_sir_self *self, u32 baudrate) /* Baud Rate Error Correction x 10000 */ u32 rate_err_array[] = { - 0000, 0625, 1250, 1875, + 0, 625, 1250, 1875, 2500, 3125, 3750, 4375, 5000, 5625, 6250, 6875, 7500, 8125, 8750, 9375, diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c index b68eee2414c2..7a7e18ba278a 100644 --- a/drivers/net/mlx4/fw.c +++ b/drivers/net/mlx4/fw.c @@ -289,6 +289,10 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET); dev_cap->bf_reg_size = 1 << (field & 0x1f); MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET); + if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size)) { + mlx4_warn(dev, "firmware bug: log2 # of blue flame regs is invalid (%d), forcing 3\n", field & 0x1f); + field = 3; + } dev_cap->bf_regs_per_page = 1 << (field & 0x3f); mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n", dev_cap->bf_reg_size, dev_cap->bf_regs_per_page); diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c index 472056b47440..03a1d280105f 100644 --- a/drivers/net/pch_gbe/pch_gbe_main.c +++ b/drivers/net/pch_gbe/pch_gbe_main.c @@ -1,6 +1,6 @@ /* * Copyright (C) 1999 - 2010 Intel Corporation. - * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD. + * Copyright (C) 2010 OKI SEMICONDUCTOR CO., LTD. * * This code was derived from the Intel e1000e Linux driver. 
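The e1000_down() reordering above encodes a subtle shutdown rule: publish the "going down" flag only after the IRQ is masked; per the hunk's own comment, setting it earlier risks a screaming interrupt, while setting it at all stops timers and tasks from rescheduling. In sketch form, with my_* names purely hypothetical:

    static void my_nic_down(struct my_adapter *adapter)
    {
            my_quiesce_rx_tx(adapter);      /* stop DMA engines */
            my_irq_disable(adapter);        /* mask the interrupt */

            /*
             * Publish DOWN only now: with the IRQ already masked
             * nothing can fire, and timers and tasks that test the
             * bit stop rescheduling themselves.
             */
            set_bit(__MY_DOWN, &adapter->flags);

            del_timer_sync(&adapter->watchdog_timer);
    }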
* @@ -2464,8 +2464,8 @@ static void __exit pch_gbe_exit_module(void) module_init(pch_gbe_init_module); module_exit(pch_gbe_exit_module); -MODULE_DESCRIPTION("OKI semiconductor PCH Gigabit ethernet Driver"); -MODULE_AUTHOR("OKI semiconductor, <masa-korg@dsn.okisemi.com>"); +MODULE_DESCRIPTION("EG20T PCH Gigabit ethernet Driver"); +MODULE_AUTHOR("OKI SEMICONDUCTOR, <toshiharu-linux@dsn.okisemi.com>"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); MODULE_DEVICE_TABLE(pci, pch_gbe_pcidev_id); diff --git a/drivers/net/pch_gbe/pch_gbe_param.c b/drivers/net/pch_gbe/pch_gbe_param.c index 2510146fc560..ef0996a0eaaa 100644 --- a/drivers/net/pch_gbe/pch_gbe_param.c +++ b/drivers/net/pch_gbe/pch_gbe_param.c @@ -434,8 +434,8 @@ void pch_gbe_check_options(struct pch_gbe_adapter *adapter) .err = "using default of " __MODULE_STRING(PCH_GBE_DEFAULT_TXD), .def = PCH_GBE_DEFAULT_TXD, - .arg = { .r = { .min = PCH_GBE_MIN_TXD } }, - .arg = { .r = { .max = PCH_GBE_MAX_TXD } } + .arg = { .r = { .min = PCH_GBE_MIN_TXD, + .max = PCH_GBE_MAX_TXD } } }; struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring; tx_ring->count = TxDescriptors; @@ -450,8 +450,8 @@ void pch_gbe_check_options(struct pch_gbe_adapter *adapter) .err = "using default of " __MODULE_STRING(PCH_GBE_DEFAULT_RXD), .def = PCH_GBE_DEFAULT_RXD, - .arg = { .r = { .min = PCH_GBE_MIN_RXD } }, - .arg = { .r = { .max = PCH_GBE_MAX_RXD } } + .arg = { .r = { .min = PCH_GBE_MIN_RXD, + .max = PCH_GBE_MAX_RXD } } }; struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring; rx_ring->count = RxDescriptors; diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index f0bd1a1aba3a..e8b9c53c304b 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -30,11 +30,14 @@ #include <linux/ethtool.h> #include <linux/phy.h> #include <linux/marvell_phy.h> +#include <linux/of.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/uaccess.h> +#define MII_MARVELL_PHY_PAGE 22 + #define MII_M1011_IEVENT 0x13 #define MII_M1011_IEVENT_CLEAR 0x0000 @@ -80,7 +83,6 @@ #define MII_88E1121_PHY_LED_CTRL 16 #define MII_88E1121_PHY_LED_PAGE 3 #define MII_88E1121_PHY_LED_DEF 0x0030 -#define MII_88E1121_PHY_PAGE 22 #define MII_M1011_PHY_STATUS 0x11 #define MII_M1011_PHY_STATUS_1000 0x8000 @@ -186,13 +188,94 @@ static int marvell_config_aneg(struct phy_device *phydev) return 0; } +#ifdef CONFIG_OF_MDIO +/* + * Set and/or override some configuration registers based on the + * marvell,reg-init property stored in the of_node for the phydev. + * + * marvell,reg-init = <reg-page reg mask value>,...; + * + * There may be one or more sets of <reg-page reg mask value>: + * + * reg-page: which register bank to use. + * reg: the register. + * mask: if non-zero, ANDed with existing register value. + * value: ORed with the masked value and written to the regiser. 
+ * + */ +static int marvell_of_reg_init(struct phy_device *phydev) +{ + const __be32 *paddr; + int len, i, saved_page, current_page, page_changed, ret; + + if (!phydev->dev.of_node) + return 0; + + paddr = of_get_property(phydev->dev.of_node, "marvell,reg-init", &len); + if (!paddr || len < (4 * sizeof(*paddr))) + return 0; + + saved_page = phy_read(phydev, MII_MARVELL_PHY_PAGE); + if (saved_page < 0) + return saved_page; + page_changed = 0; + current_page = saved_page; + + ret = 0; + len /= sizeof(*paddr); + for (i = 0; i < len - 3; i += 4) { + u16 reg_page = be32_to_cpup(paddr + i); + u16 reg = be32_to_cpup(paddr + i + 1); + u16 mask = be32_to_cpup(paddr + i + 2); + u16 val_bits = be32_to_cpup(paddr + i + 3); + int val; + + if (reg_page != current_page) { + current_page = reg_page; + page_changed = 1; + ret = phy_write(phydev, MII_MARVELL_PHY_PAGE, reg_page); + if (ret < 0) + goto err; + } + + val = 0; + if (mask) { + val = phy_read(phydev, reg); + if (val < 0) { + ret = val; + goto err; + } + val &= mask; + } + val |= val_bits; + + ret = phy_write(phydev, reg, val); + if (ret < 0) + goto err; + + } +err: + if (page_changed) { + i = phy_write(phydev, MII_MARVELL_PHY_PAGE, saved_page); + if (ret == 0) + ret = i; + } + return ret; +} +#else +static int marvell_of_reg_init(struct phy_device *phydev) +{ + return 0; +} +#endif /* CONFIG_OF_MDIO */ + static int m88e1121_config_aneg(struct phy_device *phydev) { int err, oldpage, mscr; - oldpage = phy_read(phydev, MII_88E1121_PHY_PAGE); + oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE); - err = phy_write(phydev, MII_88E1121_PHY_PAGE, + err = phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_88E1121_PHY_MSCR_PAGE); if (err < 0) return err; @@ -218,7 +301,7 @@ static int m88e1121_config_aneg(struct phy_device *phydev) return err; } - phy_write(phydev, MII_88E1121_PHY_PAGE, oldpage); + phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage); err = phy_write(phydev, MII_BMCR, BMCR_RESET); if (err < 0) @@ -229,11 +312,11 @@ static int m88e1121_config_aneg(struct phy_device *phydev) if (err < 0) return err; - oldpage = phy_read(phydev, MII_88E1121_PHY_PAGE); + oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE); - phy_write(phydev, MII_88E1121_PHY_PAGE, MII_88E1121_PHY_LED_PAGE); + phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_88E1121_PHY_LED_PAGE); phy_write(phydev, MII_88E1121_PHY_LED_CTRL, MII_88E1121_PHY_LED_DEF); - phy_write(phydev, MII_88E1121_PHY_PAGE, oldpage); + phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage); err = genphy_config_aneg(phydev); @@ -244,9 +327,9 @@ static int m88e1318_config_aneg(struct phy_device *phydev) { int err, oldpage, mscr; - oldpage = phy_read(phydev, MII_88E1121_PHY_PAGE); + oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE); - err = phy_write(phydev, MII_88E1121_PHY_PAGE, + err = phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_88E1121_PHY_MSCR_PAGE); if (err < 0) return err; @@ -258,7 +341,7 @@ static int m88e1318_config_aneg(struct phy_device *phydev) if (err < 0) return err; - err = phy_write(phydev, MII_88E1121_PHY_PAGE, oldpage); + err = phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage); if (err < 0) return err; @@ -368,6 +451,9 @@ static int m88e1111_config_init(struct phy_device *phydev) return err; } + err = marvell_of_reg_init(phydev); + if (err < 0) + return err; err = phy_write(phydev, MII_BMCR, BMCR_RESET); if (err < 0) @@ -398,7 +484,7 @@ static int m88e1118_config_init(struct phy_device *phydev) int err; /* Change address */ - err = phy_write(phydev, 0x16, 0x0002); + err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0002); if 
(err < 0) return err; @@ -408,7 +494,7 @@ static int m88e1118_config_init(struct phy_device *phydev) return err; /* Change address */ - err = phy_write(phydev, 0x16, 0x0003); + err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0003); if (err < 0) return err; @@ -420,8 +506,42 @@ static int m88e1118_config_init(struct phy_device *phydev) if (err < 0) return err; + err = marvell_of_reg_init(phydev); + if (err < 0) + return err; + /* Reset address */ - err = phy_write(phydev, 0x16, 0x0); + err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0); + if (err < 0) + return err; + + err = phy_write(phydev, MII_BMCR, BMCR_RESET); + if (err < 0) + return err; + + return 0; +} + +static int m88e1149_config_init(struct phy_device *phydev) +{ + int err; + + /* Change address */ + err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0002); + if (err < 0) + return err; + + /* Enable 1000 Mbit */ + err = phy_write(phydev, 0x15, 0x1048); + if (err < 0) + return err; + + err = marvell_of_reg_init(phydev); + if (err < 0) + return err; + + /* Reset address */ + err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0); if (err < 0) return err; @@ -491,6 +611,10 @@ static int m88e1145_config_init(struct phy_device *phydev) } } + err = marvell_of_reg_init(phydev); + if (err < 0) + return err; + return 0; } @@ -685,6 +809,19 @@ static struct phy_driver marvell_drivers[] = { .driver = { .owner = THIS_MODULE }, }, { + .phy_id = MARVELL_PHY_ID_88E1149R, + .phy_id_mask = MARVELL_PHY_ID_MASK, + .name = "Marvell 88E1149R", + .features = PHY_GBIT_FEATURES, + .flags = PHY_HAS_INTERRUPT, + .config_init = &m88e1149_config_init, + .config_aneg = &m88e1118_config_aneg, + .read_status = &genphy_read_status, + .ack_interrupt = &marvell_ack_interrupt, + .config_intr = &marvell_config_intr, + .driver = { .owner = THIS_MODULE }, + }, + { .phy_id = MARVELL_PHY_ID_88E1240, .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E1240", @@ -735,6 +872,7 @@ static struct mdio_device_id __maybe_unused marvell_tbl[] = { { 0x01410e10, 0xfffffff0 }, { 0x01410cb0, 0xfffffff0 }, { 0x01410cd0, 0xfffffff0 }, + { 0x01410e50, 0xfffffff0 }, { 0x01410e30, 0xfffffff0 }, { 0x01410e90, 0xfffffff0 }, { } diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c index 09cf56d0416a..39659976a1ac 100644 --- a/drivers/net/ppp_generic.c +++ b/drivers/net/ppp_generic.c @@ -2584,16 +2584,16 @@ ppp_create_interface(struct net *net, int unit, int *retp) */ dev_net_set(dev, net); - ret = -EEXIST; mutex_lock(&pn->all_ppp_mutex); if (unit < 0) { unit = unit_get(&pn->units_idr, ppp); if (unit < 0) { - *retp = unit; + ret = unit; goto out2; } } else { + ret = -EEXIST; if (unit_find(&pn->units_idr, unit)) goto out2; /* unit already exists */ /* @@ -2668,10 +2668,10 @@ static void ppp_shutdown_interface(struct ppp *ppp) ppp->closing = 1; ppp_unlock(ppp); unregister_netdev(ppp->dev); + unit_put(&pn->units_idr, ppp->file.index); } else ppp_unlock(ppp); - unit_put(&pn->units_idr, ppp->file.index); ppp->file.dead = 1; ppp->owner = NULL; wake_up_interruptible(&ppp->file.rwait); @@ -2859,8 +2859,7 @@ static void __exit ppp_cleanup(void) * by holding all_ppp_mutex */ -/* associate pointer with specified number */ -static int unit_set(struct idr *p, void *ptr, int n) +static int __unit_alloc(struct idr *p, void *ptr, int n) { int unit, err; @@ -2871,10 +2870,24 @@ again: } err = idr_get_new_above(p, ptr, n, &unit); - if (err == -EAGAIN) - goto again; + if (err < 0) { + if (err == -EAGAIN) + goto again; + return err; + } + + return unit; +} + +/* associate pointer with specified 
number */ +static int unit_set(struct idr *p, void *ptr, int n) +{ + int unit; - if (unit != n) { + unit = __unit_alloc(p, ptr, n); + if (unit < 0) + return unit; + else if (unit != n) { idr_remove(p, unit); return -EINVAL; } @@ -2885,19 +2898,7 @@ again: /* get new free unit number and associate pointer with it */ static int unit_get(struct idr *p, void *ptr) { - int unit, err; - -again: - if (!idr_pre_get(p, GFP_KERNEL)) { - printk(KERN_ERR "PPP: No free memory for idr\n"); - return -ENOMEM; - } - - err = idr_get_new_above(p, ptr, 0, &unit); - if (err == -EAGAIN) - goto again; - - return unit; + return __unit_alloc(p, ptr, 0); } /* put unit number back to a pool */ diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c index c30e0fe55a31..528eaef5308f 100644 --- a/drivers/net/qlge/qlge_main.c +++ b/drivers/net/qlge/qlge_main.c @@ -62,15 +62,15 @@ static const u32 default_msg = /* NETIF_MSG_PKTDATA | */ NETIF_MSG_HW | NETIF_MSG_WOL | 0; -static int debug = 0x00007fff; /* defaults above */ -module_param(debug, int, 0); +static int debug = -1; /* defaults above */ +module_param(debug, int, 0664); MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); #define MSIX_IRQ 0 #define MSI_IRQ 1 #define LEG_IRQ 2 static int qlge_irq_type = MSIX_IRQ; -module_param(qlge_irq_type, int, MSIX_IRQ); +module_param(qlge_irq_type, int, 0664); MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy."); static int qlge_mpi_coredump; diff --git a/drivers/net/tile/Makefile b/drivers/net/tile/Makefile new file mode 100644 index 000000000000..f634f142cab4 --- /dev/null +++ b/drivers/net/tile/Makefile @@ -0,0 +1,10 @@ +# +# Makefile for the TILE on-chip networking support. +# + +obj-$(CONFIG_TILE_NET) += tile_net.o +ifdef CONFIG_TILEGX +tile_net-objs := tilegx.o mpipe.o iorpc_mpipe.o dma_queue.o +else +tile_net-objs := tilepro.o +endif diff --git a/drivers/net/tile/tilepro.c b/drivers/net/tile/tilepro.c new file mode 100644 index 000000000000..0e6bac5ec65b --- /dev/null +++ b/drivers/net/tile/tilepro.c @@ -0,0 +1,2406 @@ +/* + * Copyright 2010 Tilera Corporation. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation, version 2. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for + * more details. 
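The ppp_generic.c refactor above folds unit_set() and unit_get() onto one helper around the old two-step idr API, in which idr_get_new_above() can fail with -EAGAIN and must be retried after another preload. Restated with comments (this is essentially the patch's __unit_alloc()):

    static int my_unit_alloc(struct idr *p, void *ptr, int n)
    {
            int unit, err;

    again:
            if (!idr_pre_get(p, GFP_KERNEL))
                    return -ENOMEM;         /* could not preload */

            err = idr_get_new_above(p, ptr, n, &unit);
            if (err < 0) {
                    if (err == -EAGAIN)
                            goto again;     /* lost a race; retry */
                    return err;
            }

            return unit;
    }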
+ */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/moduleparam.h> +#include <linux/sched.h> +#include <linux/kernel.h> /* printk() */ +#include <linux/slab.h> /* kmalloc() */ +#include <linux/errno.h> /* error codes */ +#include <linux/types.h> /* size_t */ +#include <linux/interrupt.h> +#include <linux/in.h> +#include <linux/netdevice.h> /* struct device, and other headers */ +#include <linux/etherdevice.h> /* eth_type_trans */ +#include <linux/skbuff.h> +#include <linux/ioctl.h> +#include <linux/cdev.h> +#include <linux/hugetlb.h> +#include <linux/in6.h> +#include <linux/timer.h> +#include <linux/io.h> +#include <asm/checksum.h> +#include <asm/homecache.h> + +#include <hv/drv_xgbe_intf.h> +#include <hv/drv_xgbe_impl.h> +#include <hv/hypervisor.h> +#include <hv/netio_intf.h> + +/* For TSO */ +#include <linux/ip.h> +#include <linux/tcp.h> + + +/* There is no singlethread_cpu, so schedule work on the current cpu. */ +#define singlethread_cpu -1 + + +/* + * First, "tile_net_init_module()" initializes all four "devices" which + * can be used by linux. + * + * Then, "ifconfig DEVICE up" calls "tile_net_open()", which analyzes + * the network cpus, then uses "tile_net_open_aux()" to initialize + * LIPP/LEPP, and then uses "tile_net_open_inner()" to register all + * the tiles, provide buffers to LIPP, allow ingress to start, and + * turn on hypervisor interrupt handling (and NAPI) on all tiles. + * + * If registration fails due to the link being down, then "retry_work" + * is used to keep calling "tile_net_open_inner()" until it succeeds. + * + * If "ifconfig DEVICE down" is called, it uses "tile_net_stop()" to + * stop egress, drain the LIPP buffers, unregister all the tiles, stop + * LIPP/LEPP, and wipe the LEPP queue. + * + * We start out with the ingress interrupt enabled on each CPU. When + * this interrupt fires, we disable it, and call "napi_schedule()". + * This will cause "tile_net_poll()" to be called, which will pull + * packets from the netio queue, filtering them out, or passing them + * to "netif_receive_skb()". If our budget is exhausted, we will + * return, knowing we will be called again later. Otherwise, we + * reenable the ingress interrupt, and call "napi_complete()". + * + * + * NOTE: The use of "native_driver" ensures that EPP exists, and that + * "epp_sendv" is legal, and that "LIPP" is being used. + * + * NOTE: Failing to free completions for an arbitrarily long time + * (which is defined to be illegal) does in fact cause bizarre + * problems. The "egress_timer" helps prevent this from happening. + * + * NOTE: The egress code can be interrupted by the interrupt handler. + */ + + +/* HACK: Allow use of "jumbo" packets. */ +/* This should be 1500 if "jumbo" is not set in LIPP. */ +/* This should be at most 10226 (10240 - 14) if "jumbo" is set in LIPP. */ +/* ISSUE: This has not been thoroughly tested (except at 1500). */ +#define TILE_NET_MTU 1500 + +/* HACK: Define to support GSO. */ +/* ISSUE: This may actually hurt performance of the TCP blaster. */ +/* #define TILE_NET_GSO */ + +/* Define this to collapse "duplicate" acks. */ +/* #define IGNORE_DUP_ACKS */ + +/* HACK: Define this to verify incoming packets. */ +/* #define TILE_NET_VERIFY_INGRESS */ + +/* Use 3000 to enable the Linux Traffic Control (QoS) layer, else 0. */ +#define TILE_NET_TX_QUEUE_LEN 0 + +/* Define to dump packets (prints out the whole packet on tx and rx). */ +/* #define TILE_NET_DUMP_PACKETS */ + +/* Define to enable debug spew (all PDEBUG's are enabled). 
*/ +/* #define TILE_NET_DEBUG */ + + +/* Define to activate paranoia checks. */ +/* #define TILE_NET_PARANOIA */ + +/* Default transmit lockup timeout period, in jiffies. */ +#define TILE_NET_TIMEOUT (5 * HZ) + +/* Default retry interval for bringing up the NetIO interface, in jiffies. */ +#define TILE_NET_RETRY_INTERVAL (5 * HZ) + +/* Number of ports (xgbe0, xgbe1, gbe0, gbe1). */ +#define TILE_NET_DEVS 4 + + + +/* Paranoia. */ +#if NET_IP_ALIGN != LIPP_PACKET_PADDING +#error "NET_IP_ALIGN must match LIPP_PACKET_PADDING." +#endif + + +/* Debug print. */ +#ifdef TILE_NET_DEBUG +#define PDEBUG(fmt, args...) net_printk(fmt, ## args) +#else +#define PDEBUG(fmt, args...) +#endif + + +MODULE_AUTHOR("Tilera"); +MODULE_LICENSE("GPL"); + + +#define IS_MULTICAST(mac_addr) \ + (((u8 *)(mac_addr))[0] & 0x01) + +#define IS_BROADCAST(mac_addr) \ + (((u16 *)(mac_addr))[0] == 0xffff) + + +/* + * Queue of incoming packets for a specific cpu and device. + * + * Includes a pointer to the "system" data, and the actual "user" data. + */ +struct tile_netio_queue { + netio_queue_impl_t *__system_part; + netio_queue_user_impl_t __user_part; + +}; + + +/* + * Statistics counters for a specific cpu and device. + */ +struct tile_net_stats_t { + u32 rx_packets; + u32 rx_bytes; + u32 tx_packets; + u32 tx_bytes; +}; + + +/* + * Info for a specific cpu and device. + * + * ISSUE: There is a "dev" pointer in "napi" as well. + */ +struct tile_net_cpu { + /* The NAPI struct. */ + struct napi_struct napi; + /* Packet queue. */ + struct tile_netio_queue queue; + /* Statistics. */ + struct tile_net_stats_t stats; + /* ISSUE: Is this needed? */ + bool napi_enabled; + /* True if this tile has succcessfully registered with the IPP. */ + bool registered; + /* True if the link was down last time we tried to register. */ + bool link_down; + /* True if "egress_timer" is scheduled. */ + bool egress_timer_scheduled; + /* Number of small sk_buffs which must still be provided. */ + unsigned int num_needed_small_buffers; + /* Number of large sk_buffs which must still be provided. */ + unsigned int num_needed_large_buffers; + /* A timer for handling egress completions. */ + struct timer_list egress_timer; +}; + + +/* + * Info for a specific device. + */ +struct tile_net_priv { + /* Our network device. */ + struct net_device *dev; + /* The actual egress queue. */ + lepp_queue_t *epp_queue; + /* Protects "epp_queue->cmd_tail" and "epp_queue->comp_tail" */ + spinlock_t cmd_lock; + /* Protects "epp_queue->comp_head". */ + spinlock_t comp_lock; + /* The hypervisor handle for this interface. */ + int hv_devhdl; + /* The intr bit mask that IDs this device. */ + u32 intr_id; + /* True iff "tile_net_open_aux()" has succeeded. */ + int partly_opened; + /* True iff "tile_net_open_inner()" has succeeded. */ + int fully_opened; + /* Effective network cpus. */ + struct cpumask network_cpus_map; + /* Number of network cpus. */ + int network_cpus_count; + /* Credits per network cpu. */ + int network_cpus_credits; + /* Network stats. */ + struct net_device_stats stats; + /* For NetIO bringup retries. */ + struct delayed_work retry_work; + /* Quick access to per cpu data. */ + struct tile_net_cpu *cpu[NR_CPUS]; +}; + + +/* + * The actual devices (xgbe0, xgbe1, gbe0, gbe1). + */ +static struct net_device *tile_net_devs[TILE_NET_DEVS]; + +/* + * The "tile_net_cpu" structures for each device. 
+ */ +static DEFINE_PER_CPU(struct tile_net_cpu, hv_xgbe0); +static DEFINE_PER_CPU(struct tile_net_cpu, hv_xgbe1); +static DEFINE_PER_CPU(struct tile_net_cpu, hv_gbe0); +static DEFINE_PER_CPU(struct tile_net_cpu, hv_gbe1); + + +/* + * True if "network_cpus" was specified. + */ +static bool network_cpus_used; + +/* + * The actual cpus in "network_cpus". + */ +static struct cpumask network_cpus_map; + + + +#ifdef TILE_NET_DEBUG +/* + * printk with extra stuff. + * + * We print the CPU we're running in brackets. + */ +static void net_printk(char *fmt, ...) +{ + int i; + int len; + va_list args; + static char buf[256]; + + len = sprintf(buf, "tile_net[%2.2d]: ", smp_processor_id()); + va_start(args, fmt); + i = vscnprintf(buf + len, sizeof(buf) - len - 1, fmt, args); + va_end(args); + buf[255] = '\0'; + pr_notice(buf); +} +#endif + + +#ifdef TILE_NET_DUMP_PACKETS +/* + * Dump a packet. + */ +static void dump_packet(unsigned char *data, unsigned long length, char *s) +{ + unsigned long i; + static unsigned int count; + + pr_info("dump_packet(data %p, length 0x%lx s %s count 0x%x)\n", + data, length, s, count++); + + pr_info("\n"); + + for (i = 0; i < length; i++) { + if ((i & 0xf) == 0) + sprintf(buf, "%8.8lx:", i); + sprintf(buf + strlen(buf), " %2.2x", data[i]); + if ((i & 0xf) == 0xf || i == length - 1) + pr_info("%s\n", buf); + } +} +#endif + + +/* + * Provide support for the __netio_fastio1() swint + * (see <hv/drv_xgbe_intf.h> for how it is used). + * + * The fastio swint2 call may clobber all the caller-saved registers. + * It rarely clobbers memory, but we allow for the possibility in + * the signature just to be on the safe side. + * + * Also, gcc doesn't seem to allow an input operand to be + * clobbered, so we fake it with dummy outputs. + * + * This function can't be static because of the way it is declared + * in the netio header. + */ +inline int __netio_fastio1(u32 fastio_index, u32 arg0) +{ + long result, clobber_r1, clobber_r10; + asm volatile("swint2" + : "=R00" (result), + "=R01" (clobber_r1), "=R10" (clobber_r10) + : "R10" (fastio_index), "R01" (arg0) + : "memory", "r2", "r3", "r4", + "r5", "r6", "r7", "r8", "r9", + "r11", "r12", "r13", "r14", + "r15", "r16", "r17", "r18", "r19", + "r20", "r21", "r22", "r23", "r24", + "r25", "r26", "r27", "r28", "r29"); + return result; +} + + +/* + * Provide a linux buffer to LIPP. + */ +static void tile_net_provide_linux_buffer(struct tile_net_cpu *info, + void *va, bool small) +{ + struct tile_netio_queue *queue = &info->queue; + + /* Convert "va" and "small" to "linux_buffer_t". */ + unsigned int buffer = ((unsigned int)(__pa(va) >> 7) << 1) + small; + + __netio_fastio_free_buffer(queue->__user_part.__fastio_index, buffer); +} + + +/* + * Provide a linux buffer for LIPP. + */ +static bool tile_net_provide_needed_buffer(struct tile_net_cpu *info, + bool small) +{ + /* ISSUE: What should we use here? */ + unsigned int large_size = NET_IP_ALIGN + TILE_NET_MTU + 100; + + /* Round up to ensure to avoid "false sharing" with last cache line. */ + unsigned int buffer_size = + (((small ? LIPP_SMALL_PACKET_SIZE : large_size) + + CHIP_L2_LINE_SIZE() - 1) & -CHIP_L2_LINE_SIZE()); + + /* + * ISSUE: Since CPAs are 38 bits, and we can only encode the + * high 31 bits in a "linux_buffer_t", the low 7 bits must be + * zero, and thus, we must align the actual "va" mod 128. 
+ */ + const unsigned long align = 128; + + struct sk_buff *skb; + void *va; + + struct sk_buff **skb_ptr; + + /* Note that "dev_alloc_skb()" adds NET_SKB_PAD more bytes, */ + /* and also "reserves" that many bytes. */ + /* ISSUE: Can we "share" the NET_SKB_PAD bytes with "skb_ptr"? */ + int len = sizeof(*skb_ptr) + align + buffer_size; + + while (1) { + + /* Allocate (or fail). */ + skb = dev_alloc_skb(len); + if (skb == NULL) + return false; + + /* Make room for a back-pointer to 'skb'. */ + skb_reserve(skb, sizeof(*skb_ptr)); + + /* Make sure we are aligned. */ + skb_reserve(skb, -(long)skb->data & (align - 1)); + + /* This address is given to IPP. */ + va = skb->data; + + if (small) + break; + + /* ISSUE: This has never been observed! */ + /* Large buffers must not span a huge page. */ + if (((((long)va & ~HPAGE_MASK) + 1535) & HPAGE_MASK) == 0) + break; + pr_err("Leaking unaligned linux buffer at %p.\n", va); + } + + /* Skip two bytes to satisfy LIPP assumptions. */ + /* Note that this aligns IP on a 16 byte boundary. */ + /* ISSUE: Do this when the packet arrives? */ + skb_reserve(skb, NET_IP_ALIGN); + + /* Save a back-pointer to 'skb'. */ + skb_ptr = va - sizeof(*skb_ptr); + *skb_ptr = skb; + + /* Invalidate the packet buffer. */ + if (!hash_default) + __inv_buffer(skb->data, buffer_size); + + /* Make sure "skb_ptr" has been flushed. */ + __insn_mf(); + +#ifdef TILE_NET_PARANOIA +#if CHIP_HAS_CBOX_HOME_MAP() + if (hash_default) { + HV_PTE pte = *virt_to_pte(current->mm, (unsigned long)va); + if (hv_pte_get_mode(pte) != HV_PTE_MODE_CACHE_HASH_L3) + panic("Non-coherent ingress buffer!"); + } +#endif +#endif + + /* Provide the new buffer. */ + tile_net_provide_linux_buffer(info, va, small); + + return true; +} + + +/* + * Provide linux buffers for LIPP. + */ +static void tile_net_provide_needed_buffers(struct tile_net_cpu *info) +{ + while (info->num_needed_small_buffers != 0) { + if (!tile_net_provide_needed_buffer(info, true)) + goto oops; + info->num_needed_small_buffers--; + } + + while (info->num_needed_large_buffers != 0) { + if (!tile_net_provide_needed_buffer(info, false)) + goto oops; + info->num_needed_large_buffers--; + } + + return; + +oops: + + /* Add a description to the page allocation failure dump. */ + pr_notice("Could not provide a linux buffer to LIPP.\n"); +} + + +/* + * Grab some LEPP completions, and store them in "comps", of size + * "comps_size", and return the number of completions which were + * stored, so the caller can free them. + * + * If "pending" is not NULL, it will be set to true if there might + * still be some pending completions caused by this tile, else false. + */ +static unsigned int tile_net_lepp_grab_comps(struct net_device *dev, + struct sk_buff *comps[], + unsigned int comps_size, + bool *pending) +{ + struct tile_net_priv *priv = netdev_priv(dev); + + lepp_queue_t *eq = priv->epp_queue; + + unsigned int n = 0; + + unsigned int comp_head; + unsigned int comp_busy; + unsigned int comp_tail; + + spin_lock(&priv->comp_lock); + + comp_head = eq->comp_head; + comp_busy = eq->comp_busy; + comp_tail = eq->comp_tail; + + while (comp_head != comp_busy && n < comps_size) { + comps[n++] = eq->comps[comp_head]; + LEPP_QINC(comp_head); + } + + if (pending != NULL) + *pending = (comp_head != comp_tail); + + eq->comp_head = comp_head; + + spin_unlock(&priv->comp_lock); + + return n; +} + + +/* + * Make sure the egress timer is scheduled. 
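Back in tile_net_provide_needed_buffer() above, the resulting buffer layout is worth spelling out: a struct sk_buff back-pointer sits immediately below a 128-byte-aligned data pointer, because the hypervisor handle encodes only physical-address bits [37:7]. A condensed sketch of the layout math, using the function's own locals (allocation-failure handling and the later NET_IP_ALIGN skip omitted):

    len = sizeof(*skb_ptr) + align + buffer_size;

    skb = dev_alloc_skb(len);
    skb_reserve(skb, sizeof(*skb_ptr));               /* back-ptr slot */
    skb_reserve(skb, -(long)skb->data & (align - 1)); /* align mod 128 */
    va = skb->data;                                   /* given to IPP  */

    skb_ptr = va - sizeof(*skb_ptr);
    *skb_ptr = skb;                        /* recovered at receive time */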
+ * + * Note that we use "schedule if not scheduled" logic instead of the more + * obvious "reschedule" logic, because "reschedule" is fairly expensive. + */ +static void tile_net_schedule_egress_timer(struct tile_net_cpu *info) +{ + if (!info->egress_timer_scheduled) { + mod_timer_pinned(&info->egress_timer, jiffies + 1); + info->egress_timer_scheduled = true; + } +} + + +/* + * The "function" for "info->egress_timer". + * + * This timer will reschedule itself as long as there are any pending + * completions expected (on behalf of any tile). + * + * ISSUE: Realistically, will the timer ever stop scheduling itself? + * + * ISSUE: This timer is almost never actually needed, so just use a global + * timer that can run on any tile. + * + * ISSUE: Maybe instead track number of expected completions, and free + * only that many, resetting to zero if "pending" is ever false. + */ +static void tile_net_handle_egress_timer(unsigned long arg) +{ + struct tile_net_cpu *info = (struct tile_net_cpu *)arg; + struct net_device *dev = info->napi.dev; + + struct sk_buff *olds[32]; + unsigned int wanted = 32; + unsigned int i, nolds = 0; + bool pending; + + /* The timer is no longer scheduled. */ + info->egress_timer_scheduled = false; + + nolds = tile_net_lepp_grab_comps(dev, olds, wanted, &pending); + + for (i = 0; i < nolds; i++) + kfree_skb(olds[i]); + + /* Reschedule timer if needed. */ + if (pending) + tile_net_schedule_egress_timer(info); +} + + +#ifdef IGNORE_DUP_ACKS + +/* + * Help detect "duplicate" ACKs. These are sequential packets (for a + * given flow) which are exactly 66 bytes long, sharing everything but + * ID=2@0x12, Hsum=2@0x18, Ack=4@0x2a, WinSize=2@0x30, Csum=2@0x32, + * Tstamps=10@0x38. The ID's are +1, the Hsum's are -1, the Ack's are + * +N, and the Tstamps are usually identical. + * + * NOTE: Apparently truly duplicate acks (with identical "ack" values), + * should not be collapsed, as they are used for some kind of flow control. + */ +static bool is_dup_ack(char *s1, char *s2, unsigned int len) +{ + int i; + + unsigned long long ignorable = 0; + + /* Identification. */ + ignorable |= (1ULL << 0x12); + ignorable |= (1ULL << 0x13); + + /* Header checksum. */ + ignorable |= (1ULL << 0x18); + ignorable |= (1ULL << 0x19); + + /* ACK. */ + ignorable |= (1ULL << 0x2a); + ignorable |= (1ULL << 0x2b); + ignorable |= (1ULL << 0x2c); + ignorable |= (1ULL << 0x2d); + + /* WinSize. */ + ignorable |= (1ULL << 0x30); + ignorable |= (1ULL << 0x31); + + /* Checksum. */ + ignorable |= (1ULL << 0x32); + ignorable |= (1ULL << 0x33); + + for (i = 0; i < len; i++, ignorable >>= 1) { + + if ((ignorable & 1) || (s1[i] == s2[i])) + continue; + +#ifdef TILE_NET_DEBUG + /* HACK: Mention non-timestamp diffs. */ + if (i < 0x38 && i != 0x2f && + net_ratelimit()) + pr_info("Diff at 0x%x\n", i); +#endif + + return false; + } + +#ifdef TILE_NET_NO_SUPPRESS_DUP_ACKS + /* HACK: Do not suppress truly duplicate ACKs. */ + /* ISSUE: Is this actually necessary or helpful? */ + if (s1[0x2a] == s2[0x2a] && + s1[0x2b] == s2[0x2b] && + s1[0x2c] == s2[0x2c] && + s1[0x2d] == s2[0x2d]) { + return false; + } +#endif + + return true; +} + +#endif + + + +/* + * Like "tile_net_handle_packets()", but just discard packets. 
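+ *
+ * (Here "discarding" means recovering the skb from the back-pointer
+ * stored just below the buffer, freeing it with "kfree_skb()", and
+ * advancing the receive-read pointer, so the queue drains without
+ * any NAPI processing.)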
+ */ +static void tile_net_discard_packets(struct net_device *dev) +{ + struct tile_net_priv *priv = netdev_priv(dev); + int my_cpu = smp_processor_id(); + struct tile_net_cpu *info = priv->cpu[my_cpu]; + struct tile_netio_queue *queue = &info->queue; + netio_queue_impl_t *qsp = queue->__system_part; + netio_queue_user_impl_t *qup = &queue->__user_part; + + while (qup->__packet_receive_read != + qsp->__packet_receive_queue.__packet_write) { + + int index = qup->__packet_receive_read; + + int index2_aux = index + sizeof(netio_pkt_t); + int index2 = + ((index2_aux == + qsp->__packet_receive_queue.__last_packet_plus_one) ? + 0 : index2_aux); + + netio_pkt_t *pkt = (netio_pkt_t *) + ((unsigned long) &qsp[1] + index); + + /* Extract the "linux_buffer_t". */ + unsigned int buffer = pkt->__packet.word; + + /* Convert "linux_buffer_t" to "va". */ + void *va = __va((phys_addr_t)(buffer >> 1) << 7); + + /* Acquire the associated "skb". */ + struct sk_buff **skb_ptr = va - sizeof(*skb_ptr); + struct sk_buff *skb = *skb_ptr; + + kfree_skb(skb); + + /* Consume this packet. */ + qup->__packet_receive_read = index2; + } +} + + +/* + * Handle the next packet. Return true if "processed", false if "filtered". + */ +static bool tile_net_poll_aux(struct tile_net_cpu *info, int index) +{ + struct net_device *dev = info->napi.dev; + + struct tile_netio_queue *queue = &info->queue; + netio_queue_impl_t *qsp = queue->__system_part; + netio_queue_user_impl_t *qup = &queue->__user_part; + struct tile_net_stats_t *stats = &info->stats; + + int filter; + + int index2_aux = index + sizeof(netio_pkt_t); + int index2 = + ((index2_aux == + qsp->__packet_receive_queue.__last_packet_plus_one) ? + 0 : index2_aux); + + netio_pkt_t *pkt = (netio_pkt_t *)((unsigned long) &qsp[1] + index); + + netio_pkt_metadata_t *metadata = NETIO_PKT_METADATA(pkt); + + /* Extract the packet size. */ + unsigned long len = + (NETIO_PKT_CUSTOM_LENGTH(pkt) + + NET_IP_ALIGN - NETIO_PACKET_PADDING); + + /* Extract the "linux_buffer_t". */ + unsigned int buffer = pkt->__packet.word; + + /* Extract "small" (vs "large"). */ + bool small = ((buffer & 1) != 0); + + /* Convert "linux_buffer_t" to "va". */ + void *va = __va((phys_addr_t)(buffer >> 1) << 7); + + /* Extract the packet data pointer. */ + /* Compare to "NETIO_PKT_CUSTOM_DATA(pkt)". */ + unsigned char *buf = va + NET_IP_ALIGN; + +#ifdef IGNORE_DUP_ACKS + + static int other; + static int final; + static int keep; + static int skip; + +#endif + + /* Invalidate the packet buffer. */ + if (!hash_default) + __inv_buffer(buf, len); + + /* ISSUE: Is this needed? */ + dev->last_rx = jiffies; + +#ifdef TILE_NET_DUMP_PACKETS + dump_packet(buf, len, "rx"); +#endif /* TILE_NET_DUMP_PACKETS */ + +#ifdef TILE_NET_VERIFY_INGRESS + if (!NETIO_PKT_L4_CSUM_CORRECT_M(metadata, pkt) && + NETIO_PKT_L4_CSUM_CALCULATED_M(metadata, pkt)) { + /* + * FIXME: This complains about UDP packets + * with a "zero" checksum (bug 6624). 
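+ * A UDP checksum field of zero means "checksum not computed"
+ * (per RFC 768), so such packets are legitimate even though the
+ * check here reports them as bad.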
+ */ +#ifdef TILE_NET_PANIC_ON_BAD + dump_packet(buf, len, "rx"); + panic("Bad L4 checksum."); +#else + pr_warning("Bad L4 checksum on %d byte packet.\n", len); +#endif + } + if (!NETIO_PKT_L3_CSUM_CORRECT_M(metadata, pkt) && + NETIO_PKT_L3_CSUM_CALCULATED_M(metadata, pkt)) { + dump_packet(buf, len, "rx"); + panic("Bad L3 checksum."); + } + switch (NETIO_PKT_STATUS_M(metadata, pkt)) { + case NETIO_PKT_STATUS_OVERSIZE: + if (len >= 64) { + dump_packet(buf, len, "rx"); + panic("Unexpected OVERSIZE."); + } + break; + case NETIO_PKT_STATUS_BAD: +#ifdef TILE_NET_PANIC_ON_BAD + dump_packet(buf, len, "rx"); + panic("Unexpected BAD packet."); +#else + pr_warning("Unexpected BAD %d byte packet.\n", len); +#endif + } +#endif + + filter = 0; + + if (!(dev->flags & IFF_UP)) { + /* Filter packets received before we're up. */ + filter = 1; + } else if (!(dev->flags & IFF_PROMISC)) { + /* + * FIXME: Implement HW multicast filter. + */ + if (!IS_MULTICAST(buf) && !IS_BROADCAST(buf)) { + /* Filter packets not for our address. */ + const u8 *mine = dev->dev_addr; + filter = compare_ether_addr(mine, buf); + } + } + +#ifdef IGNORE_DUP_ACKS + + if (len != 66) { + /* FIXME: Must check "is_tcp_ack(buf, len)" somehow. */ + + other++; + + } else if (index2 == + qsp->__packet_receive_queue.__packet_write) { + + final++; + + } else { + + netio_pkt_t *pkt2 = (netio_pkt_t *) + ((unsigned long) &qsp[1] + index2); + + netio_pkt_metadata_t *metadata2 = + NETIO_PKT_METADATA(pkt2); + + /* Extract the packet size. */ + unsigned long len2 = + (NETIO_PKT_CUSTOM_LENGTH(pkt2) + + NET_IP_ALIGN - NETIO_PACKET_PADDING); + + if (len2 == 66 && + NETIO_PKT_FLOW_HASH_M(metadata, pkt) == + NETIO_PKT_FLOW_HASH_M(metadata2, pkt2)) { + + /* Extract the "linux_buffer_t". */ + unsigned int buffer2 = pkt2->__packet.word; + + /* Convert "linux_buffer_t" to "va". */ + void *va2 = + __va((phys_addr_t)(buffer2 >> 1) << 7); + + /* Extract the packet data pointer. */ + /* Compare to "NETIO_PKT_CUSTOM_DATA(pkt)". */ + unsigned char *buf2 = va2 + NET_IP_ALIGN; + + /* Invalidate the packet buffer. */ + if (!hash_default) + __inv_buffer(buf2, len2); + + if (is_dup_ack(buf, buf2, len)) { + skip++; + filter = 1; + } else { + keep++; + } + } + } + + if (net_ratelimit()) + pr_info("Other %d Final %d Keep %d Skip %d.\n", + other, final, keep, skip); + +#endif + + if (filter) { + + /* ISSUE: Update "drop" statistics? */ + + tile_net_provide_linux_buffer(info, va, small); + + } else { + + /* Acquire the associated "skb". */ + struct sk_buff **skb_ptr = va - sizeof(*skb_ptr); + struct sk_buff *skb = *skb_ptr; + + /* Paranoia. */ + if (skb->data != buf) + panic("Corrupt linux buffer from LIPP! " + "VA=%p, skb=%p, skb->data=%p\n", + va, skb, skb->data); + + /* Encode the actual packet length. */ + skb_put(skb, len); + + /* NOTE: This call also sets "skb->dev = dev". */ + skb->protocol = eth_type_trans(skb, dev); + + /* ISSUE: Discard corrupt packets? */ + /* ISSUE: Discard packets with bad checksums? */ + + /* Avoid recomputing TCP/UDP checksums. */ + if (NETIO_PKT_L4_CSUM_CORRECT_M(metadata, pkt)) + skb->ip_summed = CHECKSUM_UNNECESSARY; + + netif_receive_skb(skb); + + stats->rx_packets++; + stats->rx_bytes += len; + + if (small) + info->num_needed_small_buffers++; + else + info->num_needed_large_buffers++; + } + + /* Return four credits after every fourth packet. 
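+ * Each returned credit lets the IPP deliver one more packet to this
+ * queue (see "num_receive_packets" at registration time); returning
+ * them in batches of "__receive_credit_interval", which is set to 4
+ * below, amortizes the cost of the fastio call.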
*/ + if (--qup->__receive_credit_remaining == 0) { + u32 interval = qup->__receive_credit_interval; + qup->__receive_credit_remaining = interval; + __netio_fastio_return_credits(qup->__fastio_index, interval); + } + + /* Consume this packet. */ + qup->__packet_receive_read = index2; + + return !filter; +} + + +/* + * Handle some packets for the given device on the current CPU. + * + * ISSUE: The "rotting packet" race condition occurs if a packet + * arrives after the queue appears to be empty, and before the + * hypervisor interrupt is re-enabled. + */ +static int tile_net_poll(struct napi_struct *napi, int budget) +{ + struct net_device *dev = napi->dev; + struct tile_net_priv *priv = netdev_priv(dev); + int my_cpu = smp_processor_id(); + struct tile_net_cpu *info = priv->cpu[my_cpu]; + struct tile_netio_queue *queue = &info->queue; + netio_queue_impl_t *qsp = queue->__system_part; + netio_queue_user_impl_t *qup = &queue->__user_part; + + unsigned int work = 0; + + while (1) { + int index = qup->__packet_receive_read; + if (index == qsp->__packet_receive_queue.__packet_write) + break; + + if (tile_net_poll_aux(info, index)) { + if (++work >= budget) + goto done; + } + } + + napi_complete(&info->napi); + + /* Re-enable hypervisor interrupts. */ + enable_percpu_irq(priv->intr_id); + + /* HACK: Avoid the "rotting packet" problem. */ + if (qup->__packet_receive_read != + qsp->__packet_receive_queue.__packet_write) + napi_schedule(&info->napi); + + /* ISSUE: Handle completions? */ + +done: + + tile_net_provide_needed_buffers(info); + + return work; +} + + +/* + * Handle an ingress interrupt for the given device on the current cpu. + */ +static irqreturn_t tile_net_handle_ingress_interrupt(int irq, void *dev_ptr) +{ + struct net_device *dev = (struct net_device *)dev_ptr; + struct tile_net_priv *priv = netdev_priv(dev); + int my_cpu = smp_processor_id(); + struct tile_net_cpu *info = priv->cpu[my_cpu]; + + /* Disable hypervisor interrupt. */ + disable_percpu_irq(priv->intr_id); + + napi_schedule(&info->napi); + + return IRQ_HANDLED; +} + + +/* + * One time initialization per interface. + */ +static int tile_net_open_aux(struct net_device *dev) +{ + struct tile_net_priv *priv = netdev_priv(dev); + + int ret; + int dummy; + unsigned int epp_lotar; + + /* + * Find out where EPP memory should be homed. + */ + ret = hv_dev_pread(priv->hv_devhdl, 0, + (HV_VirtAddr)&epp_lotar, sizeof(epp_lotar), + NETIO_EPP_SHM_OFF); + if (ret < 0) { + pr_err("could not read epp_shm_queue lotar.\n"); + return -EIO; + } + + /* + * Home the page on the EPP. + */ + { + int epp_home = hv_lotar_to_cpu(epp_lotar); + struct page *page = virt_to_page(priv->epp_queue); + homecache_change_page_home(page, 0, epp_home); + } + + /* + * Register the EPP shared memory queue. + */ + { + netio_ipp_address_t ea = { + .va = 0, + .pa = __pa(priv->epp_queue), + .pte = hv_pte(0), + .size = PAGE_SIZE, + }; + ea.pte = hv_pte_set_lotar(ea.pte, epp_lotar); + ea.pte = hv_pte_set_mode(ea.pte, HV_PTE_MODE_CACHE_TILE_L3); + ret = hv_dev_pwrite(priv->hv_devhdl, 0, + (HV_VirtAddr)&ea, + sizeof(ea), + NETIO_EPP_SHM_OFF); + if (ret < 0) + return -EIO; + } + + /* + * Start LIPP/LEPP. + */ + if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy, + sizeof(dummy), NETIO_IPP_START_SHIM_OFF) < 0) { + pr_warning("Failed to start LIPP/LEPP.\n"); + return -EIO; + } + + return 0; +} + + +/* + * Register with hypervisor on each CPU. 
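+ * (Called directly on the local cpu, and via "smp_call_function()"
+ * on all the others, from "tile_net_open_inner()" below.)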
+ * + * Strangely, this function does important things even if it "fails", + * which is especially common if the link is not up yet. Hopefully + * these things are all "harmless" if done twice! + */ +static void tile_net_register(void *dev_ptr) +{ + struct net_device *dev = (struct net_device *)dev_ptr; + struct tile_net_priv *priv = netdev_priv(dev); + int my_cpu = smp_processor_id(); + struct tile_net_cpu *info; + + struct tile_netio_queue *queue; + + /* Only network cpus can receive packets. */ + int queue_id = + cpumask_test_cpu(my_cpu, &priv->network_cpus_map) ? 0 : 255; + + netio_input_config_t config = { + .flags = 0, + .num_receive_packets = priv->network_cpus_credits, + .queue_id = queue_id + }; + + int ret = 0; + netio_queue_impl_t *queuep; + + PDEBUG("tile_net_register(queue_id %d)\n", queue_id); + + if (!strcmp(dev->name, "xgbe0")) + info = &__get_cpu_var(hv_xgbe0); + else if (!strcmp(dev->name, "xgbe1")) + info = &__get_cpu_var(hv_xgbe1); + else if (!strcmp(dev->name, "gbe0")) + info = &__get_cpu_var(hv_gbe0); + else if (!strcmp(dev->name, "gbe1")) + info = &__get_cpu_var(hv_gbe1); + else + BUG(); + + /* Initialize the egress timer. */ + init_timer(&info->egress_timer); + info->egress_timer.data = (long)info; + info->egress_timer.function = tile_net_handle_egress_timer; + + priv->cpu[my_cpu] = info; + + /* + * Register ourselves with the IPP. + */ + ret = hv_dev_pwrite(priv->hv_devhdl, 0, + (HV_VirtAddr)&config, + sizeof(netio_input_config_t), + NETIO_IPP_INPUT_REGISTER_OFF); + PDEBUG("hv_dev_pwrite(NETIO_IPP_INPUT_REGISTER_OFF) returned %d\n", + ret); + if (ret < 0) { + printk(KERN_DEBUG "hv_dev_pwrite NETIO_IPP_INPUT_REGISTER_OFF" + " failure %d\n", ret); + info->link_down = (ret == NETIO_LINK_DOWN); + return; + } + + /* + * Get the pointer to our queue's system part. + */ + + ret = hv_dev_pread(priv->hv_devhdl, 0, + (HV_VirtAddr)&queuep, + sizeof(netio_queue_impl_t *), + NETIO_IPP_INPUT_REGISTER_OFF); + PDEBUG("hv_dev_pread(NETIO_IPP_INPUT_REGISTER_OFF) returned %d\n", + ret); + PDEBUG("queuep %p\n", queuep); + if (ret <= 0) { + /* ISSUE: Shouldn't this be a fatal error? */ + pr_err("hv_dev_pread NETIO_IPP_INPUT_REGISTER_OFF failure\n"); + return; + } + + queue = &info->queue; + + queue->__system_part = queuep; + + memset(&queue->__user_part, 0, sizeof(netio_queue_user_impl_t)); + + /* This is traditionally "config.num_receive_packets / 2". */ + queue->__user_part.__receive_credit_interval = 4; + queue->__user_part.__receive_credit_remaining = + queue->__user_part.__receive_credit_interval; + + /* + * Get a fastio index from the hypervisor. + * ISSUE: Shouldn't this check the result? + */ + ret = hv_dev_pread(priv->hv_devhdl, 0, + (HV_VirtAddr)&queue->__user_part.__fastio_index, + sizeof(queue->__user_part.__fastio_index), + NETIO_IPP_GET_FASTIO_OFF); + PDEBUG("hv_dev_pread(NETIO_IPP_GET_FASTIO_OFF) returned %d\n", ret); + + netif_napi_add(dev, &info->napi, tile_net_poll, 64); + + /* Now we are registered. */ + info->registered = true; +} + + +/* + * Unregister with hypervisor on each CPU. + */ +static void tile_net_unregister(void *dev_ptr) +{ + struct net_device *dev = (struct net_device *)dev_ptr; + struct tile_net_priv *priv = netdev_priv(dev); + int my_cpu = smp_processor_id(); + struct tile_net_cpu *info = priv->cpu[my_cpu]; + + int ret = 0; + int dummy = 0; + + /* Do nothing if never registered. */ + if (info == NULL) + return; + + /* Do nothing if already unregistered. */ + if (!info->registered) + return; + + /* + * Unregister ourselves with LIPP. 
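+ * (Like the LIPP start/stop writes, this passes a throwaway "dummy"
+ * value; the offset, NETIO_IPP_INPUT_UNREGISTER_OFF, is presumably
+ * what selects the operation.)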
+ */ + ret = hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy, + sizeof(dummy), NETIO_IPP_INPUT_UNREGISTER_OFF); + PDEBUG("hv_dev_pwrite(NETIO_IPP_INPUT_UNREGISTER_OFF) returned %d\n", + ret); + if (ret < 0) { + /* FIXME: Just panic? */ + pr_err("hv_dev_pwrite NETIO_IPP_INPUT_UNREGISTER_OFF" + " failure %d\n", ret); + } + + /* + * Discard all packets still in our NetIO queue. Hopefully, + * once the unregister call is complete, there will be no + * packets still in flight on the IDN. + */ + tile_net_discard_packets(dev); + + /* Reset state. */ + info->num_needed_small_buffers = 0; + info->num_needed_large_buffers = 0; + + /* Cancel egress timer. */ + del_timer(&info->egress_timer); + info->egress_timer_scheduled = false; + + netif_napi_del(&info->napi); + + /* Now we are unregistered. */ + info->registered = false; +} + + +/* + * Helper function for "tile_net_stop()". + * + * Also used to handle registration failure in "tile_net_open_inner()", + * when "fully_opened" is known to be false, and the various extra + * steps in "tile_net_stop()" are not necessary. ISSUE: It might be + * simpler if we could just call "tile_net_stop()" anyway. + */ +static void tile_net_stop_aux(struct net_device *dev) +{ + struct tile_net_priv *priv = netdev_priv(dev); + + int dummy = 0; + + /* Unregister all tiles, so LIPP will stop delivering packets. */ + on_each_cpu(tile_net_unregister, (void *)dev, 1); + + /* Stop LIPP/LEPP. */ + if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy, + sizeof(dummy), NETIO_IPP_STOP_SHIM_OFF) < 0) + panic("Failed to stop LIPP/LEPP!\n"); + + priv->partly_opened = 0; +} + + +/* + * Disable ingress interrupts for the given device on the current cpu. + */ +static void tile_net_disable_intr(void *dev_ptr) +{ + struct net_device *dev = (struct net_device *)dev_ptr; + struct tile_net_priv *priv = netdev_priv(dev); + int my_cpu = smp_processor_id(); + struct tile_net_cpu *info = priv->cpu[my_cpu]; + + /* Disable hypervisor interrupt. */ + disable_percpu_irq(priv->intr_id); + + /* Disable NAPI if needed. */ + if (info != NULL && info->napi_enabled) { + napi_disable(&info->napi); + info->napi_enabled = false; + } +} + + +/* + * Enable ingress interrupts for the given device on the current cpu. + */ +static void tile_net_enable_intr(void *dev_ptr) +{ + struct net_device *dev = (struct net_device *)dev_ptr; + struct tile_net_priv *priv = netdev_priv(dev); + int my_cpu = smp_processor_id(); + struct tile_net_cpu *info = priv->cpu[my_cpu]; + + /* Enable hypervisor interrupt. */ + enable_percpu_irq(priv->intr_id); + + /* Enable NAPI. */ + napi_enable(&info->napi); + info->napi_enabled = true; +} + + +/* + * tile_net_open_inner does most of the work of bringing up the interface. + * It's called from tile_net_open(), and also from tile_net_retry_open(). + * The return value is 0 if the interface was brought up, < 0 if + * tile_net_open() should return the return value as an error, and > 0 if + * tile_net_open() should return success and schedule a work item to + * periodically retry the bringup. + */ +static int tile_net_open_inner(struct net_device *dev) +{ + struct tile_net_priv *priv = netdev_priv(dev); + int my_cpu = smp_processor_id(); + struct tile_net_cpu *info; + struct tile_netio_queue *queue; + unsigned int irq; + int i; + + /* + * First try to register just on the local CPU, and handle any + * semi-expected "link down" failure specially. Note that we + * do NOT call "tile_net_stop_aux()", unlike below. 
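+ * A "link down" failure here becomes a return value of 1, which
+ * causes "tile_net_open()" to report success anyway and schedule
+ * "tile_net_open_retry()" to try again later.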
+ */ + tile_net_register(dev); + info = priv->cpu[my_cpu]; + if (!info->registered) { + if (info->link_down) + return 1; + return -EAGAIN; + } + + /* + * Now register everywhere else. If any registration fails, + * even for "link down" (which might not be possible), we + * clean up using "tile_net_stop_aux()". + */ + smp_call_function(tile_net_register, (void *)dev, 1); + for_each_online_cpu(i) { + if (!priv->cpu[i]->registered) { + tile_net_stop_aux(dev); + return -EAGAIN; + } + } + + queue = &info->queue; + + /* + * Set the device intr bit mask. + * The tile_net_register above sets per tile __intr_id. + */ + priv->intr_id = queue->__system_part->__intr_id; + BUG_ON(!priv->intr_id); + + /* + * Register the device interrupt handler. + * The __ffs() function returns the index into the interrupt handler + * table from the interrupt bit mask which should have one bit + * and one bit only set. + */ + irq = __ffs(priv->intr_id); + tile_irq_activate(irq, TILE_IRQ_PERCPU); + BUG_ON(request_irq(irq, tile_net_handle_ingress_interrupt, + 0, dev->name, (void *)dev) != 0); + + /* ISSUE: How could "priv->fully_opened" ever be "true" here? */ + + if (!priv->fully_opened) { + + int dummy = 0; + + /* Allocate initial buffers. */ + + int max_buffers = + priv->network_cpus_count * priv->network_cpus_credits; + + info->num_needed_small_buffers = + min(LIPP_SMALL_BUFFERS, max_buffers); + + info->num_needed_large_buffers = + min(LIPP_LARGE_BUFFERS, max_buffers); + + tile_net_provide_needed_buffers(info); + + if (info->num_needed_small_buffers != 0 || + info->num_needed_large_buffers != 0) + panic("Insufficient memory for buffer stack!"); + + /* Start LIPP/LEPP and activate "ingress" at the shim. */ + if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy, + sizeof(dummy), NETIO_IPP_INPUT_INIT_OFF) < 0) + panic("Failed to activate the LIPP Shim!\n"); + + priv->fully_opened = 1; + } + + /* On each tile, enable the hypervisor to trigger interrupts. */ + /* ISSUE: Do this before starting LIPP/LEPP? */ + on_each_cpu(tile_net_enable_intr, (void *)dev, 1); + + /* Start our transmit queue. */ + netif_start_queue(dev); + + return 0; +} + + +/* + * Called periodically to retry bringing up the NetIO interface, + * if it doesn't come up cleanly during tile_net_open(). + */ +static void tile_net_open_retry(struct work_struct *w) +{ + struct delayed_work *dw = + container_of(w, struct delayed_work, work); + + struct tile_net_priv *priv = + container_of(dw, struct tile_net_priv, retry_work); + + /* + * Try to bring the NetIO interface up. If it fails, reschedule + * ourselves to try again later; otherwise, tell Linux we now have + * a working link. ISSUE: What if the return value is negative? + */ + if (tile_net_open_inner(priv->dev)) + schedule_delayed_work_on(singlethread_cpu, &priv->retry_work, + TILE_NET_RETRY_INTERVAL); + else + netif_carrier_on(priv->dev); +} + + +/* + * Called when a network interface is made active. + * + * Returns 0 on success, negative value on failure. + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. + * + * If the actual link is not available yet, then we tell Linux that + * we have no carrier, and we keep checking until the link comes up. 
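+ *
+ * (Concretely: "netif_carrier_off()" plus a delayed work item which
+ * re-runs "tile_net_open_inner()" every TILE_NET_RETRY_INTERVAL
+ * jiffies until it succeeds, at which point the carrier is turned on.)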
+ */ +static int tile_net_open(struct net_device *dev) +{ + int ret = 0; + struct tile_net_priv *priv = netdev_priv(dev); + + /* + * We rely on priv->partly_opened to tell us if this is the + * first time this interface is being brought up. If it is + * set, the IPP was already initialized and should not be + * initialized again. + */ + if (!priv->partly_opened) { + + int count; + int credits; + + /* Initialize LIPP/LEPP, and start the Shim. */ + ret = tile_net_open_aux(dev); + if (ret < 0) { + pr_err("tile_net_open_aux failed: %d\n", ret); + return ret; + } + + /* Analyze the network cpus. */ + + if (network_cpus_used) + cpumask_copy(&priv->network_cpus_map, + &network_cpus_map); + else + cpumask_copy(&priv->network_cpus_map, cpu_online_mask); + + + count = cpumask_weight(&priv->network_cpus_map); + + /* Limit credits to available buffers, and apply min. */ + credits = max(16, (LIPP_LARGE_BUFFERS / count) & ~1); + + /* Apply "GBE" max limit. */ + /* ISSUE: Use higher limit for XGBE? */ + credits = min(NETIO_MAX_RECEIVE_PKTS, credits); + + priv->network_cpus_count = count; + priv->network_cpus_credits = credits; + +#ifdef TILE_NET_DEBUG + pr_info("Using %d network cpus, with %d credits each\n", + priv->network_cpus_count, priv->network_cpus_credits); +#endif + + priv->partly_opened = 1; + } + + /* + * Attempt to bring up the link. + */ + ret = tile_net_open_inner(dev); + if (ret <= 0) { + if (ret == 0) + netif_carrier_on(dev); + return ret; + } + + /* + * We were unable to bring up the NetIO interface, but we want to + * try again in a little bit. Tell Linux that we have no carrier + * so it doesn't try to use the interface before the link comes up + * and then remember to try again later. + */ + netif_carrier_off(dev); + schedule_delayed_work_on(singlethread_cpu, &priv->retry_work, + TILE_NET_RETRY_INTERVAL); + + return 0; +} + + +/* + * Disables a network interface. + * + * Returns 0, this is not allowed to fail. + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. + * + * ISSUE: Can this can be called while "tile_net_poll()" is running? + */ +static int tile_net_stop(struct net_device *dev) +{ + struct tile_net_priv *priv = netdev_priv(dev); + + bool pending = true; + + PDEBUG("tile_net_stop()\n"); + + /* ISSUE: Only needed if not yet fully open. */ + cancel_delayed_work_sync(&priv->retry_work); + + /* Can't transmit any more. */ + netif_stop_queue(dev); + + /* + * Disable hypervisor interrupts on each tile. + */ + on_each_cpu(tile_net_disable_intr, (void *)dev, 1); + + /* + * Unregister the interrupt handler. + * The __ffs() function returns the index into the interrupt handler + * table from the interrupt bit mask which should have one bit + * and one bit only set. + */ + if (priv->intr_id) + free_irq(__ffs(priv->intr_id), dev); + + /* + * Drain all the LIPP buffers. + */ + + while (true) { + int buffer; + + /* NOTE: This should never fail. */ + if (hv_dev_pread(priv->hv_devhdl, 0, (HV_VirtAddr)&buffer, + sizeof(buffer), NETIO_IPP_DRAIN_OFF) < 0) + break; + + /* Stop when done. */ + if (buffer == 0) + break; + + { + /* Convert "linux_buffer_t" to "va". */ + void *va = __va((phys_addr_t)(buffer >> 1) << 7); + + /* Acquire the associated "skb". 
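+ * (It was stashed immediately below the buffer, at va - sizeof(*skb_ptr),
+ * by "tile_net_provide_needed_buffer()" when the buffer was provided.)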
*/ + struct sk_buff **skb_ptr = va - sizeof(*skb_ptr); + struct sk_buff *skb = *skb_ptr; + + kfree_skb(skb); + } + } + + /* Stop LIPP/LEPP. */ + tile_net_stop_aux(dev); + + + priv->fully_opened = 0; + + + /* + * XXX: ISSUE: It appears that, in practice anyway, by the + * time we get here, there are no pending completions. + */ + while (pending) { + + struct sk_buff *olds[32]; + unsigned int wanted = 32; + unsigned int i, nolds = 0; + + nolds = tile_net_lepp_grab_comps(dev, olds, + wanted, &pending); + + /* ISSUE: We have never actually seen this debug spew. */ + if (nolds != 0) + pr_info("During tile_net_stop(), grabbed %d comps.\n", + nolds); + + for (i = 0; i < nolds; i++) + kfree_skb(olds[i]); + } + + + /* Wipe the EPP queue. */ + memset(priv->epp_queue, 0, sizeof(lepp_queue_t)); + + /* Evict the EPP queue. */ + finv_buffer(priv->epp_queue, PAGE_SIZE); + + return 0; +} + + +/* + * Prepare the "frags" info for the resulting LEPP command. + * + * If needed, flush the memory used by the frags. + */ +static unsigned int tile_net_tx_frags(lepp_frag_t *frags, + struct sk_buff *skb, + void *b_data, unsigned int b_len) +{ + unsigned int i, n = 0; + + struct skb_shared_info *sh = skb_shinfo(skb); + + phys_addr_t cpa; + + if (b_len != 0) { + + if (!hash_default) + finv_buffer_remote(b_data, b_len); + + cpa = __pa(b_data); + frags[n].cpa_lo = cpa; + frags[n].cpa_hi = cpa >> 32; + frags[n].length = b_len; + frags[n].hash_for_home = hash_default; + n++; + } + + for (i = 0; i < sh->nr_frags; i++) { + + skb_frag_t *f = &sh->frags[i]; + unsigned long pfn = page_to_pfn(f->page); + + /* FIXME: Compute "hash_for_home" properly. */ + /* ISSUE: The hypervisor checks CHIP_HAS_REV1_DMA_PACKETS(). */ + int hash_for_home = hash_default; + + /* FIXME: Hmmm. */ + if (!hash_default) { + void *va = pfn_to_kaddr(pfn) + f->page_offset; + BUG_ON(PageHighMem(f->page)); + finv_buffer_remote(va, f->size); + } + + cpa = ((phys_addr_t)pfn << PAGE_SHIFT) + f->page_offset; + frags[n].cpa_lo = cpa; + frags[n].cpa_hi = cpa >> 32; + frags[n].length = f->size; + frags[n].hash_for_home = hash_for_home; + n++; + } + + return n; +} + + +/* + * This function takes "skb", consisting of a header template and a + * payload, and hands it to LEPP, to emit as one or more segments, + * each consisting of a possibly modified header, plus a piece of the + * payload, via a process known as "tcp segmentation offload". + * + * Usually, "data" will contain the header template, of size "sh_len", + * and "sh->frags" will contain "skb->data_len" bytes of payload, and + * there will be "sh->gso_segs" segments. + * + * Sometimes, if "sendfile()" requires copying, we will be called with + * "data" containing the header and payload, with "frags" being empty. + * + * In theory, "sh->nr_frags" could be 3, but in practice, it seems + * that this will never actually happen. + * + * See "emulate_large_send_offload()" for some reference code, which + * does not handle checksumming. + * + * ISSUE: How do we make sure that high memory DMA does not migrate? + */ +static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev) +{ + struct tile_net_priv *priv = netdev_priv(dev); + int my_cpu = smp_processor_id(); + struct tile_net_cpu *info = priv->cpu[my_cpu]; + struct tile_net_stats_t *stats = &info->stats; + + struct skb_shared_info *sh = skb_shinfo(skb); + + unsigned char *data = skb->data; + + /* The ip header follows the ethernet header. 
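+ * For a typical TCP/IPv4 frame with no options, the header sizes
+ * computed below work out to eh_len = 14, ih_len = 20, th_len = 20,
+ * giving a 54 byte header template ("sh_len") per segment.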
*/ + struct iphdr *ih = ip_hdr(skb); + unsigned int ih_len = ih->ihl * 4; + + /* Note that "nh == ih", by definition. */ + unsigned char *nh = skb_network_header(skb); + unsigned int eh_len = nh - data; + + /* The tcp header follows the ip header. */ + struct tcphdr *th = (struct tcphdr *)(nh + ih_len); + unsigned int th_len = th->doff * 4; + + /* The total number of header bytes. */ + /* NOTE: This may be less than skb_headlen(skb). */ + unsigned int sh_len = eh_len + ih_len + th_len; + + /* The number of payload bytes at "skb->data + sh_len". */ + /* This is non-zero for sendfile() without HIGHDMA. */ + unsigned int b_len = skb_headlen(skb) - sh_len; + + /* The total number of payload bytes. */ + unsigned int d_len = b_len + skb->data_len; + + /* The maximum payload size. */ + unsigned int p_len = sh->gso_size; + + /* The total number of segments. */ + unsigned int num_segs = sh->gso_segs; + + /* The temporary copy of the command. */ + u32 cmd_body[(LEPP_MAX_CMD_SIZE + 3) / 4]; + lepp_tso_cmd_t *cmd = (lepp_tso_cmd_t *)cmd_body; + + /* Analyze the "frags". */ + unsigned int num_frags = + tile_net_tx_frags(cmd->frags, skb, data + sh_len, b_len); + + /* The size of the command, including frags and header. */ + size_t cmd_size = LEPP_TSO_CMD_SIZE(num_frags, sh_len); + + /* The command header. */ + lepp_tso_cmd_t cmd_init = { + .tso = true, + .header_size = sh_len, + .ip_offset = eh_len, + .tcp_offset = eh_len + ih_len, + .payload_size = p_len, + .num_frags = num_frags, + }; + + unsigned long irqflags; + + lepp_queue_t *eq = priv->epp_queue; + + struct sk_buff *olds[4]; + unsigned int wanted = 4; + unsigned int i, nolds = 0; + + unsigned int cmd_head, cmd_tail, cmd_next; + unsigned int comp_tail; + + unsigned int free_slots; + + + /* Paranoia. */ + BUG_ON(skb->protocol != htons(ETH_P_IP)); + BUG_ON(ih->protocol != IPPROTO_TCP); + BUG_ON(skb->ip_summed != CHECKSUM_PARTIAL); + BUG_ON(num_frags > LEPP_MAX_FRAGS); + /*--BUG_ON(num_segs != (d_len + (p_len - 1)) / p_len); */ + BUG_ON(num_segs <= 1); + + + /* Finish preparing the command. */ + + /* Copy the command header. */ + *cmd = cmd_init; + + /* Copy the "header". */ + memcpy(&cmd->frags[num_frags], data, sh_len); + + + /* Prefetch and wait, to minimize time spent holding the spinlock. */ + prefetch_L1(&eq->comp_tail); + prefetch_L1(&eq->cmd_tail); + mb(); + + + /* Enqueue the command. */ + + spin_lock_irqsave(&priv->cmd_lock, irqflags); + + /* + * Handle completions if needed to make room. + * HACK: Spin until there is sufficient room. + */ + free_slots = lepp_num_free_comp_slots(eq); + if (free_slots < 1) { +spin: + nolds += tile_net_lepp_grab_comps(dev, olds + nolds, + wanted - nolds, NULL); + if (lepp_num_free_comp_slots(eq) < 1) + goto spin; + } + + cmd_head = eq->cmd_head; + cmd_tail = eq->cmd_tail; + + /* NOTE: The "gotos" below are untested. */ + + /* Prepare to advance, detecting full queue. */ + cmd_next = cmd_tail + cmd_size; + if (cmd_tail < cmd_head && cmd_next >= cmd_head) + goto spin; + if (cmd_next > LEPP_CMD_LIMIT) { + cmd_next = 0; + if (cmd_next == cmd_head) + goto spin; + } + + /* Copy the command. */ + memcpy(&eq->cmds[cmd_tail], cmd, cmd_size); + + /* Advance. */ + cmd_tail = cmd_next; + + /* Record "skb" for eventual freeing. */ + comp_tail = eq->comp_tail; + eq->comps[comp_tail] = skb; + LEPP_QINC(comp_tail); + eq->comp_tail = comp_tail; + + /* Flush before allowing LEPP to handle the command. 
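+ * The memory fence below orders the command bytes and the comps[]
+ * entry above before the store to "cmd_tail", which is presumably
+ * what LEPP watches, so the fence is what actually publishes the
+ * command.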
*/ + __insn_mf(); + + eq->cmd_tail = cmd_tail; + + spin_unlock_irqrestore(&priv->cmd_lock, irqflags); + + if (nolds == 0) + nolds = tile_net_lepp_grab_comps(dev, olds, wanted, NULL); + + /* Handle completions. */ + for (i = 0; i < nolds; i++) + kfree_skb(olds[i]); + + /* Update stats. */ + stats->tx_packets += num_segs; + stats->tx_bytes += (num_segs * sh_len) + d_len; + + /* Make sure the egress timer is scheduled. */ + tile_net_schedule_egress_timer(info); + + return NETDEV_TX_OK; +} + + +/* + * Transmit a packet (called by the kernel via "hard_start_xmit" hook). + */ +static int tile_net_tx(struct sk_buff *skb, struct net_device *dev) +{ + struct tile_net_priv *priv = netdev_priv(dev); + int my_cpu = smp_processor_id(); + struct tile_net_cpu *info = priv->cpu[my_cpu]; + struct tile_net_stats_t *stats = &info->stats; + + unsigned long irqflags; + + struct skb_shared_info *sh = skb_shinfo(skb); + + unsigned int len = skb->len; + unsigned char *data = skb->data; + + unsigned int csum_start = skb->csum_start - skb_headroom(skb); + + lepp_frag_t frags[LEPP_MAX_FRAGS]; + + unsigned int num_frags; + + lepp_queue_t *eq = priv->epp_queue; + + struct sk_buff *olds[4]; + unsigned int wanted = 4; + unsigned int i, nolds = 0; + + unsigned int cmd_size = sizeof(lepp_cmd_t); + + unsigned int cmd_head, cmd_tail, cmd_next; + unsigned int comp_tail; + + lepp_cmd_t cmds[LEPP_MAX_FRAGS]; + + unsigned int free_slots; + + + /* + * This is paranoia, since we think that if the link doesn't come + * up, telling Linux we have no carrier will keep it from trying + * to transmit. If it does, though, we can't execute this routine, + * since data structures we depend on aren't set up yet. + */ + if (!info->registered) + return NETDEV_TX_BUSY; + + + /* Save the timestamp. */ + dev->trans_start = jiffies; + + +#ifdef TILE_NET_PARANOIA +#if CHIP_HAS_CBOX_HOME_MAP() + if (hash_default) { + HV_PTE pte = *virt_to_pte(current->mm, (unsigned long)data); + if (hv_pte_get_mode(pte) != HV_PTE_MODE_CACHE_HASH_L3) + panic("Non-coherent egress buffer!"); + } +#endif +#endif + + +#ifdef TILE_NET_DUMP_PACKETS + /* ISSUE: Does not dump the "frags". */ + dump_packet(data, skb_headlen(skb), "tx"); +#endif /* TILE_NET_DUMP_PACKETS */ + + + if (sh->gso_size != 0) + return tile_net_tx_tso(skb, dev); + + + /* Prepare the commands. */ + + num_frags = tile_net_tx_frags(frags, skb, data, skb_headlen(skb)); + + for (i = 0; i < num_frags; i++) { + + bool final = (i == num_frags - 1); + + lepp_cmd_t cmd = { + .cpa_lo = frags[i].cpa_lo, + .cpa_hi = frags[i].cpa_hi, + .length = frags[i].length, + .hash_for_home = frags[i].hash_for_home, + .send_completion = final, + .end_of_packet = final + }; + + if (i == 0 && skb->ip_summed == CHECKSUM_PARTIAL) { + cmd.compute_checksum = 1; + cmd.checksum_data.bits.start_byte = csum_start; + cmd.checksum_data.bits.count = len - csum_start; + cmd.checksum_data.bits.destination_byte = + csum_start + skb->csum_offset; + } + + cmds[i] = cmd; + } + + + /* Prefetch and wait, to minimize time spent holding the spinlock. */ + prefetch_L1(&eq->comp_tail); + prefetch_L1(&eq->cmd_tail); + mb(); + + + /* Enqueue the commands. */ + + spin_lock_irqsave(&priv->cmd_lock, irqflags); + + /* + * Handle completions if needed to make room. + * HACK: Spin until there is sufficient room. 
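+ * (Each transmitted skb is recorded in the completion ring, so at
+ * least one free completion slot is needed before anything can be
+ * enqueued; grabbing completions is what frees slots up.)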
+ */ + free_slots = lepp_num_free_comp_slots(eq); + if (free_slots < 1) { +spin: + nolds += tile_net_lepp_grab_comps(dev, olds + nolds, + wanted - nolds, NULL); + if (lepp_num_free_comp_slots(eq) < 1) + goto spin; + } + + cmd_head = eq->cmd_head; + cmd_tail = eq->cmd_tail; + + /* NOTE: The "gotos" below are untested. */ + + /* Copy the commands, or fail. */ + for (i = 0; i < num_frags; i++) { + + /* Prepare to advance, detecting full queue. */ + cmd_next = cmd_tail + cmd_size; + if (cmd_tail < cmd_head && cmd_next >= cmd_head) + goto spin; + if (cmd_next > LEPP_CMD_LIMIT) { + cmd_next = 0; + if (cmd_next == cmd_head) + goto spin; + } + + /* Copy the command. */ + *(lepp_cmd_t *)&eq->cmds[cmd_tail] = cmds[i]; + + /* Advance. */ + cmd_tail = cmd_next; + } + + /* Record "skb" for eventual freeing. */ + comp_tail = eq->comp_tail; + eq->comps[comp_tail] = skb; + LEPP_QINC(comp_tail); + eq->comp_tail = comp_tail; + + /* Flush before allowing LEPP to handle the command. */ + __insn_mf(); + + eq->cmd_tail = cmd_tail; + + spin_unlock_irqrestore(&priv->cmd_lock, irqflags); + + if (nolds == 0) + nolds = tile_net_lepp_grab_comps(dev, olds, wanted, NULL); + + /* Handle completions. */ + for (i = 0; i < nolds; i++) + kfree_skb(olds[i]); + + /* HACK: Track "expanded" size for short packets (e.g. 42 < 60). */ + stats->tx_packets++; + stats->tx_bytes += ((len >= ETH_ZLEN) ? len : ETH_ZLEN); + + /* Make sure the egress timer is scheduled. */ + tile_net_schedule_egress_timer(info); + + return NETDEV_TX_OK; +} + + +/* + * Deal with a transmit timeout. + */ +static void tile_net_tx_timeout(struct net_device *dev) +{ + PDEBUG("tile_net_tx_timeout()\n"); + PDEBUG("Transmit timeout at %ld, latency %ld\n", jiffies, + jiffies - dev->trans_start); + + /* XXX: ISSUE: This doesn't seem useful for us. */ + netif_wake_queue(dev); +} + + +/* + * Ioctl commands. + */ +static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + return -EOPNOTSUPP; +} + + +/* + * Get System Network Statistics. + * + * Returns the address of the device statistics structure. + */ +static struct net_device_stats *tile_net_get_stats(struct net_device *dev) +{ + struct tile_net_priv *priv = netdev_priv(dev); + u32 rx_packets = 0; + u32 tx_packets = 0; + u32 rx_bytes = 0; + u32 tx_bytes = 0; + int i; + + for_each_online_cpu(i) { + if (priv->cpu[i]) { + rx_packets += priv->cpu[i]->stats.rx_packets; + rx_bytes += priv->cpu[i]->stats.rx_bytes; + tx_packets += priv->cpu[i]->stats.tx_packets; + tx_bytes += priv->cpu[i]->stats.tx_bytes; + } + } + + priv->stats.rx_packets = rx_packets; + priv->stats.rx_bytes = rx_bytes; + priv->stats.tx_packets = tx_packets; + priv->stats.tx_bytes = tx_bytes; + + return &priv->stats; +} + + +/* + * Change the "mtu". + * + * The "change_mtu" method is usually not needed. + * If you need it, it must be like this. + */ +static int tile_net_change_mtu(struct net_device *dev, int new_mtu) +{ + PDEBUG("tile_net_change_mtu()\n"); + + /* Check ranges. */ + if ((new_mtu < 68) || (new_mtu > 1500)) + return -EINVAL; + + /* Accept the value. */ + dev->mtu = new_mtu; + + return 0; +} + + +/* + * Change the Ethernet Address of the NIC. + * + * The hypervisor driver does not support changing MAC address. However, + * the IPP does not do anything with the MAC address, so the address which + * gets used on outgoing packets, and which is accepted on incoming packets, + * is completely up to the NetIO program or kernel driver which is actually + * handling them. + * + * Returns 0 on success, negative on failure. 
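+ *
+ * (Typically reached from userspace via something like
+ * "ip link set xgbe0 address 02:00:00:00:00:01"; as noted above,
+ * the change is purely a software-level one.)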
+ */ +static int tile_net_set_mac_address(struct net_device *dev, void *p) +{ + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EINVAL; + + /* ISSUE: Note that "dev_addr" is now a pointer. */ + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + + return 0; +} + + +/* + * Obtain the MAC address from the hypervisor. + * This must be done before opening the device. + */ +static int tile_net_get_mac(struct net_device *dev) +{ + struct tile_net_priv *priv = netdev_priv(dev); + + char hv_dev_name[32]; + int len; + + __netio_getset_offset_t offset = { .word = NETIO_IPP_PARAM_OFF }; + + int ret; + + /* For example, "xgbe0". */ + strcpy(hv_dev_name, dev->name); + len = strlen(hv_dev_name); + + /* For example, "xgbe/0". */ + hv_dev_name[len] = hv_dev_name[len - 1]; + hv_dev_name[len - 1] = '/'; + len++; + + /* For example, "xgbe/0/native_hash". */ + strcpy(hv_dev_name + len, hash_default ? "/native_hash" : "/native"); + + /* Get the hypervisor handle for this device. */ + priv->hv_devhdl = hv_dev_open((HV_VirtAddr)hv_dev_name, 0); + PDEBUG("hv_dev_open(%s) returned %d %p\n", + hv_dev_name, priv->hv_devhdl, &priv->hv_devhdl); + if (priv->hv_devhdl < 0) { + if (priv->hv_devhdl == HV_ENODEV) + printk(KERN_DEBUG "Ignoring unconfigured device %s\n", + hv_dev_name); + else + printk(KERN_DEBUG "hv_dev_open(%s) returned %d\n", + hv_dev_name, priv->hv_devhdl); + return -1; + } + + /* + * Read the hardware address from the hypervisor. + * ISSUE: Note that "dev_addr" is now a pointer. + */ + offset.bits.class = NETIO_PARAM; + offset.bits.addr = NETIO_PARAM_MAC; + ret = hv_dev_pread(priv->hv_devhdl, 0, + (HV_VirtAddr)dev->dev_addr, dev->addr_len, + offset.word); + PDEBUG("hv_dev_pread(NETIO_PARAM_MAC) returned %d\n", ret); + if (ret <= 0) { + printk(KERN_DEBUG "hv_dev_pread(NETIO_PARAM_MAC) %s failed\n", + dev->name); + /* + * Since the device is configured by the hypervisor but we + * can't get its MAC address, we are most likely running + * the simulator, so let's generate a random MAC address. + */ + random_ether_addr(dev->dev_addr); + } + + return 0; +} + + +static struct net_device_ops tile_net_ops = { + .ndo_open = tile_net_open, + .ndo_stop = tile_net_stop, + .ndo_start_xmit = tile_net_tx, + .ndo_do_ioctl = tile_net_ioctl, + .ndo_get_stats = tile_net_get_stats, + .ndo_change_mtu = tile_net_change_mtu, + .ndo_tx_timeout = tile_net_tx_timeout, + .ndo_set_mac_address = tile_net_set_mac_address +}; + + +/* + * The setup function. + * + * This uses ether_setup() to assign various fields in dev, including + * setting IFF_BROADCAST and IFF_MULTICAST, then sets some extra fields. + */ +static void tile_net_setup(struct net_device *dev) +{ + PDEBUG("tile_net_setup()\n"); + + ether_setup(dev); + + dev->netdev_ops = &tile_net_ops; + + dev->watchdog_timeo = TILE_NET_TIMEOUT; + + /* We want lockless xmit. */ + dev->features |= NETIF_F_LLTX; + + /* We support hardware tx checksums. */ + dev->features |= NETIF_F_HW_CSUM; + + /* We support scatter/gather. */ + dev->features |= NETIF_F_SG; + + /* We support TSO. */ + dev->features |= NETIF_F_TSO; + +#ifdef TILE_NET_GSO + /* We support GSO. */ + dev->features |= NETIF_F_GSO; +#endif + + if (hash_default) + dev->features |= NETIF_F_HIGHDMA; + + /* ISSUE: We should support NETIF_F_UFO. */ + + dev->tx_queue_len = TILE_NET_TX_QUEUE_LEN; + + dev->mtu = TILE_NET_MTU; +} + + +/* + * Allocate the device structure, register the device, and obtain the + * MAC address from the hypervisor. 
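+ *
+ * (Note the order below: the device is registered with the network
+ * stack before "tile_net_get_mac()" runs, so the MAC address is
+ * filled in, or randomly generated under the simulator, only after
+ * registration.)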
+ */ +static struct net_device *tile_net_dev_init(const char *name) +{ + int ret; + struct net_device *dev; + struct tile_net_priv *priv; + struct page *page; + + /* + * Allocate the device structure. This allocates "priv", calls + * tile_net_setup(), and saves "name". Normally, "name" is a + * template, instantiated by register_netdev(), but not for us. + */ + dev = alloc_netdev(sizeof(*priv), name, tile_net_setup); + if (!dev) { + pr_err("alloc_netdev(%s) failed\n", name); + return NULL; + } + + priv = netdev_priv(dev); + + /* Initialize "priv". */ + + memset(priv, 0, sizeof(*priv)); + + /* Save "dev" for "tile_net_open_retry()". */ + priv->dev = dev; + + INIT_DELAYED_WORK(&priv->retry_work, tile_net_open_retry); + + spin_lock_init(&priv->cmd_lock); + spin_lock_init(&priv->comp_lock); + + /* Allocate "epp_queue". */ + BUG_ON(get_order(sizeof(lepp_queue_t)) != 0); + page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0); + if (!page) { + free_netdev(dev); + return NULL; + } + priv->epp_queue = page_address(page); + + /* Register the network device. */ + ret = register_netdev(dev); + if (ret) { + pr_err("register_netdev %s failed %d\n", dev->name, ret); + free_page((unsigned long)priv->epp_queue); + free_netdev(dev); + return NULL; + } + + /* Get the MAC address. */ + ret = tile_net_get_mac(dev); + if (ret < 0) { + unregister_netdev(dev); + free_page((unsigned long)priv->epp_queue); + free_netdev(dev); + return NULL; + } + + return dev; +} + + +/* + * Module cleanup. + */ +static void tile_net_cleanup(void) +{ + int i; + + for (i = 0; i < TILE_NET_DEVS; i++) { + if (tile_net_devs[i]) { + struct net_device *dev = tile_net_devs[i]; + struct tile_net_priv *priv = netdev_priv(dev); + unregister_netdev(dev); + finv_buffer(priv->epp_queue, PAGE_SIZE); + free_page((unsigned long)priv->epp_queue); + free_netdev(dev); + } + } +} + + +/* + * Module initialization. + */ +static int tile_net_init_module(void) +{ + pr_info("Tilera IPP Net Driver\n"); + + tile_net_devs[0] = tile_net_dev_init("xgbe0"); + tile_net_devs[1] = tile_net_dev_init("xgbe1"); + tile_net_devs[2] = tile_net_dev_init("gbe0"); + tile_net_devs[3] = tile_net_dev_init("gbe1"); + + return 0; +} + + +#ifndef MODULE +/* + * The "network_cpus" boot argument specifies the cpus that are dedicated + * to handle ingress packets. + * + * The parameter should be in the form "network_cpus=m-n[,x-y]", where + * m, n, x, y are integer numbers that represent the cpus that can be + * neither a dedicated cpu nor a dataplane cpu. + */ +static int __init network_cpus_setup(char *str) +{ + int rc = cpulist_parse_crop(str, &network_cpus_map); + if (rc != 0) { + pr_warning("network_cpus=%s: malformed cpu list\n", + str); + } else { + + /* Remove dedicated cpus. 
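+ * (Approximated here by intersecting with "cpu_possible_mask", which
+ * also drops any out-of-range cpus from the parsed list.)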
*/ + cpumask_and(&network_cpus_map, &network_cpus_map, + cpu_possible_mask); + + + if (cpumask_empty(&network_cpus_map)) { + pr_warning("Ignoring network_cpus='%s'.\n", + str); + } else { + char buf[1024]; + cpulist_scnprintf(buf, sizeof(buf), &network_cpus_map); + pr_info("Linux network CPUs: %s\n", buf); + network_cpus_used = true; + } + } + + return 0; +} +__setup("network_cpus=", network_cpus_setup); +#endif + + +module_init(tile_net_init_module); +module_exit(tile_net_cleanup); diff --git a/drivers/net/ucc_geth.h b/drivers/net/ucc_geth.h index 05a95586f3c5..055b87ab4f07 100644 --- a/drivers/net/ucc_geth.h +++ b/drivers/net/ucc_geth.h @@ -899,7 +899,8 @@ struct ucc_geth_hardware_statistics { #define UCC_GETH_UTFS_INIT 512 /* Tx virtual FIFO size */ #define UCC_GETH_UTFET_INIT 256 /* 1/2 utfs */ -#define UCC_GETH_UTFTT_INIT 512 +#define UCC_GETH_UTFTT_INIT 256 /* 1/2 utfs + due to errata */ /* Gigabit Ethernet (1000 Mbps) */ #define UCC_GETH_URFS_GIGA_INIT 4096/*2048*/ /* Rx virtual FIFO size */ diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index b154a94de03e..62e9e8dc8190 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c @@ -2994,12 +2994,14 @@ static int hso_probe(struct usb_interface *interface, case HSO_INTF_BULK: /* It's a regular bulk interface */ - if (((port_spec & HSO_PORT_MASK) == HSO_PORT_NETWORK) && - !disable_net) - hso_dev = hso_create_net_device(interface, port_spec); - else + if ((port_spec & HSO_PORT_MASK) == HSO_PORT_NETWORK) { + if (!disable_net) + hso_dev = + hso_create_net_device(interface, port_spec); + } else { hso_dev = hso_create_bulk_serial_device(interface, port_spec); + } if (!hso_dev) goto exit; break; diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c index d81ad8397885..24297b274cd4 100644 --- a/drivers/net/wan/x25_asy.c +++ b/drivers/net/wan/x25_asy.c @@ -498,7 +498,6 @@ norbuff: static int x25_asy_close(struct net_device *dev) { struct x25_asy *sl = netdev_priv(dev); - int err; spin_lock(&sl->lock); if (sl->tty) @@ -507,10 +506,6 @@ static int x25_asy_close(struct net_device *dev) netif_stop_queue(dev); sl->rcount = 0; sl->xleft = 0; - err = lapb_unregister(dev); - if (err != LAPB_OK) - printk(KERN_ERR "x25_asy_close: lapb_unregister error -%d\n", - err); spin_unlock(&sl->lock); return 0; } @@ -582,7 +577,7 @@ static int x25_asy_open_tty(struct tty_struct *tty) if (err) return err; /* Done. We have linked the TTY line to a channel. */ - return sl->dev->base_addr; + return 0; } @@ -595,6 +590,7 @@ static int x25_asy_open_tty(struct tty_struct *tty) static void x25_asy_close_tty(struct tty_struct *tty) { struct x25_asy *sl = tty->disc_data; + int err; /* First make sure we're connected. 
*/ if (!sl || sl->magic != X25_ASY_MAGIC) @@ -605,6 +601,11 @@ static void x25_asy_close_tty(struct tty_struct *tty) dev_close(sl->dev); rtnl_unlock(); + err = lapb_unregister(sl->dev); + if (err != LAPB_OK) + printk(KERN_ERR "x25_asy_close: lapb_unregister error -%d\n", + err); + tty->disc_data = NULL; sl->tty = NULL; x25_asy_free(sl); diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index c76ea53c20ce..1a62e351ec77 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c @@ -518,7 +518,7 @@ bool ath_stoprecv(struct ath_softc *sc) bool stopped; spin_lock_bh(&sc->rx.rxbuflock); - ath9k_hw_stoppcurecv(ah); + ath9k_hw_abortpcurecv(ah); ath9k_hw_setrxfilter(ah, 0); stopped = ath9k_hw_stopdmarecv(ah); diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c index 980ae70ea424..a314c2c2bfbe 100644 --- a/drivers/net/wireless/ath/carl9170/main.c +++ b/drivers/net/wireless/ath/carl9170/main.c @@ -647,7 +647,7 @@ init: } unlock: - if (err && (vif_id != -1)) { + if (err && (vif_id >= 0)) { vif_priv->active = false; bitmap_release_region(&ar->vif_bitmap, vif_id, 0); ar->vifs--; diff --git a/drivers/net/wireless/b43/sdio.c b/drivers/net/wireless/b43/sdio.c index 9a55338d957f..09e2dfd7b175 100644 --- a/drivers/net/wireless/b43/sdio.c +++ b/drivers/net/wireless/b43/sdio.c @@ -163,6 +163,7 @@ static int b43_sdio_probe(struct sdio_func *func, err_free_ssb: kfree(sdio); err_disable_func: + sdio_claim_host(func); sdio_disable_func(func); err_release_host: sdio_release_host(func); diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c index d9f51485beee..9383063d2b16 100644 --- a/drivers/parisc/dino.c +++ b/drivers/parisc/dino.c @@ -349,7 +349,6 @@ static struct irq_chip dino_interrupt_type = { .name = "GSC-PCI", .unmask = dino_unmask_irq, .mask = dino_mask_irq, - .ack = no_ack_irq, }; diff --git a/drivers/parisc/eisa.c b/drivers/parisc/eisa.c index 1211974f55aa..e860038b0b84 100644 --- a/drivers/parisc/eisa.c +++ b/drivers/parisc/eisa.c @@ -186,7 +186,6 @@ static struct irq_chip eisa_interrupt_type = { .name = "EISA", .unmask = eisa_unmask_irq, .mask = eisa_mask_irq, - .ack = no_ack_irq, }; static irqreturn_t eisa_irq(int wax_irq, void *intr_dev) @@ -340,7 +339,7 @@ static int __init eisa_probe(struct parisc_device *dev) setup_irq(2, &irq2_action); for (i = 0; i < 16; i++) { set_irq_chip_and_handler(i, &eisa_interrupt_type, - handle_level_irq); + handle_simple_irq); } EISA_bus = 1; diff --git a/drivers/parisc/gsc.c b/drivers/parisc/gsc.c index e605298e3aee..772b1939ac21 100644 --- a/drivers/parisc/gsc.c +++ b/drivers/parisc/gsc.c @@ -143,7 +143,6 @@ static struct irq_chip gsc_asic_interrupt_type = { .name = "GSC-ASIC", .unmask = gsc_asic_unmask_irq, .mask = gsc_asic_mask_irq, - .ack = no_ack_irq, }; int gsc_assign_irq(struct irq_chip *type, void *data) @@ -153,7 +152,7 @@ int gsc_assign_irq(struct irq_chip *type, void *data) if (irq > GSC_IRQ_MAX) return NO_IRQ; - set_irq_chip_and_handler(irq, type, handle_level_irq); + set_irq_chip_and_handler(irq, type, handle_simple_irq); set_irq_chip_data(irq, data); return irq++; diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c index a3120a09c43d..0327894bf235 100644 --- a/drivers/parisc/iosapic.c +++ b/drivers/parisc/iosapic.c @@ -669,6 +669,13 @@ printk("\n"); DBG(KERN_DEBUG "enable_irq(%d): eoi(%p, 0x%x)\n", irq, vi->eoi_addr, vi->eoi_data); iosapic_eoi(vi->eoi_addr, vi->eoi_data); +} + +static void iosapic_eoi_irq(unsigned int 
irq) +{ + struct vector_info *vi = get_irq_chip_data(irq); + + iosapic_eoi(vi->eoi_addr, vi->eoi_data); cpu_eoi_irq(irq); } @@ -705,6 +712,7 @@ static struct irq_chip iosapic_interrupt_type = { .unmask = iosapic_unmask_irq, .mask = iosapic_mask_irq, .ack = cpu_ack_irq, + .eoi = iosapic_eoi_irq, #ifdef CONFIG_SMP .set_affinity = iosapic_set_affinity_irq, #endif diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c index 2350e8a86eef..f2f501e5b6a0 100644 --- a/drivers/parisc/led.c +++ b/drivers/parisc/led.c @@ -64,6 +64,7 @@ static unsigned int led_diskio __read_mostly = 1; static unsigned int led_lanrxtx __read_mostly = 1; static char lcd_text[32] __read_mostly; static char lcd_text_default[32] __read_mostly; +static int lcd_no_led_support __read_mostly = 0; /* KittyHawk doesn't support LED on its LCD */ static struct workqueue_struct *led_wq; @@ -115,7 +116,7 @@ lcd_info __attribute__((aligned(8))) __read_mostly = .lcd_width = 16, .lcd_cmd_reg_addr = KITTYHAWK_LCD_CMD, .lcd_data_reg_addr = KITTYHAWK_LCD_DATA, - .min_cmd_delay = 40, + .min_cmd_delay = 80, .reset_cmd1 = 0x80, .reset_cmd2 = 0xc0, }; @@ -135,6 +136,9 @@ static int start_task(void) /* Display the default text now */ if (led_type == LED_HASLCD) lcd_print( lcd_text_default ); + /* KittyHawk has no LED support on its LCD */ + if (lcd_no_led_support) return 0; + /* Create the work queue and queue the LED task */ led_wq = create_singlethread_workqueue("led_wq"); queue_delayed_work(led_wq, &led_task, 0); @@ -248,9 +252,13 @@ static int __init led_create_procfs(void) proc_pdc_root = proc_mkdir("pdc", 0); if (!proc_pdc_root) return -1; - ent = proc_create_data("led", S_IRUGO|S_IWUSR, proc_pdc_root, - &led_proc_fops, (void *)LED_NOLCD); /* LED */ - if (!ent) return -1; + + if (!lcd_no_led_support) + { + ent = proc_create_data("led", S_IRUGO|S_IWUSR, proc_pdc_root, + &led_proc_fops, (void *)LED_NOLCD); /* LED */ + if (!ent) return -1; + } if (led_type == LED_HASLCD) { @@ -692,6 +700,7 @@ int __init led_init(void) case 0x58B: /* KittyHawk DC2 100 (K200) */ printk(KERN_INFO "%s: KittyHawk-Machine (hversion 0x%x) found, " "LED detection skipped.\n", __FILE__, CPU_HVERSION); + lcd_no_led_support = 1; goto found; /* use the preinitialized values of lcd_info */ } diff --git a/drivers/parisc/superio.c b/drivers/parisc/superio.c index 0846dafdfff1..28241532c0fd 100644 --- a/drivers/parisc/superio.c +++ b/drivers/parisc/superio.c @@ -323,7 +323,6 @@ static struct irq_chip superio_interrupt_type = { .name = SUPERIO, .unmask = superio_unmask_irq, .mask = superio_mask_irq, - .ack = no_ack_irq, }; #ifdef DEBUG_SUPERIO_INIT @@ -354,7 +353,7 @@ int superio_fixup_irq(struct pci_dev *pcidev) #endif for (i = 0; i < 16; i++) { - set_irq_chip_and_handler(i, &superio_interrupt_type, handle_level_irq); + set_irq_chip_and_handler(i, &superio_interrupt_type, handle_simple_irq); } /* diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile index f01e344cf4bd..98e6fdf34d30 100644 --- a/drivers/pci/Makefile +++ b/drivers/pci/Makefile @@ -49,6 +49,7 @@ obj-$(CONFIG_MIPS) += setup-bus.o setup-irq.o obj-$(CONFIG_X86_VISWS) += setup-irq.o obj-$(CONFIG_MN10300) += setup-bus.o obj-$(CONFIG_MICROBLAZE) += setup-bus.o +obj-$(CONFIG_TILE) += setup-bus.o setup-irq.o # # ACPI Related PCI FW Functions diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index f5c63fe9db5c..6f9350cabbd5 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -2136,6 +2136,24 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82865_HB, 
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82875_HB, quirk_unhide_mch_dev6); +#ifdef CONFIG_TILE +/* + * The Tilera TILEmpower platform needs to set the link speed + * to 2.5GT(Giga-Transfers)/s (Gen 1). The default link speed + * setting is 5GT/s (Gen 2). 0x98 is the Link Control2 PCIe + * capability register of the PEX8624 PCIe switch. The switch + * supports link speed auto negotiation, but falsely sets + * the link speed to 5GT/s. + */ +static void __devinit quirk_tile_plx_gen1(struct pci_dev *dev) +{ + if (tile_plx_gen1) { + pci_write_config_dword(dev, 0x98, 0x1); + mdelay(50); + } +} +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_PLX, 0x8624, quirk_tile_plx_gen1); +#endif /* CONFIG_TILE */ #ifdef CONFIG_PCI_MSI /* Some chipsets do not support MSI. We cannot easily rely on setting diff --git a/drivers/pcmcia/soc_common.c b/drivers/pcmcia/soc_common.c index 3753fd0722e7..2fe8cb8e95cd 100644 --- a/drivers/pcmcia/soc_common.c +++ b/drivers/pcmcia/soc_common.c @@ -70,6 +70,7 @@ void soc_pcmcia_debug(struct soc_pcmcia_socket *skt, const char *func, va_end(args); } } +EXPORT_SYMBOL(soc_pcmcia_debug); #endif diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c index 60a5a5c6b50a..d235f44fd7a3 100644 --- a/drivers/platform/x86/asus-laptop.c +++ b/drivers/platform/x86/asus-laptop.c @@ -81,6 +81,8 @@ MODULE_PARM_DESC(wapf, "WAPF value"); static int wlan_status = 1; static int bluetooth_status = 1; +static int wimax_status = -1; +static int wwan_status = -1; module_param(wlan_status, int, 0444); MODULE_PARM_DESC(wlan_status, "Set the wireless status on boot " @@ -92,6 +94,16 @@ MODULE_PARM_DESC(bluetooth_status, "Set the wireless status on boot " "(0 = disabled, 1 = enabled, -1 = don't do anything). " "default is 1"); +module_param(wimax_status, int, 0444); +MODULE_PARM_DESC(wimax_status, "Set the wireless status on boot " + "(0 = disabled, 1 = enabled, -1 = don't do anything). " + "default is 1"); + +module_param(wwan_status, int, 0444); +MODULE_PARM_DESC(wwan_status, "Set the wireless status on boot " + "(0 = disabled, 1 = enabled, -1 = don't do anything). 
" + "default is 1"); + /* * Some events we use, same for all Asus */ @@ -114,6 +126,8 @@ MODULE_PARM_DESC(bluetooth_status, "Set the wireless status on boot " */ #define WL_RSTS 0x01 /* internal Wifi */ #define BT_RSTS 0x02 /* internal Bluetooth */ +#define WM_RSTS 0x08 /* internal wimax */ +#define WW_RSTS 0x20 /* internal wwan */ /* LED */ #define METHOD_MLED "MLED" @@ -132,6 +146,11 @@ MODULE_PARM_DESC(bluetooth_status, "Set the wireless status on boot " */ #define METHOD_WLAN "WLED" #define METHOD_BLUETOOTH "BLED" + +/* WWAN and WIMAX */ +#define METHOD_WWAN "GSMC" +#define METHOD_WIMAX "WMXC" + #define METHOD_WL_STATUS "RSTS" /* Brightness */ @@ -883,6 +902,64 @@ static ssize_t store_bluetooth(struct device *dev, } /* + * Wimax + */ +static int asus_wimax_set(struct asus_laptop *asus, int status) +{ + if (write_acpi_int(asus->handle, METHOD_WIMAX, !!status)) { + pr_warning("Error setting wimax status to %d", status); + return -EIO; + } + return 0; +} + +static ssize_t show_wimax(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct asus_laptop *asus = dev_get_drvdata(dev); + + return sprintf(buf, "%d\n", asus_wireless_status(asus, WM_RSTS)); +} + +static ssize_t store_wimax(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t count) +{ + struct asus_laptop *asus = dev_get_drvdata(dev); + + return sysfs_acpi_set(asus, buf, count, METHOD_WIMAX); +} + +/* + * Wwan + */ +static int asus_wwan_set(struct asus_laptop *asus, int status) +{ + if (write_acpi_int(asus->handle, METHOD_WWAN, !!status)) { + pr_warning("Error setting wwan status to %d", status); + return -EIO; + } + return 0; +} + +static ssize_t show_wwan(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct asus_laptop *asus = dev_get_drvdata(dev); + + return sprintf(buf, "%d\n", asus_wireless_status(asus, WW_RSTS)); +} + +static ssize_t store_wwan(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t count) +{ + struct asus_laptop *asus = dev_get_drvdata(dev); + + return sysfs_acpi_set(asus, buf, count, METHOD_WWAN); +} + +/* * Display */ static void asus_set_display(struct asus_laptop *asus, int value) @@ -1202,6 +1279,8 @@ static DEVICE_ATTR(infos, S_IRUGO, show_infos, NULL); static DEVICE_ATTR(wlan, S_IRUGO | S_IWUSR, show_wlan, store_wlan); static DEVICE_ATTR(bluetooth, S_IRUGO | S_IWUSR, show_bluetooth, store_bluetooth); +static DEVICE_ATTR(wimax, S_IRUGO | S_IWUSR, show_wimax, store_wimax); +static DEVICE_ATTR(wwan, S_IRUGO | S_IWUSR, show_wwan, store_wwan); static DEVICE_ATTR(display, S_IRUGO | S_IWUSR, show_disp, store_disp); static DEVICE_ATTR(ledd, S_IRUGO | S_IWUSR, show_ledd, store_ledd); static DEVICE_ATTR(ls_level, S_IRUGO | S_IWUSR, show_lslvl, store_lslvl); @@ -1212,6 +1291,8 @@ static struct attribute *asus_attributes[] = { &dev_attr_infos.attr, &dev_attr_wlan.attr, &dev_attr_bluetooth.attr, + &dev_attr_wimax.attr, + &dev_attr_wwan.attr, &dev_attr_display.attr, &dev_attr_ledd.attr, &dev_attr_ls_level.attr, @@ -1239,6 +1320,13 @@ static mode_t asus_sysfs_is_visible(struct kobject *kobj, } else if (attr == &dev_attr_display.attr) { supported = !acpi_check_handle(handle, METHOD_SWITCH_DISPLAY, NULL); + } else if (attr == &dev_attr_wimax.attr) { + supported = + !acpi_check_handle(asus->handle, METHOD_WIMAX, NULL); + + } else if (attr == &dev_attr_wwan.attr) { + supported = !acpi_check_handle(asus->handle, METHOD_WWAN, NULL); + } else if (attr == &dev_attr_ledd.attr) { supported = !acpi_check_handle(handle, METHOD_LEDD, 
NULL); @@ -1397,7 +1485,8 @@ static int asus_laptop_get_info(struct asus_laptop *asus) /* * The HWRS method return informations about the hardware. - * 0x80 bit is for WLAN, 0x100 for Bluetooth. + * 0x80 bit is for WLAN, 0x100 for Bluetooth, + * 0x40 for WWAN, 0x10 for WIMAX. * The significance of others is yet to be found. */ status = @@ -1440,6 +1529,12 @@ static int __devinit asus_acpi_init(struct asus_laptop *asus) if (wlan_status >= 0) asus_wlan_set(asus, !!wlan_status); + if (wimax_status >= 0) + asus_wimax_set(asus, !!wimax_status); + + if (wwan_status >= 0) + asus_wwan_set(asus, !!wwan_status); + /* Keyboard Backlight is on by default */ if (!acpi_check_handle(asus->handle, METHOD_KBD_LIGHT_SET, NULL)) asus_kled_set(asus, 1); diff --git a/drivers/platform/x86/eeepc-wmi.c b/drivers/platform/x86/eeepc-wmi.c index 462ceab93f87..0d50fbbe2478 100644 --- a/drivers/platform/x86/eeepc-wmi.c +++ b/drivers/platform/x86/eeepc-wmi.c @@ -298,8 +298,8 @@ static void eeepc_wmi_notify(u32 value, void *context) kfree(obj); } -static int store_cpufv(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) +static ssize_t store_cpufv(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) { int value; struct acpi_buffer input = { (acpi_size)sizeof(value), &value }; diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c index 1dac659b5e0c..9e05af9c41cb 100644 --- a/drivers/platform/x86/hp-wmi.c +++ b/drivers/platform/x86/hp-wmi.c @@ -172,6 +172,8 @@ static int hp_wmi_perform_query(int query, int write, u32 *buffer, bios_return = *((struct bios_return *)obj->buffer.pointer); memcpy(buffer, &bios_return.value, sizeof(bios_return.value)); + + kfree(obj); return 0; } diff --git a/drivers/platform/x86/ibm_rtl.c b/drivers/platform/x86/ibm_rtl.c index 3c2c6b91ecb3..94a114aa8e28 100644 --- a/drivers/platform/x86/ibm_rtl.c +++ b/drivers/platform/x86/ibm_rtl.c @@ -28,6 +28,7 @@ #include <linux/io.h> #include <linux/sysdev.h> #include <linux/dmi.h> +#include <linux/efi.h> #include <linux/mutex.h> #include <asm/bios_ebda.h> @@ -220,32 +221,13 @@ static void rtl_teardown_sysfs(void) { sysdev_class_unregister(&class_rtl); } -static int dmi_check_cb(const struct dmi_system_id *id) -{ - RTL_DEBUG("found IBM server '%s'\n", id->ident); - return 0; -} - -#define ibm_dmi_entry(NAME, TYPE) \ -{ \ - .ident = NAME, \ - .matches = { \ - DMI_MATCH(DMI_SYS_VENDOR, "IBM"), \ - DMI_MATCH(DMI_PRODUCT_NAME, TYPE), \ - }, \ - .callback = dmi_check_cb \ -} static struct dmi_system_id __initdata ibm_rtl_dmi_table[] = { - ibm_dmi_entry("BladeCenter LS21", "7971"), - ibm_dmi_entry("BladeCenter LS22", "7901"), - ibm_dmi_entry("BladeCenter HS21 XM", "7995"), - ibm_dmi_entry("BladeCenter HS22", "7870"), - ibm_dmi_entry("BladeCenter HS22V", "7871"), - ibm_dmi_entry("System x3550 M2", "7946"), - ibm_dmi_entry("System x3650 M2", "7947"), - ibm_dmi_entry("System x3550 M3", "7944"), - ibm_dmi_entry("System x3650 M3", "7945"), + { \ + .matches = { \ + DMI_MATCH(DMI_SYS_VENDOR, "IBM"), \ + }, \ + }, { } }; @@ -257,7 +239,7 @@ static int __init ibm_rtl_init(void) { if (force) pr_warning("ibm-rtl: module loaded by force\n"); /* first ensure that we are running on IBM HW */ - else if (!dmi_check_system(ibm_rtl_dmi_table)) + else if (efi_enabled || !dmi_check_system(ibm_rtl_dmi_table)) return -ENODEV; /* Get the address for the Extended BIOS Data Area */ @@ -302,7 +284,7 @@ static int __init ibm_rtl_init(void) { RTL_DEBUG("rtl_cmd_width = %u, rtl_cmd_type = %u\n", 
rtl_cmd_width, rtl_cmd_type); addr = ioread32(&rtl_table->cmd_port_address); - RTL_DEBUG("addr = %#llx\n", addr); + RTL_DEBUG("addr = %#llx\n", (unsigned long long)addr); plen = rtl_cmd_width/sizeof(char); rtl_cmd_addr = rtl_port_map(addr, plen); RTL_DEBUG("rtl_cmd_addr = %#llx\n", (u64)rtl_cmd_addr); diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c index 42a5469a2459..35278ad7e628 100644 --- a/drivers/platform/x86/msi-wmi.c +++ b/drivers/platform/x86/msi-wmi.c @@ -43,16 +43,18 @@ MODULE_ALIAS("wmi:B6F3EEF2-3D2F-49DC-9DE3-85BCE18C62F2"); #define dprintk(msg...) pr_debug(DRV_PFX msg) -#define KEYCODE_BASE 0xD0 -#define MSI_WMI_BRIGHTNESSUP KEYCODE_BASE -#define MSI_WMI_BRIGHTNESSDOWN (KEYCODE_BASE + 1) -#define MSI_WMI_VOLUMEUP (KEYCODE_BASE + 2) -#define MSI_WMI_VOLUMEDOWN (KEYCODE_BASE + 3) +#define SCANCODE_BASE 0xD0 +#define MSI_WMI_BRIGHTNESSUP SCANCODE_BASE +#define MSI_WMI_BRIGHTNESSDOWN (SCANCODE_BASE + 1) +#define MSI_WMI_VOLUMEUP (SCANCODE_BASE + 2) +#define MSI_WMI_VOLUMEDOWN (SCANCODE_BASE + 3) +#define MSI_WMI_MUTE (SCANCODE_BASE + 4) static struct key_entry msi_wmi_keymap[] = { { KE_KEY, MSI_WMI_BRIGHTNESSUP, {KEY_BRIGHTNESSUP} }, { KE_KEY, MSI_WMI_BRIGHTNESSDOWN, {KEY_BRIGHTNESSDOWN} }, { KE_KEY, MSI_WMI_VOLUMEUP, {KEY_VOLUMEUP} }, { KE_KEY, MSI_WMI_VOLUMEDOWN, {KEY_VOLUMEDOWN} }, + { KE_KEY, MSI_WMI_MUTE, {KEY_MUTE} }, { KE_END, 0} }; static ktime_t last_pressed[ARRAY_SIZE(msi_wmi_keymap) - 1]; @@ -169,7 +171,7 @@ static void msi_wmi_notify(u32 value, void *context) ktime_t diff; cur = ktime_get_real(); diff = ktime_sub(cur, last_pressed[key->code - - KEYCODE_BASE]); + SCANCODE_BASE]); /* Ignore event if the same event happened in a 50 ms timeframe -> Key press may result in 10-20 GPEs */ if (ktime_to_us(diff) < 1000 * 50) { @@ -178,7 +180,7 @@ static void msi_wmi_notify(u32 value, void *context) key->code, ktime_to_us(diff)); return; } - last_pressed[key->code - KEYCODE_BASE] = cur; + last_pressed[key->code - SCANCODE_BASE] = cur; if (key->type == KE_KEY && /* Brightness is served via acpi video driver */ diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index 2d61186ad5a2..e8c21994b36d 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -8497,7 +8497,6 @@ static void ibm_exit(struct ibm_struct *ibm) ibm->acpi->type, dispatch_acpi_notify); ibm->flags.acpi_notify_installed = 0; - ibm->flags.acpi_notify_installed = 0; } if (ibm->flags.proc_created) { diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c index 06f304f46e02..4276da7291b8 100644 --- a/drivers/platform/x86/toshiba_acpi.c +++ b/drivers/platform/x86/toshiba_acpi.c @@ -135,6 +135,7 @@ static const struct key_entry toshiba_acpi_keymap[] __initconst = { { KE_KEY, 0x141, { KEY_BRIGHTNESSUP } }, { KE_KEY, 0x142, { KEY_WLAN } }, { KE_KEY, 0x143, { KEY_PROG1 } }, + { KE_KEY, 0x17f, { KEY_FN } }, { KE_KEY, 0xb05, { KEY_PROG2 } }, { KE_KEY, 0xb06, { KEY_WWW } }, { KE_KEY, 0xb07, { KEY_MAIL } }, diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c index 104b77c87ef5..aecd9a9b549f 100644 --- a/drivers/platform/x86/wmi.c +++ b/drivers/platform/x86/wmi.c @@ -755,7 +755,7 @@ static bool guid_already_parsed(const char *guid_string) struct wmi_block *wblock; list_for_each_entry(wblock, &wmi_block_list, list) - if (strncmp(wblock->gblock.guid, guid_string, 16) == 0) + if (memcmp(wblock->gblock.guid, guid_string, 16) == 0) return true; return false; diff --git 
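The wmi.c change above swaps strncmp() for memcmp() because a WMI GUID is 16 raw bytes, not a NUL-terminated C string. A small userspace illustration of the bug (the byte values are made up):

#include <stdio.h>
#include <string.h>

int main(void)
{
        /* Two GUIDs that agree only up to an embedded zero byte. */
        unsigned char a[16] = { 0xB6, 0x00, 0x11 };  /* rest zero */
        unsigned char b[16] = { 0xB6, 0x00, 0x22 };

        /* strncmp() stops at the first NUL, so it wrongly reports a match. */
        printf("strncmp: %d\n", strncmp((char *)a, (char *)b, 16)); /* 0 */
        /* memcmp() compares all 16 bytes and sees the difference. */
        printf("memcmp:  %d\n", memcmp(a, b, 16));                  /* nonzero */
        return 0;
}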
a/drivers/regulator/core.c b/drivers/regulator/core.c index f1d10c974cd4..ba521f0f0fac 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -911,7 +911,7 @@ out: } /** - * set_consumer_device_supply: Bind a regulator to a symbolic supply + * set_consumer_device_supply - Bind a regulator to a symbolic supply * @rdev: regulator source * @consumer_dev: device the supply applies to * @consumer_dev_name: dev_name() string for device supply applies to @@ -1052,7 +1052,6 @@ static struct regulator *create_regulator(struct regulator_dev *rdev, printk(KERN_WARNING "%s: could not add device link %s err %d\n", __func__, dev->kobj.name, err); - device_remove_file(dev, ®ulator->dev_attr); goto link_name_err; } } @@ -1268,13 +1267,17 @@ static int _regulator_enable(struct regulator_dev *rdev) { int ret, delay; - /* do we need to enable the supply regulator first */ - if (rdev->supply) { - ret = _regulator_enable(rdev->supply); - if (ret < 0) { - printk(KERN_ERR "%s: failed to enable %s: %d\n", - __func__, rdev_get_name(rdev), ret); - return ret; + if (rdev->use_count == 0) { + /* do we need to enable the supply regulator first */ + if (rdev->supply) { + mutex_lock(&rdev->supply->mutex); + ret = _regulator_enable(rdev->supply); + mutex_unlock(&rdev->supply->mutex); + if (ret < 0) { + printk(KERN_ERR "%s: failed to enable %s: %d\n", + __func__, rdev_get_name(rdev), ret); + return ret; + } } } @@ -1313,10 +1316,12 @@ static int _regulator_enable(struct regulator_dev *rdev) if (ret < 0) return ret; - if (delay >= 1000) + if (delay >= 1000) { mdelay(delay / 1000); - else if (delay) + udelay(delay % 1000); + } else if (delay) { udelay(delay); + } } else if (ret < 0) { printk(KERN_ERR "%s: is_enabled() failed for %s: %d\n", @@ -1359,6 +1364,7 @@ static int _regulator_disable(struct regulator_dev *rdev, struct regulator_dev **supply_rdev_ptr) { int ret = 0; + *supply_rdev_ptr = NULL; if (WARN(rdev->use_count <= 0, "unbalanced disables for %s\n", @@ -2346,6 +2352,7 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc, if (init_data->supply_regulator && init_data->supply_regulator_dev) { dev_err(dev, "Supply regulator specified by both name and dev\n"); + ret = -EINVAL; goto scrub; } @@ -2364,6 +2371,7 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc, if (!found) { dev_err(dev, "Failed to find supply %s\n", init_data->supply_regulator); + ret = -ENODEV; goto scrub; } diff --git a/drivers/regulator/mc13783-regulator.c b/drivers/regulator/mc13783-regulator.c index 4597d508a229..ecd99f59dba8 100644 --- a/drivers/regulator/mc13783-regulator.c +++ b/drivers/regulator/mc13783-regulator.c @@ -465,8 +465,8 @@ static struct regulator_ops mc13783_fixed_regulator_ops = { .get_voltage = mc13783_fixed_regulator_get_voltage, }; -int mc13783_powermisc_rmw(struct mc13783_regulator_priv *priv, u32 mask, - u32 val) +static int mc13783_powermisc_rmw(struct mc13783_regulator_priv *priv, u32 mask, + u32 val) { struct mc13783 *mc13783 = priv->mc13783; int ret; diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c index 7e5892efc437..a57262a4fa6c 100644 --- a/drivers/regulator/twl-regulator.c +++ b/drivers/regulator/twl-regulator.c @@ -219,12 +219,12 @@ static int twlreg_set_mode(struct regulator_dev *rdev, unsigned mode) return -EACCES; status = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, - message >> 8, 0x15 /* PB_WORD_MSB */ ); - if (status >= 0) + message >> 8, TWL4030_PM_MASTER_PB_WORD_MSB); + if (status < 0) return status; 
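The twl-regulator hunk in progress here fixes an inverted check: the old "if (status >= 0) return status;" bailed out after a *successful* MSB write, so the LSB half of the power-bus word was never sent. The corrected two-stage write as a sketch (the helper name is illustrative; the constants are as named in the hunk, and twl_i2c_write_u8() returns 0 on success, negative on error):

#include <linux/i2c/twl.h>

static int pb_send_message(u16 message)
{
        int status;

        /* High byte first ... */
        status = twl_i2c_write_u8(TWL_MODULE_PM_MASTER,
                                  message >> 8, TWL4030_PM_MASTER_PB_WORD_MSB);
        if (status < 0)
                return status;  /* bail out only on error */

        /* ... then the low byte completes the 16-bit word. */
        return twl_i2c_write_u8(TWL_MODULE_PM_MASTER,
                                message & 0xff, TWL4030_PM_MASTER_PB_WORD_LSB);
}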
return twl_i2c_write_u8(TWL_MODULE_PM_MASTER, - message, 0x16 /* PB_WORD_LSB */ ); + message & 0xff, TWL4030_PM_MASTER_PB_WORD_LSB); } /*----------------------------------------------------------------------*/ diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index a5050e217150..825951b6b83f 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c @@ -635,7 +635,7 @@ static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow) init_subchannel_id(&mchk_schid); mchk_schid.sch_no = crw0->rsid; if (crw1) - mchk_schid.ssid = (crw1->rsid >> 8) & 3; + mchk_schid.ssid = (crw1->rsid >> 4) & 3; /* * Since we are always presented with IPI in the CRW, we have to diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c index 752dbee06af5..5d9c66627b6e 100644 --- a/drivers/s390/cio/qdio_thinint.c +++ b/drivers/s390/cio/qdio_thinint.c @@ -292,8 +292,8 @@ void qdio_shutdown_thinint(struct qdio_irq *irq_ptr) return; /* reset adapter interrupt indicators */ - put_indicator(irq_ptr->dsci); set_subchannel_ind(irq_ptr, 1); + put_indicator(irq_ptr->dsci); } void __exit tiqdio_unregister_thinints(void) diff --git a/drivers/scsi/arm/fas216.h b/drivers/scsi/arm/fas216.h index 377cfb72cc66..f30f8d659dc4 100644 --- a/drivers/scsi/arm/fas216.h +++ b/drivers/scsi/arm/fas216.h @@ -345,7 +345,7 @@ extern int fas216_queue_command(struct Scsi_Host *h, struct scsi_cmnd *SCpnt); * : SCpnt - Command to queue * Returns : 0 - success, else error */ -extern int fas216_noqueue_command(struct Scsi_Host *, struct scsi_cmnd *) +extern int fas216_noqueue_command(struct Scsi_Host *, struct scsi_cmnd *); /* Function: irqreturn_t fas216_intr (FAS216_Info *info) * Purpose : handle interrupts from the interface to progress a command diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c index 4d8e14b7aa93..09a550860dcf 100644 --- a/drivers/serial/8250.c +++ b/drivers/serial/8250.c @@ -2872,7 +2872,7 @@ static struct console serial8250_console = { .device = uart_console_device, .setup = serial8250_console_setup, .early_setup = serial8250_console_early_setup, - .flags = CON_PRINTBUFFER, + .flags = CON_PRINTBUFFER | CON_ANYTIME, .index = -1, .data = &serial8250_reg, }; diff --git a/drivers/serial/mfd.c b/drivers/serial/mfd.c index 5fc699e929dc..d40010a22ecd 100644 --- a/drivers/serial/mfd.c +++ b/drivers/serial/mfd.c @@ -900,8 +900,7 @@ serial_hsu_set_termios(struct uart_port *port, struct ktermios *termios, unsigned char cval, fcr = 0; unsigned long flags; unsigned int baud, quot; - u32 mul = 0x3600; - u32 ps = 0x10; + u32 ps, mul; switch (termios->c_cflag & CSIZE) { case CS5: @@ -943,31 +942,24 @@ serial_hsu_set_termios(struct uart_port *port, struct ktermios *termios, baud = uart_get_baud_rate(port, termios, old, 0, 4000000); quot = 1; + ps = 0x10; + mul = 0x3600; switch (baud) { case 3500000: mul = 0x3345; ps = 0xC; break; - case 3000000: - mul = 0x2EE0; - break; - case 2500000: - mul = 0x2710; - break; - case 2000000: - mul = 0x1F40; - break; case 1843200: mul = 0x2400; break; + case 3000000: + case 2500000: + case 2000000: case 1500000: - mul = 0x1770; - break; case 1000000: - mul = 0xFA0; - break; case 500000: - mul = 0x7D0; + /* mul/ps/quot = 0x9C4/0x10/0x1 will make a 500000 bps */ + mul = baud / 500000 * 0x9C4; break; default: /* Use uart_get_divisor to get quot for other baud rates */ diff --git a/drivers/sh/clk/core.c b/drivers/sh/clk/core.c index cb12a8e1466b..3f5e387ed564 100644 --- a/drivers/sh/clk/core.c +++ b/drivers/sh/clk/core.c @@ -418,8 +418,11 @@ int 
clk_register(struct clk *clk) list_add(&clk->sibling, &root_clks); list_add(&clk->node, &clock_list); + +#ifdef CONFIG_SH_CLK_CPG_LEGACY if (clk->ops && clk->ops->init) clk->ops->init(clk); +#endif out_unlock: mutex_unlock(&clock_list_sem); @@ -455,19 +458,13 @@ EXPORT_SYMBOL_GPL(clk_get_rate); int clk_set_rate(struct clk *clk, unsigned long rate) { - return clk_set_rate_ex(clk, rate, 0); -} -EXPORT_SYMBOL_GPL(clk_set_rate); - -int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id) -{ int ret = -EOPNOTSUPP; unsigned long flags; spin_lock_irqsave(&clock_lock, flags); if (likely(clk->ops && clk->ops->set_rate)) { - ret = clk->ops->set_rate(clk, rate, algo_id); + ret = clk->ops->set_rate(clk, rate); if (ret != 0) goto out_unlock; } else { @@ -485,7 +482,7 @@ out_unlock: return ret; } -EXPORT_SYMBOL_GPL(clk_set_rate_ex); +EXPORT_SYMBOL_GPL(clk_set_rate); int clk_set_parent(struct clk *clk, struct clk *parent) { @@ -653,8 +650,7 @@ static int clks_sysdev_suspend(struct sys_device *dev, pm_message_t state) clkp->ops->set_parent(clkp, clkp->parent); if (likely(clkp->ops->set_rate)) - clkp->ops->set_rate(clkp, - rate, NO_CHANGE); + clkp->ops->set_rate(clkp, rate); else if (likely(clkp->ops->recalc)) clkp->rate = clkp->ops->recalc(clkp); } diff --git a/drivers/sh/clk/cpg.c b/drivers/sh/clk/cpg.c index 3aea5f0ceb09..6172335ae323 100644 --- a/drivers/sh/clk/cpg.c +++ b/drivers/sh/clk/cpg.c @@ -110,8 +110,7 @@ static int sh_clk_div6_set_parent(struct clk *clk, struct clk *parent) return 0; } -static int sh_clk_div6_set_rate(struct clk *clk, - unsigned long rate, int algo_id) +static int sh_clk_div6_set_rate(struct clk *clk, unsigned long rate) { unsigned long value; int idx; @@ -132,7 +131,7 @@ static int sh_clk_div6_enable(struct clk *clk) unsigned long value; int ret; - ret = sh_clk_div6_set_rate(clk, clk->rate, 0); + ret = sh_clk_div6_set_rate(clk, clk->rate); if (ret == 0) { value = __raw_readl(clk->enable_reg); value &= ~0x100; /* clear stop bit to enable clock */ @@ -253,7 +252,7 @@ static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent) return 0; } -static int sh_clk_div4_set_rate(struct clk *clk, unsigned long rate, int algo_id) +static int sh_clk_div4_set_rate(struct clk *clk, unsigned long rate) { struct clk_div4_table *d4t = clk->priv; unsigned long value; diff --git a/drivers/spi/atmel_spi.c b/drivers/spi/atmel_spi.c index 154529aacc03..a067046c9da2 100644 --- a/drivers/spi/atmel_spi.c +++ b/drivers/spi/atmel_spi.c @@ -352,8 +352,12 @@ atmel_spi_dma_map_xfer(struct atmel_spi *as, struct spi_transfer *xfer) xfer->tx_dma = xfer->rx_dma = INVALID_DMA_ADDRESS; if (xfer->tx_buf) { + /* tx_buf is a const void* where we need a void * for the dma + * mapping */ + void *nonconst_tx = (void *)xfer->tx_buf; + xfer->tx_dma = dma_map_single(dev, - (void *) xfer->tx_buf, xfer->len, + nonconst_tx, xfer->len, DMA_TO_DEVICE); if (dma_mapping_error(dev, xfer->tx_dma)) return -ENOMEM; diff --git a/drivers/ssb/b43_pci_bridge.c b/drivers/ssb/b43_pci_bridge.c index ef9c6a04ad8f..744d3f6e4709 100644 --- a/drivers/ssb/b43_pci_bridge.c +++ b/drivers/ssb/b43_pci_bridge.c @@ -24,6 +24,7 @@ static const struct pci_device_id b43_pci_bridge_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4312) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4315) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4318) }, + { PCI_DEVICE(PCI_VENDOR_ID_BCM_GVC, 0x4318) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4319) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4320) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4321) }, diff 
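The atmel_spi hunk above documents a recurring pattern: dma_map_single() takes a non-const pointer even when the direction is DMA_TO_DEVICE, so a transmit buffer declared const void * has to be cast. A sketch of the idiom (the helper name is illustrative):

#include <linux/dma-mapping.h>

/* Casting away const is safe here: with DMA_TO_DEVICE the device
 * only reads the buffer, it never writes through the pointer. */
static dma_addr_t map_tx_buffer(struct device *dev,
                                const void *tx_buf, size_t len)
{
        return dma_map_single(dev, (void *)tx_buf, len, DMA_TO_DEVICE);
}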
--git a/drivers/staging/asus_oled/asus_oled.c b/drivers/staging/asus_oled/asus_oled.c index 8c95d8c2a4f4..016c6f7f8630 100644 --- a/drivers/staging/asus_oled/asus_oled.c +++ b/drivers/staging/asus_oled/asus_oled.c @@ -620,13 +620,13 @@ static ssize_t class_set_picture(struct device *device, #define ASUS_OLED_DEVICE_ATTR(_file) dev_attr_asus_oled_##_file -static DEVICE_ATTR(asus_oled_enabled, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(asus_oled_enabled, S_IWUSR | S_IRUGO, get_enabled, set_enabled); -static DEVICE_ATTR(asus_oled_picture, S_IWUGO , NULL, set_picture); +static DEVICE_ATTR(asus_oled_picture, S_IWUSR , NULL, set_picture); -static DEVICE_ATTR(enabled, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(enabled, S_IWUSR | S_IRUGO, class_get_enabled, class_set_enabled); -static DEVICE_ATTR(picture, S_IWUGO, NULL, class_set_picture); +static DEVICE_ATTR(picture, S_IWUSR, NULL, class_set_picture); static int asus_oled_probe(struct usb_interface *interface, const struct usb_device_id *id) diff --git a/drivers/staging/batman-adv/hard-interface.c b/drivers/staging/batman-adv/hard-interface.c index b68a7e5173be..d85de82f941a 100644 --- a/drivers/staging/batman-adv/hard-interface.c +++ b/drivers/staging/batman-adv/hard-interface.c @@ -463,9 +463,6 @@ static void hardif_remove_interface(struct batman_if *batman_if) return; batman_if->if_status = IF_TO_BE_REMOVED; - - /* caller must take if_list_lock */ - list_del_rcu(&batman_if->list); synchronize_rcu(); sysfs_del_hardif(&batman_if->hardif_obj); hardif_put(batman_if); @@ -474,13 +471,21 @@ static void hardif_remove_interface(struct batman_if *batman_if) void hardif_remove_interfaces(void) { struct batman_if *batman_if, *batman_if_tmp; + struct list_head if_queue; + + INIT_LIST_HEAD(&if_queue); - rtnl_lock(); spin_lock(&if_list_lock); list_for_each_entry_safe(batman_if, batman_if_tmp, &if_list, list) { - hardif_remove_interface(batman_if); + list_del_rcu(&batman_if->list); + list_add_tail(&batman_if->list, &if_queue); } spin_unlock(&if_list_lock); + + rtnl_lock(); + list_for_each_entry_safe(batman_if, batman_if_tmp, &if_queue, list) { + hardif_remove_interface(batman_if); + } rtnl_unlock(); } @@ -507,8 +512,10 @@ static int hard_if_event(struct notifier_block *this, break; case NETDEV_UNREGISTER: spin_lock(&if_list_lock); - hardif_remove_interface(batman_if); + list_del_rcu(&batman_if->list); spin_unlock(&if_list_lock); + + hardif_remove_interface(batman_if); break; case NETDEV_CHANGEMTU: if (batman_if->soft_iface) diff --git a/drivers/staging/batman-adv/soft-interface.c b/drivers/staging/batman-adv/soft-interface.c index 3904db9ce7b1..0e996181daf7 100644 --- a/drivers/staging/batman-adv/soft-interface.c +++ b/drivers/staging/batman-adv/soft-interface.c @@ -194,14 +194,15 @@ void interface_rx(struct net_device *soft_iface, struct bat_priv *priv = netdev_priv(soft_iface); /* check if enough space is available for pulling, and pull */ - if (!pskb_may_pull(skb, hdr_size)) { - kfree_skb(skb); - return; - } + if (!pskb_may_pull(skb, hdr_size)) + goto dropped; + skb_pull_rcsum(skb, hdr_size); /* skb_set_mac_header(skb, -sizeof(struct ethhdr));*/ /* skb->dev & skb->pkt_type are set here */ + if (unlikely(!pskb_may_pull(skb, ETH_HLEN))) + goto dropped; skb->protocol = eth_type_trans(skb, soft_iface); /* should not be neccesary anymore as we use skb_pull_rcsum() @@ -216,6 +217,11 @@ void interface_rx(struct net_device *soft_iface, soft_iface->last_rx = jiffies; netif_rx(skb); + return; + +dropped: + kfree_skb(skb); + return; } #ifdef HAVE_NET_DEVICE_OPS diff --git 
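The interface_rx() rework above replaces per-branch kfree_skb() calls with one shared error label, and adds a second pskb_may_pull() so eth_type_trans() never reads past the linear head. The control flow, reduced to a sketch (the function name is illustrative):

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>

static void rx_sketch(struct net_device *dev, struct sk_buff *skb,
                      int hdr_size)
{
        /* Make sure the encapsulation header is in the linear area. */
        if (!pskb_may_pull(skb, hdr_size))
                goto dropped;

        skb_pull_rcsum(skb, hdr_size);

        /* eth_type_trans() reads ETH_HLEN bytes; verify they exist. */
        if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
                goto dropped;

        skb->protocol = eth_type_trans(skb, dev);
        netif_rx(skb);
        return;

dropped:
        kfree_skb(skb);  /* single exit for every malformed frame */
}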
a/drivers/staging/brcm80211/README b/drivers/staging/brcm80211/README index c8f1cf1b4409..a27bb0b4f581 100644 --- a/drivers/staging/brcm80211/README +++ b/drivers/staging/brcm80211/README @@ -88,7 +88,9 @@ with the driver. Contact Info: ============= -Brett Rudley brudley@broadcom.com -Henry Ptasinski henryp@broadcom.com -Dowan Kim dowan@broadcom.com +Brett Rudley brudley@broadcom.com +Henry Ptasinski henryp@broadcom.com +Dowan Kim dowan@broadcom.com +Roland Vossen rvossen@broadcom.com +Arend van Spriel arend@broadcom.com diff --git a/drivers/staging/brcm80211/TODO b/drivers/staging/brcm80211/TODO index dbf904184899..24ebadbe4241 100644 --- a/drivers/staging/brcm80211/TODO +++ b/drivers/staging/brcm80211/TODO @@ -46,4 +46,6 @@ Contact Brett Rudley <brudley@broadcom.com> Henry Ptasinski <henryp@broadcom.com> Dowan Kim <dowan@broadcom.com> +Roland Vossen <rvossen@broadcom.com> +Arend van Spriel <arend@broadcom.com> diff --git a/drivers/staging/comedi/drivers/usbdux.c b/drivers/staging/comedi/drivers/usbdux.c index 1f177a67ff11..de784ff08caa 100644 --- a/drivers/staging/comedi/drivers/usbdux.c +++ b/drivers/staging/comedi/drivers/usbdux.c @@ -2295,8 +2295,8 @@ static void tidy_up(struct usbduxsub *usbduxsub_tmp) usbduxsub_tmp->inBuffer = NULL; kfree(usbduxsub_tmp->insnBuffer); usbduxsub_tmp->insnBuffer = NULL; - kfree(usbduxsub_tmp->inBuffer); - usbduxsub_tmp->inBuffer = NULL; + kfree(usbduxsub_tmp->outBuffer); + usbduxsub_tmp->outBuffer = NULL; kfree(usbduxsub_tmp->dac_commands); usbduxsub_tmp->dac_commands = NULL; kfree(usbduxsub_tmp->dux_commands); diff --git a/drivers/staging/easycap/easycap.h b/drivers/staging/easycap/easycap.h index 25961c23dc0f..884263b2775d 100644 --- a/drivers/staging/easycap/easycap.h +++ b/drivers/staging/easycap/easycap.h @@ -75,6 +75,7 @@ #include <linux/errno.h> #include <linux/init.h> #include <linux/slab.h> +#include <linux/smp_lock.h> #include <linux/module.h> #include <linux/kref.h> #include <linux/usb.h> diff --git a/drivers/staging/frontier/tranzport.c b/drivers/staging/frontier/tranzport.c index a145a15cfdb3..8894ab14f167 100644 --- a/drivers/staging/frontier/tranzport.c +++ b/drivers/staging/frontier/tranzport.c @@ -204,7 +204,7 @@ static void usb_tranzport_abort_transfers(struct usb_tranzport *dev) t->value = temp; \ return count; \ } \ - static DEVICE_ATTR(value, S_IWUGO | S_IRUGO, show_##value, set_##value); + static DEVICE_ATTR(value, S_IWUSR | S_IRUGO, show_##value, set_##value); show_int(enable); show_int(offline); diff --git a/drivers/staging/go7007/go7007-driver.c b/drivers/staging/go7007/go7007-driver.c index b3f42f37a313..48d4e483d8a4 100644 --- a/drivers/staging/go7007/go7007-driver.c +++ b/drivers/staging/go7007/go7007-driver.c @@ -199,7 +199,7 @@ static int init_i2c_module(struct i2c_adapter *adapter, const char *type, struct go7007 *go = i2c_get_adapdata(adapter); struct v4l2_device *v4l2_dev = &go->v4l2_dev; - if (v4l2_i2c_new_subdev(v4l2_dev, adapter, NULL, type, addr, NULL)) + if (v4l2_i2c_new_subdev(v4l2_dev, adapter, type, addr, NULL)) return 0; printk(KERN_INFO "go7007: probing for module i2c:%s failed\n", type); diff --git a/drivers/staging/iio/accel/adis16220_core.c b/drivers/staging/iio/accel/adis16220_core.c index c86d1498737d..1c1e98aee2d9 100644 --- a/drivers/staging/iio/accel/adis16220_core.c +++ b/drivers/staging/iio/accel/adis16220_core.c @@ -507,7 +507,7 @@ static IIO_DEVICE_ATTR(reset, S_IWUSR, NULL, adis16220_write_reset, 0); #define IIO_DEV_ATTR_CAPTURE(_store) \ - IIO_DEVICE_ATTR(capture, S_IWUGO, NULL, _store, 0) + 
IIO_DEVICE_ATTR(capture, S_IWUSR, NULL, _store, 0) static IIO_DEV_ATTR_CAPTURE(adis16220_write_capture); diff --git a/drivers/staging/intel_sst/intel_sst_stream_encoded.c b/drivers/staging/intel_sst/intel_sst_stream_encoded.c index fbae39fda5c0..5c455608b024 100644 --- a/drivers/staging/intel_sst/intel_sst_stream_encoded.c +++ b/drivers/staging/intel_sst/intel_sst_stream_encoded.c @@ -1269,7 +1269,7 @@ finish: dbufs->output_bytes_produced = total_output; str_info->status = str_info->prev; str_info->prev = STREAM_DECODE; - str_info->decode_ibuf = NULL; kfree(str_info->decode_ibuf); + str_info->decode_ibuf = NULL; return retval; } diff --git a/drivers/staging/line6/control.c b/drivers/staging/line6/control.c index 040e25ca6d33..67e23b6e2d35 100644 --- a/drivers/staging/line6/control.c +++ b/drivers/staging/line6/control.c @@ -266,210 +266,210 @@ VARIAX_PARAM_R(float, mix2); VARIAX_PARAM_R(float, mix1); VARIAX_PARAM_R(int, pickup_wiring); -static DEVICE_ATTR(tweak, S_IWUGO | S_IRUGO, pod_get_tweak, pod_set_tweak); -static DEVICE_ATTR(wah_position, S_IWUGO | S_IRUGO, pod_get_wah_position, +static DEVICE_ATTR(tweak, S_IWUSR | S_IRUGO, pod_get_tweak, pod_set_tweak); +static DEVICE_ATTR(wah_position, S_IWUSR | S_IRUGO, pod_get_wah_position, pod_set_wah_position); -static DEVICE_ATTR(compression_gain, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(compression_gain, S_IWUSR | S_IRUGO, pod_get_compression_gain, pod_set_compression_gain); -static DEVICE_ATTR(vol_pedal_position, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(vol_pedal_position, S_IWUSR | S_IRUGO, pod_get_vol_pedal_position, pod_set_vol_pedal_position); -static DEVICE_ATTR(compression_threshold, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(compression_threshold, S_IWUSR | S_IRUGO, pod_get_compression_threshold, pod_set_compression_threshold); -static DEVICE_ATTR(pan, S_IWUGO | S_IRUGO, pod_get_pan, pod_set_pan); -static DEVICE_ATTR(amp_model_setup, S_IWUGO | S_IRUGO, pod_get_amp_model_setup, +static DEVICE_ATTR(pan, S_IWUSR | S_IRUGO, pod_get_pan, pod_set_pan); +static DEVICE_ATTR(amp_model_setup, S_IWUSR | S_IRUGO, pod_get_amp_model_setup, pod_set_amp_model_setup); -static DEVICE_ATTR(amp_model, S_IWUGO | S_IRUGO, pod_get_amp_model, +static DEVICE_ATTR(amp_model, S_IWUSR | S_IRUGO, pod_get_amp_model, pod_set_amp_model); -static DEVICE_ATTR(drive, S_IWUGO | S_IRUGO, pod_get_drive, pod_set_drive); -static DEVICE_ATTR(bass, S_IWUGO | S_IRUGO, pod_get_bass, pod_set_bass); -static DEVICE_ATTR(mid, S_IWUGO | S_IRUGO, pod_get_mid, pod_set_mid); -static DEVICE_ATTR(lowmid, S_IWUGO | S_IRUGO, pod_get_lowmid, pod_set_lowmid); -static DEVICE_ATTR(treble, S_IWUGO | S_IRUGO, pod_get_treble, pod_set_treble); -static DEVICE_ATTR(highmid, S_IWUGO | S_IRUGO, pod_get_highmid, +static DEVICE_ATTR(drive, S_IWUSR | S_IRUGO, pod_get_drive, pod_set_drive); +static DEVICE_ATTR(bass, S_IWUSR | S_IRUGO, pod_get_bass, pod_set_bass); +static DEVICE_ATTR(mid, S_IWUSR | S_IRUGO, pod_get_mid, pod_set_mid); +static DEVICE_ATTR(lowmid, S_IWUSR | S_IRUGO, pod_get_lowmid, pod_set_lowmid); +static DEVICE_ATTR(treble, S_IWUSR | S_IRUGO, pod_get_treble, pod_set_treble); +static DEVICE_ATTR(highmid, S_IWUSR | S_IRUGO, pod_get_highmid, pod_set_highmid); -static DEVICE_ATTR(chan_vol, S_IWUGO | S_IRUGO, pod_get_chan_vol, +static DEVICE_ATTR(chan_vol, S_IWUSR | S_IRUGO, pod_get_chan_vol, pod_set_chan_vol); -static DEVICE_ATTR(reverb_mix, S_IWUGO | S_IRUGO, pod_get_reverb_mix, +static DEVICE_ATTR(reverb_mix, S_IWUSR | S_IRUGO, pod_get_reverb_mix, pod_set_reverb_mix); -static 
DEVICE_ATTR(effect_setup, S_IWUGO | S_IRUGO, pod_get_effect_setup, +static DEVICE_ATTR(effect_setup, S_IWUSR | S_IRUGO, pod_get_effect_setup, pod_set_effect_setup); -static DEVICE_ATTR(band_1_frequency, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(band_1_frequency, S_IWUSR | S_IRUGO, pod_get_band_1_frequency, pod_set_band_1_frequency); -static DEVICE_ATTR(presence, S_IWUGO | S_IRUGO, pod_get_presence, +static DEVICE_ATTR(presence, S_IWUSR | S_IRUGO, pod_get_presence, pod_set_presence); -static DEVICE_ATTR2(treble__bass, treble, S_IWUGO | S_IRUGO, +static DEVICE_ATTR2(treble__bass, treble, S_IWUSR | S_IRUGO, pod_get_treble__bass, pod_set_treble__bass); -static DEVICE_ATTR(noise_gate_enable, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(noise_gate_enable, S_IWUSR | S_IRUGO, pod_get_noise_gate_enable, pod_set_noise_gate_enable); -static DEVICE_ATTR(gate_threshold, S_IWUGO | S_IRUGO, pod_get_gate_threshold, +static DEVICE_ATTR(gate_threshold, S_IWUSR | S_IRUGO, pod_get_gate_threshold, pod_set_gate_threshold); -static DEVICE_ATTR(gate_decay_time, S_IWUGO | S_IRUGO, pod_get_gate_decay_time, +static DEVICE_ATTR(gate_decay_time, S_IWUSR | S_IRUGO, pod_get_gate_decay_time, pod_set_gate_decay_time); -static DEVICE_ATTR(stomp_enable, S_IWUGO | S_IRUGO, pod_get_stomp_enable, +static DEVICE_ATTR(stomp_enable, S_IWUSR | S_IRUGO, pod_get_stomp_enable, pod_set_stomp_enable); -static DEVICE_ATTR(comp_enable, S_IWUGO | S_IRUGO, pod_get_comp_enable, +static DEVICE_ATTR(comp_enable, S_IWUSR | S_IRUGO, pod_get_comp_enable, pod_set_comp_enable); -static DEVICE_ATTR(stomp_time, S_IWUGO | S_IRUGO, pod_get_stomp_time, +static DEVICE_ATTR(stomp_time, S_IWUSR | S_IRUGO, pod_get_stomp_time, pod_set_stomp_time); -static DEVICE_ATTR(delay_enable, S_IWUGO | S_IRUGO, pod_get_delay_enable, +static DEVICE_ATTR(delay_enable, S_IWUSR | S_IRUGO, pod_get_delay_enable, pod_set_delay_enable); -static DEVICE_ATTR(mod_param_1, S_IWUGO | S_IRUGO, pod_get_mod_param_1, +static DEVICE_ATTR(mod_param_1, S_IWUSR | S_IRUGO, pod_get_mod_param_1, pod_set_mod_param_1); -static DEVICE_ATTR(delay_param_1, S_IWUGO | S_IRUGO, pod_get_delay_param_1, +static DEVICE_ATTR(delay_param_1, S_IWUSR | S_IRUGO, pod_get_delay_param_1, pod_set_delay_param_1); -static DEVICE_ATTR(delay_param_1_note_value, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(delay_param_1_note_value, S_IWUSR | S_IRUGO, pod_get_delay_param_1_note_value, pod_set_delay_param_1_note_value); -static DEVICE_ATTR2(band_2_frequency__bass, band_2_frequency, S_IWUGO | S_IRUGO, +static DEVICE_ATTR2(band_2_frequency__bass, band_2_frequency, S_IWUSR | S_IRUGO, pod_get_band_2_frequency__bass, pod_set_band_2_frequency__bass); -static DEVICE_ATTR(delay_param_2, S_IWUGO | S_IRUGO, pod_get_delay_param_2, +static DEVICE_ATTR(delay_param_2, S_IWUSR | S_IRUGO, pod_get_delay_param_2, pod_set_delay_param_2); -static DEVICE_ATTR(delay_volume_mix, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(delay_volume_mix, S_IWUSR | S_IRUGO, pod_get_delay_volume_mix, pod_set_delay_volume_mix); -static DEVICE_ATTR(delay_param_3, S_IWUGO | S_IRUGO, pod_get_delay_param_3, +static DEVICE_ATTR(delay_param_3, S_IWUSR | S_IRUGO, pod_get_delay_param_3, pod_set_delay_param_3); -static DEVICE_ATTR(reverb_enable, S_IWUGO | S_IRUGO, pod_get_reverb_enable, +static DEVICE_ATTR(reverb_enable, S_IWUSR | S_IRUGO, pod_get_reverb_enable, pod_set_reverb_enable); -static DEVICE_ATTR(reverb_type, S_IWUGO | S_IRUGO, pod_get_reverb_type, +static DEVICE_ATTR(reverb_type, S_IWUSR | S_IRUGO, pod_get_reverb_type, pod_set_reverb_type); -static DEVICE_ATTR(reverb_decay, 
S_IWUGO | S_IRUGO, pod_get_reverb_decay, +static DEVICE_ATTR(reverb_decay, S_IWUSR | S_IRUGO, pod_get_reverb_decay, pod_set_reverb_decay); -static DEVICE_ATTR(reverb_tone, S_IWUGO | S_IRUGO, pod_get_reverb_tone, +static DEVICE_ATTR(reverb_tone, S_IWUSR | S_IRUGO, pod_get_reverb_tone, pod_set_reverb_tone); -static DEVICE_ATTR(reverb_pre_delay, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(reverb_pre_delay, S_IWUSR | S_IRUGO, pod_get_reverb_pre_delay, pod_set_reverb_pre_delay); -static DEVICE_ATTR(reverb_pre_post, S_IWUGO | S_IRUGO, pod_get_reverb_pre_post, +static DEVICE_ATTR(reverb_pre_post, S_IWUSR | S_IRUGO, pod_get_reverb_pre_post, pod_set_reverb_pre_post); -static DEVICE_ATTR(band_2_frequency, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(band_2_frequency, S_IWUSR | S_IRUGO, pod_get_band_2_frequency, pod_set_band_2_frequency); -static DEVICE_ATTR2(band_3_frequency__bass, band_3_frequency, S_IWUGO | S_IRUGO, +static DEVICE_ATTR2(band_3_frequency__bass, band_3_frequency, S_IWUSR | S_IRUGO, pod_get_band_3_frequency__bass, pod_set_band_3_frequency__bass); -static DEVICE_ATTR(wah_enable, S_IWUGO | S_IRUGO, pod_get_wah_enable, +static DEVICE_ATTR(wah_enable, S_IWUSR | S_IRUGO, pod_get_wah_enable, pod_set_wah_enable); -static DEVICE_ATTR(modulation_lo_cut, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(modulation_lo_cut, S_IWUSR | S_IRUGO, pod_get_modulation_lo_cut, pod_set_modulation_lo_cut); -static DEVICE_ATTR(delay_reverb_lo_cut, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(delay_reverb_lo_cut, S_IWUSR | S_IRUGO, pod_get_delay_reverb_lo_cut, pod_set_delay_reverb_lo_cut); -static DEVICE_ATTR(volume_pedal_minimum, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(volume_pedal_minimum, S_IWUSR | S_IRUGO, pod_get_volume_pedal_minimum, pod_set_volume_pedal_minimum); -static DEVICE_ATTR(eq_pre_post, S_IWUGO | S_IRUGO, pod_get_eq_pre_post, +static DEVICE_ATTR(eq_pre_post, S_IWUSR | S_IRUGO, pod_get_eq_pre_post, pod_set_eq_pre_post); -static DEVICE_ATTR(volume_pre_post, S_IWUGO | S_IRUGO, pod_get_volume_pre_post, +static DEVICE_ATTR(volume_pre_post, S_IWUSR | S_IRUGO, pod_get_volume_pre_post, pod_set_volume_pre_post); -static DEVICE_ATTR(di_model, S_IWUGO | S_IRUGO, pod_get_di_model, +static DEVICE_ATTR(di_model, S_IWUSR | S_IRUGO, pod_get_di_model, pod_set_di_model); -static DEVICE_ATTR(di_delay, S_IWUGO | S_IRUGO, pod_get_di_delay, +static DEVICE_ATTR(di_delay, S_IWUSR | S_IRUGO, pod_get_di_delay, pod_set_di_delay); -static DEVICE_ATTR(mod_enable, S_IWUGO | S_IRUGO, pod_get_mod_enable, +static DEVICE_ATTR(mod_enable, S_IWUSR | S_IRUGO, pod_get_mod_enable, pod_set_mod_enable); -static DEVICE_ATTR(mod_param_1_note_value, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(mod_param_1_note_value, S_IWUSR | S_IRUGO, pod_get_mod_param_1_note_value, pod_set_mod_param_1_note_value); -static DEVICE_ATTR(mod_param_2, S_IWUGO | S_IRUGO, pod_get_mod_param_2, +static DEVICE_ATTR(mod_param_2, S_IWUSR | S_IRUGO, pod_get_mod_param_2, pod_set_mod_param_2); -static DEVICE_ATTR(mod_param_3, S_IWUGO | S_IRUGO, pod_get_mod_param_3, +static DEVICE_ATTR(mod_param_3, S_IWUSR | S_IRUGO, pod_get_mod_param_3, pod_set_mod_param_3); -static DEVICE_ATTR(mod_param_4, S_IWUGO | S_IRUGO, pod_get_mod_param_4, +static DEVICE_ATTR(mod_param_4, S_IWUSR | S_IRUGO, pod_get_mod_param_4, pod_set_mod_param_4); -static DEVICE_ATTR(mod_param_5, S_IWUGO | S_IRUGO, pod_get_mod_param_5, +static DEVICE_ATTR(mod_param_5, S_IWUSR | S_IRUGO, pod_get_mod_param_5, pod_set_mod_param_5); -static DEVICE_ATTR(mod_volume_mix, S_IWUGO | S_IRUGO, pod_get_mod_volume_mix, +static 
DEVICE_ATTR(mod_volume_mix, S_IWUSR | S_IRUGO, pod_get_mod_volume_mix, pod_set_mod_volume_mix); -static DEVICE_ATTR(mod_pre_post, S_IWUGO | S_IRUGO, pod_get_mod_pre_post, +static DEVICE_ATTR(mod_pre_post, S_IWUSR | S_IRUGO, pod_get_mod_pre_post, pod_set_mod_pre_post); -static DEVICE_ATTR(modulation_model, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(modulation_model, S_IWUSR | S_IRUGO, pod_get_modulation_model, pod_set_modulation_model); -static DEVICE_ATTR(band_3_frequency, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(band_3_frequency, S_IWUSR | S_IRUGO, pod_get_band_3_frequency, pod_set_band_3_frequency); -static DEVICE_ATTR2(band_4_frequency__bass, band_4_frequency, S_IWUGO | S_IRUGO, +static DEVICE_ATTR2(band_4_frequency__bass, band_4_frequency, S_IWUSR | S_IRUGO, pod_get_band_4_frequency__bass, pod_set_band_4_frequency__bass); -static DEVICE_ATTR(mod_param_1_double_precision, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(mod_param_1_double_precision, S_IWUSR | S_IRUGO, pod_get_mod_param_1_double_precision, pod_set_mod_param_1_double_precision); -static DEVICE_ATTR(delay_param_1_double_precision, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(delay_param_1_double_precision, S_IWUSR | S_IRUGO, pod_get_delay_param_1_double_precision, pod_set_delay_param_1_double_precision); -static DEVICE_ATTR(eq_enable, S_IWUGO | S_IRUGO, pod_get_eq_enable, +static DEVICE_ATTR(eq_enable, S_IWUSR | S_IRUGO, pod_get_eq_enable, pod_set_eq_enable); -static DEVICE_ATTR(tap, S_IWUGO | S_IRUGO, pod_get_tap, pod_set_tap); -static DEVICE_ATTR(volume_tweak_pedal_assign, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(tap, S_IWUSR | S_IRUGO, pod_get_tap, pod_set_tap); +static DEVICE_ATTR(volume_tweak_pedal_assign, S_IWUSR | S_IRUGO, pod_get_volume_tweak_pedal_assign, pod_set_volume_tweak_pedal_assign); -static DEVICE_ATTR(band_5_frequency, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(band_5_frequency, S_IWUSR | S_IRUGO, pod_get_band_5_frequency, pod_set_band_5_frequency); -static DEVICE_ATTR(tuner, S_IWUGO | S_IRUGO, pod_get_tuner, pod_set_tuner); -static DEVICE_ATTR(mic_selection, S_IWUGO | S_IRUGO, pod_get_mic_selection, +static DEVICE_ATTR(tuner, S_IWUSR | S_IRUGO, pod_get_tuner, pod_set_tuner); +static DEVICE_ATTR(mic_selection, S_IWUSR | S_IRUGO, pod_get_mic_selection, pod_set_mic_selection); -static DEVICE_ATTR(cabinet_model, S_IWUGO | S_IRUGO, pod_get_cabinet_model, +static DEVICE_ATTR(cabinet_model, S_IWUSR | S_IRUGO, pod_get_cabinet_model, pod_set_cabinet_model); -static DEVICE_ATTR(stomp_model, S_IWUGO | S_IRUGO, pod_get_stomp_model, +static DEVICE_ATTR(stomp_model, S_IWUSR | S_IRUGO, pod_get_stomp_model, pod_set_stomp_model); -static DEVICE_ATTR(roomlevel, S_IWUGO | S_IRUGO, pod_get_roomlevel, +static DEVICE_ATTR(roomlevel, S_IWUSR | S_IRUGO, pod_get_roomlevel, pod_set_roomlevel); -static DEVICE_ATTR(band_4_frequency, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(band_4_frequency, S_IWUSR | S_IRUGO, pod_get_band_4_frequency, pod_set_band_4_frequency); -static DEVICE_ATTR(band_6_frequency, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(band_6_frequency, S_IWUSR | S_IRUGO, pod_get_band_6_frequency, pod_set_band_6_frequency); -static DEVICE_ATTR(stomp_param_1_note_value, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(stomp_param_1_note_value, S_IWUSR | S_IRUGO, pod_get_stomp_param_1_note_value, pod_set_stomp_param_1_note_value); -static DEVICE_ATTR(stomp_param_2, S_IWUGO | S_IRUGO, pod_get_stomp_param_2, +static DEVICE_ATTR(stomp_param_2, S_IWUSR | S_IRUGO, pod_get_stomp_param_2, pod_set_stomp_param_2); -static DEVICE_ATTR(stomp_param_3, S_IWUGO | S_IRUGO, 
pod_get_stomp_param_3, +static DEVICE_ATTR(stomp_param_3, S_IWUSR | S_IRUGO, pod_get_stomp_param_3, pod_set_stomp_param_3); -static DEVICE_ATTR(stomp_param_4, S_IWUGO | S_IRUGO, pod_get_stomp_param_4, +static DEVICE_ATTR(stomp_param_4, S_IWUSR | S_IRUGO, pod_get_stomp_param_4, pod_set_stomp_param_4); -static DEVICE_ATTR(stomp_param_5, S_IWUGO | S_IRUGO, pod_get_stomp_param_5, +static DEVICE_ATTR(stomp_param_5, S_IWUSR | S_IRUGO, pod_get_stomp_param_5, pod_set_stomp_param_5); -static DEVICE_ATTR(stomp_param_6, S_IWUGO | S_IRUGO, pod_get_stomp_param_6, +static DEVICE_ATTR(stomp_param_6, S_IWUSR | S_IRUGO, pod_get_stomp_param_6, pod_set_stomp_param_6); -static DEVICE_ATTR(amp_switch_select, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(amp_switch_select, S_IWUSR | S_IRUGO, pod_get_amp_switch_select, pod_set_amp_switch_select); -static DEVICE_ATTR(delay_param_4, S_IWUGO | S_IRUGO, pod_get_delay_param_4, +static DEVICE_ATTR(delay_param_4, S_IWUSR | S_IRUGO, pod_get_delay_param_4, pod_set_delay_param_4); -static DEVICE_ATTR(delay_param_5, S_IWUGO | S_IRUGO, pod_get_delay_param_5, +static DEVICE_ATTR(delay_param_5, S_IWUSR | S_IRUGO, pod_get_delay_param_5, pod_set_delay_param_5); -static DEVICE_ATTR(delay_pre_post, S_IWUGO | S_IRUGO, pod_get_delay_pre_post, +static DEVICE_ATTR(delay_pre_post, S_IWUSR | S_IRUGO, pod_get_delay_pre_post, pod_set_delay_pre_post); -static DEVICE_ATTR(delay_model, S_IWUGO | S_IRUGO, pod_get_delay_model, +static DEVICE_ATTR(delay_model, S_IWUSR | S_IRUGO, pod_get_delay_model, pod_set_delay_model); -static DEVICE_ATTR(delay_verb_model, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(delay_verb_model, S_IWUSR | S_IRUGO, pod_get_delay_verb_model, pod_set_delay_verb_model); -static DEVICE_ATTR(tempo_msb, S_IWUGO | S_IRUGO, pod_get_tempo_msb, +static DEVICE_ATTR(tempo_msb, S_IWUSR | S_IRUGO, pod_get_tempo_msb, pod_set_tempo_msb); -static DEVICE_ATTR(tempo_lsb, S_IWUGO | S_IRUGO, pod_get_tempo_lsb, +static DEVICE_ATTR(tempo_lsb, S_IWUSR | S_IRUGO, pod_get_tempo_lsb, pod_set_tempo_lsb); -static DEVICE_ATTR(wah_model, S_IWUGO | S_IRUGO, pod_get_wah_model, +static DEVICE_ATTR(wah_model, S_IWUSR | S_IRUGO, pod_get_wah_model, pod_set_wah_model); -static DEVICE_ATTR(bypass_volume, S_IWUGO | S_IRUGO, pod_get_bypass_volume, +static DEVICE_ATTR(bypass_volume, S_IWUSR | S_IRUGO, pod_get_bypass_volume, pod_set_bypass_volume); -static DEVICE_ATTR(fx_loop_on_off, S_IWUGO | S_IRUGO, pod_get_fx_loop_on_off, +static DEVICE_ATTR(fx_loop_on_off, S_IWUSR | S_IRUGO, pod_get_fx_loop_on_off, pod_set_fx_loop_on_off); -static DEVICE_ATTR(tweak_param_select, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(tweak_param_select, S_IWUSR | S_IRUGO, pod_get_tweak_param_select, pod_set_tweak_param_select); -static DEVICE_ATTR(amp1_engage, S_IWUGO | S_IRUGO, pod_get_amp1_engage, +static DEVICE_ATTR(amp1_engage, S_IWUSR | S_IRUGO, pod_get_amp1_engage, pod_set_amp1_engage); -static DEVICE_ATTR(band_1_gain, S_IWUGO | S_IRUGO, pod_get_band_1_gain, +static DEVICE_ATTR(band_1_gain, S_IWUSR | S_IRUGO, pod_get_band_1_gain, pod_set_band_1_gain); -static DEVICE_ATTR2(band_2_gain__bass, band_2_gain, S_IWUGO | S_IRUGO, +static DEVICE_ATTR2(band_2_gain__bass, band_2_gain, S_IWUSR | S_IRUGO, pod_get_band_2_gain__bass, pod_set_band_2_gain__bass); -static DEVICE_ATTR(band_2_gain, S_IWUGO | S_IRUGO, pod_get_band_2_gain, +static DEVICE_ATTR(band_2_gain, S_IWUSR | S_IRUGO, pod_get_band_2_gain, pod_set_band_2_gain); -static DEVICE_ATTR2(band_3_gain__bass, band_3_gain, S_IWUGO | S_IRUGO, +static DEVICE_ATTR2(band_3_gain__bass, band_3_gain, S_IWUSR | 
S_IRUGO, pod_get_band_3_gain__bass, pod_set_band_3_gain__bass); -static DEVICE_ATTR(band_3_gain, S_IWUGO | S_IRUGO, pod_get_band_3_gain, +static DEVICE_ATTR(band_3_gain, S_IWUSR | S_IRUGO, pod_get_band_3_gain, pod_set_band_3_gain); -static DEVICE_ATTR2(band_4_gain__bass, band_4_gain, S_IWUGO | S_IRUGO, +static DEVICE_ATTR2(band_4_gain__bass, band_4_gain, S_IWUSR | S_IRUGO, pod_get_band_4_gain__bass, pod_set_band_4_gain__bass); -static DEVICE_ATTR2(band_5_gain__bass, band_5_gain, S_IWUGO | S_IRUGO, +static DEVICE_ATTR2(band_5_gain__bass, band_5_gain, S_IWUSR | S_IRUGO, pod_get_band_5_gain__bass, pod_set_band_5_gain__bass); -static DEVICE_ATTR(band_4_gain, S_IWUGO | S_IRUGO, pod_get_band_4_gain, +static DEVICE_ATTR(band_4_gain, S_IWUSR | S_IRUGO, pod_get_band_4_gain, pod_set_band_4_gain); -static DEVICE_ATTR2(band_6_gain__bass, band_6_gain, S_IWUGO | S_IRUGO, +static DEVICE_ATTR2(band_6_gain__bass, band_6_gain, S_IWUSR | S_IRUGO, pod_get_band_6_gain__bass, pod_set_band_6_gain__bass); static DEVICE_ATTR(body, S_IRUGO, variax_get_body, line6_nop_write); static DEVICE_ATTR(pickup1_enable, S_IRUGO, variax_get_pickup1_enable, diff --git a/drivers/staging/line6/midi.c b/drivers/staging/line6/midi.c index 4304dfe6c166..ab67e889d2c4 100644 --- a/drivers/staging/line6/midi.c +++ b/drivers/staging/line6/midi.c @@ -350,9 +350,9 @@ static ssize_t midi_set_midi_mask_receive(struct device *dev, return count; } -static DEVICE_ATTR(midi_mask_transmit, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(midi_mask_transmit, S_IWUSR | S_IRUGO, midi_get_midi_mask_transmit, midi_set_midi_mask_transmit); -static DEVICE_ATTR(midi_mask_receive, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(midi_mask_receive, S_IWUSR | S_IRUGO, midi_get_midi_mask_receive, midi_set_midi_mask_receive); /* MIDI device destructor */ diff --git a/drivers/staging/line6/pcm.c b/drivers/staging/line6/pcm.c index e54770e34d2e..b9c55f9eb501 100644 --- a/drivers/staging/line6/pcm.c +++ b/drivers/staging/line6/pcm.c @@ -79,9 +79,9 @@ static ssize_t pcm_set_impulse_period(struct device *dev, return count; } -static DEVICE_ATTR(impulse_volume, S_IWUGO | S_IRUGO, pcm_get_impulse_volume, +static DEVICE_ATTR(impulse_volume, S_IWUSR | S_IRUGO, pcm_get_impulse_volume, pcm_set_impulse_volume); -static DEVICE_ATTR(impulse_period, S_IWUGO | S_IRUGO, pcm_get_impulse_period, +static DEVICE_ATTR(impulse_period, S_IWUSR | S_IRUGO, pcm_get_impulse_period, pcm_set_impulse_period); #endif diff --git a/drivers/staging/line6/pod.c b/drivers/staging/line6/pod.c index 22e2cedcacf7..d9b30212585c 100644 --- a/drivers/staging/line6/pod.c +++ b/drivers/staging/line6/pod.c @@ -1051,48 +1051,48 @@ POD_GET_SYSTEM_PARAM(tuner_pitch, 1); #undef GET_SYSTEM_PARAM /* POD special files: */ -static DEVICE_ATTR(channel, S_IWUGO | S_IRUGO, pod_get_channel, +static DEVICE_ATTR(channel, S_IWUSR | S_IRUGO, pod_get_channel, pod_set_channel); static DEVICE_ATTR(clip, S_IRUGO, pod_wait_for_clip, line6_nop_write); static DEVICE_ATTR(device_id, S_IRUGO, pod_get_device_id, line6_nop_write); static DEVICE_ATTR(dirty, S_IRUGO, pod_get_dirty, line6_nop_write); -static DEVICE_ATTR(dump, S_IWUGO | S_IRUGO, pod_get_dump, pod_set_dump); -static DEVICE_ATTR(dump_buf, S_IWUGO | S_IRUGO, pod_get_dump_buf, +static DEVICE_ATTR(dump, S_IWUSR | S_IRUGO, pod_get_dump, pod_set_dump); +static DEVICE_ATTR(dump_buf, S_IWUSR | S_IRUGO, pod_get_dump_buf, pod_set_dump_buf); -static DEVICE_ATTR(finish, S_IWUGO, line6_nop_read, pod_set_finish); +static DEVICE_ATTR(finish, S_IWUSR, line6_nop_read, pod_set_finish); static 
DEVICE_ATTR(firmware_version, S_IRUGO, pod_get_firmware_version, line6_nop_write); -static DEVICE_ATTR(midi_postprocess, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(midi_postprocess, S_IWUSR | S_IRUGO, pod_get_midi_postprocess, pod_set_midi_postprocess); -static DEVICE_ATTR(monitor_level, S_IWUGO | S_IRUGO, pod_get_monitor_level, +static DEVICE_ATTR(monitor_level, S_IWUSR | S_IRUGO, pod_get_monitor_level, pod_set_monitor_level); static DEVICE_ATTR(name, S_IRUGO, pod_get_name, line6_nop_write); static DEVICE_ATTR(name_buf, S_IRUGO, pod_get_name_buf, line6_nop_write); -static DEVICE_ATTR(retrieve_amp_setup, S_IWUGO, line6_nop_read, +static DEVICE_ATTR(retrieve_amp_setup, S_IWUSR, line6_nop_read, pod_set_retrieve_amp_setup); -static DEVICE_ATTR(retrieve_channel, S_IWUGO, line6_nop_read, +static DEVICE_ATTR(retrieve_channel, S_IWUSR, line6_nop_read, pod_set_retrieve_channel); -static DEVICE_ATTR(retrieve_effects_setup, S_IWUGO, line6_nop_read, +static DEVICE_ATTR(retrieve_effects_setup, S_IWUSR, line6_nop_read, pod_set_retrieve_effects_setup); -static DEVICE_ATTR(routing, S_IWUGO | S_IRUGO, pod_get_routing, +static DEVICE_ATTR(routing, S_IWUSR | S_IRUGO, pod_get_routing, pod_set_routing); static DEVICE_ATTR(serial_number, S_IRUGO, pod_get_serial_number, line6_nop_write); -static DEVICE_ATTR(store_amp_setup, S_IWUGO, line6_nop_read, +static DEVICE_ATTR(store_amp_setup, S_IWUSR, line6_nop_read, pod_set_store_amp_setup); -static DEVICE_ATTR(store_channel, S_IWUGO, line6_nop_read, +static DEVICE_ATTR(store_channel, S_IWUSR, line6_nop_read, pod_set_store_channel); -static DEVICE_ATTR(store_effects_setup, S_IWUGO, line6_nop_read, +static DEVICE_ATTR(store_effects_setup, S_IWUSR, line6_nop_read, pod_set_store_effects_setup); -static DEVICE_ATTR(tuner_freq, S_IWUGO | S_IRUGO, pod_get_tuner_freq, +static DEVICE_ATTR(tuner_freq, S_IWUSR | S_IRUGO, pod_get_tuner_freq, pod_set_tuner_freq); -static DEVICE_ATTR(tuner_mute, S_IWUGO | S_IRUGO, pod_get_tuner_mute, +static DEVICE_ATTR(tuner_mute, S_IWUSR | S_IRUGO, pod_get_tuner_mute, pod_set_tuner_mute); static DEVICE_ATTR(tuner_note, S_IRUGO, pod_get_tuner_note, line6_nop_write); static DEVICE_ATTR(tuner_pitch, S_IRUGO, pod_get_tuner_pitch, line6_nop_write); #ifdef CONFIG_LINE6_USB_RAW -static DEVICE_ATTR(raw, S_IWUGO, line6_nop_read, line6_set_raw); +static DEVICE_ATTR(raw, S_IWUSR, line6_nop_read, line6_set_raw); #endif /* control info callback */ diff --git a/drivers/staging/line6/toneport.c b/drivers/staging/line6/toneport.c index 6a10b0f9749a..879e6992bbc6 100644 --- a/drivers/staging/line6/toneport.c +++ b/drivers/staging/line6/toneport.c @@ -154,9 +154,9 @@ static ssize_t toneport_set_led_green(struct device *dev, return count; } -static DEVICE_ATTR(led_red, S_IWUGO | S_IRUGO, line6_nop_read, +static DEVICE_ATTR(led_red, S_IWUSR | S_IRUGO, line6_nop_read, toneport_set_led_red); -static DEVICE_ATTR(led_green, S_IWUGO | S_IRUGO, line6_nop_read, +static DEVICE_ATTR(led_green, S_IWUSR | S_IRUGO, line6_nop_read, toneport_set_led_green); static int toneport_send_cmd(struct usb_device *usbdev, int cmd1, int cmd2) diff --git a/drivers/staging/line6/variax.c b/drivers/staging/line6/variax.c index 894eee7f2317..81241cdf1be9 100644 --- a/drivers/staging/line6/variax.c +++ b/drivers/staging/line6/variax.c @@ -549,21 +549,21 @@ static ssize_t variax_set_raw2(struct device *dev, #endif /* Variax workbench special files: */ -static DEVICE_ATTR(model, S_IWUGO | S_IRUGO, variax_get_model, +static DEVICE_ATTR(model, S_IWUSR | S_IRUGO, variax_get_model, variax_set_model); 
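The long run of S_IWUGO to S_IWUSR conversions above (and in the other staging hunks in this merge) is a permissions hardening sweep: S_IWUGO is owner+group+other write (0222), which made these sysfs attributes world-writable, while S_IWUSR (0200) restricts writes to root. The difference in one self-contained attribute (names and values here are placeholders):

#include <linux/device.h>
#include <linux/kernel.h>

static ssize_t example_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", 42);  /* placeholder value */
}

static ssize_t example_store(struct device *dev,
                             struct device_attribute *attr,
                             const char *buf, size_t count)
{
        return count;                     /* placeholder: accept and ignore */
}

/* Before the sweep: 0666, any local user could write the attribute. */
/* static DEVICE_ATTR(example, S_IWUGO | S_IRUGO, example_show, example_store); */

/* After the sweep: 0644, world-readable but only root-writable. */
static DEVICE_ATTR(example, S_IWUSR | S_IRUGO, example_show, example_store);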
-static DEVICE_ATTR(volume, S_IWUGO | S_IRUGO, variax_get_volume, +static DEVICE_ATTR(volume, S_IWUSR | S_IRUGO, variax_get_volume, variax_set_volume); -static DEVICE_ATTR(tone, S_IWUGO | S_IRUGO, variax_get_tone, variax_set_tone); +static DEVICE_ATTR(tone, S_IWUSR | S_IRUGO, variax_get_tone, variax_set_tone); static DEVICE_ATTR(name, S_IRUGO, variax_get_name, line6_nop_write); static DEVICE_ATTR(bank, S_IRUGO, variax_get_bank, line6_nop_write); static DEVICE_ATTR(dump, S_IRUGO, variax_get_dump, line6_nop_write); -static DEVICE_ATTR(active, S_IWUGO | S_IRUGO, variax_get_active, +static DEVICE_ATTR(active, S_IWUSR | S_IRUGO, variax_get_active, variax_set_active); static DEVICE_ATTR(guitar, S_IRUGO, variax_get_guitar, line6_nop_write); #ifdef CONFIG_LINE6_USB_RAW -static DEVICE_ATTR(raw, S_IWUGO, line6_nop_read, line6_set_raw); -static DEVICE_ATTR(raw2, S_IWUGO, line6_nop_read, variax_set_raw2); +static DEVICE_ATTR(raw, S_IWUSR, line6_nop_read, line6_set_raw); +static DEVICE_ATTR(raw2, S_IWUSR, line6_nop_read, variax_set_raw2); #endif /* diff --git a/drivers/staging/quickstart/quickstart.c b/drivers/staging/quickstart/quickstart.c index d746715d3d89..d83bec876d2e 100644 --- a/drivers/staging/quickstart/quickstart.c +++ b/drivers/staging/quickstart/quickstart.c @@ -355,7 +355,6 @@ static int quickstart_acpi_remove(struct acpi_device *device, int type) static void quickstart_exit(void) { input_unregister_device(quickstart_input); - input_free_device(quickstart_input); device_remove_file(&pf_device->dev, &dev_attr_pressed_button); device_remove_file(&pf_device->dev, &dev_attr_buttons); @@ -375,6 +374,7 @@ static int __init quickstart_init_input(void) { struct quickstart_btn **ptr = &quickstart_data.btn_lst; int count; + int ret; quickstart_input = input_allocate_device(); @@ -391,7 +391,13 @@ static int __init quickstart_init_input(void) ptr = &((*ptr)->next); } - return input_register_device(quickstart_input); + ret = input_register_device(quickstart_input); + if (ret) { + input_free_device(quickstart_input); + return ret; + } + + return 0; } static int __init quickstart_init(void) diff --git a/drivers/staging/rt2860/usb_main_dev.c b/drivers/staging/rt2860/usb_main_dev.c index ddacfc6c4861..cd15daae5412 100644 --- a/drivers/staging/rt2860/usb_main_dev.c +++ b/drivers/staging/rt2860/usb_main_dev.c @@ -182,6 +182,7 @@ struct usb_device_id rtusb_usb_id[] = { {USB_DEVICE(0x2001, 0x3C09)}, /* D-Link */ {USB_DEVICE(0x2001, 0x3C0A)}, /* D-Link 3072 */ {USB_DEVICE(0x2019, 0xED14)}, /* Planex Communications, Inc. 
*/ + {USB_DEVICE(0x0411, 0x015D)}, /* Buffalo Airstation WLI-UC-GN */ {} /* Terminating entry */ }; diff --git a/drivers/staging/rtl8187se/r8185b_init.c b/drivers/staging/rtl8187se/r8185b_init.c index 46000d72f4c4..3bdf9b31cc4e 100644 --- a/drivers/staging/rtl8187se/r8185b_init.c +++ b/drivers/staging/rtl8187se/r8185b_init.c @@ -264,8 +264,12 @@ HwHSSIThreeWire( udelay(10); } - if (TryCnt == TC_3W_POLL_MAX_TRY_CNT) - panic("HwThreeWire(): CmdReg: %#X RE|WE bits are not clear!!\n", u1bTmp); + if (TryCnt == TC_3W_POLL_MAX_TRY_CNT) { + printk(KERN_ERR "rtl8187se: HwThreeWire(): CmdReg:" + " %#X RE|WE bits are not clear!!\n", u1bTmp); + dump_stack(); + return 0; + } /* RTL8187S HSSI Read/Write Function */ u1bTmp = read_nic_byte(dev, RF_SW_CONFIG); @@ -298,13 +302,23 @@ HwHSSIThreeWire( int idx; int ByteCnt = nDataBufBitCnt / 8; /* printk("%d\n",nDataBufBitCnt); */ - if ((nDataBufBitCnt % 8) != 0) - panic("HwThreeWire(): nDataBufBitCnt(%d) should be multiple of 8!!!\n", - nDataBufBitCnt); + if ((nDataBufBitCnt % 8) != 0) { + printk(KERN_ERR "rtl8187se: " + "HwThreeWire(): nDataBufBitCnt(%d)" + " should be multiple of 8!!!\n", + nDataBufBitCnt); + dump_stack(); + nDataBufBitCnt += 8; + nDataBufBitCnt &= ~7; + } - if (nDataBufBitCnt > 64) - panic("HwThreeWire(): nDataBufBitCnt(%d) should <= 64!!!\n", - nDataBufBitCnt); + if (nDataBufBitCnt > 64) { + printk(KERN_ERR "rtl8187se: HwThreeWire():" + " nDataBufBitCnt(%d) should <= 64!!!\n", + nDataBufBitCnt); + dump_stack(); + nDataBufBitCnt = 64; + } for (idx = 0; idx < ByteCnt; idx++) write_nic_byte(dev, (SW_3W_DB0+idx), *(pDataBuf+idx)); diff --git a/drivers/staging/rtl8712/usb_halinit.c b/drivers/staging/rtl8712/usb_halinit.c index f6569dce3012..0e9483bbabe1 100644 --- a/drivers/staging/rtl8712/usb_halinit.c +++ b/drivers/staging/rtl8712/usb_halinit.c @@ -37,7 +37,7 @@ u8 r8712_usb_hal_bus_init(struct _adapter *padapter) { u8 val8 = 0; u8 ret = _SUCCESS; - u8 PollingCnt = 20; + int PollingCnt = 20; struct registry_priv *pregistrypriv = &padapter->registrypriv; if (pregistrypriv->chip_version == RTL8712_FPGA) { diff --git a/drivers/staging/samsung-laptop/samsung-laptop.c b/drivers/staging/samsung-laptop/samsung-laptop.c index eb44b60e1eb5..ac2bf11e1119 100644 --- a/drivers/staging/samsung-laptop/samsung-laptop.c +++ b/drivers/staging/samsung-laptop/samsung-laptop.c @@ -356,7 +356,7 @@ static ssize_t set_silent_state(struct device *dev, } return count; } -static DEVICE_ATTR(silent, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(silent, S_IWUSR | S_IRUGO, get_silent_state, set_silent_state); diff --git a/drivers/staging/speakup/fakekey.c b/drivers/staging/speakup/fakekey.c index adb93f21c0d6..65b231178f05 100644 --- a/drivers/staging/speakup/fakekey.c +++ b/drivers/staging/speakup/fakekey.c @@ -62,7 +62,6 @@ void speakup_remove_virtual_keyboard(void) { if (virt_keyboard != NULL) { input_unregister_device(virt_keyboard); - input_free_device(virt_keyboard); virt_keyboard = NULL; } } diff --git a/drivers/staging/spectra/ffsport.c b/drivers/staging/spectra/ffsport.c index c7932da03c56..63a9d0adf32d 100644 --- a/drivers/staging/spectra/ffsport.c +++ b/drivers/staging/spectra/ffsport.c @@ -656,7 +656,7 @@ static int SBD_setup_device(struct spectra_nand_dev *dev, int which) /* Here we force report 512 byte hardware sector size to Kernel */ blk_queue_logical_block_size(dev->queue, 512); - blk_queue_ordered(dev->queue, QUEUE_ORDERED_DRAIN_FLUSH); + blk_queue_flush(dev->queue, REQ_FLUSH); dev->thread = kthread_run(spectra_trans_thread, dev, "nand_thd"); if 
(IS_ERR(dev->thread)) { diff --git a/drivers/staging/tm6000/tm6000-cards.c b/drivers/staging/tm6000/tm6000-cards.c index 664e6038090d..b143258f094a 100644 --- a/drivers/staging/tm6000/tm6000-cards.c +++ b/drivers/staging/tm6000/tm6000-cards.c @@ -545,7 +545,7 @@ static void tm6000_config_tuner(struct tm6000_core *dev) /* Load tuner module */ v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap, - NULL, "tuner", dev->tuner_addr, NULL); + "tuner", dev->tuner_addr, NULL); memset(&tun_setup, 0, sizeof(tun_setup)); tun_setup.type = dev->tuner_type; @@ -683,7 +683,7 @@ static int tm6000_init_dev(struct tm6000_core *dev) if (dev->caps.has_tda9874) v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap, - NULL, "tvaudio", I2C_ADDR_TDA9874, NULL); + "tvaudio", I2C_ADDR_TDA9874, NULL); /* register and initialize V4L2 */ rc = tm6000_v4l2_register(dev); diff --git a/drivers/staging/udlfb/udlfb.c b/drivers/staging/udlfb/udlfb.c index fed25105970a..b7ac16005265 100644 --- a/drivers/staging/udlfb/udlfb.c +++ b/drivers/staging/udlfb/udlfb.c @@ -1441,7 +1441,7 @@ static struct device_attribute fb_device_attrs[] = { __ATTR_RO(metrics_bytes_identical), __ATTR_RO(metrics_bytes_sent), __ATTR_RO(metrics_cpu_kcycles_used), - __ATTR(metrics_reset, S_IWUGO, NULL, metrics_reset_store), + __ATTR(metrics_reset, S_IWUSR, NULL, metrics_reset_store), }; /* diff --git a/drivers/staging/winbond/sysdef.h b/drivers/staging/winbond/sysdef.h index 9195adf98e14..d0d71f69bc8c 100644 --- a/drivers/staging/winbond/sysdef.h +++ b/drivers/staging/winbond/sysdef.h @@ -2,6 +2,9 @@ #ifndef SYS_DEF_H #define SYS_DEF_H + +#include <linux/delay.h> + #define WB_LINUX #define WB_LINUX_WPA_PSK diff --git a/drivers/staging/zram/zram_sysfs.c b/drivers/staging/zram/zram_sysfs.c index 6c574a994d11..6b3cf00b0ff4 100644 --- a/drivers/staging/zram/zram_sysfs.c +++ b/drivers/staging/zram/zram_sysfs.c @@ -189,10 +189,10 @@ static ssize_t mem_used_total_show(struct device *dev, return sprintf(buf, "%llu\n", val); } -static DEVICE_ATTR(disksize, S_IRUGO | S_IWUGO, +static DEVICE_ATTR(disksize, S_IRUGO | S_IWUSR, disksize_show, disksize_store); static DEVICE_ATTR(initstate, S_IRUGO, initstate_show, NULL); -static DEVICE_ATTR(reset, S_IWUGO, NULL, reset_store); +static DEVICE_ATTR(reset, S_IWUSR, NULL, reset_store); static DEVICE_ATTR(num_reads, S_IRUGO, num_reads_show, NULL); static DEVICE_ATTR(num_writes, S_IRUGO, num_writes_show, NULL); static DEVICE_ATTR(invalid_io, S_IRUGO, invalid_io_show, NULL); diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index c05c5af5aa04..35480dd57a30 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c @@ -559,6 +559,9 @@ void __tty_hangup(struct tty_struct *tty) tty_lock(); + /* some functions below drop BTM, so we need this bit */ + set_bit(TTY_HUPPING, &tty->flags); + /* inuse_filps is protected by the single tty lock, this really needs to change if we want to flush the workqueue with the lock held */ @@ -578,6 +581,10 @@ void __tty_hangup(struct tty_struct *tty) } spin_unlock(&tty_files_lock); + /* + * it drops BTM and thus races with reopen + * we protect the race by TTY_HUPPING + */ tty_ldisc_hangup(tty); read_lock(&tasklist_lock); @@ -615,7 +622,6 @@ void __tty_hangup(struct tty_struct *tty) tty->session = NULL; tty->pgrp = NULL; tty->ctrl_status = 0; - set_bit(TTY_HUPPED, &tty->flags); spin_unlock_irqrestore(&tty->ctrl_lock, flags); /* Account for the p->signal references we killed */ @@ -641,6 +647,7 @@ void __tty_hangup(struct tty_struct *tty) * can't yet guarantee all that. 
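
The quickstart and speakup hunks above drop input_free_device() calls that followed input_unregister_device(). Once registration has succeeded, unregistering releases the last reference and frees the device, so freeing again is a double-free; input_free_device() is only for the window between input_allocate_device() and a successful input_register_device(). A hedged sketch of the intended lifecycle (driver names are illustrative):

    #include <linux/input.h>

    static struct input_dev *my_input;

    static int my_init(void)
    {
            int ret;

            my_input = input_allocate_device();
            if (!my_input)
                    return -ENOMEM;

            ret = input_register_device(my_input);
            if (ret) {
                    /* not yet registered: we still own it, so free it */
                    input_free_device(my_input);
                    return ret;
            }
            return 0;
    }

    static void my_exit(void)
    {
            /* unregister drops the reference; no input_free_device() here */
            input_unregister_device(my_input);
    }
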
*/ set_bit(TTY_HUPPED, &tty->flags); + clear_bit(TTY_HUPPING, &tty->flags); tty_ldisc_enable(tty); tty_unlock(); @@ -1310,7 +1317,9 @@ static int tty_reopen(struct tty_struct *tty) { struct tty_driver *driver = tty->driver; - if (test_bit(TTY_CLOSING, &tty->flags)) + if (test_bit(TTY_CLOSING, &tty->flags) || + test_bit(TTY_HUPPING, &tty->flags) || + test_bit(TTY_LDISC_CHANGING, &tty->flags)) return -EIO; if (driver->type == TTY_DRIVER_TYPE_PTY && diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c index d8e96b005023..4214d58276f7 100644 --- a/drivers/tty/tty_ldisc.c +++ b/drivers/tty/tty_ldisc.c @@ -454,6 +454,8 @@ static int tty_ldisc_open(struct tty_struct *tty, struct tty_ldisc *ld) /* BTM here locks versus a hangup event */ WARN_ON(!tty_locked()); ret = ld->ops->open(tty); + if (ret) + clear_bit(TTY_LDISC_OPEN, &tty->flags); return ret; } return 0; diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c index a858d2b87b94..51fe1795d5a8 100644 --- a/drivers/uio/uio.c +++ b/drivers/uio/uio.c @@ -3,7 +3,7 @@ * * Copyright(C) 2005, Benedikt Spranger <b.spranger@linutronix.de> * Copyright(C) 2005, Thomas Gleixner <tglx@linutronix.de> - * Copyright(C) 2006, Hans J. Koch <hjk@linutronix.de> + * Copyright(C) 2006, Hans J. Koch <hjk@hansjkoch.de> * Copyright(C) 2006, Greg Kroah-Hartman <greg@kroah.com> * * Userspace IO diff --git a/drivers/uio/uio_cif.c b/drivers/uio/uio_cif.c index a8ea2f19a0cc..a84a451159ed 100644 --- a/drivers/uio/uio_cif.c +++ b/drivers/uio/uio_cif.c @@ -1,7 +1,7 @@ /* * UIO Hilscher CIF card driver * - * (C) 2007 Hans J. Koch <hjk@linutronix.de> + * (C) 2007 Hans J. Koch <hjk@hansjkoch.de> * Original code (C) 2005 Benedikt Spranger <b.spranger@linutronix.de> * * Licensed under GPL version 2 only. diff --git a/drivers/uio/uio_netx.c b/drivers/uio/uio_netx.c index 5a18e9f7b836..5ffdb483b015 100644 --- a/drivers/uio/uio_netx.c +++ b/drivers/uio/uio_netx.c @@ -2,7 +2,7 @@ * UIO driver for Hilscher NetX based fieldbus cards (cifX, comX). * See http://www.hilscher.com for details. * - * (C) 2007 Hans J. Koch <hjk@linutronix.de> + * (C) 2007 Hans J. Koch <hjk@hansjkoch.de> * (C) 2008 Manuel Traut <manut@linutronix.de> * * Licensed under GPL version 2 only. 
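
The tty_io hunks above close a reopen-during-hangup race: __tty_hangup() drops the big tty mutex inside tty_ldisc_hangup(), so a concurrent open could slip in mid-hangup. The fix sets TTY_HUPPING for the whole hangup window (TTY_HUPPED was previously set only near the end) and makes tty_reopen() refuse with -EIO while it is set. The shape of the guard, reduced to its essentials (bodies simplified, names as in the diff):

    void hangup(struct tty_struct *tty)
    {
            tty_lock();
            set_bit(TTY_HUPPING, &tty->flags);  /* survives BTM drops below */

            tty_ldisc_hangup(tty);              /* may drop and retake BTM */
            /* ... tear down session, pgrp, ctrl state ... */

            set_bit(TTY_HUPPED, &tty->flags);
            clear_bit(TTY_HUPPING, &tty->flags);
            tty_unlock();
    }

    static int reopen(struct tty_struct *tty)
    {
            if (test_bit(TTY_CLOSING, &tty->flags) ||
                test_bit(TTY_HUPPING, &tty->flags) ||
                test_bit(TTY_LDISC_CHANGING, &tty->flags))
                    return -EIO;                /* hangup in flight: refuse */
            /* ... normal reopen path ... */
            return 0;
    }
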
diff --git a/drivers/usb/atm/ueagle-atm.c b/drivers/usb/atm/ueagle-atm.c index ea071a5b6eee..44447f54942f 100644 --- a/drivers/usb/atm/ueagle-atm.c +++ b/drivers/usb/atm/ueagle-atm.c @@ -2301,7 +2301,7 @@ out: return ret; } -static DEVICE_ATTR(stat_status, S_IWUGO | S_IRUGO, read_status, reboot); +static DEVICE_ATTR(stat_status, S_IWUSR | S_IRUGO, read_status, reboot); static ssize_t read_human_status(struct device *dev, struct device_attribute *attr, char *buf) @@ -2364,8 +2364,7 @@ out: return ret; } -static DEVICE_ATTR(stat_human_status, S_IWUGO | S_IRUGO, - read_human_status, NULL); +static DEVICE_ATTR(stat_human_status, S_IRUGO, read_human_status, NULL); static ssize_t read_delin(struct device *dev, struct device_attribute *attr, char *buf) @@ -2397,7 +2396,7 @@ out: return ret; } -static DEVICE_ATTR(stat_delin, S_IWUGO | S_IRUGO, read_delin, NULL); +static DEVICE_ATTR(stat_delin, S_IRUGO, read_delin, NULL); #define UEA_ATTR(name, reset) \ \ diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 61800f77dac8..ced846ac4141 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -1330,6 +1330,8 @@ static int map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, */ if (usb_endpoint_xfer_control(&urb->ep->desc)) { + if (hcd->self.uses_pio_for_control) + return ret; if (hcd->self.uses_dma) { urb->setup_dma = dma_map_single( hcd->self.controller, diff --git a/drivers/usb/gadget/atmel_usba_udc.c b/drivers/usb/gadget/atmel_usba_udc.c index b5e20e873cba..717ff653fa23 100644 --- a/drivers/usb/gadget/atmel_usba_udc.c +++ b/drivers/usb/gadget/atmel_usba_udc.c @@ -2017,7 +2017,7 @@ static int __init usba_udc_probe(struct platform_device *pdev) } } else { /* gpio_request fail so use -EINVAL for gpio_is_valid */ - ubc->vbus_pin = -EINVAL; + udc->vbus_pin = -EINVAL; } } diff --git a/drivers/usb/host/ehci-dbg.c b/drivers/usb/host/ehci-dbg.c index 86afdc73322f..6e2599661b5b 100644 --- a/drivers/usb/host/ehci-dbg.c +++ b/drivers/usb/host/ehci-dbg.c @@ -1067,7 +1067,7 @@ static inline void create_debug_files (struct ehci_hcd *ehci) &debug_registers_fops)) goto file_error; - if (!debugfs_create_file("lpm", S_IRUGO|S_IWUGO, ehci->debug_dir, bus, + if (!debugfs_create_file("lpm", S_IRUGO|S_IWUSR, ehci->debug_dir, bus, &debug_lpm_fops)) goto file_error; diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index 502a7e6fef42..e9062806d4a2 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c @@ -1063,10 +1063,11 @@ rescan: tmp && tmp != qh; tmp = tmp->qh_next.qh) continue; - /* periodic qh self-unlinks on empty */ - if (!tmp) - goto nogood; - unlink_async (ehci, qh); + /* periodic qh self-unlinks on empty, and a COMPLETING qh + * may already be unlinked. + */ + if (tmp) + unlink_async(ehci, qh); /* FALL THROUGH */ case QH_STATE_UNLINK: /* wait for hw to finish? */ case QH_STATE_UNLINK_WAIT: @@ -1083,7 +1084,6 @@ idle_timeout: } /* else FALL THROUGH */ default: -nogood: /* caller was supposed to have unlinked any requests; * that's not our job. just leak this memory. 
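
In the hcd.c hunk above, map_urb_for_dma() now returns early for control transfers when the bus sets uses_pio_for_control, so setup packets are never DMA-mapped for controllers that push control traffic through the FIFO by PIO (MUSB sets this flag in the musb hunks further down). A condensed sketch of that path, with error handling elided:

    static int map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, gfp_t flags)
    {
            int ret = 0;

            if (usb_endpoint_xfer_control(&urb->ep->desc)) {
                    if (hcd->self.uses_pio_for_control)
                            return ret;     /* controller does PIO: no mapping */
                    if (hcd->self.uses_dma)
                            urb->setup_dma = dma_map_single(hcd->self.controller,
                                            urb->setup_packet,
                                            sizeof(struct usb_ctrlrequest),
                                            DMA_TO_DEVICE);
            }
            /* ... the data stage is mapped similarly ... */
            return ret;
    }
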
*/ diff --git a/drivers/usb/host/ehci-mem.c b/drivers/usb/host/ehci-mem.c index d36e4e75e08d..12f70c302b0b 100644 --- a/drivers/usb/host/ehci-mem.c +++ b/drivers/usb/host/ehci-mem.c @@ -141,6 +141,10 @@ static void ehci_mem_cleanup (struct ehci_hcd *ehci) qh_put (ehci->async); ehci->async = NULL; + if (ehci->dummy) + qh_put(ehci->dummy); + ehci->dummy = NULL; + /* DMA consistent memory and pools */ if (ehci->qtd_pool) dma_pool_destroy (ehci->qtd_pool); @@ -227,8 +231,26 @@ static int ehci_mem_init (struct ehci_hcd *ehci, gfp_t flags) if (ehci->periodic == NULL) { goto fail; } - for (i = 0; i < ehci->periodic_size; i++) - ehci->periodic [i] = EHCI_LIST_END(ehci); + + if (ehci->use_dummy_qh) { + struct ehci_qh_hw *hw; + ehci->dummy = ehci_qh_alloc(ehci, flags); + if (!ehci->dummy) + goto fail; + + hw = ehci->dummy->hw; + hw->hw_next = EHCI_LIST_END(ehci); + hw->hw_qtd_next = EHCI_LIST_END(ehci); + hw->hw_alt_next = EHCI_LIST_END(ehci); + hw->hw_token &= ~QTD_STS_ACTIVE; + ehci->dummy->hw = hw; + + for (i = 0; i < ehci->periodic_size; i++) + ehci->periodic[i] = ehci->dummy->qh_dma; + } else { + for (i = 0; i < ehci->periodic_size; i++) + ehci->periodic[i] = EHCI_LIST_END(ehci); + } /* software shadow of hardware table */ ehci->pshadow = kcalloc(ehci->periodic_size, sizeof(void *), flags); diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c index a1e8d273103f..655f3c9f88bf 100644 --- a/drivers/usb/host/ehci-pci.c +++ b/drivers/usb/host/ehci-pci.c @@ -103,6 +103,19 @@ static int ehci_pci_setup(struct usb_hcd *hcd) if (retval) return retval; + if ((pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == 0x7808) || + (pdev->vendor == PCI_VENDOR_ID_ATI && pdev->device == 0x4396)) { + /* EHCI controller on AMD SB700/SB800/Hudson-2/3 platforms may + * read/write memory space which does not belong to it when + * there is NULL pointer with T-bit set to 1 in the frame list + * table. To avoid the issue, the frame list link pointer + * should always contain a valid pointer to a inactive qh. + */ + ehci->use_dummy_qh = 1; + ehci_info(ehci, "applying AMD SB700/SB800/Hudson-2/3 EHCI " + "dummy qh workaround\n"); + } + /* data structure init */ retval = ehci_init(hcd); if (retval) @@ -148,6 +161,18 @@ static int ehci_pci_setup(struct usb_hcd *hcd) if (pdev->revision < 0xa4) ehci->no_selective_suspend = 1; break; + + /* MCP89 chips on the MacBookAir3,1 give EPROTO when + * fetching device descriptors unless LPM is disabled. + * There are also intermittent problems enumerating + * devices with PPCD enabled. + */ + case 0x0d9d: + ehci_info(ehci, "disable lpm/ppcd for nvidia mcp89"); + ehci->has_lpm = 0; + ehci->has_ppcd = 0; + ehci->command &= ~CMD_PPCEE; + break; } break; case PCI_VENDOR_ID_VIA: diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c index a92526d6e5ae..d9f78eb26572 100644 --- a/drivers/usb/host/ehci-sched.c +++ b/drivers/usb/host/ehci-sched.c @@ -98,7 +98,14 @@ static void periodic_unlink (struct ehci_hcd *ehci, unsigned frame, void *ptr) */ *prev_p = *periodic_next_shadow(ehci, &here, Q_NEXT_TYPE(ehci, *hw_p)); - *hw_p = *shadow_next_periodic(ehci, &here, Q_NEXT_TYPE(ehci, *hw_p)); + + if (!ehci->use_dummy_qh || + *shadow_next_periodic(ehci, &here, Q_NEXT_TYPE(ehci, *hw_p)) + != EHCI_LIST_END(ehci)) + *hw_p = *shadow_next_periodic(ehci, &here, + Q_NEXT_TYPE(ehci, *hw_p)); + else + *hw_p = ehci->dummy->qh_dma; } /* how many of the uframe's 125 usecs are allocated? */ @@ -2335,7 +2342,11 @@ restart: * pointer for much longer, if at all. 
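
The ehci-mem and ehci-pci hunks above implement the AMD SB700/SB800/Hudson workaround: affected controllers may prefetch through frame-list entries whose T-bit marks them invalid, so instead of terminating empty periodic slots with EHCI_LIST_END the driver points them at a permanently inactive "dummy" QH. Condensed from the hunk (allocation context and the qh_hw layout are the driver's own):

    if (ehci->use_dummy_qh) {
            struct ehci_qh_hw *hw;

            ehci->dummy = ehci_qh_alloc(ehci, flags);
            if (!ehci->dummy)
                    goto fail;

            hw = ehci->dummy->hw;
            hw->hw_next = EHCI_LIST_END(ehci);      /* QH ends its own list */
            hw->hw_qtd_next = EHCI_LIST_END(ehci);
            hw->hw_alt_next = EHCI_LIST_END(ehci);
            hw->hw_token &= ~QTD_STS_ACTIVE;        /* never executes work */

            for (i = 0; i < ehci->periodic_size; i++)
                    ehci->periodic[i] = ehci->dummy->qh_dma;  /* valid ptr */
    } else {
            for (i = 0; i < ehci->periodic_size; i++)
                    ehci->periodic[i] = EHCI_LIST_END(ehci);  /* T-bit set */
    }

The unlink paths in ehci-sched (below) mirror this: where they used to write a list-end marker back into the hardware pointer, they now substitute the dummy QH's DMA address when the quirk is active.
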
*/ *q_p = q.itd->itd_next; - *hw_p = q.itd->hw_next; + if (!ehci->use_dummy_qh || + q.itd->hw_next != EHCI_LIST_END(ehci)) + *hw_p = q.itd->hw_next; + else + *hw_p = ehci->dummy->qh_dma; type = Q_NEXT_TYPE(ehci, q.itd->hw_next); wmb(); modified = itd_complete (ehci, q.itd); @@ -2368,7 +2379,11 @@ restart: * URB completion. */ *q_p = q.sitd->sitd_next; - *hw_p = q.sitd->hw_next; + if (!ehci->use_dummy_qh || + q.sitd->hw_next != EHCI_LIST_END(ehci)) + *hw_p = q.sitd->hw_next; + else + *hw_p = ehci->dummy->qh_dma; type = Q_NEXT_TYPE(ehci, q.sitd->hw_next); wmb(); modified = sitd_complete (ehci, q.sitd); diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h index bde823f704e9..ba8eab366b82 100644 --- a/drivers/usb/host/ehci.h +++ b/drivers/usb/host/ehci.h @@ -73,6 +73,7 @@ struct ehci_hcd { /* one per controller */ /* async schedule support */ struct ehci_qh *async; + struct ehci_qh *dummy; /* For AMD quirk use */ struct ehci_qh *reclaim; unsigned scanning : 1; @@ -131,6 +132,7 @@ struct ehci_hcd { /* one per controller */ unsigned need_io_watchdog:1; unsigned broken_periodic:1; unsigned fs_i_thresh:1; /* Intel iso scheduling */ + unsigned use_dummy_qh:1; /* AMD Frame List table quirk*/ /* required for usb32 quirk */ #define OHCI_CTRL_HCFS (3 << 6) diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c index 6c4fb4efb4bb..43a39eb56cc6 100644 --- a/drivers/usb/host/isp1362-hcd.c +++ b/drivers/usb/host/isp1362-hcd.c @@ -2683,7 +2683,7 @@ static int __devexit isp1362_remove(struct platform_device *pdev) return 0; } -static int __init isp1362_probe(struct platform_device *pdev) +static int __devinit isp1362_probe(struct platform_device *pdev) { struct usb_hcd *hcd; struct isp1362_hcd *isp1362_hcd; diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index fef5a1f9d483..5d963e350494 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -229,6 +229,13 @@ void xhci_ring_device(struct xhci_hcd *xhci, int slot_id) static void xhci_disable_port(struct xhci_hcd *xhci, u16 wIndex, u32 __iomem *addr, u32 port_status) { + /* Don't allow the USB core to disable SuperSpeed ports. 
*/ + if (xhci->port_array[wIndex] == 0x03) { + xhci_dbg(xhci, "Ignoring request to disable " + "SuperSpeed port.\n"); + return; + } + /* Write 1 to disable the port */ xhci_writel(xhci, port_status | PORT_PE, addr); port_status = xhci_readl(xhci, addr); diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 202770676da3..0fae58ef8afe 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -1045,7 +1045,7 @@ static inline u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci, if (udev->speed == USB_SPEED_SUPER) return ep->ss_ep_comp.wBytesPerInterval; - max_packet = ep->desc.wMaxPacketSize & 0x3ff; + max_packet = GET_MAX_PACKET(ep->desc.wMaxPacketSize); max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11; /* A 0 in max burst means 1 transfer per ESIT */ return max_packet * (max_burst + 1); @@ -1135,7 +1135,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, /* Fall through */ case USB_SPEED_FULL: case USB_SPEED_LOW: - max_packet = ep->desc.wMaxPacketSize & 0x3ff; + max_packet = GET_MAX_PACKET(ep->desc.wMaxPacketSize); ep_ctx->ep_info2 |= MAX_PACKET(max_packet); break; default: @@ -1443,6 +1443,13 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) xhci->dcbaa = NULL; scratchpad_free(xhci); + + xhci->num_usb2_ports = 0; + xhci->num_usb3_ports = 0; + kfree(xhci->usb2_ports); + kfree(xhci->usb3_ports); + kfree(xhci->port_array); + xhci->page_size = 0; xhci->page_shift = 0; xhci->bus_suspended = 0; @@ -1627,6 +1634,161 @@ static void xhci_set_hc_event_deq(struct xhci_hcd *xhci) &xhci->ir_set->erst_dequeue); } +static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports, + u32 __iomem *addr, u8 major_revision) +{ + u32 temp, port_offset, port_count; + int i; + + if (major_revision > 0x03) { + xhci_warn(xhci, "Ignoring unknown port speed, " + "Ext Cap %p, revision = 0x%x\n", + addr, major_revision); + /* Ignoring port protocol we can't understand. FIXME */ + return; + } + + /* Port offset and count in the third dword, see section 7.2 */ + temp = xhci_readl(xhci, addr + 2); + port_offset = XHCI_EXT_PORT_OFF(temp); + port_count = XHCI_EXT_PORT_COUNT(temp); + xhci_dbg(xhci, "Ext Cap %p, port offset = %u, " + "count = %u, revision = 0x%x\n", + addr, port_offset, port_count, major_revision); + /* Port count includes the current port offset */ + if (port_offset == 0 || (port_offset + port_count - 1) > num_ports) + /* WTF? "Valid values are ‘1’ to MaxPorts" */ + return; + port_offset--; + for (i = port_offset; i < (port_offset + port_count); i++) { + /* Duplicate entry. Ignore the port if the revisions differ. */ + if (xhci->port_array[i] != 0) { + xhci_warn(xhci, "Duplicate port entry, Ext Cap %p," + " port %u\n", addr, i); + xhci_warn(xhci, "Port was marked as USB %u, " + "duplicated as USB %u\n", + xhci->port_array[i], major_revision); + /* Only adjust the roothub port counts if we haven't + * found a similar duplicate. + */ + if (xhci->port_array[i] != major_revision && + xhci->port_array[i] != (u8) -1) { + if (xhci->port_array[i] == 0x03) + xhci->num_usb3_ports--; + else + xhci->num_usb2_ports--; + xhci->port_array[i] = (u8) -1; + } + /* FIXME: Should we disable the port? */ + } + xhci->port_array[i] = major_revision; + if (major_revision == 0x03) + xhci->num_usb3_ports++; + else + xhci->num_usb2_ports++; + } + /* FIXME: Should we disable ports not in the Extended Capabilities? */ +} + +/* + * Scan the Extended Capabilities for the "Supported Protocol Capabilities" that + * specify what speeds each port is supposed to be. 
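
The xhci-mem hunks above replace the open-coded "& 0x3ff" with GET_MAX_PACKET(): per USB 2.0 section 9.6.6, bits 10..0 of wMaxPacketSize carry the max packet size, so the 0x3ff mask silently dropped bit 10. For a 1024-byte endpoint (wMaxPacketSize = 0x400) the old mask yields 0, the new one 1024. A standalone check, runnable as plain C:

    #include <stdio.h>

    #define OLD_MAX_PACKET(p)  ((p) & 0x3ff)  /* buggy: loses bit 10 */
    #define GET_MAX_PACKET(p)  ((p) & 0x7ff)  /* bits 10..0, USB 2.0 9.6.6 */

    int main(void)
    {
            unsigned short wMaxPacketSize = 0x400;  /* 1024-byte max packet */

            printf("old mask: %u\n", OLD_MAX_PACKET(wMaxPacketSize));  /* 0    */
            printf("new mask: %u\n", GET_MAX_PACKET(wMaxPacketSize));  /* 1024 */
            return 0;
    }
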
We can't count on the port + * speed bits in the PORTSC register being correct until a device is connected, + * but we need to set up the two fake roothubs with the correct number of USB + * 3.0 and USB 2.0 ports at host controller initialization time. + */ +static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags) +{ + u32 __iomem *addr; + u32 offset; + unsigned int num_ports; + int i, port_index; + + addr = &xhci->cap_regs->hcc_params; + offset = XHCI_HCC_EXT_CAPS(xhci_readl(xhci, addr)); + if (offset == 0) { + xhci_err(xhci, "No Extended Capability registers, " + "unable to set up roothub.\n"); + return -ENODEV; + } + + num_ports = HCS_MAX_PORTS(xhci->hcs_params1); + xhci->port_array = kzalloc(sizeof(*xhci->port_array)*num_ports, flags); + if (!xhci->port_array) + return -ENOMEM; + + /* + * For whatever reason, the first capability offset is from the + * capability register base, not from the HCCPARAMS register. + * See section 5.3.6 for offset calculation. + */ + addr = &xhci->cap_regs->hc_capbase + offset; + while (1) { + u32 cap_id; + + cap_id = xhci_readl(xhci, addr); + if (XHCI_EXT_CAPS_ID(cap_id) == XHCI_EXT_CAPS_PROTOCOL) + xhci_add_in_port(xhci, num_ports, addr, + (u8) XHCI_EXT_PORT_MAJOR(cap_id)); + offset = XHCI_EXT_CAPS_NEXT(cap_id); + if (!offset || (xhci->num_usb2_ports + xhci->num_usb3_ports) + == num_ports) + break; + /* + * Once you're into the Extended Capabilities, the offset is + * always relative to the register holding the offset. + */ + addr += offset; + } + + if (xhci->num_usb2_ports == 0 && xhci->num_usb3_ports == 0) { + xhci_warn(xhci, "No ports on the roothubs?\n"); + return -ENODEV; + } + xhci_dbg(xhci, "Found %u USB 2.0 ports and %u USB 3.0 ports.\n", + xhci->num_usb2_ports, xhci->num_usb3_ports); + /* + * Note we could have all USB 3.0 ports, or all USB 2.0 ports. + * Not sure how the USB core will handle a hub with no ports... 
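
xhci_setup_port_arrays() above walks the Extended Capabilities list with an asymmetric addressing rule: the first offset (from HCCPARAMS) is relative to the capability register base, but each capability's "next" field is relative to the register holding it, so the loop advances with addr += offset instead of recomputing from a base. A stripped-down sketch of the walk (macros are the driver's own; offsets count 32-bit words, which pointer arithmetic on u32 handles naturally):

    u32 __iomem *addr = &xhci->cap_regs->hc_capbase +
                XHCI_HCC_EXT_CAPS(xhci_readl(xhci, &xhci->cap_regs->hcc_params));

    while (1) {
            u32 cap_id = xhci_readl(xhci, addr);
            u32 offset;

            if (XHCI_EXT_CAPS_ID(cap_id) == XHCI_EXT_CAPS_PROTOCOL)
                    xhci_add_in_port(xhci, num_ports, addr,
                                     (u8) XHCI_EXT_PORT_MAJOR(cap_id));

            offset = XHCI_EXT_CAPS_NEXT(cap_id);
            if (!offset)            /* a zero next-pointer ends the list */
                    break;
            addr += offset;         /* relative to the current register */
    }
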
+ */ + if (xhci->num_usb2_ports) { + xhci->usb2_ports = kmalloc(sizeof(*xhci->usb2_ports)* + xhci->num_usb2_ports, flags); + if (!xhci->usb2_ports) + return -ENOMEM; + + port_index = 0; + for (i = 0; i < num_ports; i++) + if (xhci->port_array[i] != 0x03) { + xhci->usb2_ports[port_index] = + &xhci->op_regs->port_status_base + + NUM_PORT_REGS*i; + xhci_dbg(xhci, "USB 2.0 port at index %u, " + "addr = %p\n", i, + xhci->usb2_ports[port_index]); + port_index++; + } + } + if (xhci->num_usb3_ports) { + xhci->usb3_ports = kmalloc(sizeof(*xhci->usb3_ports)* + xhci->num_usb3_ports, flags); + if (!xhci->usb3_ports) + return -ENOMEM; + + port_index = 0; + for (i = 0; i < num_ports; i++) + if (xhci->port_array[i] == 0x03) { + xhci->usb3_ports[port_index] = + &xhci->op_regs->port_status_base + + NUM_PORT_REGS*i; + xhci_dbg(xhci, "USB 3.0 port at index %u, " + "addr = %p\n", i, + xhci->usb3_ports[port_index]); + port_index++; + } + } + return 0; +} int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) { @@ -1809,6 +1971,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) if (scratchpad_alloc(xhci, flags)) goto fail; + if (xhci_setup_port_arrays(xhci, flags)) + goto fail; return 0; diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 9f3115e729b1..df558f6f84e3 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -2104,7 +2104,6 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd) if (!(status & STS_EINT)) { spin_unlock(&xhci->lock); - xhci_warn(xhci, "Spurious interrupt.\n"); return IRQ_NONE; } xhci_dbg(xhci, "op reg status = %08x\n", status); diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 5d7d4e951ea4..45e4a3108cc3 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -577,6 +577,65 @@ static void xhci_restore_registers(struct xhci_hcd *xhci) xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base); } +static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci) +{ + u64 val_64; + + /* step 2: initialize command ring buffer */ + val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); + val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) | + (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, + xhci->cmd_ring->dequeue) & + (u64) ~CMD_RING_RSVD_BITS) | + xhci->cmd_ring->cycle_state; + xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n", + (long unsigned long) val_64); + xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring); +} + +/* + * The whole command ring must be cleared to zero when we suspend the host. + * + * The host doesn't save the command ring pointer in the suspend well, so we + * need to re-program it on resume. Unfortunately, the pointer must be 64-byte + * aligned, because of the reserved bits in the command ring dequeue pointer + * register. Therefore, we can't just set the dequeue pointer back in the + * middle of the ring (TRBs are 16-byte aligned). + */ +static void xhci_clear_command_ring(struct xhci_hcd *xhci) +{ + struct xhci_ring *ring; + struct xhci_segment *seg; + + ring = xhci->cmd_ring; + seg = ring->deq_seg; + do { + memset(seg->trbs, 0, SEGMENT_SIZE); + seg = seg->next; + } while (seg != ring->deq_seg); + + /* Reset the software enqueue and dequeue pointers */ + ring->deq_seg = ring->first_seg; + ring->dequeue = ring->first_seg->trbs; + ring->enq_seg = ring->deq_seg; + ring->enqueue = ring->dequeue; + + /* + * Ring is now zeroed, so the HW should look for change of ownership + * when the cycle bit is set to 1. + */ + ring->cycle_state = 1; + + /* + * Reset the hardware dequeue pointer. 
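
xhci_clear_command_ring() above zeroes every segment and rewinds the software pointers because the controller forgets its command ring pointer across suspend, and the register's 64-byte alignment requirement means the driver cannot simply point it back into the middle of a ring of 16-byte TRBs. Resetting cycle_state to 1 is the key detail: on a freshly zeroed ring the hardware detects ownership by watching for the cycle bit to flip to 1. The core of the reset, condensed from the hunk:

    static void clear_command_ring(struct xhci_ring *ring)
    {
            struct xhci_segment *seg = ring->deq_seg;

            do {                                    /* zero every segment */
                    memset(seg->trbs, 0, SEGMENT_SIZE);
                    seg = seg->next;
            } while (seg != ring->deq_seg);

            ring->deq_seg = ring->first_seg;        /* rewind software state */
            ring->dequeue = ring->first_seg->trbs;
            ring->enq_seg = ring->deq_seg;
            ring->enqueue = ring->dequeue;
            ring->cycle_state = 1;  /* zeroed ring: HW waits for cycle -> 1 */
    }
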
+ * Yes, this will need to be re-written after resume, but we're paranoid + * and want to make sure the hardware doesn't access bogus memory + * because, say, the BIOS or an SMI started the host without changing + * the command ring pointers. + */ + xhci_set_cmd_ring_deq(xhci); +} + /* * Stop HC (not bus-specific) * @@ -604,6 +663,7 @@ int xhci_suspend(struct xhci_hcd *xhci) spin_unlock_irq(&xhci->lock); return -ETIMEDOUT; } + xhci_clear_command_ring(xhci); /* step 3: save registers */ xhci_save_registers(xhci); @@ -635,7 +695,6 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) u32 command, temp = 0; struct usb_hcd *hcd = xhci_to_hcd(xhci); struct pci_dev *pdev = to_pci_dev(hcd->self.controller); - u64 val_64; int old_state, retval; old_state = hcd->state; @@ -648,15 +707,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) /* step 1: restore register */ xhci_restore_registers(xhci); /* step 2: initialize command ring buffer */ - val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); - val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) | - (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, - xhci->cmd_ring->dequeue) & - (u64) ~CMD_RING_RSVD_BITS) | - xhci->cmd_ring->cycle_state; - xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n", - (long unsigned long) val_64); - xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring); + xhci_set_cmd_ring_deq(xhci); /* step 3: restore state and start state*/ /* step 3: set CRS flag */ command = xhci_readl(xhci, &xhci->op_regs->command); @@ -714,6 +765,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) return retval; } + spin_unlock_irq(&xhci->lock); /* Re-setup MSI-X */ if (hcd->irq) free_irq(hcd->irq, hcd); @@ -736,6 +788,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) hcd->irq = pdev->irq; } + spin_lock_irq(&xhci->lock); /* step 4: set Run/Stop bit */ command = xhci_readl(xhci, &xhci->op_regs->command); command |= CMD_RUN; @@ -1496,6 +1549,15 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci, cmd_completion = command->completion; cmd_status = &command->status; command->command_trb = xhci->cmd_ring->enqueue; + + /* Enqueue pointer can be left pointing to the link TRB, + * we must handle that + */ + if ((command->command_trb->link.control & TRB_TYPE_BITMASK) + == TRB_TYPE(TRB_LINK)) + command->command_trb = + xhci->cmd_ring->enq_seg->next->trbs; + list_add_tail(&command->cmd_list, &virt_dev->cmd_list); } else { in_ctx = virt_dev->in_ctx; @@ -2219,6 +2281,15 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev) /* Attempt to submit the Reset Device command to the command ring */ spin_lock_irqsave(&xhci->lock, flags); reset_device_cmd->command_trb = xhci->cmd_ring->enqueue; + + /* Enqueue pointer can be left pointing to the link TRB, + * we must handle that + */ + if ((reset_device_cmd->command_trb->link.control & TRB_TYPE_BITMASK) + == TRB_TYPE(TRB_LINK)) + reset_device_cmd->command_trb = + xhci->cmd_ring->enq_seg->next->trbs; + list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list); ret = xhci_queue_reset_device(xhci, slot_id); if (ret) { diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 93d3bf4d213c..170c367112d2 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -454,6 +454,24 @@ struct xhci_doorbell_array { /** + * struct xhci_protocol_caps + * @revision: major revision, minor revision, capability ID, + * and next capability pointer. + * @name_string: Four ASCII characters to say which spec this xHC + * follows, typically "USB ". 
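
The two xhci.c hunks just above fix command submission when the ring's enqueue pointer has been left parked on a link TRB: a command_trb recorded there would never match the completion event for the command, so the code detects the link TRB by type and records the first TRB of the next segment instead. The check, isolated from the hunk:

    union xhci_trb *command_trb = xhci->cmd_ring->enqueue;

    /* enqueue may point at the link TRB; the command actually lands in
     * the first TRB of the next segment */
    if ((command_trb->link.control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK))
            command_trb = xhci->cmd_ring->enq_seg->next->trbs;
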
+ * @port_info: Port offset, count, and protocol-defined information. + */ +struct xhci_protocol_caps { + u32 revision; + u32 name_string; + u32 port_info; +}; + +#define XHCI_EXT_PORT_MAJOR(x) (((x) >> 24) & 0xff) +#define XHCI_EXT_PORT_OFF(x) ((x) & 0xff) +#define XHCI_EXT_PORT_COUNT(x) (((x) >> 8) & 0xff) + +/** * struct xhci_container_ctx * @type: Type of context. Used to calculated offsets to contained contexts. * @size: Size of the context data @@ -621,6 +639,11 @@ struct xhci_ep_ctx { #define MAX_PACKET_MASK (0xffff << 16) #define MAX_PACKET_DECODED(p) (((p) >> 16) & 0xffff) +/* Get max packet size from ep desc. Bit 10..0 specify the max packet size. + * USB2.0 spec 9.6.6. + */ +#define GET_MAX_PACKET(p) ((p) & 0x7ff) + /* tx_info bitmasks */ #define AVG_TRB_LENGTH_FOR_EP(p) ((p) & 0xffff) #define MAX_ESIT_PAYLOAD_FOR_EP(p) (((p) & 0xffff) << 16) @@ -1235,6 +1258,14 @@ struct xhci_hcd { u32 suspended_ports[8]; /* which ports are suspended */ unsigned long resume_done[MAX_HC_PORTS]; + /* Is each xHCI roothub port a USB 3.0, USB 2.0, or USB 1.1 port? */ + u8 *port_array; + /* Array of pointers to USB 3.0 PORTSC registers */ + u32 __iomem **usb3_ports; + unsigned int num_usb3_ports; + /* Array of pointers to USB 2.0 PORTSC registers */ + u32 __iomem **usb2_ports; + unsigned int num_usb2_ports; }; /* For testing purposes */ diff --git a/drivers/usb/misc/cypress_cy7c63.c b/drivers/usb/misc/cypress_cy7c63.c index 2f43c57743c9..9251773ecef4 100644 --- a/drivers/usb/misc/cypress_cy7c63.c +++ b/drivers/usb/misc/cypress_cy7c63.c @@ -196,11 +196,9 @@ static ssize_t get_port1_handler(struct device *dev, return read_port(dev, attr, buf, 1, CYPRESS_READ_PORT_ID1); } -static DEVICE_ATTR(port0, S_IWUGO | S_IRUGO, - get_port0_handler, set_port0_handler); +static DEVICE_ATTR(port0, S_IRUGO | S_IWUSR, get_port0_handler, set_port0_handler); -static DEVICE_ATTR(port1, S_IWUGO | S_IRUGO, - get_port1_handler, set_port1_handler); +static DEVICE_ATTR(port1, S_IRUGO | S_IWUSR, get_port1_handler, set_port1_handler); static int cypress_probe(struct usb_interface *interface, diff --git a/drivers/usb/misc/trancevibrator.c b/drivers/usb/misc/trancevibrator.c index d77aba46ae85..f63776a48e2a 100644 --- a/drivers/usb/misc/trancevibrator.c +++ b/drivers/usb/misc/trancevibrator.c @@ -86,7 +86,7 @@ static ssize_t set_speed(struct device *dev, struct device_attribute *attr, return count; } -static DEVICE_ATTR(speed, S_IWUGO | S_IRUGO, show_speed, set_speed); +static DEVICE_ATTR(speed, S_IRUGO | S_IWUSR, show_speed, set_speed); static int tv_probe(struct usb_interface *interface, const struct usb_device_id *id) diff --git a/drivers/usb/misc/usbled.c b/drivers/usb/misc/usbled.c index 63da2c3c838f..c96f51de1696 100644 --- a/drivers/usb/misc/usbled.c +++ b/drivers/usb/misc/usbled.c @@ -94,7 +94,7 @@ static ssize_t set_##value(struct device *dev, struct device_attribute *attr, co change_color(led); \ return count; \ } \ -static DEVICE_ATTR(value, S_IWUGO | S_IRUGO, show_##value, set_##value); +static DEVICE_ATTR(value, S_IRUGO | S_IWUSR, show_##value, set_##value); show_set(blue); show_set(red); show_set(green); diff --git a/drivers/usb/misc/usbsevseg.c b/drivers/usb/misc/usbsevseg.c index de8ef945b536..417b8f207e8b 100644 --- a/drivers/usb/misc/usbsevseg.c +++ b/drivers/usb/misc/usbsevseg.c @@ -192,7 +192,7 @@ static ssize_t set_attr_##name(struct device *dev, \ \ return count; \ } \ -static DEVICE_ATTR(name, S_IWUGO | S_IRUGO, show_attr_##name, set_attr_##name); +static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, 
show_attr_##name, set_attr_##name); static ssize_t show_attr_text(struct device *dev, struct device_attribute *attr, char *buf) @@ -223,7 +223,7 @@ static ssize_t set_attr_text(struct device *dev, return count; } -static DEVICE_ATTR(text, S_IWUGO | S_IRUGO, show_attr_text, set_attr_text); +static DEVICE_ATTR(text, S_IRUGO | S_IWUSR, show_attr_text, set_attr_text); static ssize_t show_attr_decimals(struct device *dev, struct device_attribute *attr, char *buf) @@ -272,8 +272,7 @@ static ssize_t set_attr_decimals(struct device *dev, return count; } -static DEVICE_ATTR(decimals, S_IWUGO | S_IRUGO, - show_attr_decimals, set_attr_decimals); +static DEVICE_ATTR(decimals, S_IRUGO | S_IWUSR, show_attr_decimals, set_attr_decimals); static ssize_t show_attr_textmode(struct device *dev, struct device_attribute *attr, char *buf) @@ -319,8 +318,7 @@ static ssize_t set_attr_textmode(struct device *dev, return -EINVAL; } -static DEVICE_ATTR(textmode, S_IWUGO | S_IRUGO, - show_attr_textmode, set_attr_textmode); +static DEVICE_ATTR(textmode, S_IRUGO | S_IWUSR, show_attr_textmode, set_attr_textmode); MYDEV_ATTR_SIMPLE_UNSIGNED(powered, update_display_powered); diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c index 719c6180b31f..ac5bfd619e62 100644 --- a/drivers/usb/misc/yurex.c +++ b/drivers/usb/misc/yurex.c @@ -536,6 +536,7 @@ static const struct file_operations yurex_fops = { .open = yurex_open, .release = yurex_release, .fasync = yurex_fasync, + .llseek = default_llseek, }; diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index e6669fc3b804..99beebce8550 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c @@ -2116,12 +2116,15 @@ bad_config: * Otherwise, wait till the gadget driver hooks up. */ if (!is_otg_enabled(musb) && is_host_enabled(musb)) { + struct usb_hcd *hcd = musb_to_hcd(musb); + MUSB_HST_MODE(musb); musb->xceiv->default_a = 1; musb->xceiv->state = OTG_STATE_A_IDLE; status = usb_add_hcd(musb_to_hcd(musb), -1, 0); + hcd->self.uses_pio_for_control = 1; DBG(1, "%s mode, status %d, devctl %02x %c\n", "HOST", status, musb_readb(musb->mregs, MUSB_DEVCTL), diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c index 36cfd060dbe5..9d6ade82b9f2 100644 --- a/drivers/usb/musb/musb_gadget.c +++ b/drivers/usb/musb/musb_gadget.c @@ -92,6 +92,59 @@ /* ----------------------------------------------------------------------- */ +/* Maps the buffer to dma */ + +static inline void map_dma_buffer(struct musb_request *request, + struct musb *musb) +{ + if (request->request.dma == DMA_ADDR_INVALID) { + request->request.dma = dma_map_single( + musb->controller, + request->request.buf, + request->request.length, + request->tx + ? DMA_TO_DEVICE + : DMA_FROM_DEVICE); + request->mapped = 1; + } else { + dma_sync_single_for_device(musb->controller, + request->request.dma, + request->request.length, + request->tx + ? DMA_TO_DEVICE + : DMA_FROM_DEVICE); + request->mapped = 0; + } +} + +/* Unmap the buffer from dma and maps it back to cpu */ +static inline void unmap_dma_buffer(struct musb_request *request, + struct musb *musb) +{ + if (request->request.dma == DMA_ADDR_INVALID) { + DBG(20, "not unmapping a never mapped buffer\n"); + return; + } + if (request->mapped) { + dma_unmap_single(musb->controller, + request->request.dma, + request->request.length, + request->tx + ? 
DMA_TO_DEVICE + : DMA_FROM_DEVICE); + request->request.dma = DMA_ADDR_INVALID; + request->mapped = 0; + } else { + dma_sync_single_for_cpu(musb->controller, + request->request.dma, + request->request.length, + request->tx + ? DMA_TO_DEVICE + : DMA_FROM_DEVICE); + + } +} + /* * Immediately complete a request. * @@ -119,24 +172,8 @@ __acquires(ep->musb->lock) ep->busy = 1; spin_unlock(&musb->lock); - if (is_dma_capable()) { - if (req->mapped) { - dma_unmap_single(musb->controller, - req->request.dma, - req->request.length, - req->tx - ? DMA_TO_DEVICE - : DMA_FROM_DEVICE); - req->request.dma = DMA_ADDR_INVALID; - req->mapped = 0; - } else if (req->request.dma != DMA_ADDR_INVALID) - dma_sync_single_for_cpu(musb->controller, - req->request.dma, - req->request.length, - req->tx - ? DMA_TO_DEVICE - : DMA_FROM_DEVICE); - } + if (is_dma_capable() && ep->dma) + unmap_dma_buffer(req, musb); if (request->status == 0) DBG(5, "%s done request %p, %d/%d\n", ep->end_point.name, request, @@ -395,6 +432,13 @@ static void txstate(struct musb *musb, struct musb_request *req) #endif if (!use_dma) { + /* + * Unmap the dma buffer back to cpu if dma channel + * programming fails + */ + if (is_dma_capable() && musb_ep->dma) + unmap_dma_buffer(req, musb); + musb_write_fifo(musb_ep->hw_ep, fifo_count, (u8 *) (request->buf + request->actual)); request->actual += fifo_count; @@ -713,6 +757,21 @@ static void rxstate(struct musb *musb, struct musb_request *req) return; } #endif + /* + * Unmap the dma buffer back to cpu if dma channel + * programming fails. This buffer is mapped if the + * channel allocation is successful + */ + if (is_dma_capable() && musb_ep->dma) { + unmap_dma_buffer(req, musb); + + /* + * Clear DMAENAB and AUTOCLEAR for the + * PIO mode transfer + */ + csr &= ~(MUSB_RXCSR_DMAENAB | MUSB_RXCSR_AUTOCLEAR); + musb_writew(epio, MUSB_RXCSR, csr); + } musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *) (request->buf + request->actual)); @@ -837,7 +896,9 @@ void musb_g_rx(struct musb *musb, u8 epnum) if (!request) return; } +#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) exit: +#endif /* Analyze request */ rxstate(musb, to_musb_request(request)); } @@ -1150,26 +1211,9 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req, request->epnum = musb_ep->current_epnum; request->tx = musb_ep->is_in; - if (is_dma_capable() && musb_ep->dma) { - if (request->request.dma == DMA_ADDR_INVALID) { - request->request.dma = dma_map_single( - musb->controller, - request->request.buf, - request->request.length, - request->tx - ? DMA_TO_DEVICE - : DMA_FROM_DEVICE); - request->mapped = 1; - } else { - dma_sync_single_for_device(musb->controller, - request->request.dma, - request->request.length, - request->tx - ? 
DMA_TO_DEVICE - : DMA_FROM_DEVICE); - request->mapped = 0; - } - } else + if (is_dma_capable() && musb_ep->dma) + map_dma_buffer(request, musb); + else request->mapped = 0; spin_lock_irqsave(&musb->lock, lockflags); @@ -1789,6 +1833,8 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver, spin_unlock_irqrestore(&musb->lock, flags); if (is_otg_enabled(musb)) { + struct usb_hcd *hcd = musb_to_hcd(musb); + DBG(3, "OTG startup...\n"); /* REVISIT: funcall to other code, which also @@ -1803,6 +1849,8 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver, musb->gadget_driver = NULL; musb->g.dev.driver = NULL; spin_unlock_irqrestore(&musb->lock, flags); + } else { + hcd->self.uses_pio_for_control = 1; } } } diff --git a/drivers/usb/otg/langwell_otg.c b/drivers/usb/otg/langwell_otg.c index bdc3ea66be69..9fea48264fa2 100644 --- a/drivers/usb/otg/langwell_otg.c +++ b/drivers/usb/otg/langwell_otg.c @@ -1896,7 +1896,7 @@ set_a_bus_req(struct device *dev, struct device_attribute *attr, } return count; } -static DEVICE_ATTR(a_bus_req, S_IRUGO | S_IWUGO, get_a_bus_req, set_a_bus_req); +static DEVICE_ATTR(a_bus_req, S_IRUGO | S_IWUSR, get_a_bus_req, set_a_bus_req); static ssize_t get_a_bus_drop(struct device *dev, struct device_attribute *attr, char *buf) @@ -1942,8 +1942,7 @@ set_a_bus_drop(struct device *dev, struct device_attribute *attr, } return count; } -static DEVICE_ATTR(a_bus_drop, S_IRUGO | S_IWUGO, - get_a_bus_drop, set_a_bus_drop); +static DEVICE_ATTR(a_bus_drop, S_IRUGO | S_IWUSR, get_a_bus_drop, set_a_bus_drop); static ssize_t get_b_bus_req(struct device *dev, struct device_attribute *attr, char *buf) @@ -1988,7 +1987,7 @@ set_b_bus_req(struct device *dev, struct device_attribute *attr, } return count; } -static DEVICE_ATTR(b_bus_req, S_IRUGO | S_IWUGO, get_b_bus_req, set_b_bus_req); +static DEVICE_ATTR(b_bus_req, S_IRUGO | S_IWUSR, get_b_bus_req, set_b_bus_req); static ssize_t set_a_clr_err(struct device *dev, struct device_attribute *attr, @@ -2012,7 +2011,7 @@ set_a_clr_err(struct device *dev, struct device_attribute *attr, } return count; } -static DEVICE_ATTR(a_clr_err, S_IWUGO, NULL, set_a_clr_err); +static DEVICE_ATTR(a_clr_err, S_IWUSR, NULL, set_a_clr_err); static struct attribute *inputs_attrs[] = { &dev_attr_a_bus_req.attr, diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 76f8b3556672..6a50965e23f2 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -201,6 +201,7 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(FTDI_VID, FTDI_MTXORB_5_PID) }, { USB_DEVICE(FTDI_VID, FTDI_MTXORB_6_PID) }, { USB_DEVICE(FTDI_VID, FTDI_R2000KU_TRUE_RNG) }, + { USB_DEVICE(FTDI_VID, FTDI_VARDAAN_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0100_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0101_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0102_PID) }, @@ -696,6 +697,7 @@ static struct usb_device_id id_table_combined [] = { .driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk }, { USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) }, { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_SERIAL_VX7_PID) }, + { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_CT29B_PID) }, { USB_DEVICE(FTDI_VID, FTDI_MAXSTREAM_PID) }, { USB_DEVICE(FTDI_VID, FTDI_PHI_FISCO_PID) }, { USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) }, diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 263f62551197..1286f1e23d8c 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -114,6 +114,9 
@@ /* Lenz LI-USB Computer Interface. */ #define FTDI_LENZ_LIUSB_PID 0xD780 +/* Vardaan Enterprises Serial Interface VEUSB422R3 */ +#define FTDI_VARDAAN_PID 0xF070 + /* * Xsens Technologies BV products (http://www.xsens.com). */ @@ -721,6 +724,7 @@ */ #define RTSYSTEMS_VID 0x2100 /* Vendor ID */ #define RTSYSTEMS_SERIAL_VX7_PID 0x9e52 /* Serial converter for VX-7 Radios using FT232RL */ +#define RTSYSTEMS_CT29B_PID 0x9e54 /* CT29B Radio Cable */ /* * Bayer Ascensia Contour blood glucose meter USB-converter cable. diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c index 861223f2af6e..6954de50c0ff 100644 --- a/drivers/usb/serial/usb-serial.c +++ b/drivers/usb/serial/usb-serial.c @@ -51,6 +51,7 @@ static struct usb_driver usb_serial_driver = { .suspend = usb_serial_suspend, .resume = usb_serial_resume, .no_dynamic_id = 1, + .supports_autosuspend = 1, }; /* There is no MODULE_DEVICE_TABLE for usbserial.c. Instead @@ -1343,6 +1344,8 @@ int usb_serial_register(struct usb_serial_driver *driver) return -ENODEV; fixup_generic(driver); + if (driver->usb_driver) + driver->usb_driver->supports_autosuspend = 1; if (!driver->description) driver->description = driver->driver.name; diff --git a/drivers/usb/storage/sierra_ms.c b/drivers/usb/storage/sierra_ms.c index 57fc2f532cab..ceba512f84d0 100644 --- a/drivers/usb/storage/sierra_ms.c +++ b/drivers/usb/storage/sierra_ms.c @@ -121,7 +121,7 @@ static ssize_t show_truinst(struct device *dev, struct device_attribute *attr, } return result; } -static DEVICE_ATTR(truinst, S_IWUGO | S_IRUGO, show_truinst, NULL); +static DEVICE_ATTR(truinst, S_IRUGO, show_truinst, NULL); int sierra_ms_init(struct us_data *us) { diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 4b4da5b86ff9..f442668a1e52 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -129,8 +129,9 @@ static void handle_tx(struct vhost_net *net) size_t hdr_size; struct socket *sock; - sock = rcu_dereference_check(vq->private_data, - lockdep_is_held(&vq->mutex)); + /* TODO: check that we are running from vhost_worker? + * Not sure it's worth it, it's straight-forward enough. 
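
The ftdi_sio hunks above show the usual two-step recipe for teaching a USB serial driver about new hardware: define the product ID next to the vendor's other PIDs in the header, then add a USB_DEVICE() entry to the driver's match table. In sketch form, with deliberately hypothetical IDs:

    /* Hypothetical IDs, for illustration only */
    #define EXAMPLE_VID             0x1234
    #define EXAMPLE_WIDGET_PID      0xabcd  /* Example Widget serial cable */

    static struct usb_device_id id_table[] = {
            { USB_DEVICE(EXAMPLE_VID, EXAMPLE_WIDGET_PID) },
            { }                             /* terminating entry */
    };
    MODULE_DEVICE_TABLE(usb, id_table);
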
*/ + sock = rcu_dereference_check(vq->private_data, 1); if (!sock) return; diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c index e207810bba3c..08703299ef61 100644 --- a/drivers/video/backlight/backlight.c +++ b/drivers/video/backlight/backlight.c @@ -197,12 +197,12 @@ static int backlight_suspend(struct device *dev, pm_message_t state) { struct backlight_device *bd = to_backlight_device(dev); - if (bd->ops->options & BL_CORE_SUSPENDRESUME) { - mutex_lock(&bd->ops_lock); + mutex_lock(&bd->ops_lock); + if (bd->ops && bd->ops->options & BL_CORE_SUSPENDRESUME) { bd->props.state |= BL_CORE_SUSPENDED; backlight_update_status(bd); - mutex_unlock(&bd->ops_lock); } + mutex_unlock(&bd->ops_lock); return 0; } @@ -211,12 +211,12 @@ static int backlight_resume(struct device *dev) { struct backlight_device *bd = to_backlight_device(dev); - if (bd->ops->options & BL_CORE_SUSPENDRESUME) { - mutex_lock(&bd->ops_lock); + mutex_lock(&bd->ops_lock); + if (bd->ops && bd->ops->options & BL_CORE_SUSPENDRESUME) { bd->props.state &= ~BL_CORE_SUSPENDED; backlight_update_status(bd); - mutex_unlock(&bd->ops_lock); } + mutex_unlock(&bd->ops_lock); return 0; } diff --git a/drivers/video/da8xx-fb.c b/drivers/video/da8xx-fb.c index cad7d45c8bac..c265aed09e04 100644 --- a/drivers/video/da8xx-fb.c +++ b/drivers/video/da8xx-fb.c @@ -1029,10 +1029,6 @@ static int __init fb_probe(struct platform_device *device) goto err_release_pl_mem; } - ret = request_irq(par->irq, lcdc_irq_handler, 0, DRIVER_NAME, par); - if (ret) - goto err_release_pl_mem; - /* Initialize par */ da8xx_fb_info->var.bits_per_pixel = lcd_cfg->bpp; @@ -1060,7 +1056,7 @@ static int __init fb_probe(struct platform_device *device) ret = fb_alloc_cmap(&da8xx_fb_info->cmap, PALETTE_SIZE, 0); if (ret) - goto err_free_irq; + goto err_release_pl_mem; da8xx_fb_info->cmap.len = par->palette_sz; /* initialize var_screeninfo */ @@ -1088,8 +1084,13 @@ static int __init fb_probe(struct platform_device *device) goto err_cpu_freq; } #endif + + ret = request_irq(par->irq, lcdc_irq_handler, 0, DRIVER_NAME, par); + if (ret) + goto irq_freq; return 0; +irq_freq: #ifdef CONFIG_CPU_FREQ err_cpu_freq: unregister_framebuffer(da8xx_fb_info); @@ -1098,9 +1099,6 @@ err_cpu_freq: err_dealloc_cmap: fb_dealloc_cmap(&da8xx_fb_info->cmap); -err_free_irq: - free_irq(par->irq, par); - err_release_pl_mem: dma_free_coherent(NULL, PALETTE_SIZE, par->v_palette_base, par->p_palette_base); diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c index f53b9f1d6aba..5c3960da755a 100644 --- a/drivers/video/fbcmap.c +++ b/drivers/video/fbcmap.c @@ -80,6 +80,7 @@ static const struct fb_cmap default_16_colors = { * @cmap: frame buffer colormap structure * @len: length of @cmap * @transp: boolean, 1 if there is transparency, 0 otherwise + * @flags: flags for kmalloc memory allocation * * Allocates memory for a colormap @cmap. @len is the * number of entries in the palette. 
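
The backlight hunks above fix two problems at once: bd->ops can become NULL while a suspend is in flight, and the old code read ops->options before taking ops_lock. Taking the mutex first and adding the NULL test makes the read of bd->ops and the state update atomic with respect to backlight_device_unregister(). The corrected shape, condensed from the hunk:

    static int backlight_suspend(struct device *dev, pm_message_t state)
    {
            struct backlight_device *bd = to_backlight_device(dev);

            mutex_lock(&bd->ops_lock);      /* lock before touching ops */
            if (bd->ops && bd->ops->options & BL_CORE_SUSPENDRESUME) {
                    bd->props.state |= BL_CORE_SUSPENDED;
                    backlight_update_status(bd);
            }
            mutex_unlock(&bd->ops_lock);

            return 0;
    }
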
@@ -88,34 +89,48 @@ static const struct fb_cmap default_16_colors = { * */ -int fb_alloc_cmap(struct fb_cmap *cmap, int len, int transp) +int fb_alloc_cmap_gfp(struct fb_cmap *cmap, int len, int transp, gfp_t flags) { - int size = len*sizeof(u16); - - if (cmap->len != len) { - fb_dealloc_cmap(cmap); - if (!len) - return 0; - if (!(cmap->red = kmalloc(size, GFP_ATOMIC))) - goto fail; - if (!(cmap->green = kmalloc(size, GFP_ATOMIC))) - goto fail; - if (!(cmap->blue = kmalloc(size, GFP_ATOMIC))) - goto fail; - if (transp) { - if (!(cmap->transp = kmalloc(size, GFP_ATOMIC))) + int size = len * sizeof(u16); + int ret = -ENOMEM; + + if (cmap->len != len) { + fb_dealloc_cmap(cmap); + if (!len) + return 0; + + cmap->red = kmalloc(size, flags); + if (!cmap->red) + goto fail; + cmap->green = kmalloc(size, flags); + if (!cmap->green) + goto fail; + cmap->blue = kmalloc(size, flags); + if (!cmap->blue) + goto fail; + if (transp) { + cmap->transp = kmalloc(size, flags); + if (!cmap->transp) + goto fail; + } else { + cmap->transp = NULL; + } + } + cmap->start = 0; + cmap->len = len; + ret = fb_copy_cmap(fb_default_cmap(len), cmap); + if (ret) goto fail; - } else - cmap->transp = NULL; - } - cmap->start = 0; - cmap->len = len; - fb_copy_cmap(fb_default_cmap(len), cmap); - return 0; + return 0; fail: - fb_dealloc_cmap(cmap); - return -ENOMEM; + fb_dealloc_cmap(cmap); + return ret; +} + +int fb_alloc_cmap(struct fb_cmap *cmap, int len, int transp) +{ + return fb_alloc_cmap_gfp(cmap, len, transp, GFP_ATOMIC); } /** @@ -250,8 +265,12 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info) int rc, size = cmap->len * sizeof(u16); struct fb_cmap umap; + if (size < 0 || size < cmap->len) + return -E2BIG; + memset(&umap, 0, sizeof(struct fb_cmap)); - rc = fb_alloc_cmap(&umap, cmap->len, cmap->transp != NULL); + rc = fb_alloc_cmap_gfp(&umap, cmap->len, cmap->transp != NULL, + GFP_KERNEL); if (rc) return rc; if (copy_from_user(umap.red, cmap->red, size) || diff --git a/drivers/video/geode/lxfb.h b/drivers/video/geode/lxfb.h index e4c4d89b7860..be8ccb47ebe0 100644 --- a/drivers/video/geode/lxfb.h +++ b/drivers/video/geode/lxfb.h @@ -22,6 +22,7 @@ #define DC_HFILT_COUNT 0x100 #define DC_VFILT_COUNT 0x100 #define VP_COEFF_SIZE 0x1000 +#define VP_PAL_COUNT 0x100 #define OUTPUT_CRT 0x01 #define OUTPUT_PANEL 0x02 @@ -48,7 +49,8 @@ struct lxfb_par { uint64_t vp[VP_REG_COUNT]; uint64_t fp[FP_REG_COUNT]; - uint32_t pal[DC_PAL_COUNT]; + uint32_t dc_pal[DC_PAL_COUNT]; + uint32_t vp_pal[VP_PAL_COUNT]; uint32_t hcoeff[DC_HFILT_COUNT * 2]; uint32_t vcoeff[DC_VFILT_COUNT]; uint32_t vp_coeff[VP_COEFF_SIZE / 4]; diff --git a/drivers/video/geode/lxfb_ops.c b/drivers/video/geode/lxfb_ops.c index bc35a95e59d4..79e9abc72b83 100644 --- a/drivers/video/geode/lxfb_ops.c +++ b/drivers/video/geode/lxfb_ops.c @@ -276,10 +276,10 @@ static void lx_graphics_enable(struct fb_info *info) write_fp(par, FP_PT1, 0); temp = FP_PT2_SCRC; - if (info->var.sync & FB_SYNC_HOR_HIGH_ACT) + if (!(info->var.sync & FB_SYNC_HOR_HIGH_ACT)) temp |= FP_PT2_HSP; - if (info->var.sync & FB_SYNC_VERT_HIGH_ACT) + if (!(info->var.sync & FB_SYNC_VERT_HIGH_ACT)) temp |= FP_PT2_VSP; write_fp(par, FP_PT2, temp); @@ -610,10 +610,15 @@ static void lx_save_regs(struct lxfb_par *par) memcpy(par->vp, par->vp_regs, sizeof(par->vp)); memcpy(par->fp, par->vp_regs + VP_FP_START, sizeof(par->fp)); - /* save the palette */ + /* save the display controller palette */ write_dc(par, DC_PAL_ADDRESS, 0); - for (i = 0; i < ARRAY_SIZE(par->pal); i++) - par->pal[i] = 
read_dc(par, DC_PAL_DATA); + for (i = 0; i < ARRAY_SIZE(par->dc_pal); i++) + par->dc_pal[i] = read_dc(par, DC_PAL_DATA); + + /* save the video processor palette */ + write_vp(par, VP_PAR, 0); + for (i = 0; i < ARRAY_SIZE(par->vp_pal); i++) + par->vp_pal[i] = read_vp(par, VP_PDR); /* save the horizontal filter coefficients */ filt = par->dc[DC_IRQ_FILT_CTL] | DC_IRQ_FILT_CTL_H_FILT_SEL; @@ -706,8 +711,8 @@ static void lx_restore_display_ctlr(struct lxfb_par *par) /* restore the palette */ write_dc(par, DC_PAL_ADDRESS, 0); - for (i = 0; i < ARRAY_SIZE(par->pal); i++) - write_dc(par, DC_PAL_DATA, par->pal[i]); + for (i = 0; i < ARRAY_SIZE(par->dc_pal); i++) + write_dc(par, DC_PAL_DATA, par->dc_pal[i]); /* restore the horizontal filter coefficients */ filt = par->dc[DC_IRQ_FILT_CTL] | DC_IRQ_FILT_CTL_H_FILT_SEL; @@ -751,6 +756,11 @@ static void lx_restore_video_proc(struct lxfb_par *par) } } + /* restore video processor palette */ + write_vp(par, VP_PAR, 0); + for (i = 0; i < ARRAY_SIZE(par->vp_pal); i++) + write_vp(par, VP_PDR, par->vp_pal[i]); + /* restore video coeff ram */ memcpy(par->vp_regs + VP_VCR, par->vp_coeff, sizeof(par->vp_coeff)); } diff --git a/drivers/video/mx3fb.c b/drivers/video/mx3fb.c index 7cfc170bce19..ca0f6be9d12e 100644 --- a/drivers/video/mx3fb.c +++ b/drivers/video/mx3fb.c @@ -27,6 +27,7 @@ #include <linux/clk.h> #include <linux/mutex.h> +#include <mach/dma.h> #include <mach/hardware.h> #include <mach/ipu.h> #include <mach/mx3fb.h> @@ -1420,6 +1421,9 @@ static bool chan_filter(struct dma_chan *chan, void *arg) struct device *dev; struct mx3fb_platform_data *mx3fb_pdata; + if (!imx_dma_is_ipu(chan)) + return false; + if (!rq) return false; diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c index 9b1364723c65..b02d97a879d6 100644 --- a/drivers/video/sh_mobile_lcdcfb.c +++ b/drivers/video/sh_mobile_lcdcfb.c @@ -860,7 +860,7 @@ static void sh_mobile_fb_reconfig(struct fb_info *info) /* Couldn't reconfigure, hopefully, can continue as before */ return; - info->fix.line_length = mode2.xres * (ch->cfg.bpp / 8); + info->fix.line_length = mode1.xres * (ch->cfg.bpp / 8); /* * fb_set_var() calls the notifier change internally, only if @@ -868,7 +868,7 @@ static void sh_mobile_fb_reconfig(struct fb_info *info) * user event, we have to call the chain ourselves. 
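
The lxfb hunks above extend suspend/resume to cover the video processor palette as well as the display controller one, using the same auto-incrementing address/data register idiom for both. The pattern, isolated from the save and restore paths (accessors and register names are the driver's own):

    /* save: reset the auto-incrementing address register, stream out reads */
    write_vp(par, VP_PAR, 0);
    for (i = 0; i < ARRAY_SIZE(par->vp_pal); i++)
            par->vp_pal[i] = read_vp(par, VP_PDR);

    /* restore: same dance with writes */
    write_vp(par, VP_PAR, 0);
    for (i = 0; i < ARRAY_SIZE(par->vp_pal); i++)
            write_vp(par, VP_PDR, par->vp_pal[i]);
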
*/ event.info = info; - event.data = &mode2; + event.data = &mode1; fb_notifier_call_chain(evnt, &event); } diff --git a/drivers/video/sis/init.c b/drivers/video/sis/init.c index c311ad3c3687..31137adc8fba 100644 --- a/drivers/video/sis/init.c +++ b/drivers/video/sis/init.c @@ -62,11 +62,11 @@ #include "init.h" -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 #include "300vtbl.h" #endif -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 #include "310vtbl.h" #endif @@ -78,7 +78,7 @@ /* POINTER INITIALIZATION */ /*********************************************/ -#if defined(SIS300) || defined(SIS315H) +#if defined(CONFIG_FB_SIS_300) || defined(CONFIG_FB_SIS_315) static void InitCommonPointer(struct SiS_Private *SiS_Pr) { @@ -160,7 +160,7 @@ InitCommonPointer(struct SiS_Private *SiS_Pr) } #endif -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 static void InitTo300Pointer(struct SiS_Private *SiS_Pr) { @@ -237,7 +237,7 @@ InitTo300Pointer(struct SiS_Private *SiS_Pr) } #endif -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 static void InitTo310Pointer(struct SiS_Private *SiS_Pr) { @@ -321,13 +321,13 @@ bool SiSInitPtr(struct SiS_Private *SiS_Pr) { if(SiS_Pr->ChipType < SIS_315H) { -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 InitTo300Pointer(SiS_Pr); #else return false; #endif } else { -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 InitTo310Pointer(SiS_Pr); #else return false; @@ -340,9 +340,7 @@ SiSInitPtr(struct SiS_Private *SiS_Pr) /* HELPER: Get ModeID */ /*********************************************/ -#ifndef SIS_XORG_XF86 static -#endif unsigned short SiS_GetModeID(int VGAEngine, unsigned int VBFlags, int HDisplay, int VDisplay, int Depth, bool FSTN, int LCDwidth, int LCDheight) @@ -884,51 +882,51 @@ SiS_GetModeID_VGA2(int VGAEngine, unsigned int VBFlags, int HDisplay, int VDispl void SiS_SetReg(SISIOADDRESS port, unsigned short index, unsigned short data) { - OutPortByte(port, index); - OutPortByte(port + 1, data); + outb((u8)index, port); + outb((u8)data, port + 1); } void SiS_SetRegByte(SISIOADDRESS port, unsigned short data) { - OutPortByte(port, data); + outb((u8)data, port); } void SiS_SetRegShort(SISIOADDRESS port, unsigned short data) { - OutPortWord(port, data); + outw((u16)data, port); } void SiS_SetRegLong(SISIOADDRESS port, unsigned int data) { - OutPortLong(port, data); + outl((u32)data, port); } unsigned char SiS_GetReg(SISIOADDRESS port, unsigned short index) { - OutPortByte(port, index); - return(InPortByte(port + 1)); + outb((u8)index, port); + return inb(port + 1); } unsigned char SiS_GetRegByte(SISIOADDRESS port) { - return(InPortByte(port)); + return inb(port); } unsigned short SiS_GetRegShort(SISIOADDRESS port) { - return(InPortWord(port)); + return inw(port); } unsigned int SiS_GetRegLong(SISIOADDRESS port) { - return(InPortLong(port)); + return inl(port); } void @@ -1089,7 +1087,7 @@ static void SiSInitPCIetc(struct SiS_Private *SiS_Pr) { switch(SiS_Pr->ChipType) { -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 case SIS_300: case SIS_540: case SIS_630: @@ -1108,7 +1106,7 @@ SiSInitPCIetc(struct SiS_Private *SiS_Pr) SiS_SetRegOR(SiS_Pr->SiS_P3c4,0x1E,0x5A); break; #endif -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 case SIS_315H: case SIS_315: case SIS_315PRO: @@ -1152,9 +1150,7 @@ SiSInitPCIetc(struct SiS_Private *SiS_Pr) /* HELPER: SetLVDSetc */ /*********************************************/ -#ifdef SIS_LINUX_KERNEL static -#endif void SiSSetLVDSetc(struct SiS_Private *SiS_Pr) { @@ -1174,7 +1170,7 @@ SiSSetLVDSetc(struct SiS_Private *SiS_Pr) if((temp == 1) || (temp == 2)) return; switch(SiS_Pr->ChipType) { 
-#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 case SIS_540: case SIS_630: case SIS_730: @@ -1188,7 +1184,7 @@ SiSSetLVDSetc(struct SiS_Private *SiS_Pr) } break; #endif -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 case SIS_550: case SIS_650: case SIS_740: @@ -1420,9 +1416,7 @@ SiS_ResetSegmentRegisters(struct SiS_Private *SiS_Pr) /* HELPER: GetVBType */ /*********************************************/ -#ifdef SIS_LINUX_KERNEL static -#endif void SiS_GetVBType(struct SiS_Private *SiS_Pr) { @@ -1487,7 +1481,6 @@ SiS_GetVBType(struct SiS_Private *SiS_Pr) /* HELPER: Check RAM size */ /*********************************************/ -#ifdef SIS_LINUX_KERNEL static bool SiS_CheckMemorySize(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short ModeIdIndex) @@ -1501,13 +1494,12 @@ SiS_CheckMemorySize(struct SiS_Private *SiS_Pr, unsigned short ModeNo, if(AdapterMemSize < memorysize) return false; return true; } -#endif /*********************************************/ /* HELPER: Get DRAM type */ /*********************************************/ -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 static unsigned char SiS_Get310DRAMType(struct SiS_Private *SiS_Pr) { @@ -1574,7 +1566,6 @@ SiS_GetMCLK(struct SiS_Private *SiS_Pr) /* HELPER: ClearBuffer */ /*********************************************/ -#ifdef SIS_LINUX_KERNEL static void SiS_ClearBuffer(struct SiS_Private *SiS_Pr, unsigned short ModeNo) { @@ -1587,7 +1578,7 @@ SiS_ClearBuffer(struct SiS_Private *SiS_Pr, unsigned short ModeNo) if(SiS_Pr->SiS_ModeType >= ModeEGA) { if(ModeNo > 0x13) { - SiS_SetMemory(memaddr, memsize, 0); + memset_io(memaddr, 0, memsize); } else { pBuffer = (unsigned short SISIOMEMTYPE *)memaddr; for(i = 0; i < 0x4000; i++) writew(0x0000, &pBuffer[i]); @@ -1596,10 +1587,9 @@ SiS_ClearBuffer(struct SiS_Private *SiS_Pr, unsigned short ModeNo) pBuffer = (unsigned short SISIOMEMTYPE *)memaddr; for(i = 0; i < 0x4000; i++) writew(0x0720, &pBuffer[i]); } else { - SiS_SetMemory(memaddr, 0x8000, 0); + memset_io(memaddr, 0, 0x8000); } } -#endif /*********************************************/ /* HELPER: SearchModeID */ @@ -2132,7 +2122,7 @@ SiS_SetCRT1CRTC(struct SiS_Private *SiS_Pr, unsigned short ModeNo, SiS_SetReg(SiS_Pr->SiS_P3d4,0x14,0x4F); } -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 if(SiS_Pr->ChipType == XGI_20) { SiS_SetReg(SiS_Pr->SiS_P3d4,0x04,crt1data[4] - 1); if(!(temp = crt1data[5] & 0x1f)) { @@ -2215,7 +2205,7 @@ SiS_SetCRT1VCLK(struct SiS_Private *SiS_Pr, unsigned short ModeNo, SiS_SetReg(SiS_Pr->SiS_P3c4,0x2c,clkb); if(SiS_Pr->ChipType >= SIS_315H) { -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 SiS_SetReg(SiS_Pr->SiS_P3c4,0x2D,0x01); if(SiS_Pr->ChipType == XGI_20) { unsigned short mf = SiS_GetModeFlag(SiS_Pr, ModeNo, ModeIdIndex); @@ -2236,7 +2226,7 @@ SiS_SetCRT1VCLK(struct SiS_Private *SiS_Pr, unsigned short ModeNo, /* FIFO */ /*********************************************/ -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 void SiS_GetFIFOThresholdIndex300(struct SiS_Private *SiS_Pr, unsigned short *idx1, unsigned short *idx2) @@ -2506,11 +2496,7 @@ SiS_SetCRT1FIFO_630(struct SiS_Private *SiS_Pr, unsigned short ModeNo, SiS_SetRegANDOR(SiS_Pr->SiS_P3c4,0x09,0x80,data); /* Write foreground and background queue */ -#ifdef SIS_LINUX_KERNEL templ = sisfb_read_nbridge_pci_dword(SiS_Pr, 0x50); -#else - templ = pciReadLong(0x00000000, 0x50); -#endif if(SiS_Pr->ChipType == SIS_730) { @@ -2530,13 +2516,8 @@ SiS_SetCRT1FIFO_630(struct SiS_Private *SiS_Pr, unsigned short ModeNo, } -#ifdef SIS_LINUX_KERNEL sisfb_write_nbridge_pci_dword(SiS_Pr, 0x50, 
templ); templ = sisfb_read_nbridge_pci_dword(SiS_Pr, 0xA0); -#else - pciWriteLong(0x00000000, 0x50, templ); - templ = pciReadLong(0x00000000, 0xA0); -#endif /* GUI grant timer (PCI config 0xA3) */ if(SiS_Pr->ChipType == SIS_730) { @@ -2552,15 +2533,11 @@ SiS_SetCRT1FIFO_630(struct SiS_Private *SiS_Pr, unsigned short ModeNo, } -#ifdef SIS_LINUX_KERNEL sisfb_write_nbridge_pci_dword(SiS_Pr, 0xA0, templ); -#else - pciWriteLong(0x00000000, 0xA0, templ); -#endif } -#endif /* SIS300 */ +#endif /* CONFIG_FB_SIS_300 */ -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 static void SiS_SetCRT1FIFO_310(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short ModeIdIndex) { @@ -2612,7 +2589,7 @@ SiS_SetVCLKState(struct SiS_Private *SiS_Pr, unsigned short ModeNo, } if(SiS_Pr->ChipType < SIS_315H) { -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 if(VCLK > 150) data |= 0x80; SiS_SetRegANDOR(SiS_Pr->SiS_P3c4,0x07,0x7B,data); @@ -2621,7 +2598,7 @@ SiS_SetVCLKState(struct SiS_Private *SiS_Pr, unsigned short ModeNo, SiS_SetRegANDOR(SiS_Pr->SiS_P3c4,0x32,0xF7,data); #endif } else if(SiS_Pr->ChipType < XGI_20) { -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 if(VCLK >= 166) data |= 0x0c; SiS_SetRegANDOR(SiS_Pr->SiS_P3c4,0x32,0xf3,data); @@ -2630,7 +2607,7 @@ SiS_SetVCLKState(struct SiS_Private *SiS_Pr, unsigned short ModeNo, } #endif } else { -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 if(VCLK >= 200) data |= 0x0c; if(SiS_Pr->ChipType == XGI_20) data &= ~0x04; SiS_SetRegANDOR(SiS_Pr->SiS_P3c4,0x32,0xf3,data); @@ -2675,7 +2652,7 @@ SiS_SetCRT1ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short ModeIdIndex, unsigned short RRTI) { unsigned short data, infoflag = 0, modeflag, resindex; -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 unsigned char *ROMAddr = SiS_Pr->VirtualRomBase; unsigned short data2, data3; #endif @@ -2736,7 +2713,7 @@ SiS_SetCRT1ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo, SiS_SetRegANDOR(SiS_Pr->SiS_P3c4,0x0F,0xB7,data); } -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 if(SiS_Pr->ChipType >= SIS_315H) { SiS_SetRegAND(SiS_Pr->SiS_P3c4,0x31,0xfb); } @@ -2826,7 +2803,7 @@ SiS_SetCRT1ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo, SiS_SetVCLKState(SiS_Pr, ModeNo, RRTI, ModeIdIndex); -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 if(((SiS_Pr->ChipType >= SIS_315H) && (SiS_Pr->ChipType < SIS_661)) || (SiS_Pr->ChipType == XGI_40)) { if(SiS_GetReg(SiS_Pr->SiS_P3d4,0x31) & 0x40) { @@ -2845,7 +2822,7 @@ SiS_SetCRT1ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo, #endif } -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 static void SiS_SetupDualChip(struct SiS_Private *SiS_Pr) { @@ -2999,11 +2976,6 @@ SiS_SetCRT1Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned sho SiS_Pr->SiS_SelectCRT2Rate = 0; SiS_Pr->SiS_SetFlag &= (~ProgrammingCRT2); -#ifdef SIS_XORG_XF86 - xf86DrvMsgVerb(0, X_PROBED, 4, "(init: VBType=0x%04x, VBInfo=0x%04x)\n", - SiS_Pr->SiS_VBType, SiS_Pr->SiS_VBInfo); -#endif - if(SiS_Pr->SiS_VBInfo & SetSimuScanMode) { if(SiS_Pr->SiS_VBInfo & SetInSlaveMode) { SiS_Pr->SiS_SetFlag |= ProgrammingCRT2; @@ -3028,7 +3000,7 @@ SiS_SetCRT1Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned sho } switch(SiS_Pr->ChipType) { -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 case SIS_300: SiS_SetCRT1FIFO_300(SiS_Pr, ModeNo, RefreshRateTableIndex); break; @@ -3039,7 +3011,7 @@ SiS_SetCRT1Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned sho break; #endif default: -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 if(SiS_Pr->ChipType == XGI_20) { unsigned 
char sr2b = 0, sr2c = 0; switch(ModeNo) { @@ -3062,7 +3034,7 @@ SiS_SetCRT1Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned sho SiS_SetCRT1ModeRegs(SiS_Pr, ModeNo, ModeIdIndex, RefreshRateTableIndex); -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 if(SiS_Pr->ChipType == XGI_40) { SiS_SetupDualChip(SiS_Pr); } @@ -3070,11 +3042,9 @@ SiS_SetCRT1Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned sho SiS_LoadDAC(SiS_Pr, ModeNo, ModeIdIndex); -#ifdef SIS_LINUX_KERNEL if(SiS_Pr->SiS_flag_clearbuffer) { SiS_ClearBuffer(SiS_Pr, ModeNo); } -#endif if(!(SiS_Pr->SiS_VBInfo & (SetSimuScanMode | SwitchCRT2 | SetCRT2ToLCDA))) { SiS_WaitRetrace1(SiS_Pr); @@ -3104,7 +3074,7 @@ SiS_InitVB(struct SiS_Private *SiS_Pr) static void SiS_ResetVB(struct SiS_Private *SiS_Pr) { -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 unsigned char *ROMAddr = SiS_Pr->VirtualRomBase; unsigned short temp; @@ -3139,7 +3109,7 @@ SiS_StrangeStuff(struct SiS_Private *SiS_Pr) * which locks CRT2 in some way to CRT1 timing. Disable * this here. */ -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 if((IS_SIS651) || (IS_SISM650) || SiS_Pr->ChipType == SIS_340 || SiS_Pr->ChipType == XGI_40) { @@ -3160,7 +3130,7 @@ SiS_StrangeStuff(struct SiS_Private *SiS_Pr) static void SiS_Handle760(struct SiS_Private *SiS_Pr) { -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 unsigned int somebase; unsigned char temp1, temp2, temp3; @@ -3170,11 +3140,7 @@ SiS_Handle760(struct SiS_Private *SiS_Pr) (!(SiS_Pr->SiS_SysFlags & SF_760UMA)) ) return; -#ifdef SIS_LINUX_KERNEL somebase = sisfb_read_mio_pci_word(SiS_Pr, 0x74); -#else - somebase = pciReadWord(0x00001000, 0x74); -#endif somebase &= 0xffff; if(somebase == 0) return; @@ -3190,105 +3156,34 @@ SiS_Handle760(struct SiS_Private *SiS_Pr) temp2 = 0x0b; } -#ifdef SIS_LINUX_KERNEL sisfb_write_nbridge_pci_byte(SiS_Pr, 0x7e, temp1); sisfb_write_nbridge_pci_byte(SiS_Pr, 0x8d, temp2); -#else - pciWriteByte(0x00000000, 0x7e, temp1); - pciWriteByte(0x00000000, 0x8d, temp2); -#endif SiS_SetRegByte((somebase + 0x85), temp3); #endif } /*********************************************/ -/* X.org/XFree86: SET SCREEN PITCH */ -/*********************************************/ - -#ifdef SIS_XORG_XF86 -static void -SiS_SetPitchCRT1(struct SiS_Private *SiS_Pr, ScrnInfoPtr pScrn) -{ - SISPtr pSiS = SISPTR(pScrn); - unsigned short HDisplay = pSiS->scrnPitch >> 3; - - SiS_SetReg(SiS_Pr->SiS_P3d4,0x13,(HDisplay & 0xFF)); - SiS_SetRegANDOR(SiS_Pr->SiS_P3c4,0x0E,0xF0,(HDisplay >> 8)); -} - -static void -SiS_SetPitchCRT2(struct SiS_Private *SiS_Pr, ScrnInfoPtr pScrn) -{ - SISPtr pSiS = SISPTR(pScrn); - unsigned short HDisplay = pSiS->scrnPitch2 >> 3; - - /* Unlock CRT2 */ - if(pSiS->VGAEngine == SIS_315_VGA) - SiS_SetRegOR(SiS_Pr->SiS_Part1Port,0x2F, 0x01); - else - SiS_SetRegOR(SiS_Pr->SiS_Part1Port,0x24, 0x01); - - SiS_SetReg(SiS_Pr->SiS_Part1Port,0x07,(HDisplay & 0xFF)); - SiS_SetRegANDOR(SiS_Pr->SiS_Part1Port,0x09,0xF0,(HDisplay >> 8)); -} - -static void -SiS_SetPitch(struct SiS_Private *SiS_Pr, ScrnInfoPtr pScrn) -{ - SISPtr pSiS = SISPTR(pScrn); - bool isslavemode = false; - - if( (pSiS->VBFlags2 & VB2_VIDEOBRIDGE) && - ( ((pSiS->VGAEngine == SIS_300_VGA) && - (SiS_GetReg(SiS_Pr->SiS_Part1Port,0x00) & 0xa0) == 0x20) || - ((pSiS->VGAEngine == SIS_315_VGA) && - (SiS_GetReg(SiS_Pr->SiS_Part1Port,0x00) & 0x50) == 0x10) ) ) { - isslavemode = true; - } - - /* We need to set pitch for CRT1 if bridge is in slave mode, too */ - if((pSiS->VBFlags & DISPTYPE_DISP1) || (isslavemode)) { - SiS_SetPitchCRT1(SiS_Pr, pScrn); - } - /* We 
must not set the pitch for CRT2 if bridge is in slave mode */ - if((pSiS->VBFlags & DISPTYPE_DISP2) && (!isslavemode)) { - SiS_SetPitchCRT2(SiS_Pr, pScrn); - } -} -#endif - -/*********************************************/ /* SiSSetMode() */ /*********************************************/ -#ifdef SIS_XORG_XF86 -/* We need pScrn for setting the pitch correctly */ -bool -SiSSetMode(struct SiS_Private *SiS_Pr, ScrnInfoPtr pScrn, unsigned short ModeNo, bool dosetpitch) -#else bool SiSSetMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo) -#endif { SISIOADDRESS BaseAddr = SiS_Pr->IOAddress; unsigned short RealModeNo, ModeIdIndex; unsigned char backupreg = 0; -#ifdef SIS_LINUX_KERNEL unsigned short KeepLockReg; SiS_Pr->UseCustomMode = false; SiS_Pr->CRT1UsesCustomMode = false; -#endif SiS_Pr->SiS_flag_clearbuffer = 0; if(SiS_Pr->UseCustomMode) { ModeNo = 0xfe; } else { -#ifdef SIS_LINUX_KERNEL if(!(ModeNo & 0x80)) SiS_Pr->SiS_flag_clearbuffer = 1; -#endif ModeNo &= 0x7f; } @@ -3301,13 +3196,8 @@ SiSSetMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo) SiS_GetSysFlags(SiS_Pr); SiS_Pr->SiS_VGAINFO = 0x11; -#if defined(SIS_XORG_XF86) && (defined(i386) || defined(__i386) || defined(__i386__) || defined(__AMD64__) || defined(__amd64__) || defined(__x86_64__)) - if(pScrn) SiS_Pr->SiS_VGAINFO = SiS_GetSetBIOSScratch(pScrn, 0x489, 0xff); -#endif -#ifdef SIS_LINUX_KERNEL KeepLockReg = SiS_GetReg(SiS_Pr->SiS_P3c4,0x05); -#endif SiS_SetReg(SiS_Pr->SiS_P3c4,0x05,0x86); SiSInitPCIetc(SiS_Pr); @@ -3344,12 +3234,10 @@ SiSSetMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo) SiS_GetLCDResInfo(SiS_Pr, ModeNo, ModeIdIndex); SiS_SetLowModeTest(SiS_Pr, ModeNo); -#ifdef SIS_LINUX_KERNEL /* Check memory size (kernel framebuffer driver only) */ if(!SiS_CheckMemorySize(SiS_Pr, ModeNo, ModeIdIndex)) { return false; } -#endif SiS_OpenCRTC(SiS_Pr); @@ -3384,7 +3272,7 @@ SiSSetMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo) SiS_DisplayOn(SiS_Pr); SiS_SetRegByte(SiS_Pr->SiS_P3c6,0xFF); -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 if(SiS_Pr->ChipType >= SIS_315H) { if(SiS_Pr->SiS_IF_DEF_LVDS == 1) { if(!(SiS_IsDualEdge(SiS_Pr))) { @@ -3396,7 +3284,7 @@ SiSSetMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo) if(SiS_Pr->SiS_VBType & VB_SIS30xBLV) { if(SiS_Pr->ChipType >= SIS_315H) { -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 if(!SiS_Pr->SiS_ROMNew) { if(SiS_IsVAMode(SiS_Pr)) { SiS_SetRegOR(SiS_Pr->SiS_P3d4,0x35,0x01); @@ -3424,424 +3312,16 @@ SiSSetMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo) } } -#ifdef SIS_XORG_XF86 - if(pScrn) { - /* SetPitch: Adapt to virtual size & position */ - if((ModeNo > 0x13) && (dosetpitch)) { - SiS_SetPitch(SiS_Pr, pScrn); - } - - /* Backup/Set ModeNo in BIOS scratch area */ - SiS_GetSetModeID(pScrn, ModeNo); - } -#endif - SiS_CloseCRTC(SiS_Pr); SiS_Handle760(SiS_Pr); -#ifdef SIS_LINUX_KERNEL /* We never lock registers in XF86 */ if(KeepLockReg != 0xA1) SiS_SetReg(SiS_Pr->SiS_P3c4,0x05,0x00); -#endif return true; } -/*********************************************/ -/* X.org/XFree86: SiSBIOSSetMode() */ -/* for non-Dual-Head mode */ -/*********************************************/ - -#ifdef SIS_XORG_XF86 -bool -SiSBIOSSetMode(struct SiS_Private *SiS_Pr, ScrnInfoPtr pScrn, - DisplayModePtr mode, bool IsCustom) -{ - SISPtr pSiS = SISPTR(pScrn); - unsigned short ModeNo = 0; - - SiS_Pr->UseCustomMode = false; - - if((IsCustom) && (SiS_CheckBuildCustomMode(pScrn, mode, pSiS->VBFlags))) { - - xf86DrvMsgVerb(pScrn->scrnIndex, X_INFO, 3, "Setting custom mode %dx%d\n", - 
SiS_Pr->CHDisplay, - (mode->Flags & V_INTERLACE ? SiS_Pr->CVDisplay * 2 : - (mode->Flags & V_DBLSCAN ? SiS_Pr->CVDisplay / 2 : - SiS_Pr->CVDisplay))); - - } else { - - /* Don't need vbflags here; checks done earlier */ - ModeNo = SiS_GetModeNumber(pScrn, mode, pSiS->VBFlags); - if(!ModeNo) return false; - - xf86DrvMsgVerb(pScrn->scrnIndex, X_INFO, 3, "Setting standard mode 0x%x\n", ModeNo); - - } - - return(SiSSetMode(SiS_Pr, pScrn, ModeNo, true)); -} - -/*********************************************/ -/* X.org/XFree86: SiSBIOSSetModeCRT2() */ -/* for Dual-Head modes */ -/*********************************************/ - -bool -SiSBIOSSetModeCRT2(struct SiS_Private *SiS_Pr, ScrnInfoPtr pScrn, - DisplayModePtr mode, bool IsCustom) -{ - SISIOADDRESS BaseAddr = SiS_Pr->IOAddress; - SISPtr pSiS = SISPTR(pScrn); -#ifdef SISDUALHEAD - SISEntPtr pSiSEnt = pSiS->entityPrivate; -#endif - unsigned short ModeIdIndex; - unsigned short ModeNo = 0; - unsigned char backupreg = 0; - - SiS_Pr->UseCustomMode = false; - - /* Remember: Custom modes for CRT2 are ONLY supported - * -) on the 30x/B/C, and - * -) if CRT2 is LCD or VGA, or CRT1 is LCDA - */ - - if((IsCustom) && (SiS_CheckBuildCustomMode(pScrn, mode, pSiS->VBFlags))) { - - ModeNo = 0xfe; - - } else { - - ModeNo = SiS_GetModeNumber(pScrn, mode, pSiS->VBFlags); - if(!ModeNo) return false; - - } - - SiSRegInit(SiS_Pr, BaseAddr); - SiSInitPtr(SiS_Pr); - SiS_GetSysFlags(SiS_Pr); -#if defined(i386) || defined(__i386) || defined(__i386__) || defined(__AMD64__) || defined(__amd64__) || defined(__x86_64__) - SiS_Pr->SiS_VGAINFO = SiS_GetSetBIOSScratch(pScrn, 0x489, 0xff); -#else - SiS_Pr->SiS_VGAINFO = 0x11; -#endif - - SiS_SetReg(SiS_Pr->SiS_P3c4,0x05,0x86); - - SiSInitPCIetc(SiS_Pr); - SiSSetLVDSetc(SiS_Pr); - SiSDetermineROMUsage(SiS_Pr); - - /* Save mode info so we can set it from within SetMode for CRT1 */ -#ifdef SISDUALHEAD - if(pSiS->DualHeadMode) { - pSiSEnt->CRT2ModeNo = ModeNo; - pSiSEnt->CRT2DMode = mode; - pSiSEnt->CRT2IsCustom = IsCustom; - pSiSEnt->CRT2CR30 = SiS_GetReg(SiS_Pr->SiS_P3d4,0x30); - pSiSEnt->CRT2CR31 = SiS_GetReg(SiS_Pr->SiS_P3d4,0x31); - pSiSEnt->CRT2CR35 = SiS_GetReg(SiS_Pr->SiS_P3d4,0x35); - pSiSEnt->CRT2CR38 = SiS_GetReg(SiS_Pr->SiS_P3d4,0x38); -#if 0 - /* We can't set CRT2 mode before CRT1 mode is set - says who...? 
*/ - if(pSiSEnt->CRT1ModeNo == -1) { - xf86DrvMsgVerb(pScrn->scrnIndex, X_INFO, 3, - "Setting CRT2 mode delayed until after setting CRT1 mode\n"); - return true; - } -#endif - pSiSEnt->CRT2ModeSet = true; - } -#endif - - if(SiS_Pr->UseCustomMode) { - - unsigned short temptemp = SiS_Pr->CVDisplay; - - if(SiS_Pr->CModeFlag & DoubleScanMode) temptemp >>= 1; - else if(SiS_Pr->CInfoFlag & InterlaceMode) temptemp <<= 1; - - xf86DrvMsgVerb(pScrn->scrnIndex, X_INFO, 3, - "Setting custom mode %dx%d on CRT2\n", - SiS_Pr->CHDisplay, temptemp); - - } else { - - xf86DrvMsgVerb(pScrn->scrnIndex, X_INFO, 3, - "Setting standard mode 0x%x on CRT2\n", ModeNo); - - } - - SiS_UnLockCRT2(SiS_Pr); - - if(!SiS_Pr->UseCustomMode) { - if(!(SiS_SearchModeID(SiS_Pr, &ModeNo, &ModeIdIndex))) return false; - } else { - ModeIdIndex = 0; - } - - SiS_GetVBType(SiS_Pr); - - SiS_InitVB(SiS_Pr); - if(SiS_Pr->SiS_VBType & VB_SIS30xBLV) { - if(SiS_Pr->ChipType >= SIS_315H) { - SiS_ResetVB(SiS_Pr); - SiS_SetRegOR(SiS_Pr->SiS_P3c4,0x32,0x10); - SiS_SetRegOR(SiS_Pr->SiS_Part2Port,0x00,0x0c); - backupreg = SiS_GetReg(SiS_Pr->SiS_P3d4,0x38); - } else { - backupreg = SiS_GetReg(SiS_Pr->SiS_P3d4,0x35); - } - } - - /* Get VB information (connectors, connected devices) */ - if(!SiS_Pr->UseCustomMode) { - SiS_GetVBInfo(SiS_Pr, ModeNo, ModeIdIndex, 1); - } else { - /* If this is a custom mode, we don't check the modeflag for CRT2Mode */ - SiS_GetVBInfo(SiS_Pr, ModeNo, ModeIdIndex, 0); - } - SiS_SetYPbPr(SiS_Pr); - SiS_SetTVMode(SiS_Pr, ModeNo, ModeIdIndex); - SiS_GetLCDResInfo(SiS_Pr, ModeNo, ModeIdIndex); - SiS_SetLowModeTest(SiS_Pr, ModeNo); - - SiS_ResetSegmentRegisters(SiS_Pr); - - /* Set mode on CRT2 */ - if( (SiS_Pr->SiS_VBType & VB_SISVB) || - (SiS_Pr->SiS_IF_DEF_LVDS == 1) || - (SiS_Pr->SiS_IF_DEF_CH70xx != 0) || - (SiS_Pr->SiS_IF_DEF_TRUMPION != 0) ) { - SiS_SetCRT2Group(SiS_Pr, ModeNo); - } - - SiS_StrangeStuff(SiS_Pr); - - SiS_DisplayOn(SiS_Pr); - SiS_SetRegByte(SiS_Pr->SiS_P3c6,0xFF); - - if(SiS_Pr->ChipType >= SIS_315H) { - if(SiS_Pr->SiS_IF_DEF_LVDS == 1) { - if(!(SiS_IsDualEdge(SiS_Pr))) { - SiS_SetRegAND(SiS_Pr->SiS_Part1Port,0x13,0xfb); - } - } - } - - if(SiS_Pr->SiS_VBType & VB_SIS30xBLV) { - if(SiS_Pr->ChipType >= SIS_315H) { - if(!SiS_Pr->SiS_ROMNew) { - if(SiS_IsVAMode(SiS_Pr)) { - SiS_SetRegOR(SiS_Pr->SiS_P3d4,0x35,0x01); - } else { - SiS_SetRegAND(SiS_Pr->SiS_P3d4,0x35,0xFE); - } - } - - SiS_SetReg(SiS_Pr->SiS_P3d4,0x38,backupreg); - - if(SiS_GetReg(SiS_Pr->SiS_P3d4,0x30) & SetCRT2ToLCD) { - SiS_SetRegAND(SiS_Pr->SiS_P3d4,0x38,0xfc); - } - } else if((SiS_Pr->ChipType == SIS_630) || - (SiS_Pr->ChipType == SIS_730)) { - SiS_SetReg(SiS_Pr->SiS_P3d4,0x35,backupreg); - } - } - - /* SetPitch: Adapt to virtual size & position */ - SiS_SetPitchCRT2(SiS_Pr, pScrn); - - SiS_Handle760(SiS_Pr); - - return true; -} - -/*********************************************/ -/* X.org/XFree86: SiSBIOSSetModeCRT1() */ -/* for Dual-Head modes */ -/*********************************************/ - -bool -SiSBIOSSetModeCRT1(struct SiS_Private *SiS_Pr, ScrnInfoPtr pScrn, - DisplayModePtr mode, bool IsCustom) -{ - SISIOADDRESS BaseAddr = SiS_Pr->IOAddress; - SISPtr pSiS = SISPTR(pScrn); - unsigned short ModeIdIndex, ModeNo = 0; - unsigned char backupreg = 0; -#ifdef SISDUALHEAD - SISEntPtr pSiSEnt = pSiS->entityPrivate; - unsigned char backupcr30, backupcr31, backupcr38, backupcr35, backupp40d=0; - bool backupcustom; -#endif - - SiS_Pr->UseCustomMode = false; - - if((IsCustom) && (SiS_CheckBuildCustomMode(pScrn, mode, pSiS->VBFlags))) { - - 
unsigned short temptemp = SiS_Pr->CVDisplay; - - if(SiS_Pr->CModeFlag & DoubleScanMode) temptemp >>= 1; - else if(SiS_Pr->CInfoFlag & InterlaceMode) temptemp <<= 1; - - xf86DrvMsgVerb(pScrn->scrnIndex, X_INFO, 3, - "Setting custom mode %dx%d on CRT1\n", - SiS_Pr->CHDisplay, temptemp); - ModeNo = 0xfe; - - } else { - - ModeNo = SiS_GetModeNumber(pScrn, mode, 0); /* don't give VBFlags */ - if(!ModeNo) return false; - - xf86DrvMsgVerb(pScrn->scrnIndex, X_INFO, 3, - "Setting standard mode 0x%x on CRT1\n", ModeNo); - } - - SiSInitPtr(SiS_Pr); - SiSRegInit(SiS_Pr, BaseAddr); - SiS_GetSysFlags(SiS_Pr); -#if defined(i386) || defined(__i386) || defined(__i386__) || defined(__AMD64__) || defined(__amd64__) || defined(__x86_64__) - SiS_Pr->SiS_VGAINFO = SiS_GetSetBIOSScratch(pScrn, 0x489, 0xff); -#else - SiS_Pr->SiS_VGAINFO = 0x11; -#endif - - SiS_SetReg(SiS_Pr->SiS_P3c4,0x05,0x86); - - SiSInitPCIetc(SiS_Pr); - SiSSetLVDSetc(SiS_Pr); - SiSDetermineROMUsage(SiS_Pr); - - SiS_UnLockCRT2(SiS_Pr); - - if(!SiS_Pr->UseCustomMode) { - if(!(SiS_SearchModeID(SiS_Pr, &ModeNo, &ModeIdIndex))) return false; - } else { - ModeIdIndex = 0; - } - - /* Determine VBType */ - SiS_GetVBType(SiS_Pr); - - SiS_InitVB(SiS_Pr); - if(SiS_Pr->SiS_VBType & VB_SIS30xBLV) { - if(SiS_Pr->ChipType >= SIS_315H) { - backupreg = SiS_GetReg(SiS_Pr->SiS_P3d4,0x38); - } else { - backupreg = SiS_GetReg(SiS_Pr->SiS_P3d4,0x35); - } - } - - /* Get VB information (connectors, connected devices) */ - /* (We don't care if the current mode is a CRT2 mode) */ - SiS_GetVBInfo(SiS_Pr, ModeNo, ModeIdIndex, 0); - SiS_SetYPbPr(SiS_Pr); - SiS_SetTVMode(SiS_Pr, ModeNo, ModeIdIndex); - SiS_GetLCDResInfo(SiS_Pr, ModeNo, ModeIdIndex); - SiS_SetLowModeTest(SiS_Pr, ModeNo); - - SiS_OpenCRTC(SiS_Pr); - - /* Set mode on CRT1 */ - SiS_SetCRT1Group(SiS_Pr, ModeNo, ModeIdIndex); - if(SiS_Pr->SiS_VBInfo & SetCRT2ToLCDA) { - SiS_SetCRT2Group(SiS_Pr, ModeNo); - } - - /* SetPitch: Adapt to virtual size & position */ - SiS_SetPitchCRT1(SiS_Pr, pScrn); - - SiS_HandleCRT1(SiS_Pr); - - SiS_StrangeStuff(SiS_Pr); - - SiS_CloseCRTC(SiS_Pr); - -#ifdef SISDUALHEAD - if(pSiS->DualHeadMode) { - pSiSEnt->CRT1ModeNo = ModeNo; - pSiSEnt->CRT1DMode = mode; - } -#endif - - if(SiS_Pr->UseCustomMode) { - SiS_Pr->CRT1UsesCustomMode = true; - SiS_Pr->CSRClock_CRT1 = SiS_Pr->CSRClock; - SiS_Pr->CModeFlag_CRT1 = SiS_Pr->CModeFlag; - } else { - SiS_Pr->CRT1UsesCustomMode = false; - } - - /* Reset CRT2 if changing mode on CRT1 */ -#ifdef SISDUALHEAD - if(pSiS->DualHeadMode) { - if(pSiSEnt->CRT2ModeNo != -1) { - xf86DrvMsgVerb(pScrn->scrnIndex, X_INFO, 3, - "(Re-)Setting mode for CRT2\n"); - backupcustom = SiS_Pr->UseCustomMode; - backupcr30 = SiS_GetReg(SiS_Pr->SiS_P3d4,0x30); - backupcr31 = SiS_GetReg(SiS_Pr->SiS_P3d4,0x31); - backupcr35 = SiS_GetReg(SiS_Pr->SiS_P3d4,0x35); - backupcr38 = SiS_GetReg(SiS_Pr->SiS_P3d4,0x38); - if(SiS_Pr->SiS_VBType & VB_SISVB) { - /* Backup LUT-enable */ - if(pSiSEnt->CRT2ModeSet) { - backupp40d = SiS_GetReg(SiS_Pr->SiS_Part4Port,0x0d) & 0x08; - } - } - if(SiS_Pr->SiS_VBInfo & SetCRT2ToLCDA) { - SiS_SetReg(SiS_Pr->SiS_P3d4,0x30,pSiSEnt->CRT2CR30); - SiS_SetReg(SiS_Pr->SiS_P3d4,0x31,pSiSEnt->CRT2CR31); - SiS_SetReg(SiS_Pr->SiS_P3d4,0x35,pSiSEnt->CRT2CR35); - SiS_SetReg(SiS_Pr->SiS_P3d4,0x38,pSiSEnt->CRT2CR38); - } - - SiSBIOSSetModeCRT2(SiS_Pr, pSiSEnt->pScrn_1, - pSiSEnt->CRT2DMode, pSiSEnt->CRT2IsCustom); - - SiS_SetReg(SiS_Pr->SiS_P3d4,0x30,backupcr30); - SiS_SetReg(SiS_Pr->SiS_P3d4,0x31,backupcr31); - SiS_SetReg(SiS_Pr->SiS_P3d4,0x35,backupcr35); - 
SiS_SetReg(SiS_Pr->SiS_P3d4,0x38,backupcr38); - if(SiS_Pr->SiS_VBType & VB_SISVB) { - SiS_SetRegANDOR(SiS_Pr->SiS_Part4Port,0x0d, ~0x08, backupp40d); - } - SiS_Pr->UseCustomMode = backupcustom; - } - } -#endif - - /* Warning: From here, the custom mode entries in SiS_Pr are - * possibly overwritten - */ - - SiS_DisplayOn(SiS_Pr); - SiS_SetRegByte(SiS_Pr->SiS_P3c6,0xFF); - - if(SiS_Pr->SiS_VBType & VB_SIS30xBLV) { - if(SiS_Pr->ChipType >= SIS_315H) { - SiS_SetReg(SiS_Pr->SiS_P3d4,0x38,backupreg); - } else if((SiS_Pr->ChipType == SIS_630) || - (SiS_Pr->ChipType == SIS_730)) { - SiS_SetReg(SiS_Pr->SiS_P3d4,0x35,backupreg); - } - } - - SiS_Handle760(SiS_Pr); - - /* Backup/Set ModeNo in BIOS scratch area */ - SiS_GetSetModeID(pScrn,ModeNo); - - return true; -} -#endif /* Linux_XF86 */ - #ifndef GETBITSTR #define BITMASK(h,l) (((unsigned)(1U << ((h)-(l)+1))-1)<<(l)) #define GENMASK(mask) BITMASK(1?mask,0?mask) @@ -3927,7 +3407,7 @@ SiS_CalcLCDACRT1Timing(struct SiS_Private *SiS_Pr, unsigned short ModeNo, SiS_Pr->CVBlankStart = SiS_Pr->SiS_VGAVDE; if(SiS_Pr->ChipType < SIS_315H) { -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 tempbx = SiS_Pr->SiS_VGAHT; if(SiS_Pr->SiS_LCDInfo & DontExpandLCD) { tempbx = SiS_Pr->PanelHT; @@ -3936,7 +3416,7 @@ SiS_CalcLCDACRT1Timing(struct SiS_Private *SiS_Pr, unsigned short ModeNo, remaining = tempbx % 8; #endif } else { -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 /* OK for LCDA, LVDS */ tempbx = SiS_Pr->PanelHT - SiS_Pr->PanelXRes; tempax = SiS_Pr->SiS_VGAHDE; /* not /2 ! */ @@ -3950,7 +3430,7 @@ SiS_CalcLCDACRT1Timing(struct SiS_Private *SiS_Pr, unsigned short ModeNo, SiS_Pr->CHTotal = SiS_Pr->CHBlankEnd = tempbx; if(SiS_Pr->ChipType < SIS_315H) { -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 if(SiS_Pr->SiS_VGAHDE == SiS_Pr->PanelXRes) { SiS_Pr->CHSyncStart = SiS_Pr->SiS_VGAHDE + ((SiS_Pr->PanelHRS + 1) & ~1); SiS_Pr->CHSyncEnd = SiS_Pr->CHSyncStart + SiS_Pr->PanelHRE; @@ -3982,7 +3462,7 @@ SiS_CalcLCDACRT1Timing(struct SiS_Private *SiS_Pr, unsigned short ModeNo, } #endif } else { -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 tempax = VGAHDE; if(SiS_Pr->SiS_LCDInfo & DontExpandLCD) { tempbx = SiS_Pr->PanelXRes; @@ -4001,7 +3481,7 @@ SiS_CalcLCDACRT1Timing(struct SiS_Private *SiS_Pr, unsigned short ModeNo, if(SiS_Pr->SiS_LCDInfo & DontExpandLCD) { tempax = SiS_Pr->PanelYRes; } else if(SiS_Pr->ChipType < SIS_315H) { -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 /* Stupid hack for 640x400/320x200 */ if(SiS_Pr->SiS_LCDResInfo == Panel_1024x768) { if((tempax + tempbx) == 438) tempbx += 16; @@ -4054,36 +3534,12 @@ SiS_CalcLCDACRT1Timing(struct SiS_Private *SiS_Pr, unsigned short ModeNo, if(modeflag & DoubleScanMode) tempax |= 0x80; SiS_SetRegANDOR(SiS_Pr->SiS_P3d4,0x09,0x5F,tempax); -#ifdef SIS_XORG_XF86 -#ifdef TWDEBUG - xf86DrvMsg(0, X_INFO, "%d %d %d %d %d %d %d %d (%d %d %d %d)\n", - SiS_Pr->CHDisplay, SiS_Pr->CHSyncStart, SiS_Pr->CHSyncEnd, SiS_Pr->CHTotal, - SiS_Pr->CVDisplay, SiS_Pr->CVSyncStart, SiS_Pr->CVSyncEnd, SiS_Pr->CVTotal, - SiS_Pr->CHBlankStart, SiS_Pr->CHBlankEnd, SiS_Pr->CVBlankStart, SiS_Pr->CVBlankEnd); - xf86DrvMsg(0, X_INFO, " {{0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,\n", - SiS_Pr->CCRT1CRTC[0], SiS_Pr->CCRT1CRTC[1], - SiS_Pr->CCRT1CRTC[2], SiS_Pr->CCRT1CRTC[3], - SiS_Pr->CCRT1CRTC[4], SiS_Pr->CCRT1CRTC[5], - SiS_Pr->CCRT1CRTC[6], SiS_Pr->CCRT1CRTC[7]); - xf86DrvMsg(0, X_INFO, " 0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,\n", - SiS_Pr->CCRT1CRTC[8], SiS_Pr->CCRT1CRTC[9], - SiS_Pr->CCRT1CRTC[10], SiS_Pr->CCRT1CRTC[11], - 
SiS_Pr->CCRT1CRTC[12], SiS_Pr->CCRT1CRTC[13], - SiS_Pr->CCRT1CRTC[14], SiS_Pr->CCRT1CRTC[15]); - xf86DrvMsg(0, X_INFO, " 0x%02x}},\n", SiS_Pr->CCRT1CRTC[16]); -#endif -#endif } void SiS_Generic_ConvertCRData(struct SiS_Private *SiS_Pr, unsigned char *crdata, int xres, int yres, -#ifdef SIS_XORG_XF86 - DisplayModePtr current -#endif -#ifdef SIS_LINUX_KERNEL struct fb_var_screeninfo *var, bool writeres -#endif ) { unsigned short HRE, HBE, HRS, HBS, HDE, HT; @@ -4127,25 +3583,10 @@ SiS_Generic_ConvertCRData(struct SiS_Private *SiS_Pr, unsigned char *crdata, D = B - F - C; -#ifdef SIS_XORG_XF86 - current->HDisplay = (E * 8); - current->HSyncStart = (E * 8) + (F * 8); - current->HSyncEnd = (E * 8) + (F * 8) + (C * 8); - current->HTotal = (E * 8) + (F * 8) + (C * 8) + (D * 8); -#ifdef TWDEBUG - xf86DrvMsg(0, X_INFO, - "H: A %d B %d C %d D %d E %d F %d HT %d HDE %d HRS %d HBS %d HBE %d HRE %d\n", - A, B, C, D, E, F, HT, HDE, HRS, HBS, HBE, HRE); -#else - (void)VBS; (void)HBS; (void)A; -#endif -#endif -#ifdef SIS_LINUX_KERNEL if(writeres) var->xres = xres = E * 8; var->left_margin = D * 8; var->right_margin = F * 8; var->hsync_len = C * 8; -#endif /* Vertical */ sr_data = crdata[13]; @@ -4192,30 +3633,10 @@ SiS_Generic_ConvertCRData(struct SiS_Private *SiS_Pr, unsigned char *crdata, D = B - F - C; -#ifdef SIS_XORG_XF86 - current->VDisplay = VDE + 1; - current->VSyncStart = VRS + 1; - current->VSyncEnd = ((VRS & ~0x1f) | VRE) + 1; - if(VRE <= (VRS & 0x1f)) current->VSyncEnd += 32; - current->VTotal = E + D + C + F; -#if 0 - current->VDisplay = E; - current->VSyncStart = E + D; - current->VSyncEnd = E + D + C; - current->VTotal = E + D + C + F; -#endif -#ifdef TWDEBUG - xf86DrvMsg(0, X_INFO, - "V: A %d B %d C %d D %d E %d F %d VT %d VDE %d VRS %d VBS %d VBE %d VRE %d\n", - A, B, C, D, E, F, VT, VDE, VRS, VBS, VBE, VRE); -#endif -#endif -#ifdef SIS_LINUX_KERNEL if(writeres) var->yres = yres = E; var->upper_margin = D; var->lower_margin = F; var->vsync_len = C; -#endif if((xres == 320) && ((yres == 200) || (yres == 240))) { /* Terrible hack, but correct CRTC data for @@ -4224,17 +3645,9 @@ SiS_Generic_ConvertCRData(struct SiS_Private *SiS_Pr, unsigned char *crdata, * a negative D. 
The CRT controller does not * seem to like correcting HRE to 50) */ -#ifdef SIS_XORG_XF86 - current->HDisplay = 320; - current->HSyncStart = 328; - current->HSyncEnd = 376; - current->HTotal = 400; -#endif -#ifdef SIS_LINUX_KERNEL var->left_margin = (400 - 376); var->right_margin = (328 - 320); var->hsync_len = (376 - 328); -#endif } diff --git a/drivers/video/sis/init.h b/drivers/video/sis/init.h index b96005c39c67..ee8ed3c203da 100644 --- a/drivers/video/sis/init.h +++ b/drivers/video/sis/init.h @@ -53,21 +53,8 @@ #ifndef _INIT_H_ #define _INIT_H_ -#include "osdef.h" #include "initdef.h" -#ifdef SIS_XORG_XF86 -#include "sis.h" -#define SIS_NEED_inSISREG -#define SIS_NEED_inSISREGW -#define SIS_NEED_inSISREGL -#define SIS_NEED_outSISREG -#define SIS_NEED_outSISREGW -#define SIS_NEED_outSISREGL -#include "sis_regs.h" -#endif - -#ifdef SIS_LINUX_KERNEL #include "vgatypes.h" #include "vstruct.h" #ifdef SIS_CP @@ -78,7 +65,6 @@ #include <linux/fb.h> #include "sis.h" #include <video/sisfb.h> -#endif /* Mode numbers */ static const unsigned short ModeIndex_320x200[] = {0x59, 0x41, 0x00, 0x4f}; @@ -286,7 +272,7 @@ static const struct SiS_ModeResInfo_S SiS_ModeResInfo[] = { 1280, 854, 8,16} /* 0x22 */ }; -#if defined(SIS300) || defined(SIS315H) +#if defined(CONFIG_FB_SIS_300) || defined(CONFIG_FB_SIS_315) static const struct SiS_StandTable_S SiS_StandTable[]= { /* 0x00: MD_0_200 */ @@ -1521,10 +1507,6 @@ static const struct SiS_LVDSCRT1Data SiS_LVDSCRT1640x480_1_H[] = }; bool SiSInitPtr(struct SiS_Private *SiS_Pr); -#ifdef SIS_XORG_XF86 -unsigned short SiS_GetModeID(int VGAEngine, unsigned int VBFlags, int HDisplay, int VDisplay, - int Depth, bool FSTN, int LCDwith, int LCDheight); -#endif unsigned short SiS_GetModeID_LCD(int VGAEngine, unsigned int VBFlags, int HDisplay, int VDisplay, int Depth, bool FSTN, unsigned short CustomT, int LCDwith, int LCDheight, @@ -1550,17 +1532,11 @@ void SiS_SetRegOR(SISIOADDRESS Port,unsigned short Index, unsigned short DataOR void SiS_DisplayOn(struct SiS_Private *SiS_Pr); void SiS_DisplayOff(struct SiS_Private *SiS_Pr); void SiSRegInit(struct SiS_Private *SiS_Pr, SISIOADDRESS BaseAddr); -#ifndef SIS_LINUX_KERNEL -void SiSSetLVDSetc(struct SiS_Private *SiS_Pr); -#endif void SiS_SetEnableDstn(struct SiS_Private *SiS_Pr, int enable); void SiS_SetEnableFstn(struct SiS_Private *SiS_Pr, int enable); unsigned short SiS_GetModeFlag(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short ModeIdIndex); bool SiSDetermineROMLayout661(struct SiS_Private *SiS_Pr); -#ifndef SIS_LINUX_KERNEL -void SiS_GetVBType(struct SiS_Private *SiS_Pr); -#endif bool SiS_SearchModeID(struct SiS_Private *SiS_Pr, unsigned short *ModeNo, unsigned short *ModeIdIndex); @@ -1572,37 +1548,19 @@ unsigned short SiS_GetColorDepth(struct SiS_Private *SiS_Pr, unsigned short Mode unsigned short ModeIdIndex); unsigned short SiS_GetOffset(struct SiS_Private *SiS_Pr,unsigned short ModeNo, unsigned short ModeIdIndex, unsigned short RRTI); -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 void SiS_GetFIFOThresholdIndex300(struct SiS_Private *SiS_Pr, unsigned short *idx1, unsigned short *idx2); unsigned short SiS_GetFIFOThresholdB300(unsigned short idx1, unsigned short idx2); unsigned short SiS_GetLatencyFactor630(struct SiS_Private *SiS_Pr, unsigned short index); #endif void SiS_LoadDAC(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short ModeIdIndex); -#ifdef SIS_XORG_XF86 -bool SiSSetMode(struct SiS_Private *SiS_Pr, ScrnInfoPtr pScrn, unsigned short ModeNo, - bool dosetpitch); -bool 
SiSBIOSSetMode(struct SiS_Private *SiS_Pr, ScrnInfoPtr pScrn, - DisplayModePtr mode, bool IsCustom); -bool SiSBIOSSetModeCRT2(struct SiS_Private *SiS_Pr, ScrnInfoPtr pScrn, - DisplayModePtr mode, bool IsCustom); -bool SiSBIOSSetModeCRT1(struct SiS_Private *SiS_Pr, ScrnInfoPtr pScrn, - DisplayModePtr mode, bool IsCustom); -#endif -#ifdef SIS_LINUX_KERNEL bool SiSSetMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo); -#endif void SiS_CalcCRRegisters(struct SiS_Private *SiS_Pr, int depth); void SiS_CalcLCDACRT1Timing(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short ModeIdIndex); -#ifdef SIS_XORG_XF86 -void SiS_Generic_ConvertCRData(struct SiS_Private *SiS_Pr, unsigned char *crdata, int xres, - int yres, DisplayModePtr current); -#endif -#ifdef SIS_LINUX_KERNEL void SiS_Generic_ConvertCRData(struct SiS_Private *SiS_Pr, unsigned char *crdata, int xres, int yres, struct fb_var_screeninfo *var, bool writeres); -#endif /* From init301.c: */ extern void SiS_GetVBInfo(struct SiS_Private *SiS_Pr, unsigned short ModeNo, @@ -1626,29 +1584,16 @@ extern unsigned short SiS_GetVCLK2Ptr(struct SiS_Private *SiS_Pr, unsigned short extern bool SiS_IsVAMode(struct SiS_Private *); extern bool SiS_IsDualEdge(struct SiS_Private *); -#ifdef SIS_XORG_XF86 -/* From other modules: */ -extern unsigned short SiS_CheckBuildCustomMode(ScrnInfoPtr pScrn, DisplayModePtr mode, - unsigned int VBFlags); -extern unsigned char SiS_GetSetBIOSScratch(ScrnInfoPtr pScrn, unsigned short offset, - unsigned char value); -extern unsigned char SiS_GetSetModeID(ScrnInfoPtr pScrn, unsigned char id); -extern unsigned short SiS_GetModeNumber(ScrnInfoPtr pScrn, DisplayModePtr mode, - unsigned int VBFlags); -#endif - -#ifdef SIS_LINUX_KERNEL -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 extern unsigned int sisfb_read_nbridge_pci_dword(struct SiS_Private *SiS_Pr, int reg); extern void sisfb_write_nbridge_pci_dword(struct SiS_Private *SiS_Pr, int reg, unsigned int val); #endif -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 extern void sisfb_write_nbridge_pci_byte(struct SiS_Private *SiS_Pr, int reg, unsigned char val); extern unsigned int sisfb_read_mio_pci_word(struct SiS_Private *SiS_Pr, int reg); #endif -#endif #endif diff --git a/drivers/video/sis/init301.c b/drivers/video/sis/init301.c index da33d801c22e..9fa66fd4052a 100644 --- a/drivers/video/sis/init301.c +++ b/drivers/video/sis/init301.c @@ -75,11 +75,11 @@ #include "init301.h" -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 #include "oem300.h" #endif -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 #include "oem310.h" #endif @@ -87,9 +87,7 @@ #define SiS_I2CDELAYSHORT 150 static unsigned short SiS_GetBIOSLCDResInfo(struct SiS_Private *SiS_Pr); -#ifdef SIS_LINUX_KERNEL static void SiS_SetCH70xx(struct SiS_Private *SiS_Pr, unsigned short reg, unsigned char val); -#endif /*********************************************/ /* HELPER: Lock/Unlock CRT2 */ @@ -106,9 +104,7 @@ SiS_UnLockCRT2(struct SiS_Private *SiS_Pr) SiS_SetRegOR(SiS_Pr->SiS_Part1Port,0x24,0x01); } -#ifdef SIS_LINUX_KERNEL static -#endif void SiS_LockCRT2(struct SiS_Private *SiS_Pr) { @@ -138,7 +134,7 @@ SiS_SetRegSR11ANDOR(struct SiS_Private *SiS_Pr, unsigned short DataAND, unsigned /* HELPER: Get Pointer to LCD structure */ /*********************************************/ -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 static unsigned char * GetLCDStructPtr661(struct SiS_Private *SiS_Pr) { @@ -404,7 +400,7 @@ SiS_SaveCRT2Info(struct SiS_Private *SiS_Pr, unsigned short ModeNo) /* HELPER: GET SOME DATA FROM BIOS ROM */ 
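/* For context, a sketch of the conditional-compilation pattern this patch
 * converts to. The old SIS300/SIS315H guards were private defines from the
 * now-deleted osdef.h; CONFIG_FB_SIS_300/CONFIG_FB_SIS_315 are defined
 * automatically by the kernel build whenever the corresponding Kconfig
 * options are enabled, so no local header is needed. The function names
 * below are hypothetical placeholders:
 */
#ifdef CONFIG_FB_SIS_300
static void family_300_only(void)  { /* 300/540/630/730 code */ }
#endif
#ifdef CONFIG_FB_SIS_315
static void family_315_only(void)  { /* 315/330/340/XGI code */ }
#endif
#if defined(CONFIG_FB_SIS_300) || defined(CONFIG_FB_SIS_315)
static void both_families(void)    { /* helpers shared by both chip families */ }
#endif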
/*********************************************/ -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 static bool SiS_CR36BIOSWord23b(struct SiS_Private *SiS_Pr) { @@ -449,7 +445,7 @@ SiS_DDC2Delay(struct SiS_Private *SiS_Pr, unsigned int delaytime) SiS_GetReg(SiS_Pr->SiS_P3c4, 0x05); } -#if defined(SIS300) || defined(SIS315H) +#if defined(CONFIG_FB_SIS_300) || defined(CONFIG_FB_SIS_315) static void SiS_GenericDelay(struct SiS_Private *SiS_Pr, unsigned short delay) { @@ -457,7 +453,7 @@ SiS_GenericDelay(struct SiS_Private *SiS_Pr, unsigned short delay) } #endif -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 static void SiS_LongDelay(struct SiS_Private *SiS_Pr, unsigned short delay) { @@ -467,7 +463,7 @@ SiS_LongDelay(struct SiS_Private *SiS_Pr, unsigned short delay) } #endif -#if defined(SIS300) || defined(SIS315H) +#if defined(CONFIG_FB_SIS_300) || defined(CONFIG_FB_SIS_315) static void SiS_ShortDelay(struct SiS_Private *SiS_Pr, unsigned short delay) { @@ -480,14 +476,14 @@ SiS_ShortDelay(struct SiS_Private *SiS_Pr, unsigned short delay) static void SiS_PanelDelay(struct SiS_Private *SiS_Pr, unsigned short DelayTime) { -#if defined(SIS300) || defined(SIS315H) +#if defined(CONFIG_FB_SIS_300) || defined(CONFIG_FB_SIS_315) unsigned char *ROMAddr = SiS_Pr->VirtualRomBase; unsigned short PanelID, DelayIndex, Delay=0; #endif if(SiS_Pr->ChipType < SIS_315H) { -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 PanelID = SiS_GetReg(SiS_Pr->SiS_P3d4,0x36); if(SiS_Pr->SiS_VBType & VB_SISVB) { @@ -513,11 +509,11 @@ SiS_PanelDelay(struct SiS_Private *SiS_Pr, unsigned short DelayTime) } SiS_ShortDelay(SiS_Pr, Delay); -#endif /* SIS300 */ +#endif /* CONFIG_FB_SIS_300 */ } else { -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 if((SiS_Pr->ChipType >= SIS_661) || (SiS_Pr->ChipType <= SIS_315PRO) || @@ -579,12 +575,12 @@ SiS_PanelDelay(struct SiS_Private *SiS_Pr, unsigned short DelayTime) } -#endif /* SIS315H */ +#endif /* CONFIG_FB_SIS_315 */ } } -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 static void SiS_PanelDelayLoop(struct SiS_Private *SiS_Pr, unsigned short DelayTime, unsigned short DelayLoop) { @@ -613,7 +609,7 @@ SiS_WaitRetrace1(struct SiS_Private *SiS_Pr) while((!(SiS_GetRegByte(SiS_Pr->SiS_P3da) & 0x08)) && --watchdog); } -#if defined(SIS300) || defined(SIS315H) +#if defined(CONFIG_FB_SIS_300) || defined(CONFIG_FB_SIS_315) static void SiS_WaitRetrace2(struct SiS_Private *SiS_Pr, unsigned short reg) { @@ -630,7 +626,7 @@ static void SiS_WaitVBRetrace(struct SiS_Private *SiS_Pr) { if(SiS_Pr->ChipType < SIS_315H) { -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 if(SiS_Pr->SiS_VBType & VB_SIS30xBLV) { if(!(SiS_GetReg(SiS_Pr->SiS_Part1Port,0x00) & 0x20)) return; } @@ -641,7 +637,7 @@ SiS_WaitVBRetrace(struct SiS_Private *SiS_Pr) } #endif } else { -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 if(!(SiS_GetReg(SiS_Pr->SiS_Part1Port,0x00) & 0x40)) { SiS_WaitRetrace1(SiS_Pr); } else { @@ -686,7 +682,7 @@ SiS_VBLongWait(struct SiS_Private *SiS_Pr) /* HELPER: MISC */ /*********************************************/ -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 static bool SiS_Is301B(struct SiS_Private *SiS_Pr) { @@ -708,7 +704,7 @@ SiS_CRT2IsLCD(struct SiS_Private *SiS_Pr) bool SiS_IsDualEdge(struct SiS_Private *SiS_Pr) { -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 if(SiS_Pr->ChipType >= SIS_315H) { if((SiS_Pr->ChipType != SIS_650) || (SiS_GetReg(SiS_Pr->SiS_P3d4,0x5f) & 0xf0)) { if(SiS_GetReg(SiS_Pr->SiS_P3d4,0x38) & EnableDualEdge) return true; @@ -721,7 +717,7 @@ SiS_IsDualEdge(struct SiS_Private *SiS_Pr) bool SiS_IsVAMode(struct SiS_Private *SiS_Pr) { 
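/* Illustrative sketch of the retrace-wait pattern used by SiS_WaitRetrace1()
 * above (assumed semantics: bit 0x08 of VGA Input Status Register One, whose
 * port address is kept in SiS_Pr->SiS_P3da, is set while vertical retrace is
 * in progress; the watchdog keeps a wedged device from hanging the loop):
 */
static void
sis_wait_vretrace_sketch(struct SiS_Private *SiS_Pr)
{
	unsigned short watchdog = 65535;

	/* let a retrace that is already in progress finish... */
	while((SiS_GetRegByte(SiS_Pr->SiS_P3da) & 0x08) && --watchdog)
		;
	watchdog = 65535;
	/* ...then wait for the start of the next one */
	while((!(SiS_GetRegByte(SiS_Pr->SiS_P3da) & 0x08)) && --watchdog)
		;
}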
-#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 unsigned short flag; if(SiS_Pr->ChipType >= SIS_315H) { @@ -732,7 +728,7 @@ SiS_IsVAMode(struct SiS_Private *SiS_Pr) return false; } -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 static bool SiS_IsVAorLCD(struct SiS_Private *SiS_Pr) { @@ -745,7 +741,7 @@ SiS_IsVAorLCD(struct SiS_Private *SiS_Pr) static bool SiS_IsDualLink(struct SiS_Private *SiS_Pr) { -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 if(SiS_Pr->ChipType >= SIS_315H) { if((SiS_CRT2IsLCD(SiS_Pr)) || (SiS_IsVAMode(SiS_Pr))) { @@ -756,7 +752,7 @@ SiS_IsDualLink(struct SiS_Private *SiS_Pr) return false; } -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 static bool SiS_TVEnabled(struct SiS_Private *SiS_Pr) { @@ -768,7 +764,7 @@ SiS_TVEnabled(struct SiS_Private *SiS_Pr) } #endif -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 static bool SiS_LCDAEnabled(struct SiS_Private *SiS_Pr) { @@ -777,7 +773,7 @@ SiS_LCDAEnabled(struct SiS_Private *SiS_Pr) } #endif -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 static bool SiS_WeHaveBacklightCtrl(struct SiS_Private *SiS_Pr) { @@ -788,7 +784,7 @@ SiS_WeHaveBacklightCtrl(struct SiS_Private *SiS_Pr) } #endif -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 static bool SiS_IsNotM650orLater(struct SiS_Private *SiS_Pr) { @@ -804,7 +800,7 @@ SiS_IsNotM650orLater(struct SiS_Private *SiS_Pr) } #endif -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 static bool SiS_IsYPbPr(struct SiS_Private *SiS_Pr) { @@ -816,7 +812,7 @@ SiS_IsYPbPr(struct SiS_Private *SiS_Pr) } #endif -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 static bool SiS_IsChScart(struct SiS_Private *SiS_Pr) { @@ -828,7 +824,7 @@ SiS_IsChScart(struct SiS_Private *SiS_Pr) } #endif -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 static bool SiS_IsTVOrYPbPrOrScart(struct SiS_Private *SiS_Pr) { @@ -848,7 +844,7 @@ SiS_IsTVOrYPbPrOrScart(struct SiS_Private *SiS_Pr) } #endif -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 static bool SiS_IsLCDOrLCDA(struct SiS_Private *SiS_Pr) { @@ -914,7 +910,7 @@ SiS_BridgeInSlavemode(struct SiS_Private *SiS_Pr) /*********************************************/ /* Setup general purpose IO for Chrontel communication */ -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 void SiS_SetChrontelGPIO(struct SiS_Private *SiS_Pr, unsigned short myvbinfo) { @@ -923,11 +919,7 @@ SiS_SetChrontelGPIO(struct SiS_Private *SiS_Pr, unsigned short myvbinfo) if(!(SiS_Pr->SiS_ChSW)) return; -#ifdef SIS_LINUX_KERNEL acpibase = sisfb_read_lpc_pci_dword(SiS_Pr, 0x74); -#else - acpibase = pciReadLong(0x00000800, 0x74); -#endif acpibase &= 0xFFFF; if(!acpibase) return; temp = SiS_GetRegShort((acpibase + 0x3c)); /* ACPI register 0x3c: GP Event 1 I/O mode select */ @@ -969,7 +961,7 @@ SiS_GetVBInfo(struct SiS_Private *SiS_Pr, unsigned short ModeNo, tempax &= (DriverMode | LoadDACFlag | SetNotSimuMode | SetPALTV); tempbx |= tempax; -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 if(SiS_Pr->ChipType >= SIS_315H) { if(SiS_Pr->SiS_VBType & VB_SISLCDA) { if(ModeNo == 0x03) { @@ -1019,7 +1011,7 @@ SiS_GetVBInfo(struct SiS_Private *SiS_Pr, unsigned short ModeNo, } } -#endif /* SIS315H */ +#endif /* CONFIG_FB_SIS_315 */ if(!(SiS_Pr->SiS_VBType & VB_SISVGA2)) { tempbx &= ~(SetCRT2ToRAMDAC); @@ -1154,24 +1146,16 @@ SiS_GetVBInfo(struct SiS_Private *SiS_Pr, unsigned short ModeNo, SiS_Pr->SiS_VBInfo = tempbx; -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 if(SiS_Pr->ChipType == SIS_630) { SiS_SetChrontelGPIO(SiS_Pr, SiS_Pr->SiS_VBInfo); } #endif -#ifdef SIS_LINUX_KERNEL #if 0 printk(KERN_DEBUG "sisfb: (init301: VBInfo= 0x%04x, SetFlag=0x%04x)\n", SiS_Pr->SiS_VBInfo, SiS_Pr->SiS_SetFlag); 
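/* Illustrative only: SiS_VBInfo is the bitmask assembled by SiS_GetVBInfo()
 * from CR30/CR31, and the rest of this file tests it one flag at a time. A
 * sketch of a readable debug dump in the spirit of the printk above (the
 * function name is hypothetical):
 */
static void
sis_dump_vbinfo_sketch(struct SiS_Private *SiS_Pr)
{
	unsigned short vbinfo = SiS_Pr->SiS_VBInfo;

	printk(KERN_DEBUG "sisfb: CRT2toLCD:%d LCDA:%d simuscan:%d slave:%d\n",
	       !!(vbinfo & SetCRT2ToLCD), !!(vbinfo & SetCRT2ToLCDA),
	       !!(vbinfo & SetSimuScanMode), !!(vbinfo & SetInSlaveMode));
}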
#endif -#endif -#ifdef SIS_XORG_XF86 -#ifdef TWDEBUG - xf86DrvMsg(0, X_PROBED, "(init301: VBInfo=0x%04x, SetFlag=0x%04x)\n", - SiS_Pr->SiS_VBInfo, SiS_Pr->SiS_SetFlag); -#endif -#endif } /*********************************************/ @@ -1415,12 +1399,6 @@ SiS_SetTVMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short } SiS_Pr->SiS_VBInfo &= ~SetPALTV; - -#ifdef SIS_XORG_XF86 -#ifdef TWDEBUG - xf86DrvMsg(0, X_INFO, "(init301: TVMode %x, VBInfo %x)\n", SiS_Pr->SiS_TVMode, SiS_Pr->SiS_VBInfo); -#endif -#endif } /*********************************************/ @@ -1443,22 +1421,10 @@ SiS_GetBIOSLCDResInfo(struct SiS_Private *SiS_Pr) static void SiS_GetLCDInfoBIOS(struct SiS_Private *SiS_Pr) { -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 unsigned char *ROMAddr; unsigned short temp; -#ifdef SIS_XORG_XF86 -#ifdef TWDEBUG - xf86DrvMsg(0, X_INFO, "Paneldata driver: [%d %d] [H %d %d] [V %d %d] [C %d 0x%02x 0x%02x]\n", - SiS_Pr->PanelHT, SiS_Pr->PanelVT, - SiS_Pr->PanelHRS, SiS_Pr->PanelHRE, - SiS_Pr->PanelVRS, SiS_Pr->PanelVRE, - SiS_Pr->SiS_VBVCLKData[SiS_Pr->PanelVCLKIdx315].CLOCK, - SiS_Pr->SiS_VBVCLKData[SiS_Pr->PanelVCLKIdx315].Part4_A, - SiS_Pr->SiS_VBVCLKData[SiS_Pr->PanelVCLKIdx315].Part4_B); -#endif -#endif - if((ROMAddr = GetLCDStructPtr661(SiS_Pr))) { if((temp = SISGETROMW(6)) != SiS_Pr->PanelHT) { SiS_Pr->SiS_NeedRomModeData = true; @@ -1480,18 +1446,6 @@ SiS_GetLCDInfoBIOS(struct SiS_Private *SiS_Pr) SiS_Pr->SiS_VCLKData[VCLK_CUSTOM_315].SR2C = SiS_Pr->SiS_VBVCLKData[VCLK_CUSTOM_315].Part4_B = ROMAddr[20]; -#ifdef SIS_XORG_XF86 -#ifdef TWDEBUG - xf86DrvMsg(0, X_INFO, "Paneldata BIOS: [%d %d] [H %d %d] [V %d %d] [C %d 0x%02x 0x%02x]\n", - SiS_Pr->PanelHT, SiS_Pr->PanelVT, - SiS_Pr->PanelHRS, SiS_Pr->PanelHRE, - SiS_Pr->PanelVRS, SiS_Pr->PanelVRE, - SiS_Pr->SiS_VBVCLKData[SiS_Pr->PanelVCLKIdx315].CLOCK, - SiS_Pr->SiS_VBVCLKData[SiS_Pr->PanelVCLKIdx315].Part4_A, - SiS_Pr->SiS_VBVCLKData[SiS_Pr->PanelVCLKIdx315].Part4_B); -#endif -#endif - } #endif } @@ -1517,13 +1471,13 @@ SiS_GetLCDResInfo(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned sh { unsigned short temp,modeflag,resinfo=0,modexres=0,modeyres=0; bool panelcanscale = false; -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 unsigned char *ROMAddr = SiS_Pr->VirtualRomBase; static const unsigned char SiS300SeriesLCDRes[] = { 0, 1, 2, 3, 7, 4, 5, 8, 0, 0, 10, 0, 0, 0, 0, 15 }; #endif -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 unsigned char *myptr = NULL; #endif @@ -1562,7 +1516,7 @@ SiS_GetLCDResInfo(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned sh SiS_Pr->SiS_LCDTypeInfo = (temp & 0x0F) - 1; } temp &= 0x0f; -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 if(SiS_Pr->ChipType < SIS_315H) { /* Very old BIOSes only know 7 sizes (NetVista 2179, 1.01g) */ if(SiS_Pr->SiS_VBType & VB_SIS301) { @@ -1574,7 +1528,7 @@ SiS_GetLCDResInfo(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned sh #endif /* Translate to our internal types */ -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 if(SiS_Pr->ChipType == SIS_550) { if (temp == Panel310_1152x768) temp = Panel_320x240_2; /* Verified working */ else if(temp == Panel310_320x240_2) temp = Panel_320x240_2; @@ -1597,7 +1551,7 @@ SiS_GetLCDResInfo(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned sh SiS_Pr->SiS_LCDResInfo = temp; -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 if(SiS_Pr->SiS_IF_DEF_LVDS == 1) { if(SiS_Pr->SiS_CustomT == CUT_BARCO1366) { SiS_Pr->SiS_LCDResInfo = Panel_Barco1366; @@ -1639,7 +1593,7 @@ SiS_GetLCDResInfo(struct SiS_Private *SiS_Pr, unsigned short 
ModeNo, unsigned sh else if(SiS_Pr->UsePanelScaler == 1) SiS_Pr->SiS_LCDInfo |= DontExpandLCD; /* Dual link, Pass 1:1 BIOS default, etc. */ -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 if(SiS_Pr->ChipType >= SIS_661) { if(SiS_Pr->SiS_LCDInfo & DontExpandLCD) { if(temp & 0x08) SiS_Pr->SiS_LCDInfo |= LCDPass11; @@ -2076,7 +2030,7 @@ SiS_GetLCDResInfo(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned sh } } -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 if(SiS_Pr->SiS_IF_DEF_LVDS == 1) { if(SiS_Pr->SiS_CustomT == CUT_PANEL848 || SiS_Pr->SiS_CustomT == CUT_PANEL856) { SiS_Pr->SiS_LCDInfo = 0x80 | 0x40 | 0x20; /* neg h/v sync, RGB24(D0 = 0) */ @@ -2186,17 +2140,10 @@ SiS_GetLCDResInfo(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned sh SiS_Pr->SiS_SetFlag |= LCDVESATiming; } -#ifdef SIS_LINUX_KERNEL #if 0 printk(KERN_DEBUG "sisfb: (LCDInfo=0x%04x LCDResInfo=0x%02x LCDTypeInfo=0x%02x)\n", SiS_Pr->SiS_LCDInfo, SiS_Pr->SiS_LCDResInfo, SiS_Pr->SiS_LCDTypeInfo); #endif -#endif -#ifdef SIS_XORG_XF86 - xf86DrvMsgVerb(0, X_PROBED, 4, - "(init301: LCDInfo=0x%04x LCDResInfo=0x%02x LCDTypeInfo=0x%02x SetFlag=0x%04x)\n", - SiS_Pr->SiS_LCDInfo, SiS_Pr->SiS_LCDResInfo, SiS_Pr->SiS_LCDTypeInfo, SiS_Pr->SiS_SetFlag); -#endif } /*********************************************/ @@ -2359,7 +2306,7 @@ SiS_GetVCLK2Ptr(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned shor VCLKIndex = SiS_Pr->PanelVCLKIdx315; } -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 /* Special Timing: Barco iQ Pro R series */ if(SiS_Pr->SiS_CustomT == CUT_BARCO1366) VCLKIndex = 0x44; @@ -2410,12 +2357,6 @@ SiS_GetVCLK2Ptr(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned shor } -#ifdef SIS_XORG_XF86 -#ifdef TWDEBUG - xf86DrvMsg(0, X_INFO, "VCLKIndex %d (0x%x)\n", VCLKIndex, VCLKIndex); -#endif -#endif - return VCLKIndex; } @@ -2428,10 +2369,10 @@ SiS_SetCRT2ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned { unsigned short i, j, modeflag, tempah=0; short tempcl; -#if defined(SIS300) || defined(SIS315H) +#if defined(CONFIG_FB_SIS_300) || defined(CONFIG_FB_SIS_315) unsigned short tempbl; #endif -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 unsigned char *ROMAddr = SiS_Pr->VirtualRomBase; unsigned short tempah2, tempbl2; #endif @@ -2454,7 +2395,7 @@ SiS_SetCRT2ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned if(SiS_Pr->ChipType < SIS_315H) { -#ifdef SIS300 /* ---- 300 series ---- */ +#ifdef CONFIG_FB_SIS_300 /* ---- 300 series ---- */ /* For 301BDH: (with LCD via LVDS) */ if(SiS_Pr->SiS_VBType & VB_NoLCD) { @@ -2477,11 +2418,11 @@ SiS_SetCRT2ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned if(SiS_Pr->SiS_VBInfo & SetInSlaveMode) tempah ^= 0xA0; -#endif /* SIS300 */ +#endif /* CONFIG_FB_SIS_300 */ } else { -#ifdef SIS315H /* ------- 315/330 series ------ */ +#ifdef CONFIG_FB_SIS_315 /* ------- 315/330 series ------ */ if(ModeNo > 0x13) { tempcl -= ModeVGA; @@ -2494,7 +2435,7 @@ SiS_SetCRT2ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned if(SiS_Pr->SiS_VBInfo & SetInSlaveMode) tempah ^= 0x50; -#endif /* SIS315H */ +#endif /* CONFIG_FB_SIS_315 */ } @@ -2503,7 +2444,7 @@ SiS_SetCRT2ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned if(SiS_Pr->ChipType < SIS_315H) { SiS_SetReg(SiS_Pr->SiS_Part1Port,0x00,tempah); } else { -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 if(SiS_Pr->SiS_IF_DEF_LVDS == 1) { SiS_SetRegANDOR(SiS_Pr->SiS_Part1Port,0x00,0xa0,tempah); } else if(SiS_Pr->SiS_VBType & VB_SISVB) { @@ -2584,7 +2525,7 @@ 
SiS_SetCRT2ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned if(SiS_Pr->ChipType >= SIS_315H) { -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 /* LVDS can only be slave in 8bpp modes */ tempah = 0x80; if((modeflag & CRT2Mode) && (SiS_Pr->SiS_ModeType > ModeVGA)) { @@ -2604,7 +2545,7 @@ SiS_SetCRT2ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned } else { -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 tempah = 0; if( (!(SiS_Pr->SiS_VBInfo & SetInSlaveMode)) && (SiS_Pr->SiS_ModeType > ModeVGA) ) { tempah |= 0x02; @@ -2626,7 +2567,7 @@ SiS_SetCRT2ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned if(SiS_Pr->ChipType >= SIS_315H) { -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 /* unsigned char bridgerev = SiS_GetReg(SiS_Pr->SiS_Part4Port,0x01); */ /* The following is nearly unpredictable and varies from machine @@ -2718,11 +2659,11 @@ SiS_SetCRT2ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned SiS_SetRegANDOR(SiS_Pr->SiS_Part4Port,0x23,tempbl,tempah); } -#endif /* SIS315H */ +#endif /* CONFIG_FB_SIS_315 */ } else if(SiS_Pr->SiS_VBType & VB_SIS30xBLV) { -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 SiS_SetRegAND(SiS_Pr->SiS_Part4Port,0x21,0x3f); if((SiS_Pr->SiS_VBInfo & DisableCRT2Display) || @@ -2745,7 +2686,7 @@ SiS_SetCRT2ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned } else { /* LVDS */ -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 if(SiS_Pr->ChipType >= SIS_315H) { if(SiS_Pr->SiS_IF_DEF_CH70xx != 0) { @@ -2931,7 +2872,7 @@ SiS_GetCRT2Ptr(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short } } -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 if(SiS_Pr->SiS_CustomT == CUT_COMPAQ1280) { if(SiS_Pr->SiS_LCDResInfo == Panel_1280x1024) { if(!(SiS_Pr->SiS_LCDInfo & DontExpandLCD)) { @@ -3036,7 +2977,7 @@ SiS_GetCRT2Ptr(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short case Panel_1280x1024: tempbx = 24; break; case Panel_1400x1050: tempbx = 26; break; case Panel_1600x1200: tempbx = 28; break; -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 case Panel_Barco1366: tempbx = 80; break; #endif } @@ -3053,7 +2994,7 @@ SiS_GetCRT2Ptr(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short if(SiS_Pr->SiS_LCDInfo & LCDPass11) tempbx = 30; -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 if(SiS_Pr->SiS_CustomT == CUT_BARCO1024) { tempbx = 82; if(SiS_Pr->SiS_LCDInfo & DontExpandLCD) tempbx++; @@ -3189,7 +3130,7 @@ SiS_GetCRT2DataLVDS(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned if((SiS_Pr->SiS_VBType & VB_SISVB) && (SiS_Pr->SiS_VBInfo & SetCRT2ToLCDA)) { -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 SiS_CalcPanelLinkTiming(SiS_Pr, ModeNo, ModeIdIndex, RefreshRateTableIndex); SiS_CalcLCDACRT1Timing(SiS_Pr, ModeNo, ModeIdIndex); #endif @@ -3214,7 +3155,7 @@ SiS_GetCRT2DataLVDS(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned case 16: LVDSData = SiS_Pr->SiS_LVDS800x600Data_1; break; case 18: LVDSData = SiS_Pr->SiS_LVDS1024x600Data_1; break; case 20: LVDSData = SiS_Pr->SiS_LVDS1024x768Data_1; break; -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 case 80: LVDSData = SiS_Pr->SiS_LVDSBARCO1366Data_1; break; case 81: LVDSData = SiS_Pr->SiS_LVDSBARCO1366Data_2; break; case 82: LVDSData = SiS_Pr->SiS_LVDSBARCO1024Data_1; break; @@ -3248,7 +3189,7 @@ SiS_GetCRT2DataLVDS(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned (SiS_Pr->SiS_SetFlag & SetDOSMode) ) { SiS_Pr->SiS_HDE = SiS_Pr->PanelXRes; SiS_Pr->SiS_VDE = SiS_Pr->PanelYRes; -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 if(SiS_Pr->SiS_CustomT ==
CUT_BARCO1366) { if(ResIndex < 0x08) { SiS_Pr->SiS_HDE = 1280; @@ -3270,7 +3211,7 @@ SiS_GetCRT2Data301(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s unsigned short resinfo, CRT2Index, ResIndex; const struct SiS_LCDData *LCDPtr = NULL; const struct SiS_TVData *TVPtr = NULL; -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 short resinfo661; #endif @@ -3283,7 +3224,7 @@ SiS_GetCRT2Data301(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s } else { modeflag = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_ModeFlag; resinfo = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_RESINFO; -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 resinfo661 = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].ROMMODEIDX661; if( (SiS_Pr->SiS_VBInfo & SetCRT2ToLCD) && (SiS_Pr->SiS_SetFlag & LCDVESATiming) && @@ -3460,7 +3401,7 @@ SiS_GetCRT2Data301(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s } else if( (!(SiS_Pr->SiS_LCDInfo & DontExpandLCD)) && (romptr) && (ROMAddr) ) { -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 SiS_Pr->SiS_RVBHCMAX = ROMAddr[romptr]; SiS_Pr->SiS_RVBHCFACT = ROMAddr[romptr+1]; SiS_Pr->SiS_VGAHT = ROMAddr[romptr+2] | ((ROMAddr[romptr+3] & 0x0f) << 8); @@ -3520,19 +3461,13 @@ SiS_GetCRT2Data301(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s case Panel_1680x1050 : case Panel_1680x1050 + 32: LCDPtr = SiS_Pr->SiS_LCD1680x1050Data; break; case 100 : LCDPtr = SiS_Pr->SiS_NoScaleData; break; -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 case 200 : LCDPtr = SiS310_ExtCompaq1280x1024Data; break; case 201 : LCDPtr = SiS_Pr->SiS_St2LCD1280x1024Data; break; #endif default : LCDPtr = SiS_Pr->SiS_ExtLCD1024x768Data; break; } -#ifdef SIS_XORG_XF86 -#ifdef TWDEBUG - xf86DrvMsg(0, X_INFO, "GetCRT2Data: Index %d ResIndex %d\n", CRT2Index, ResIndex); -#endif -#endif - SiS_Pr->SiS_RVBHCMAX = (LCDPtr+ResIndex)->RVBHCMAX; SiS_Pr->SiS_RVBHCFACT = (LCDPtr+ResIndex)->RVBHCFACT; SiS_Pr->SiS_VGAHT = (LCDPtr+ResIndex)->VGAHT; @@ -3624,7 +3559,7 @@ SiS_GetLVDSDesPtr(struct SiS_Private *SiS_Pr) { const struct SiS_LVDSDes *PanelDesPtr = NULL; -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 if(SiS_Pr->SiS_VBInfo & SetCRT2ToLCD) { if(SiS_Pr->ChipType < SIS_315H) { @@ -3696,7 +3631,7 @@ SiS_GetLVDSDesData(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s if((SiS_Pr->SiS_VBType & VB_SIS30xBLV) && (SiS_Pr->SiS_VBInfo & SetCRT2ToLCDA)) { -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 if(SiS_Pr->SiS_LCDInfo & DontExpandLCD) { /* non-pass 1:1 only, see above */ if(SiS_Pr->SiS_VGAHDE != SiS_Pr->PanelXRes) { @@ -3771,7 +3706,7 @@ SiS_GetLVDSDesData(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s } else { if(SiS_Pr->ChipType < SIS_315H) { -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 switch(SiS_Pr->SiS_LCDResInfo) { case Panel_800x600: if(SiS_Pr->SiS_VGAVDE == SiS_Pr->PanelYRes) { @@ -3816,7 +3751,7 @@ SiS_GetLVDSDesData(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s } #endif } else { -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 switch(SiS_Pr->SiS_LCDResInfo) { case Panel_1024x768: case Panel_1280x1024: @@ -3844,7 +3779,7 @@ SiS_GetLVDSDesData(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s if(SiS_Pr->ChipType < SIS_315H) { if(!(modeflag & HalfDCLK)) SiS_Pr->SiS_LCDHDES = 320; } else { -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 if(SiS_Pr->SiS_LCDResInfo == Panel_1024x768) SiS_Pr->SiS_LCDHDES = 480; if(SiS_Pr->SiS_LCDResInfo == Panel_1400x1050) SiS_Pr->SiS_LCDHDES = 804; if(SiS_Pr->SiS_LCDResInfo == Panel_1600x1200) SiS_Pr->SiS_LCDHDES = 704; @@ -3866,7 +3801,7 @@ 
SiS_GetLVDSDesData(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s /* DISABLE VIDEO BRIDGE */ /*********************************************/ -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 static int SiS_HandlePWD(struct SiS_Private *SiS_Pr) { @@ -3891,11 +3826,6 @@ SiS_HandlePWD(struct SiS_Private *SiS_Pr) ret = 1; } SiS_SetRegANDOR(SiS_Pr->SiS_Part4Port,0x27,0x7f,temp); -#ifdef SIS_XORG_XF86 -#ifdef TWDEBUG - xf86DrvMsg(0, 0, "Setting PWD %x\n", temp); -#endif -#endif } #endif return ret; @@ -3909,7 +3839,7 @@ SiS_HandlePWD(struct SiS_Private *SiS_Pr) void SiS_DisableBridge(struct SiS_Private *SiS_Pr) { -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 unsigned short tempah, pushax=0, modenum; #endif unsigned short temp=0; @@ -3920,7 +3850,7 @@ SiS_DisableBridge(struct SiS_Private *SiS_Pr) if(SiS_Pr->ChipType < SIS_315H) { -#ifdef SIS300 /* 300 series */ +#ifdef CONFIG_FB_SIS_300 /* 300 series */ if(!(SiS_CR36BIOSWord23b(SiS_Pr))) { if(SiS_Pr->SiS_VBType & VB_SISLVDS) { @@ -3953,11 +3883,11 @@ SiS_DisableBridge(struct SiS_Private *SiS_Pr) } } -#endif /* SIS300 */ +#endif /* CONFIG_FB_SIS_300 */ } else { -#ifdef SIS315H /* 315 series */ +#ifdef CONFIG_FB_SIS_315 /* 315 series */ int didpwd = 0; bool custom1 = (SiS_Pr->SiS_CustomT == CUT_COMPAQ1280) || @@ -4081,14 +4011,14 @@ SiS_DisableBridge(struct SiS_Private *SiS_Pr) } -#endif /* SIS315H */ +#endif /* CONFIG_FB_SIS_315 */ } } else { /* ============ For 301 ================ */ if(SiS_Pr->ChipType < SIS_315H) { -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 if(!(SiS_CR36BIOSWord23b(SiS_Pr))) { SiS_SetRegSR11ANDOR(SiS_Pr,0xF7,0x08); SiS_PanelDelay(SiS_Pr, 3); @@ -4111,7 +4041,7 @@ SiS_DisableBridge(struct SiS_Private *SiS_Pr) SiS_SetRegOR(SiS_Pr->SiS_P3c4,0x1E,0x20); SiS_SetReg(SiS_Pr->SiS_Part1Port,0x00,temp); } else { -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 SiS_SetRegAND(SiS_Pr->SiS_P3c4,0x1E,0xDF); /* disable CRT2 */ if( (!(SiS_CRT2IsLCD(SiS_Pr))) || (!(SiS_CR36BIOSWord23d(SiS_Pr))) ) { @@ -4127,7 +4057,7 @@ SiS_DisableBridge(struct SiS_Private *SiS_Pr) if(SiS_Pr->ChipType < SIS_315H) { -#ifdef SIS300 /* 300 series */ +#ifdef CONFIG_FB_SIS_300 /* 300 series */ if(SiS_Pr->SiS_IF_DEF_CH70xx == 1) { SiS_SetCH700x(SiS_Pr,0x0E,0x09); @@ -4171,11 +4101,11 @@ SiS_DisableBridge(struct SiS_Private *SiS_Pr) SiS_SetRegSR11ANDOR(SiS_Pr,0xFB,0x04); } -#endif /* SIS300 */ +#endif /* CONFIG_FB_SIS_300 */ } else { -#ifdef SIS315H /* 315 series */ +#ifdef CONFIG_FB_SIS_315 /* 315 series */ if(!(SiS_IsNotM650orLater(SiS_Pr))) { /*if(SiS_Pr->ChipType < SIS_340) { */ /* XGI needs this */ @@ -4288,7 +4218,7 @@ SiS_DisableBridge(struct SiS_Private *SiS_Pr) } } -#endif /* SIS315H */ +#endif /* CONFIG_FB_SIS_315 */ } /* 315 series */ @@ -4304,14 +4234,12 @@ SiS_DisableBridge(struct SiS_Private *SiS_Pr) * from outside the context of a mode switch! 
* MUST call getVBType before calling this */ -#ifdef SIS_LINUX_KERNEL static -#endif void SiS_EnableBridge(struct SiS_Private *SiS_Pr) { unsigned short temp=0, tempah; -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 unsigned short temp1, pushax=0; bool delaylong = false; #endif @@ -4322,7 +4250,7 @@ SiS_EnableBridge(struct SiS_Private *SiS_Pr) if(SiS_Pr->ChipType < SIS_315H) { -#ifdef SIS300 /* 300 series */ +#ifdef CONFIG_FB_SIS_300 /* 300 series */ if(SiS_CRT2IsLCD(SiS_Pr)) { if(SiS_Pr->SiS_VBType & VB_SISLVDS) { @@ -4385,11 +4313,11 @@ SiS_EnableBridge(struct SiS_Private *SiS_Pr) } -#endif /* SIS300 */ +#endif /* CONFIG_FB_SIS_300 */ } else { -#ifdef SIS315H /* 315 series */ +#ifdef CONFIG_FB_SIS_315 /* 315 series */ #ifdef SET_EMI unsigned char r30=0, r31=0, r32=0, r33=0, cr36=0; @@ -4688,7 +4616,7 @@ SiS_EnableBridge(struct SiS_Private *SiS_Pr) SiS_SetRegAND(SiS_Pr->SiS_Part1Port,0x00,0x7f); } -#endif /* SIS315H */ +#endif /* CONFIG_FB_SIS_315 */ } @@ -4739,7 +4667,7 @@ SiS_EnableBridge(struct SiS_Private *SiS_Pr) if(SiS_Pr->ChipType < SIS_315H) { -#ifdef SIS300 /* 300 series */ +#ifdef CONFIG_FB_SIS_300 /* 300 series */ if(SiS_CRT2IsLCD(SiS_Pr)) { if(SiS_Pr->ChipType == SIS_730) { @@ -4783,11 +4711,11 @@ SiS_EnableBridge(struct SiS_Private *SiS_Pr) } } -#endif /* SIS300 */ +#endif /* CONFIG_FB_SIS_300 */ } else { -#ifdef SIS315H /* 315 series */ +#ifdef CONFIG_FB_SIS_315 /* 315 series */ if(!(SiS_IsNotM650orLater(SiS_Pr))) { /*if(SiS_Pr->ChipType < SIS_340) {*/ /* XGI needs this */ @@ -4881,7 +4809,7 @@ SiS_EnableBridge(struct SiS_Private *SiS_Pr) } } -#endif /* SIS315H */ +#endif /* CONFIG_FB_SIS_315 */ } /* 310 series */ @@ -4971,7 +4899,7 @@ SiS_SetCRT2Sync(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned shor if(SiS_Pr->ChipType < SIS_315H) { -#ifdef SIS300 /* ---- 300 series --- */ +#ifdef CONFIG_FB_SIS_300 /* ---- 300 series --- */ if(SiS_Pr->SiS_VBType & VB_SIS30xBLV) { /* 630 - 301B(-DH) */ @@ -5000,11 +4928,11 @@ SiS_SetCRT2Sync(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned shor } -#endif /* SIS300 */ +#endif /* CONFIG_FB_SIS_300 */ } else { -#ifdef SIS315H /* ------- 315 series ------ */ +#ifdef CONFIG_FB_SIS_315 /* ------- 315 series ------ */ if(SiS_Pr->SiS_VBType & VB_SISLVDS) { /* 315 - LVDS */ @@ -5076,13 +5004,13 @@ SiS_SetCRT2Sync(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned shor } } -#endif /* SIS315H */ +#endif /* CONFIG_FB_SIS_315 */ } } } /* Set CRT2 FIFO on 300/540/630/730 */ -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 static void SiS_SetCRT2FIFO_300(struct SiS_Private *SiS_Pr,unsigned short ModeNo) { @@ -5154,13 +5082,8 @@ SiS_SetCRT2FIFO_300(struct SiS_Private *SiS_Pr,unsigned short ModeNo) } else { -#ifdef SIS_LINUX_KERNEL pci50 = sisfb_read_nbridge_pci_dword(SiS_Pr, 0x50); pciA0 = sisfb_read_nbridge_pci_dword(SiS_Pr, 0xa0); -#else - pci50 = pciReadLong(0x00000000, 0x50); - pciA0 = pciReadLong(0x00000000, 0xA0); -#endif if(SiS_Pr->ChipType == SIS_730) { @@ -5262,7 +5185,7 @@ SiS_SetCRT2FIFO_300(struct SiS_Private *SiS_Pr,unsigned short ModeNo) #endif /* Set CRT2 FIFO on 315/330 series */ -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 static void SiS_SetCRT2FIFO_310(struct SiS_Private *SiS_Pr) { @@ -5420,27 +5343,6 @@ SiS_SetGroup1_301(struct SiS_Private *SiS_Pr, unsigned short ModeNo,unsigned sho temp = SiS_GetRegByte((SiS_Pr->SiS_P3ca+0x02)); SiS_SetReg(SiS_Pr->SiS_Part1Port,0x1b,temp); /* ? 
*/ - -#ifdef SIS_XORG_XF86 -#ifdef TWDEBUG - xf86DrvMsg(0, X_INFO, "%d %d %d %d %d %d %d %d (%d %d %d %d)\n", - SiS_Pr->CHDisplay, SiS_Pr->CHSyncStart, SiS_Pr->CHSyncEnd, SiS_Pr->CHTotal, - SiS_Pr->CVDisplay, SiS_Pr->CVSyncStart, SiS_Pr->CVSyncEnd, SiS_Pr->CVTotal, - SiS_Pr->CHBlankStart, SiS_Pr->CHBlankEnd, SiS_Pr->CVBlankStart, SiS_Pr->CVBlankEnd); - - xf86DrvMsg(0, X_INFO, " {{0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,\n", - SiS_Pr->CCRT1CRTC[0], SiS_Pr->CCRT1CRTC[1], - SiS_Pr->CCRT1CRTC[2], SiS_Pr->CCRT1CRTC[3], - SiS_Pr->CCRT1CRTC[4], SiS_Pr->CCRT1CRTC[5], - SiS_Pr->CCRT1CRTC[6], SiS_Pr->CCRT1CRTC[7]); - xf86DrvMsg(0, X_INFO, " 0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,\n", - SiS_Pr->CCRT1CRTC[8], SiS_Pr->CCRT1CRTC[9], - SiS_Pr->CCRT1CRTC[10], SiS_Pr->CCRT1CRTC[11], - SiS_Pr->CCRT1CRTC[12], SiS_Pr->CCRT1CRTC[13], - SiS_Pr->CCRT1CRTC[14], SiS_Pr->CCRT1CRTC[15]); - xf86DrvMsg(0, X_INFO, " 0x%02x}},\n", SiS_Pr->CCRT1CRTC[16]); -#endif -#endif } /* Setup panel link @@ -5455,17 +5357,17 @@ SiS_SetGroup1_LVDS(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s unsigned short push2, tempax, tempbx, tempcx, temp; unsigned int tempeax = 0, tempebx, tempecx, tempvcfact = 0; bool islvds = false, issis = false, chkdclkfirst = false; -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 unsigned short crt2crtc = 0; #endif -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 unsigned short pushcx; #endif if(ModeNo <= 0x13) { modeflag = SiS_Pr->SiS_SModeIDTable[ModeIdIndex].St_ModeFlag; resinfo = SiS_Pr->SiS_SModeIDTable[ModeIdIndex].St_ResInfo; -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 crt2crtc = SiS_Pr->SiS_SModeIDTable[ModeIdIndex].St_CRT2CRTC; #endif } else if(SiS_Pr->UseCustomMode) { @@ -5473,7 +5375,7 @@ SiS_SetGroup1_LVDS(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s } else { modeflag = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_ModeFlag; resinfo = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_RESINFO; -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 crt2crtc = SiS_Pr->SiS_RefIndex[RefreshRateTableIndex].Ext_CRT2CRTC; #endif } @@ -5494,7 +5396,7 @@ SiS_SetGroup1_LVDS(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s } } -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 if((SiS_Pr->ChipType >= SIS_315H) && (SiS_Pr->SiS_VBInfo & SetCRT2ToLCDA)) { if(IS_SIS330) { SiS_SetRegOR(SiS_Pr->SiS_Part1Port,0x2D,0x10); @@ -5744,7 +5646,7 @@ SiS_SetGroup1_LVDS(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s if(SiS_Pr->ChipType < SIS_315H) { -#ifdef SIS300 /* 300 series */ +#ifdef CONFIG_FB_SIS_300 /* 300 series */ tempeax = SiS_Pr->SiS_VGAVDE << 6; temp = (tempeax % (unsigned int)SiS_Pr->SiS_VDE); tempeax = tempeax / (unsigned int)SiS_Pr->SiS_VDE; @@ -5755,11 +5657,11 @@ SiS_SetGroup1_LVDS(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s temp = (unsigned short)(tempeax & 0x00FF); SiS_SetReg(SiS_Pr->SiS_Part1Port,0x1E,temp); /* BPLVCFACT */ tempvcfact = temp; -#endif /* SIS300 */ +#endif /* CONFIG_FB_SIS_300 */ } else { -#ifdef SIS315H /* 315 series */ +#ifdef CONFIG_FB_SIS_315 /* 315 series */ tempeax = SiS_Pr->SiS_VGAVDE << 18; tempebx = SiS_Pr->SiS_VDE; temp = (tempeax % tempebx); @@ -5845,7 +5747,7 @@ SiS_SetGroup1_LVDS(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s temp = (unsigned short)(tempecx & 0x00FF); SiS_SetReg(SiS_Pr->SiS_Part1Port,0x23,temp); -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 if(SiS_Pr->ChipType >= SIS_315H) { if(SiS_Pr->SiS_VBInfo & SetCRT2ToLCDA) { if((islvds) || (SiS_Pr->SiS_VBInfo & VB_SISLVDS)) { @@ -5863,7 +5765,7 @@ 
SiS_SetGroup1_LVDS(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s } #endif -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 if(SiS_Pr->SiS_IF_DEF_TRUMPION) { unsigned char *ROMAddr = SiS_Pr->VirtualRomBase; unsigned char *trumpdata; @@ -5899,7 +5801,7 @@ SiS_SetGroup1_LVDS(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s } #endif -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 if(SiS_Pr->SiS_IF_DEF_FSTN || SiS_Pr->SiS_IF_DEF_DSTN) { SiS_SetReg(SiS_Pr->SiS_Part1Port,0x25,0x00); SiS_SetReg(SiS_Pr->SiS_Part1Port,0x26,0x00); @@ -5999,7 +5901,7 @@ SiS_SetGroup1_LVDS(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s SiS_SetReg(SiS_Pr->SiS_Part1Port,0x45,0x0a); } } -#endif /* SIS315H */ +#endif /* CONFIG_FB_SIS_315 */ } /* Set Part 1 */ @@ -6007,12 +5909,12 @@ static void SiS_SetGroup1(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short ModeIdIndex, unsigned short RefreshRateTableIndex) { -#if defined(SIS300) || defined(SIS315H) +#if defined(CONFIG_FB_SIS_300) || defined(CONFIG_FB_SIS_315) unsigned char *ROMAddr = SiS_Pr->VirtualRomBase; #endif unsigned short temp=0, tempax=0, tempbx=0, tempcx=0, bridgeadd=0; unsigned short pushbx=0, CRT1Index=0, modeflag, resinfo=0; -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 unsigned short tempbl=0; #endif @@ -6038,11 +5940,11 @@ SiS_SetGroup1(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short (SiS_Pr->SiS_VBInfo & SetInSlaveMode)) ) { if(SiS_Pr->ChipType < SIS_315H ) { -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 SiS_SetCRT2FIFO_300(SiS_Pr, ModeNo); #endif } else { -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 SiS_SetCRT2FIFO_310(SiS_Pr); #endif } @@ -6051,7 +5953,7 @@ SiS_SetGroup1(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short if(SiS_Pr->ChipType < SIS_315H ) { -#ifdef SIS300 /* ------------- 300 series --------------*/ +#ifdef CONFIG_FB_SIS_300 /* ------------- 300 series --------------*/ temp = (SiS_Pr->SiS_VGAHT - 1) & 0x0FF; /* BTVGA2HT 0x08,0x09 */ SiS_SetReg(SiS_Pr->SiS_Part1Port,0x08,temp); /* CRT2 Horizontal Total */ @@ -6070,11 +5972,11 @@ SiS_SetGroup1(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short bridgeadd = 12; -#endif /* SIS300 */ +#endif /* CONFIG_FB_SIS_300 */ } else { -#ifdef SIS315H /* ------------------- 315/330 series --------------- */ +#ifdef CONFIG_FB_SIS_315 /* ------------------- 315/330 series --------------- */ tempcx = SiS_Pr->SiS_VGAHT; /* BTVGA2HT 0x08,0x09 */ if(modeflag & HalfDCLK) { @@ -6125,7 +6027,7 @@ SiS_SetGroup1(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short } } -#endif /* SIS315H */ +#endif /* CONFIG_FB_SIS_315 */ } /* 315/330 series */ @@ -6256,7 +6158,7 @@ SiS_SetGroup1(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short if(SiS_Pr->ChipType < SIS_315H) { -#ifdef SIS300 /* ---------- 300 series -------------- */ +#ifdef CONFIG_FB_SIS_300 /* ---------- 300 series -------------- */ if(SiS_Pr->SiS_VBType & VB_SISVB) { temp = 0x20; @@ -6310,11 +6212,11 @@ SiS_SetGroup1(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short SiS_SetRegANDOR(SiS_Pr->SiS_Part1Port,0x13,~0x3C,temp); /* Panel Link Delay Compensation; (Software Command Reset; Power Saving) */ -#endif /* SIS300 */ +#endif /* CONFIG_FB_SIS_300 */ } else { -#ifdef SIS315H /* --------------- 315/330 series ---------------*/ +#ifdef CONFIG_FB_SIS_315 /* --------------- 315/330 series ---------------*/ if(SiS_Pr->ChipType < SIS_661) { @@ -6349,7 +6251,7 @@ SiS_SetGroup1(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short 
if(modeflag & HalfDCLK) tempax |= 0x40; SiS_SetRegANDOR(SiS_Pr->SiS_Part1Port,0x2C,0x3f,tempax); -#endif /* SIS315H */ +#endif /* CONFIG_FB_SIS_315 */ } @@ -6381,7 +6283,7 @@ SiS_SetGroup1(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short /* SET PART 2 REGISTER GROUP */ /*********************************************/ -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 static unsigned char * SiS_GetGroup2CLVXPtr(struct SiS_Private *SiS_Pr, int tabletype) { @@ -6478,7 +6380,7 @@ SiS_GetCRT2Part2Ptr(struct SiS_Private *SiS_Pr,unsigned short ModeNo,unsigned sh } #endif -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 static void SiS_Group2LCDSpecial(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short crt2crtc) { @@ -6690,7 +6592,7 @@ SiS_SetGroup2(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short unsigned int longtemp, PhaseIndex; bool newtvphase; const unsigned char *TimingPoint; -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 unsigned short resindex, CRT2Index; const struct SiS_Part2PortTbl *CRT2Part2Ptr = NULL; @@ -7069,7 +6971,7 @@ SiS_SetGroup2(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short SiS_SetRegAND(SiS_Pr->SiS_Part2Port,0x17,0xFB); SiS_SetRegAND(SiS_Pr->SiS_Part2Port,0x18,0xDF); -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 if(SiS_GetCRT2Part2Ptr(SiS_Pr, ModeNo, ModeIdIndex, RefreshRateTableIndex, &CRT2Index, &resindex)) { switch(CRT2Index) { @@ -7130,12 +7032,6 @@ SiS_SetGroup2(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short /* Non-expanding: lcdvdes = tempcx = VT-1; lcdvdee = tempbx = VDE-1 */ -#ifdef SIS_XORG_XF86 -#ifdef TWDEBUG - xf86DrvMsg(0, X_INFO, "lcdvdes 0x%x lcdvdee 0x%x\n", tempcx, tempbx); -#endif -#endif - SiS_SetReg(SiS_Pr->SiS_Part2Port,0x05,tempcx); /* lcdvdes */ SiS_SetReg(SiS_Pr->SiS_Part2Port,0x06,tempbx); /* lcdvdee */ @@ -7184,12 +7080,6 @@ SiS_SetGroup2(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short tempbx = SiS_Pr->CVSyncStart; } -#ifdef SIS_XORG_XF86 -#ifdef TWDEBUG - xf86DrvMsg(0, X_INFO, "lcdvrs 0x%x\n", tempbx); -#endif -#endif - SiS_SetReg(SiS_Pr->SiS_Part2Port,0x04,tempbx); /* lcdvrs */ temp = (tempbx >> 4) & 0xF0; @@ -7201,15 +7091,9 @@ SiS_SetGroup2(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short temp |= (SiS_Pr->CVSyncEnd & 0x0f); } -#ifdef SIS_XORG_XF86 -#ifdef TWDEBUG - xf86DrvMsg(0, X_INFO, "lcdvre[3:0] 0x%x\n", (temp & 0x0f)); -#endif -#endif - SiS_SetReg(SiS_Pr->SiS_Part2Port,0x01,temp); -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 SiS_Group2LCDSpecial(SiS_Pr, ModeNo, crt2crtc); #endif @@ -7245,12 +7129,6 @@ SiS_SetGroup2(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short tempax >>= 1; } -#ifdef SIS_XORG_XF86 -#ifdef TWDEBUG - xf86DrvMsg(0, X_INFO, "lcdhdee 0x%x\n", tempbx); -#endif -#endif - tempbx += bridgeoffset; SiS_SetReg(SiS_Pr->SiS_Part2Port,0x23,tempbx); /* lcdhdee */ @@ -7276,12 +7154,6 @@ SiS_SetGroup2(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short tempbx += bridgeoffset; } -#ifdef SIS_XORG_XF86 -#ifdef TWDEBUG - xf86DrvMsg(0, X_INFO, "lcdhrs 0x%x\n", tempbx); -#endif -#endif - SiS_SetReg(SiS_Pr->SiS_Part2Port,0x1C,tempbx); /* lcdhrs */ SiS_SetRegANDOR(SiS_Pr->SiS_Part2Port,0x1D,0x0F,((tempbx >> 4) & 0xf0)); @@ -7300,20 +7172,14 @@ SiS_SetGroup2(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short tempbx += bridgeoffset; } -#ifdef SIS_XORG_XF86 -#ifdef TWDEBUG - xf86DrvMsg(0, X_INFO, "lcdhre 0x%x\n", tempbx); -#endif -#endif - SiS_SetReg(SiS_Pr->SiS_Part2Port,0x21,tempbx); /* lcdhre */ 
SiS_SetGroup2_Tail(SiS_Pr, ModeNo); -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 SiS_Set300Part2Regs(SiS_Pr, ModeIdIndex, RefreshRateTableIndex, ModeNo); #endif -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 } /* CRT2-LCD from table */ #endif } @@ -7382,7 +7248,7 @@ SiS_SetGroup3(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short /* SET PART 4 REGISTER GROUP */ /*********************************************/ -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 #if 0 static void SiS_ShiftXPos(struct SiS_Private *SiS_Pr, int shift) @@ -8011,7 +7877,7 @@ SiS_SetCHTVReg(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short if(SiS_Pr->SiS_IF_DEF_CH70xx == 1) { -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 /* Chrontel 7005 - I assume that it does not come with a 315 series chip */ @@ -8124,7 +7990,7 @@ SiS_SetCHTVReg(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short /* Chrontel 7019 - assumed that it does not come with a 300 series chip */ -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 unsigned short temp; @@ -8175,7 +8041,7 @@ SiS_SetCHTVReg(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short } -#ifdef SIS315H /* ----------- 315 series only ---------- */ +#ifdef CONFIG_FB_SIS_315 /* ----------- 315 series only ---------- */ void SiS_Chrontel701xBLOn(struct SiS_Private *SiS_Pr) @@ -8657,7 +8523,7 @@ SiS_ChrontelDoSomething1(struct SiS_Private *SiS_Pr) bool SiS_SetCRT2Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo) { -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 unsigned char *ROMAddr = SiS_Pr->VirtualRomBase; #endif unsigned short ModeIdIndex, RefreshRateTableIndex; @@ -8703,16 +8569,6 @@ SiS_SetCRT2Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo) SiS_GetLVDSDesData(SiS_Pr, ModeNo, ModeIdIndex, RefreshRateTableIndex); } -#ifdef SIS_XORG_XF86 -#ifdef TWDEBUG - xf86DrvMsg(0, X_INFO, "(init301: LCDHDES 0x%03x LCDVDES 0x%03x)\n", SiS_Pr->SiS_LCDHDES, SiS_Pr->SiS_LCDVDES); - xf86DrvMsg(0, X_INFO, "(init301: HDE 0x%03x VDE 0x%03x)\n", SiS_Pr->SiS_HDE, SiS_Pr->SiS_VDE); - xf86DrvMsg(0, X_INFO, "(init301: VGAHDE 0x%03x VGAVDE 0x%03x)\n", SiS_Pr->SiS_VGAHDE, SiS_Pr->SiS_VGAVDE); - xf86DrvMsg(0, X_INFO, "(init301: HT 0x%03x VT 0x%03x)\n", SiS_Pr->SiS_HT, SiS_Pr->SiS_VT); - xf86DrvMsg(0, X_INFO, "(init301: VGAHT 0x%03x VGAVT 0x%03x)\n", SiS_Pr->SiS_VGAHT, SiS_Pr->SiS_VGAVT); -#endif -#endif - if(SiS_Pr->SiS_SetFlag & LowModeTests) { SiS_SetGroup1(SiS_Pr, ModeNo, ModeIdIndex, RefreshRateTableIndex); } @@ -8722,12 +8578,12 @@ SiS_SetCRT2Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo) if(SiS_Pr->SiS_SetFlag & LowModeTests) { SiS_SetGroup2(SiS_Pr, ModeNo, ModeIdIndex, RefreshRateTableIndex); -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 SiS_SetGroup2_C_ELV(SiS_Pr, ModeNo, ModeIdIndex, RefreshRateTableIndex); #endif SiS_SetGroup3(SiS_Pr, ModeNo, ModeIdIndex); SiS_SetGroup4(SiS_Pr, ModeNo, ModeIdIndex, RefreshRateTableIndex); -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 SiS_SetGroup4_C_ELV(SiS_Pr, ModeNo, ModeIdIndex); #endif SiS_SetGroup5(SiS_Pr, ModeNo, ModeIdIndex); @@ -8758,7 +8614,7 @@ SiS_SetCRT2Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo) if(SiS_Pr->SiS_IF_DEF_CH70xx != 0) { if(SiS_Pr->SiS_VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) { if(SiS_Pr->SiS_IF_DEF_CH70xx == 2) { -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 SiS_SetCH701xForLCD(SiS_Pr); #endif } @@ -8771,7 +8627,7 @@ SiS_SetCRT2Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo) } -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 if(SiS_Pr->ChipType < SIS_315H) { if(SiS_Pr->SiS_SetFlag & LowModeTests) { 
if(SiS_Pr->SiS_UseOEM) { @@ -8794,7 +8650,7 @@ SiS_SetCRT2Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo) } #endif -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 if(SiS_Pr->ChipType >= SIS_315H) { if(SiS_Pr->SiS_SetFlag & LowModeTests) { if(SiS_Pr->ChipType < SIS_661) { @@ -8873,7 +8729,7 @@ SiS_SetupDDCN(struct SiS_Private *SiS_Pr) } } -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 static unsigned char * SiS_SetTrumpBlockLoop(struct SiS_Private *SiS_Pr, unsigned char *dataptr) { @@ -8923,11 +8779,6 @@ SiS_SetTrumpionBlock(struct SiS_Private *SiS_Pr, unsigned char *dataptr) dataptr = SiS_SetTrumpBlockLoop(SiS_Pr, dataptr); if(!dataptr) return false; } -#ifdef SIS_XORG_XF86 -#ifdef TWDEBUG - xf86DrvMsg(0, X_INFO, "Trumpion block success\n"); -#endif -#endif return true; } #endif @@ -9002,9 +8853,7 @@ SiS_SetCH701x(struct SiS_Private *SiS_Pr, unsigned short reg, unsigned char val) SiS_SetChReg(SiS_Pr, reg, val, 0); } -#ifdef SIS_LINUX_KERNEL static -#endif void SiS_SetCH70xx(struct SiS_Private *SiS_Pr, unsigned short reg, unsigned char val) { @@ -9091,9 +8940,7 @@ SiS_GetCH701x(struct SiS_Private *SiS_Pr, unsigned short tempbx) /* Read from Chrontel 70xx */ /* Parameter is [Register no (S7-S0)] */ -#ifdef SIS_LINUX_KERNEL static -#endif unsigned short SiS_GetCH70xx(struct SiS_Private *SiS_Pr, unsigned short tempbx) { @@ -9114,9 +8961,7 @@ SiS_SetCH70xxANDOR(struct SiS_Private *SiS_Pr, unsigned short reg, } /* Our own DDC functions */ -#ifndef SIS_XORG_XF86 static -#endif unsigned short SiS_InitDDCRegs(struct SiS_Private *SiS_Pr, unsigned int VBFlags, int VGAEngine, unsigned short adaptnum, unsigned short DDCdatatype, bool checkcr32, @@ -9224,12 +9069,6 @@ SiS_InitDDCRegs(struct SiS_Private *SiS_Pr, unsigned int VBFlags, int VGAEngine, SiS_SetupDDCN(SiS_Pr); -#ifdef SIS_XORG_XF86 -#ifdef TWDEBUG - xf86DrvMsg(0, X_INFO, "DDC Port %x Index %x Shift %d\n", - SiS_Pr->SiS_DDC_Port, SiS_Pr->SiS_DDC_Index, temp); -#endif -#endif return 0; } @@ -9292,11 +9131,6 @@ SiS_DoProbeDDC(struct SiS_Private *SiS_Pr) SiS_SetSwitchDDC2(SiS_Pr); if(SiS_PrepareDDC(SiS_Pr)) { SiS_SetStop(SiS_Pr); -#ifdef SIS_XORG_XF86 -#ifdef TWDEBUG - xf86DrvMsg(0, X_INFO, "Probe: Prepare failed\n"); -#endif -#endif return 0xFFFF; } mask = 0xf0; @@ -9310,11 +9144,6 @@ SiS_DoProbeDDC(struct SiS_Private *SiS_Pr) } else { failed = true; ret = 0xFFFF; -#ifdef SIS_XORG_XF86 -#ifdef TWDEBUG - xf86DrvMsg(0, X_INFO, "Probe: Read 1 failed\n"); -#endif -#endif } } if(!failed) { @@ -9324,11 +9153,6 @@ SiS_DoProbeDDC(struct SiS_Private *SiS_Pr) if(temp == value) ret = 0; else { ret = 0xFFFF; -#ifdef SIS_XORG_XF86 -#ifdef TWDEBUG - xf86DrvMsg(0, X_INFO, "Probe: Read 2 failed\n"); -#endif -#endif if(SiS_Pr->SiS_DDC_DeviceAddr == 0xa0) { if(temp == 0x30) ret = 0; } @@ -9338,9 +9162,7 @@ SiS_DoProbeDDC(struct SiS_Private *SiS_Pr) return ret; } -#ifndef SIS_XORG_XF86 static -#endif unsigned short SiS_ProbeDDC(struct SiS_Private *SiS_Pr) { @@ -9357,9 +9179,7 @@ SiS_ProbeDDC(struct SiS_Private *SiS_Pr) return flag; } -#ifndef SIS_XORG_XF86 static -#endif unsigned short SiS_ReadDDC(struct SiS_Private *SiS_Pr, unsigned short DDCdatatype, unsigned char *buffer) { @@ -9606,11 +9426,6 @@ SiS_SetSCLKHigh(struct SiS_Private *SiS_Pr) temp = SiS_GetReg(SiS_Pr->SiS_DDC_Port,SiS_Pr->SiS_DDC_Index); } while((!(temp & SiS_Pr->SiS_DDC_Clk)) && --watchdog); if (!watchdog) { -#ifdef SIS_XORG_XF86 -#ifdef TWDEBUG - xf86DrvMsg(0, X_INFO, "SetClkHigh failed\n"); -#endif -#endif return 0xFFFF; } SiS_DDC2Delay(SiS_Pr,SiS_I2CDELAYSHORT); @@ -9641,7 +9456,7 @@ 
SiS_CheckACK(struct SiS_Private *SiS_Pr) /* =============== SiS 315/330 O.E.M. ================= */ -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 static unsigned short GetRAMDACromptr(struct SiS_Private *SiS_Pr) @@ -10829,7 +10644,7 @@ SiS_FinalizeLCD(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned shor /* ================= SiS 300 O.E.M. ================== */ -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 static void SetOEMLCDData2(struct SiS_Private *SiS_Pr, unsigned short ModeNo,unsigned short ModeIdIndex, diff --git a/drivers/video/sis/init301.h b/drivers/video/sis/init301.h index 51d99222375d..e1fd31d0fddf 100644 --- a/drivers/video/sis/init301.h +++ b/drivers/video/sis/init301.h @@ -53,15 +53,8 @@ #ifndef _INIT301_H_ #define _INIT301_H_ -#include "osdef.h" #include "initdef.h" -#ifdef SIS_XORG_XF86 -#include "sis.h" -#include "sis_regs.h" -#endif - -#ifdef SIS_LINUX_KERNEL #include "vgatypes.h" #include "vstruct.h" #ifdef SIS_CP @@ -72,7 +65,6 @@ #include <linux/fb.h> #include "sis.h" #include <video/sisfb.h> -#endif static const unsigned char SiS_YPbPrTable[3][64] = { { @@ -237,7 +229,7 @@ static const unsigned char SiS_Part2CLVX_6[] = { /* 1080i */ 0xFF,0xFF, }; -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 /* 661 et al LCD data structure (2.03.00) */ static const unsigned char SiS_LCDStruct661[] = { /* 1024x768 */ @@ -279,7 +271,7 @@ static const unsigned char SiS_LCDStruct661[] = { }; #endif -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 static unsigned char SiS300_TrumpionData[14][80] = { { 0x02,0x0A,0x0A,0x01,0x04,0x01,0x00,0x03,0x0D,0x00,0x0D,0x10,0x7F,0x00,0x80,0x02, 0x20,0x03,0x0B,0x00,0x90,0x01,0xC1,0x01,0x60,0x0C,0x30,0x10,0x00,0x00,0x04,0x23, @@ -356,9 +348,6 @@ static unsigned char SiS300_TrumpionData[14][80] = { #endif void SiS_UnLockCRT2(struct SiS_Private *SiS_Pr); -#ifndef SIS_LINUX_KERNEL -void SiS_LockCRT2(struct SiS_Private *SiS_Pr); -#endif void SiS_EnableCRT2(struct SiS_Private *SiS_Pr); unsigned short SiS_GetRatePtr(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short ModeIdIndex); void SiS_WaitRetrace1(struct SiS_Private *SiS_Pr); @@ -375,9 +364,6 @@ unsigned short SiS_GetVCLK2Ptr(struct SiS_Private *SiS_Pr, unsigned short ModeNo unsigned short RefreshRateTableIndex); unsigned short SiS_GetResInfo(struct SiS_Private *SiS_Pr,unsigned short ModeNo,unsigned short ModeIdIndex); void SiS_DisableBridge(struct SiS_Private *SiS_Pr); -#ifndef SIS_LINUX_KERNEL -void SiS_EnableBridge(struct SiS_Private *SiS_Pr); -#endif bool SiS_SetCRT2Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo); void SiS_SiS30xBLOn(struct SiS_Private *SiS_Pr); void SiS_SiS30xBLOff(struct SiS_Private *SiS_Pr); @@ -386,13 +372,9 @@ void SiS_SetCH700x(struct SiS_Private *SiS_Pr, unsigned short reg, unsigned cha unsigned short SiS_GetCH700x(struct SiS_Private *SiS_Pr, unsigned short tempax); void SiS_SetCH701x(struct SiS_Private *SiS_Pr, unsigned short reg, unsigned char val); unsigned short SiS_GetCH701x(struct SiS_Private *SiS_Pr, unsigned short tempax); -#ifndef SIS_LINUX_KERNEL -void SiS_SetCH70xx(struct SiS_Private *SiS_Pr, unsigned short reg, unsigned char val); -unsigned short SiS_GetCH70xx(struct SiS_Private *SiS_Pr, unsigned short tempax); -#endif void SiS_SetCH70xxANDOR(struct SiS_Private *SiS_Pr, unsigned short reg, unsigned char orval,unsigned short andval); -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 static void SiS_Chrontel701xOn(struct SiS_Private *SiS_Pr); static void SiS_Chrontel701xOff(struct SiS_Private *SiS_Pr); static void SiS_ChrontelInitTVVSync(struct SiS_Private 
*SiS_Pr); @@ -401,7 +383,7 @@ void SiS_Chrontel701xBLOn(struct SiS_Private *SiS_Pr); void SiS_Chrontel701xBLOff(struct SiS_Private *SiS_Pr); #endif /* 315 */ -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 static bool SiS_SetTrumpionBlock(struct SiS_Private *SiS_Pr, unsigned char *dataptr); void SiS_SetChrontelGPIO(struct SiS_Private *SiS_Pr, unsigned short myvbinfo); #endif @@ -412,21 +394,12 @@ unsigned short SiS_HandleDDC(struct SiS_Private *SiS_Pr, unsigned int VBFlags, i unsigned short adaptnum, unsigned short DDCdatatype, unsigned char *buffer, unsigned int VBFlags2); -#ifdef SIS_XORG_XF86 -unsigned short SiS_InitDDCRegs(struct SiS_Private *SiS_Pr, unsigned int VBFlags, - int VGAEngine, unsigned short adaptnum, unsigned short DDCdatatype, - bool checkcr32, unsigned int VBFlags2); -unsigned short SiS_ProbeDDC(struct SiS_Private *SiS_Pr); -unsigned short SiS_ReadDDC(struct SiS_Private *SiS_Pr, unsigned short DDCdatatype, - unsigned char *buffer); -#else static unsigned short SiS_InitDDCRegs(struct SiS_Private *SiS_Pr, unsigned int VBFlags, int VGAEngine, unsigned short adaptnum, unsigned short DDCdatatype, bool checkcr32, unsigned int VBFlags2); static unsigned short SiS_ProbeDDC(struct SiS_Private *SiS_Pr); static unsigned short SiS_ReadDDC(struct SiS_Private *SiS_Pr, unsigned short DDCdatatype, unsigned char *buffer); -#endif static void SiS_SetSwitchDDC2(struct SiS_Private *SiS_Pr); static unsigned short SiS_SetStart(struct SiS_Private *SiS_Pr); static unsigned short SiS_SetStop(struct SiS_Private *SiS_Pr); @@ -441,13 +414,13 @@ static unsigned short SiS_PrepareDDC(struct SiS_Private *SiS_Pr); static void SiS_SendACK(struct SiS_Private *SiS_Pr, unsigned short yesno); static unsigned short SiS_DoProbeDDC(struct SiS_Private *SiS_Pr); -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 static void SiS_OEM300Setting(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short ModeIdIndex, unsigned short RefTabindex); static void SetOEMLCDData2(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short ModeIdIndex,unsigned short RefTableIndex); #endif -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 static void SiS_OEM310Setting(struct SiS_Private *SiS_Pr, unsigned short ModeNo,unsigned short ModeIdIndex, unsigned short RRTI); static void SiS_OEM661Setting(struct SiS_Private *SiS_Pr, @@ -482,15 +455,13 @@ extern void SiS_CalcLCDACRT1Timing(struct SiS_Private *SiS_Pr, unsigned short M extern void SiS_CalcCRRegisters(struct SiS_Private *SiS_Pr, int depth); extern unsigned short SiS_GetRefCRTVCLK(struct SiS_Private *SiS_Pr, unsigned short Index, int UseWide); extern unsigned short SiS_GetRefCRT1CRTC(struct SiS_Private *SiS_Pr, unsigned short Index, int UseWide); -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 extern void SiS_GetFIFOThresholdIndex300(struct SiS_Private *SiS_Pr, unsigned short *tempbx, unsigned short *tempcl); extern unsigned short SiS_GetFIFOThresholdB300(unsigned short tempbx, unsigned short tempcl); extern unsigned short SiS_GetLatencyFactor630(struct SiS_Private *SiS_Pr, unsigned short index); -#ifdef SIS_LINUX_KERNEL extern unsigned int sisfb_read_nbridge_pci_dword(struct SiS_Private *SiS_Pr, int reg); extern unsigned int sisfb_read_lpc_pci_dword(struct SiS_Private *SiS_Pr, int reg); #endif -#endif #endif diff --git a/drivers/video/sis/initextlfb.c b/drivers/video/sis/initextlfb.c index 99c04a4855d1..9dec64da4015 100644 --- a/drivers/video/sis/initextlfb.c +++ b/drivers/video/sis/initextlfb.c @@ -25,7 +25,6 @@ * Author: Thomas Winischhofer <thomas@winischhofer.net> */ -#include "osdef.h" 
#include "initdef.h" #include "vgatypes.h" #include "vstruct.h" @@ -59,7 +58,7 @@ sisfb_mode_rate_to_dclock(struct SiS_Private *SiS_Pr, unsigned char modeno, if(rateindex > 0) rateindex--; -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 switch(ModeNo) { case 0x5a: ModeNo = 0x50; break; case 0x5b: ModeNo = 0x56; @@ -103,7 +102,7 @@ sisfb_mode_rate_to_ddata(struct SiS_Private *SiS_Pr, unsigned char modeno, if(rateindex > 0) rateindex--; -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 switch(ModeNo) { case 0x5a: ModeNo = 0x50; break; case 0x5b: ModeNo = 0x56; @@ -187,7 +186,7 @@ sisfb_gettotalfrommode(struct SiS_Private *SiS_Pr, unsigned char modeno, int *ht if(rateindex > 0) rateindex--; -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 switch(ModeNo) { case 0x5a: ModeNo = 0x50; break; case 0x5b: ModeNo = 0x56; diff --git a/drivers/video/sis/osdef.h b/drivers/video/sis/osdef.h deleted file mode 100644 index 6ff8f988a1a7..000000000000 --- a/drivers/video/sis/osdef.h +++ /dev/null @@ -1,133 +0,0 @@ -/* $XFree86$ */ -/* $XdotOrg$ */ -/* - * OS depending defines - * - * Copyright (C) 2001-2005 by Thomas Winischhofer, Vienna, Austria - * - * If distributed as part of the Linux kernel, the following license terms - * apply: - * - * * This program is free software; you can redistribute it and/or modify - * * it under the terms of the GNU General Public License as published by - * * the Free Software Foundation; either version 2 of the named License, - * * or any later version. - * * - * * This program is distributed in the hope that it will be useful, - * * but WITHOUT ANY WARRANTY; without even the implied warranty of - * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * * GNU General Public License for more details. - * * - * * You should have received a copy of the GNU General Public License - * * along with this program; if not, write to the Free Software - * * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA - * - * Otherwise, the following license terms apply: - * - * * Redistribution and use in source and binary forms, with or without - * * modification, are permitted provided that the following conditions - * * are met: - * * 1) Redistributions of source code must retain the above copyright - * * notice, this list of conditions and the following disclaimer. - * * 2) Redistributions in binary form must reproduce the above copyright - * * notice, this list of conditions and the following disclaimer in the - * * documentation and/or other materials provided with the distribution. - * * 3) The name of the author may not be used to endorse or promote products - * * derived from this software without specific prior written permission. - * * - * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - * Author: Thomas Winischhofer <thomas@winischhofer.net> - * Silicon Integrated Systems, Inc. (used by permission) - * - */ - -#ifndef _SIS_OSDEF_H_ -#define _SIS_OSDEF_H_ - -/* The choices are: */ -#define SIS_LINUX_KERNEL /* Linux kernel framebuffer */ -#undef SIS_XORG_XF86 /* XFree86/X.org */ - -#ifdef OutPortByte -#undef OutPortByte -#endif - -#ifdef OutPortWord -#undef OutPortWord -#endif - -#ifdef OutPortLong -#undef OutPortLong -#endif - -#ifdef InPortByte -#undef InPortByte -#endif - -#ifdef InPortWord -#undef InPortWord -#endif - -#ifdef InPortLong -#undef InPortLong -#endif - -/**********************************************************************/ -/* LINUX KERNEL */ -/**********************************************************************/ - -#ifdef SIS_LINUX_KERNEL - -#ifdef CONFIG_FB_SIS_300 -#define SIS300 -#endif - -#ifdef CONFIG_FB_SIS_315 -#define SIS315H -#endif - -#if !defined(SIS300) && !defined(SIS315H) -#warning Neither CONFIG_FB_SIS_300 nor CONFIG_FB_SIS_315 is set -#warning sisfb will not work! -#endif - -#define OutPortByte(p,v) outb((u8)(v),(SISIOADDRESS)(p)) -#define OutPortWord(p,v) outw((u16)(v),(SISIOADDRESS)(p)) -#define OutPortLong(p,v) outl((u32)(v),(SISIOADDRESS)(p)) -#define InPortByte(p) inb((SISIOADDRESS)(p)) -#define InPortWord(p) inw((SISIOADDRESS)(p)) -#define InPortLong(p) inl((SISIOADDRESS)(p)) -#define SiS_SetMemory(MemoryAddress,MemorySize,value) memset_io(MemoryAddress, value, MemorySize) - -#endif /* LINUX_KERNEL */ - -/**********************************************************************/ -/* XFree86/X.org */ -/**********************************************************************/ - -#ifdef SIS_XORG_XF86 - -#define SIS300 -#define SIS315H - -#define OutPortByte(p,v) outSISREG((IOADDRESS)(p),(CARD8)(v)) -#define OutPortWord(p,v) outSISREGW((IOADDRESS)(p),(CARD16)(v)) -#define OutPortLong(p,v) outSISREGL((IOADDRESS)(p),(CARD32)(v)) -#define InPortByte(p) inSISREG((IOADDRESS)(p)) -#define InPortWord(p) inSISREGW((IOADDRESS)(p)) -#define InPortLong(p) inSISREGL((IOADDRESS)(p)) -#define SiS_SetMemory(MemoryAddress,MemorySize,value) memset(MemoryAddress, value, MemorySize) - -#endif /* XF86 */ - -#endif /* _OSDEF_H_ */ diff --git a/drivers/video/sis/sis.h b/drivers/video/sis/sis.h index 7c5710e3fb56..80d89d37c414 100644 --- a/drivers/video/sis/sis.h +++ b/drivers/video/sis/sis.h @@ -24,7 +24,6 @@ #ifndef _SIS_H_ #define _SIS_H_ -#include "osdef.h" #include <video/sisfb.h> #include "vgatypes.h" diff --git a/drivers/video/sis/sis_main.c b/drivers/video/sis/sis_main.c index 3dde12b0ab06..7e3370f115b6 100644 --- a/drivers/video/sis/sis_main.c +++ b/drivers/video/sis/sis_main.c @@ -60,6 +60,11 @@ #include "sis.h" #include "sis_main.h" +#if !defined(CONFIG_FB_SIS_300) && !defined(CONFIG_FB_SIS_315) +#warning Neither CONFIG_FB_SIS_300 nor CONFIG_FB_SIS_315 is set +#warning sisfb will not work! +#endif + static void sisfb_handle_command(struct sis_video_info *ivideo, struct sisfb_cmd *sisfb_command); @@ -4114,14 +4119,6 @@ sisfb_find_rom(struct pci_dev *pdev) if(sisfb_check_rom(rom_base, ivideo)) { if((myrombase = vmalloc(65536))) { - - /* Work around bug in pci/rom.c: Folks forgot to check - * whether the size retrieved from the BIOS image eventually - * is larger than the mapped size - */ - if(pci_resource_len(pdev, PCI_ROM_RESOURCE) < romsize) - romsize = pci_resource_len(pdev, PCI_ROM_RESOURCE); - memcpy_fromio(myrombase, rom_base, (romsize > 65536) ? 
65536 : romsize); } @@ -4155,23 +4152,6 @@ sisfb_find_rom(struct pci_dev *pdev) } -#else - - pci_read_config_dword(pdev, PCI_ROM_ADDRESS, &temp); - pci_write_config_dword(pdev, PCI_ROM_ADDRESS, - (ivideo->video_base & PCI_ROM_ADDRESS_MASK) | PCI_ROM_ADDRESS_ENABLE); - - rom_base = ioremap(ivideo->video_base, 65536); - if(rom_base) { - if(sisfb_check_rom(rom_base, ivideo)) { - if((myrombase = vmalloc(65536))) - memcpy_fromio(myrombase, rom_base, 65536); - } - iounmap(rom_base); - } - - pci_write_config_dword(pdev, PCI_ROM_ADDRESS, temp); - #endif return myrombase; diff --git a/drivers/video/sis/vgatypes.h b/drivers/video/sis/vgatypes.h index 81a22eaabfde..12c0dfaf2518 100644 --- a/drivers/video/sis/vgatypes.h +++ b/drivers/video/sis/vgatypes.h @@ -55,21 +55,10 @@ #define SISIOMEMTYPE -#ifdef SIS_LINUX_KERNEL typedef unsigned long SISIOADDRESS; #include <linux/types.h> /* Need __iomem */ #undef SISIOMEMTYPE #define SISIOMEMTYPE __iomem -#endif - -#ifdef SIS_XORG_XF86 -#if XF86_VERSION_CURRENT < XF86_VERSION_NUMERIC(4,2,0,0,0) -typedef unsigned long IOADDRESS; -typedef unsigned long SISIOADDRESS; -#else -typedef IOADDRESS SISIOADDRESS; -#endif -#endif typedef enum _SIS_CHIP_TYPE { SIS_VGALegacy = 0, diff --git a/drivers/video/sis/vstruct.h b/drivers/video/sis/vstruct.h index bef4aae388d0..ea94d214dcff 100644 --- a/drivers/video/sis/vstruct.h +++ b/drivers/video/sis/vstruct.h @@ -233,24 +233,15 @@ struct SiS_Private { unsigned char ChipType; unsigned char ChipRevision; -#ifdef SIS_XORG_XF86 - PCITAG PciTag; -#endif -#ifdef SIS_LINUX_KERNEL void *ivideo; -#endif unsigned char *VirtualRomBase; bool UseROM; -#ifdef SIS_LINUX_KERNEL unsigned char SISIOMEMTYPE *VideoMemoryAddress; unsigned int VideoMemorySize; -#endif SISIOADDRESS IOAddress; SISIOADDRESS IOAddress2; /* For dual chip XGI volari */ -#ifdef SIS_LINUX_KERNEL SISIOADDRESS RelIO; -#endif SISIOADDRESS SiS_P3c4; SISIOADDRESS SiS_P3d4; SISIOADDRESS SiS_P3c0; @@ -280,9 +271,6 @@ struct SiS_Private unsigned short SiS_IF_DEF_FSTN; unsigned short SiS_SysFlags; unsigned char SiS_VGAINFO; -#ifdef SIS_XORG_XF86 - unsigned short SiS_CP1, SiS_CP2, SiS_CP3, SiS_CP4; -#endif bool SiS_UseROM; bool SiS_ROMNew; bool SiS_XGIROM; diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c index 3a43ebf83a49..efb35aa8309a 100644 --- a/drivers/virtio/virtio.c +++ b/drivers/virtio/virtio.c @@ -9,19 +9,19 @@ static ssize_t device_show(struct device *_d, struct device_attribute *attr, char *buf) { struct virtio_device *dev = container_of(_d,struct virtio_device,dev); - return sprintf(buf, "%hu", dev->id.device); + return sprintf(buf, "0x%04x\n", dev->id.device); } static ssize_t vendor_show(struct device *_d, struct device_attribute *attr, char *buf) { struct virtio_device *dev = container_of(_d,struct virtio_device,dev); - return sprintf(buf, "%hu", dev->id.vendor); + return sprintf(buf, "0x%04x\n", dev->id.vendor); } static ssize_t status_show(struct device *_d, struct device_attribute *attr, char *buf) { struct virtio_device *dev = container_of(_d,struct virtio_device,dev); - return sprintf(buf, "0x%08x", dev->config->get_status(dev)); + return sprintf(buf, "0x%08x\n", dev->config->get_status(dev)); } static ssize_t modalias_show(struct device *_d, struct device_attribute *attr, char *buf) diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 1475ed6b575f..cc2f73e03475 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -230,9 +230,6 @@ add_head: pr_debug("Added buffer head %i to %p\n", head, vq); 
END_USE(vq); - /* If we're indirect, we can fit many (assuming not OOM). */ - if (vq->indirect) - return vq->num_free ? vq->vring.num : 0; return vq->num_free; } EXPORT_SYMBOL_GPL(virtqueue_add_buf_gfp); diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 4a291045ebac..a5ad77ef4266 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -558,6 +558,9 @@ config IT8712F_WDT This is the driver for the built-in watchdog timer on the IT8712F Super I/O chipset used on many motherboards. + If the driver does not work, then make sure that the game port in + the BIOS is enabled. + To compile this driver as a module, choose M here: the module will be called it8712f_wdt. diff --git a/drivers/watchdog/bcm63xx_wdt.c b/drivers/watchdog/bcm63xx_wdt.c index a1debc89356b..3c5045a206dd 100644 --- a/drivers/watchdog/bcm63xx_wdt.c +++ b/drivers/watchdog/bcm63xx_wdt.c @@ -18,7 +18,6 @@ #include <linux/miscdevice.h> #include <linux/module.h> #include <linux/moduleparam.h> -#include <linux/reboot.h> #include <linux/types.h> #include <linux/uaccess.h> #include <linux/watchdog.h> @@ -220,14 +219,6 @@ static long bcm63xx_wdt_ioctl(struct file *file, unsigned int cmd, } } -static int bcm63xx_wdt_notify_sys(struct notifier_block *this, - unsigned long code, void *unused) -{ - if (code == SYS_DOWN || code == SYS_HALT) - bcm63xx_wdt_pause(); - return NOTIFY_DONE; -} - static const struct file_operations bcm63xx_wdt_fops = { .owner = THIS_MODULE, .llseek = no_llseek, @@ -243,12 +234,8 @@ static struct miscdevice bcm63xx_wdt_miscdev = { .fops = &bcm63xx_wdt_fops, }; -static struct notifier_block bcm63xx_wdt_notifier = { - .notifier_call = bcm63xx_wdt_notify_sys, -}; - -static int bcm63xx_wdt_probe(struct platform_device *pdev) +static int __devinit bcm63xx_wdt_probe(struct platform_device *pdev) { int ret; struct resource *r; @@ -280,16 +267,10 @@ static int bcm63xx_wdt_probe(struct platform_device *pdev) wdt_time); } - ret = register_reboot_notifier(&bcm63xx_wdt_notifier); - if (ret) { - dev_err(&pdev->dev, "failed to register reboot_notifier\n"); - goto unregister_timer; - } - ret = misc_register(&bcm63xx_wdt_miscdev); if (ret < 0) { dev_err(&pdev->dev, "failed to register watchdog device\n"); - goto unregister_reboot_notifier; + goto unregister_timer; } dev_info(&pdev->dev, " started, timer margin: %d sec\n", @@ -297,8 +278,6 @@ static int bcm63xx_wdt_probe(struct platform_device *pdev) return 0; -unregister_reboot_notifier: - unregister_reboot_notifier(&bcm63xx_wdt_notifier); unregister_timer: bcm63xx_timer_unregister(TIMER_WDT_ID); unmap: @@ -306,25 +285,28 @@ unmap: return ret; } -static int bcm63xx_wdt_remove(struct platform_device *pdev) +static int __devexit bcm63xx_wdt_remove(struct platform_device *pdev) { if (!nowayout) bcm63xx_wdt_pause(); misc_deregister(&bcm63xx_wdt_miscdev); - - iounmap(bcm63xx_wdt_device.regs); - - unregister_reboot_notifier(&bcm63xx_wdt_notifier); bcm63xx_timer_unregister(TIMER_WDT_ID); - + iounmap(bcm63xx_wdt_device.regs); return 0; } +static void bcm63xx_wdt_shutdown(struct platform_device *pdev) +{ + bcm63xx_wdt_pause(); +} + static struct platform_driver bcm63xx_wdt = { .probe = bcm63xx_wdt_probe, - .remove = bcm63xx_wdt_remove, + .remove = __devexit_p(bcm63xx_wdt_remove), + .shutdown = bcm63xx_wdt_shutdown, .driver = { + .owner = THIS_MODULE, .name = "bcm63xx-wdt", } }; diff --git a/drivers/watchdog/gef_wdt.c b/drivers/watchdog/gef_wdt.c index 9c21d19043a6..f6bd6f10fcec 100644 --- a/drivers/watchdog/gef_wdt.c +++ b/drivers/watchdog/gef_wdt.c @@
-30,6 +30,7 @@ #include <linux/module.h> #include <linux/miscdevice.h> #include <linux/watchdog.h> +#include <linux/fs.h> #include <linux/of.h> #include <linux/of_platform.h> #include <linux/io.h> diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c index f7e90fe47b71..b8838d2c67a6 100644 --- a/drivers/watchdog/iTCO_wdt.c +++ b/drivers/watchdog/iTCO_wdt.c @@ -32,6 +32,7 @@ * document number 322169-001, 322170-003: 5 Series, 3400 Series (PCH) * document number 320066-003, 320257-008: EP80597 (IICH) * document number TBD : Cougar Point (CPT) + * document number TBD : Patsburg (PBG) */ /* @@ -146,7 +147,8 @@ enum iTCO_chipsets { TCO_CPT29, /* Cougar Point */ TCO_CPT30, /* Cougar Point */ TCO_CPT31, /* Cougar Point */ - TCO_PBG, /* Patsburg */ + TCO_PBG1, /* Patsburg */ + TCO_PBG2, /* Patsburg */ }; static struct { @@ -235,6 +237,7 @@ static struct { {"Cougar Point", 2}, {"Cougar Point", 2}, {"Patsburg", 2}, + {"Patsburg", 2}, {NULL, 0} }; @@ -350,7 +353,8 @@ static struct pci_device_id iTCO_wdt_pci_tbl[] = { { ITCO_PCI_DEVICE(0x1c5d, TCO_CPT29)}, { ITCO_PCI_DEVICE(0x1c5e, TCO_CPT30)}, { ITCO_PCI_DEVICE(0x1c5f, TCO_CPT31)}, - { ITCO_PCI_DEVICE(0x1d40, TCO_PBG)}, + { ITCO_PCI_DEVICE(0x1d40, TCO_PBG1)}, + { ITCO_PCI_DEVICE(0x1d41, TCO_PBG2)}, { 0, }, /* End of list */ }; MODULE_DEVICE_TABLE(pci, iTCO_wdt_pci_tbl); diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile index eb8a78d77d9d..533a199e7a3f 100644 --- a/drivers/xen/Makefile +++ b/drivers/xen/Makefile @@ -8,9 +8,12 @@ obj-$(CONFIG_BLOCK) += biomerge.o obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o obj-$(CONFIG_XEN_XENCOMM) += xencomm.o obj-$(CONFIG_XEN_BALLOON) += balloon.o -obj-$(CONFIG_XEN_DEV_EVTCHN) += evtchn.o +obj-$(CONFIG_XEN_DEV_EVTCHN) += xen-evtchn.o obj-$(CONFIG_XENFS) += xenfs/ obj-$(CONFIG_XEN_SYS_HYPERVISOR) += sys-hypervisor.o obj-$(CONFIG_XEN_PLATFORM_PCI) += platform-pci.o obj-$(CONFIG_SWIOTLB_XEN) += swiotlb-xen.o obj-$(CONFIG_XEN_DOM0) += pci.o + +xen-evtchn-y := evtchn.o + diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index 500290b150bb..43f9f02c7db0 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c @@ -50,6 +50,7 @@ #include <asm/pgtable.h> #include <asm/uaccess.h> #include <asm/tlb.h> +#include <asm/e820.h> #include <asm/xen/hypervisor.h> #include <asm/xen/hypercall.h> @@ -119,7 +120,7 @@ static void scrub_page(struct page *page) } /* balloon_append: add the given page to the balloon. */ -static void balloon_append(struct page *page) +static void __balloon_append(struct page *page) { /* Lowmem is re-populated first, so highmem pages go at list tail. 
*/ if (PageHighMem(page)) { @@ -130,7 +131,11 @@ static void balloon_append(struct page *page) list_add(&page->lru, &ballooned_pages); balloon_stats.balloon_low++; } +} +static void balloon_append(struct page *page) +{ + __balloon_append(page); totalram_pages--; } @@ -191,7 +196,7 @@ static unsigned long current_target(void) static int increase_reservation(unsigned long nr_pages) { - unsigned long pfn, i, flags; + unsigned long pfn, i; struct page *page; long rc; struct xen_memory_reservation reservation = { @@ -203,8 +208,6 @@ static int increase_reservation(unsigned long nr_pages) if (nr_pages > ARRAY_SIZE(frame_list)) nr_pages = ARRAY_SIZE(frame_list); - spin_lock_irqsave(&xen_reservation_lock, flags); - page = balloon_first_page(); for (i = 0; i < nr_pages; i++) { BUG_ON(page == NULL); @@ -247,14 +250,12 @@ static int increase_reservation(unsigned long nr_pages) balloon_stats.current_pages += rc; out: - spin_unlock_irqrestore(&xen_reservation_lock, flags); - return rc < 0 ? rc : rc != nr_pages; } static int decrease_reservation(unsigned long nr_pages) { - unsigned long pfn, i, flags; + unsigned long pfn, i; struct page *page; int need_sleep = 0; int ret; @@ -292,8 +293,6 @@ static int decrease_reservation(unsigned long nr_pages) kmap_flush_unused(); flush_tlb_all(); - spin_lock_irqsave(&xen_reservation_lock, flags); - /* No more mappings: invalidate P2M and add to balloon. */ for (i = 0; i < nr_pages; i++) { pfn = mfn_to_pfn(frame_list[i]); @@ -308,8 +307,6 @@ static int decrease_reservation(unsigned long nr_pages) balloon_stats.current_pages -= nr_pages; - spin_unlock_irqrestore(&xen_reservation_lock, flags); - return need_sleep; } @@ -395,7 +392,7 @@ static struct notifier_block xenstore_notifier; static int __init balloon_init(void) { - unsigned long pfn; + unsigned long pfn, extra_pfn_end; struct page *page; if (!xen_pv_domain()) @@ -415,11 +412,24 @@ static int __init balloon_init(void) register_balloon(&balloon_sysdev); - /* Initialise the balloon with excess memory space. */ - for (pfn = xen_start_info->nr_pages; pfn < max_pfn; pfn++) { + /* + * Initialise the balloon with excess memory space. We need + * to make sure we don't add memory which doesn't physically or + * logically exist. The E820 map can be trimmed to be smaller + * than the amount of physical memory due to the mem= command + * line parameter. And if this is a 32-bit non-HIGHMEM kernel + * on a system with memory which requires highmem to access, + * don't try to use it. + */ + extra_pfn_end = min(min(max_pfn, e820_end_of_ram_pfn()), + (unsigned long)PFN_DOWN(xen_extra_mem_start + xen_extra_mem_size)); + for (pfn = PFN_UP(xen_extra_mem_start); + pfn < extra_pfn_end; + pfn++) { page = pfn_to_page(pfn); - if (!PageReserved(page)) - balloon_append(page); + /* totalram_pages doesn't include the boot-time + balloon extension, so don't subtract from it.
*/ + __balloon_append(page); } target_watch.callback = watch_target; diff --git a/drivers/xen/events.c b/drivers/xen/events.c index 321a0c8346e5..31af0ac31a98 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c @@ -105,7 +105,6 @@ struct irq_info static struct irq_info *irq_info; static int *pirq_to_irq; -static int nr_pirqs; static int *evtchn_to_irq; struct cpu_evtchn_s { @@ -278,17 +277,17 @@ static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu) cpumask_copy(irq_to_desc(irq)->affinity, cpumask_of(cpu)); #endif - __clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq))); - __set_bit(chn, cpu_evtchn_mask(cpu)); + clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq))); + set_bit(chn, cpu_evtchn_mask(cpu)); irq_info[irq].cpu = cpu; } static void init_evtchn_cpu_bindings(void) { + int i; #ifdef CONFIG_SMP struct irq_desc *desc; - int i; /* By default all event channels notify CPU#0. */ for_each_irq_desc(i, desc) { @@ -296,7 +295,10 @@ static void init_evtchn_cpu_bindings(void) } #endif - memset(cpu_evtchn_mask(0), ~0, sizeof(struct cpu_evtchn_s)); + for_each_possible_cpu(i) + memset(cpu_evtchn_mask(i), + (i == 0) ? ~0 : 0, sizeof(struct cpu_evtchn_s)); + } static inline void clear_evtchn(int port) @@ -382,12 +384,17 @@ static int get_nr_hw_irqs(void) return ret; } -/* callers of this function should make sure that PHYSDEVOP_get_nr_pirqs - * succeeded otherwise nr_pirqs won't hold the right value */ -static int find_unbound_pirq(void) +static int find_unbound_pirq(int type) { - int i; - for (i = nr_pirqs-1; i >= 0; i--) { + int rc, i; + struct physdev_get_free_pirq op_get_free_pirq; + op_get_free_pirq.type = type; + + rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq); + if (!rc) + return op_get_free_pirq.pirq; + + for (i = 0; i < nr_irqs; i++) { if (pirq_to_irq[i] < 0) return i; } @@ -420,7 +427,7 @@ static int find_unbound_irq(void) if (irq == start) goto no_irqs; - res = irq_alloc_desc_at(irq, 0); + res = irq_alloc_desc_at(irq, -1); if (WARN_ON(res != irq)) return -1; @@ -608,10 +615,10 @@ int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name) spin_lock(&irq_mapping_update_lock); - if ((pirq > nr_pirqs) || (gsi > nr_irqs)) { + if ((pirq > nr_irqs) || (gsi > nr_irqs)) { printk(KERN_WARNING "xen_map_pirq_gsi: %s %s is incorrect!\n", - pirq > nr_pirqs ? "nr_pirqs" :"", - gsi > nr_irqs ? "nr_irqs" : ""); + pirq > nr_irqs ? "pirq" :"", + gsi > nr_irqs ? 
"gsi" : ""); goto out; } @@ -627,7 +634,7 @@ int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name) if (identity_mapped_irq(gsi) || (!xen_initial_domain() && xen_pv_domain())) { irq = gsi; - irq_alloc_desc_at(irq, 0); + irq_alloc_desc_at(irq, -1); } else irq = find_unbound_irq(); @@ -661,17 +668,21 @@ out: #include <linux/msi.h> #include "../pci/msi.h" -void xen_allocate_pirq_msi(char *name, int *irq, int *pirq) +void xen_allocate_pirq_msi(char *name, int *irq, int *pirq, int alloc) { spin_lock(&irq_mapping_update_lock); - *irq = find_unbound_irq(); - if (*irq == -1) - goto out; + if (alloc & XEN_ALLOC_IRQ) { + *irq = find_unbound_irq(); + if (*irq == -1) + goto out; + } - *pirq = find_unbound_pirq(); - if (*pirq == -1) - goto out; + if (alloc & XEN_ALLOC_PIRQ) { + *pirq = find_unbound_pirq(MAP_PIRQ_TYPE_MSI); + if (*pirq == -1) + goto out; + } set_irq_chip_and_handler_name(*irq, &xen_pirq_chip, handle_level_irq, name); @@ -752,13 +763,14 @@ int xen_destroy_irq(int irq) goto out; if (xen_initial_domain()) { - unmap_irq.pirq = info->u.pirq.gsi; + unmap_irq.pirq = info->u.pirq.pirq; unmap_irq.domid = DOMID_SELF; rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq); if (rc) { printk(KERN_WARNING "unmap irq failed %d\n", rc); goto out; } + pirq_to_irq[info->u.pirq.pirq] = -1; } irq_info[irq] = mk_unbound_info(); @@ -779,6 +791,11 @@ int xen_gsi_from_irq(unsigned irq) return gsi_from_irq(irq); } +int xen_irq_from_pirq(unsigned pirq) +{ + return pirq_to_irq[pirq]; +} + int bind_evtchn_to_irq(unsigned int evtchn) { int irq; @@ -1276,6 +1293,42 @@ static int retrigger_dynirq(unsigned int irq) return ret; } +static void restore_cpu_pirqs(void) +{ + int pirq, rc, irq, gsi; + struct physdev_map_pirq map_irq; + + for (pirq = 0; pirq < nr_irqs; pirq++) { + irq = pirq_to_irq[pirq]; + if (irq == -1) + continue; + + /* save/restore of PT devices doesn't work, so at this point the + * only devices present are GSI based emulated devices */ + gsi = gsi_from_irq(irq); + if (!gsi) + continue; + + map_irq.domid = DOMID_SELF; + map_irq.type = MAP_PIRQ_TYPE_GSI; + map_irq.index = gsi; + map_irq.pirq = pirq; + + rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq); + if (rc) { + printk(KERN_WARNING "xen map irq failed gsi=%d irq=%d pirq=%d rc=%d\n", + gsi, irq, pirq, rc); + irq_info[irq] = mk_unbound_info(); + pirq_to_irq[pirq] = -1; + continue; + } + + printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq); + + startup_pirq(irq); + } +} + static void restore_cpu_virqs(unsigned int cpu) { struct evtchn_bind_virq bind_virq; @@ -1419,6 +1472,8 @@ void xen_irq_resume(void) unmask_evtchn(evtchn); } + + restore_cpu_pirqs(); } static struct irq_chip xen_dynamic_chip __read_mostly = { @@ -1503,26 +1558,17 @@ void xen_callback_vector(void) {} void __init xen_init_IRQ(void) { - int i, rc; - struct physdev_nr_pirqs op_nr_pirqs; + int i; cpu_evtchn_mask_p = kcalloc(nr_cpu_ids, sizeof(struct cpu_evtchn_s), GFP_KERNEL); irq_info = kcalloc(nr_irqs, sizeof(*irq_info), GFP_KERNEL); - rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_nr_pirqs, &op_nr_pirqs); - if (rc < 0) { - nr_pirqs = nr_irqs; - if (rc != -ENOSYS) - printk(KERN_WARNING "PHYSDEVOP_get_nr_pirqs returned rc=%d\n", rc); - } else { - if (xen_pv_domain() && !xen_initial_domain()) - nr_pirqs = max((int)op_nr_pirqs.nr_pirqs, nr_irqs); - else - nr_pirqs = op_nr_pirqs.nr_pirqs; - } - pirq_to_irq = kcalloc(nr_pirqs, sizeof(*pirq_to_irq), GFP_KERNEL); - for (i = 0; i < nr_pirqs; i++) + /* We are using nr_irqs as the maximum number of 
pirq available but + * that number is actually chosen by Xen and we don't know exactly + * what it is. Be careful choosing high pirq numbers. */ + pirq_to_irq = kcalloc(nr_irqs, sizeof(*pirq_to_irq), GFP_KERNEL); + for (i = 0; i < nr_irqs; i++) pirq_to_irq[i] = -1; evtchn_to_irq = kcalloc(NR_EVENT_CHANNELS, sizeof(*evtchn_to_irq), diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c index fec6ba3c08a8..ef11daf0cafe 100644 --- a/drivers/xen/evtchn.c +++ b/drivers/xen/evtchn.c @@ -69,20 +69,51 @@ struct per_user_data { const char *name; }; -/* Who's bound to each port? */ -static struct per_user_data *port_user[NR_EVENT_CHANNELS]; +/* + * Who's bound to each port? This is logically an array of struct + * per_user_data *, but we encode the current enabled-state in bit 0. + */ +static unsigned long *port_user; static DEFINE_SPINLOCK(port_user_lock); /* protects port_user[] and ring_prod */ -irqreturn_t evtchn_interrupt(int irq, void *data) +static inline struct per_user_data *get_port_user(unsigned port) +{ + return (struct per_user_data *)(port_user[port] & ~1); +} + +static inline void set_port_user(unsigned port, struct per_user_data *u) +{ + port_user[port] = (unsigned long)u; +} + +static inline bool get_port_enabled(unsigned port) +{ + return port_user[port] & 1; +} + +static inline void set_port_enabled(unsigned port, bool enabled) +{ + if (enabled) + port_user[port] |= 1; + else + port_user[port] &= ~1; +} + +static irqreturn_t evtchn_interrupt(int irq, void *data) { unsigned int port = (unsigned long)data; struct per_user_data *u; spin_lock(&port_user_lock); - u = port_user[port]; + u = get_port_user(port); + + WARN(!get_port_enabled(port), + "Interrupt for port %d, but apparently not enabled; per-user %p\n", + port, u); disable_irq_nosync(irq); + set_port_enabled(port, false); if ((u->ring_prod - u->ring_cons) < EVTCHN_RING_SIZE) { u->ring[EVTCHN_RING_MASK(u->ring_prod)] = port; @@ -92,9 +123,8 @@ irqreturn_t evtchn_interrupt(int irq, void *data) kill_fasync(&u->evtchn_async_queue, SIGIO, POLL_IN); } - } else { + } else u->ring_overflow = 1; - } spin_unlock(&port_user_lock); @@ -198,9 +228,18 @@ static ssize_t evtchn_write(struct file *file, const char __user *buf, goto out; spin_lock_irq(&port_user_lock); - for (i = 0; i < (count/sizeof(evtchn_port_t)); i++) - if ((kbuf[i] < NR_EVENT_CHANNELS) && (port_user[kbuf[i]] == u)) - enable_irq(irq_from_evtchn(kbuf[i])); + + for (i = 0; i < (count/sizeof(evtchn_port_t)); i++) { + unsigned port = kbuf[i]; + + if (port < NR_EVENT_CHANNELS && + get_port_user(port) == u && + !get_port_enabled(port)) { + set_port_enabled(port, true); + enable_irq(irq_from_evtchn(port)); + } + } + spin_unlock_irq(&port_user_lock); rc = count; @@ -222,8 +261,9 @@ static int evtchn_bind_to_user(struct per_user_data *u, int port) * interrupt handler yet, and our caller has already * serialized bind operations.) 
*/ - BUG_ON(port_user[port] != NULL); - port_user[port] = u; + BUG_ON(get_port_user(port) != NULL); + set_port_user(port, u); + set_port_enabled(port, true); /* start enabled */ rc = bind_evtchn_to_irqhandler(port, evtchn_interrupt, IRQF_DISABLED, u->name, (void *)(unsigned long)port); @@ -239,10 +279,7 @@ static void evtchn_unbind_from_user(struct per_user_data *u, int port) unbind_from_irqhandler(irq, (void *)(unsigned long)port); - /* make sure we unbind the irq handler before clearing the port */ - barrier(); - - port_user[port] = NULL; + set_port_user(port, NULL); } static long evtchn_ioctl(struct file *file, @@ -333,15 +370,17 @@ static long evtchn_ioctl(struct file *file, spin_lock_irq(&port_user_lock); rc = -ENOTCONN; - if (port_user[unbind.port] != u) { + if (get_port_user(unbind.port) != u) { spin_unlock_irq(&port_user_lock); break; } - evtchn_unbind_from_user(u, unbind.port); + disable_irq(irq_from_evtchn(unbind.port)); spin_unlock_irq(&port_user_lock); + evtchn_unbind_from_user(u, unbind.port); + rc = 0; break; } @@ -355,7 +394,7 @@ static long evtchn_ioctl(struct file *file, if (notify.port >= NR_EVENT_CHANNELS) { rc = -EINVAL; - } else if (port_user[notify.port] != u) { + } else if (get_port_user(notify.port) != u) { rc = -ENOTCONN; } else { notify_remote_via_evtchn(notify.port); @@ -431,7 +470,7 @@ static int evtchn_open(struct inode *inode, struct file *filp) filp->private_data = u; - return 0; + return nonseekable_open(inode, filp); } static int evtchn_release(struct inode *inode, struct file *filp) @@ -444,14 +483,21 @@ static int evtchn_release(struct inode *inode, struct file *filp) free_page((unsigned long)u->ring); for (i = 0; i < NR_EVENT_CHANNELS; i++) { - if (port_user[i] != u) + if (get_port_user(i) != u) continue; - evtchn_unbind_from_user(port_user[i], i); + disable_irq(irq_from_evtchn(i)); } spin_unlock_irq(&port_user_lock); + for (i = 0; i < NR_EVENT_CHANNELS; i++) { + if (get_port_user(i) != u) + continue; + + evtchn_unbind_from_user(get_port_user(i), i); + } + kfree(u->name); kfree(u); @@ -467,12 +513,12 @@ static const struct file_operations evtchn_fops = { .fasync = evtchn_fasync, .open = evtchn_open, .release = evtchn_release, - .llseek = noop_llseek, + .llseek = no_llseek, }; static struct miscdevice evtchn_miscdev = { .minor = MISC_DYNAMIC_MINOR, - .name = "evtchn", + .name = "xen/evtchn", .fops = &evtchn_fops, }; static int __init evtchn_init(void) @@ -482,8 +528,11 @@ static int __init evtchn_init(void) if (!xen_domain()) return -ENODEV; + port_user = kcalloc(NR_EVENT_CHANNELS, sizeof(*port_user), GFP_KERNEL); + if (port_user == NULL) + return -ENOMEM; + spin_lock_init(&port_user_lock); - memset(port_user, 0, sizeof(port_user)); /* Create '/dev/misc/evtchn'.
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index ef9c7db52077..db8c4c4ac880 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -49,6 +49,7 @@ static int xen_hvm_suspend(void *data)
 
 	if (!*cancelled) {
 		xen_irq_resume();
+		xen_console_resume();
 		xen_timer_resume();
 	}
 
diff --git a/drivers/xen/xenfs/privcmd.c b/drivers/xen/xenfs/privcmd.c
index 0f5d4162b22d..dbd3b16fd131 100644
--- a/drivers/xen/xenfs/privcmd.c
+++ b/drivers/xen/xenfs/privcmd.c
@@ -265,9 +265,7 @@ static int mmap_return_errors(void *data, void *state)
 	xen_pfn_t *mfnp = data;
 	struct mmap_batch_state *st = state;
 
-	put_user(*mfnp, st->user++);
-
-	return 0;
+	return put_user(*mfnp, st->user++);
 }
 
 static struct vm_operations_struct privcmd_vm_ops;
@@ -322,10 +320,8 @@ static long privcmd_ioctl_mmap_batch(void __user *udata)
 	up_write(&mm->mmap_sem);
 
 	if (state.err > 0) {
-		ret = 0;
-
 		state.user = m.arr;
-		traverse_pages(m.num, sizeof(xen_pfn_t),
+		ret = traverse_pages(m.num, sizeof(xen_pfn_t),
 			       &pagelist,
 			       mmap_return_errors, &state);
 	}
@@ -383,8 +379,9 @@ static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
 	if (xen_feature(XENFEAT_auto_translated_physmap))
 		return -ENOSYS;
 
-	/* DONTCOPY is essential for Xen as copy_page_range is broken. */
-	vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY;
+	/* DONTCOPY is essential for Xen because copy_page_range doesn't know
+	 * how to recreate these mappings */
+	vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY | VM_PFNMAP;
 	vma->vm_ops = &privcmd_vm_ops;
 	vma->vm_private_data = NULL;
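The privcmd.c change above is an error-propagation fix: put_user()'s return value used to be discarded, and it now flows out of mmap_return_errors(), through traverse_pages(), into the ioctl's return code. A minimal sketch of that pattern, where traverse() and copy_out() are illustrative stand-ins rather than the kernel functions:

    #include <stdio.h>

    /* Visit every item; stop at, and report, the first failing visitor. */
    static int traverse(const int *items, int n, int (*fn)(int item))
    {
        int i, ret = 0;

        for (i = 0; i < n; i++) {
            ret = fn(items[i]);
            if (ret)
                break;  /* propagate the first failure to the caller */
        }
        return ret;
    }

    static int copy_out(int item)
    {
        /* A real implementation would return put_user()'s -EFAULT here. */
        return item < 0 ? -1 : 0;
    }

    int main(void)
    {
        int ok[]  = { 1, 2, 3 };
        int bad[] = { 1, -2, 3 };

        printf("ok:  %d\n", traverse(ok, 3, copy_out));   /* prints 0 */
        printf("bad: %d\n", traverse(bad, 3, copy_out));  /* prints -1 */
        return 0;
    }

The point of the fix is that a failed copy-out no longer looks like success to userspace; the first error ends the walk and becomes the ioctl's result.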
diff --git a/drivers/xen/xenfs/super.c b/drivers/xen/xenfs/super.c
index f6339d11d59c..1aa389719846 100644
--- a/drivers/xen/xenfs/super.c
+++ b/drivers/xen/xenfs/super.c
@@ -12,8 +12,6 @@
 #include <linux/module.h>
 #include <linux/fs.h>
 #include <linux/magic.h>
-#include <linux/mm.h>
-#include <linux/backing-dev.h>
 
 #include <xen/xen.h>
 
@@ -24,28 +22,12 @@ MODULE_DESCRIPTION("Xen filesystem");
 MODULE_LICENSE("GPL");
 
-static int xenfs_set_page_dirty(struct page *page)
-{
-	return !TestSetPageDirty(page);
-}
-
-static const struct address_space_operations xenfs_aops = {
-	.set_page_dirty = xenfs_set_page_dirty,
-};
-
-static struct backing_dev_info xenfs_backing_dev_info = {
-	.ra_pages	= 0,	/* No readahead */
-	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK,
-};
-
 static struct inode *xenfs_make_inode(struct super_block *sb, int mode)
 {
 	struct inode *ret = new_inode(sb);
 
 	if (ret) {
 		ret->i_mode = mode;
-		ret->i_mapping->a_ops = &xenfs_aops;
-		ret->i_mapping->backing_dev_info = &xenfs_backing_dev_info;
 		ret->i_uid = ret->i_gid = 0;
 		ret->i_blocks = 0;
 		ret->i_atime = ret->i_mtime = ret->i_ctime = CURRENT_TIME;
@@ -121,9 +103,9 @@ static int xenfs_fill_super(struct super_block *sb, void *data, int silent)
 	return rc;
 }
 
-static int xenfs_mount(struct file_system_type *fs_type,
-			int flags, const char *dev_name,
-			void *data)
+static struct dentry *xenfs_mount(struct file_system_type *fs_type,
+				  int flags, const char *dev_name,
+				  void *data)
 {
 	return mount_single(fs_type, flags, data, xenfs_fill_super);
 }
@@ -137,25 +119,11 @@ static struct file_system_type xenfs_type = {
 
 static int __init xenfs_init(void)
 {
-	int err;
-	if (!xen_domain()) {
-		printk(KERN_INFO "xenfs: not registering filesystem on non-xen platform\n");
-		return 0;
-	}
-
-	err = register_filesystem(&xenfs_type);
-	if (err) {
-		printk(KERN_ERR "xenfs: Unable to register filesystem!\n");
-		goto out;
-	}
-
-	err = bdi_init(&xenfs_backing_dev_info);
-	if (err)
-		unregister_filesystem(&xenfs_type);
-
-  out:
+	if (xen_domain())
+		return register_filesystem(&xenfs_type);
 
-	return err;
+	printk(KERN_INFO "XENFS: not registering filesystem on non-xen platform\n");
+	return 0;
 }
 
 static void __exit xenfs_exit(void)
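The xenfs_mount() conversion above follows the then-new VFS convention in which a filesystem's mount operation returns the root dentry (struct dentry *) rather than an int, with helpers such as mount_single() giving the one-shared-superblock behaviour xenfs wants. A minimal sketch of a filesystem written against that convention; "examplefs" and its empty fill_super are hypothetical, while mount_single(), register_filesystem() and kill_litter_super() are real kernel helpers of that era:

    #include <linux/fs.h>
    #include <linux/module.h>

    static int examplefs_fill_super(struct super_block *sb, void *data, int silent)
    {
        /* A real filesystem would set up sb->s_root here; omitted in this sketch. */
        return -ENOSYS;
    }

    static struct dentry *examplefs_mount(struct file_system_type *fs_type,
                                          int flags, const char *dev_name,
                                          void *data)
    {
        /* mount_single(): every mount shares one superblock, as with xenfs. */
        return mount_single(fs_type, flags, data, examplefs_fill_super);
    }

    static struct file_system_type examplefs_type = {
        .owner   = THIS_MODULE,
        .name    = "examplefs",
        .mount   = examplefs_mount,
        .kill_sb = kill_litter_super,
    };

    static int __init examplefs_init(void)
    {
        return register_filesystem(&examplefs_type);
    }

    static void __exit examplefs_exit(void)
    {
        unregister_filesystem(&examplefs_type);
    }

    module_init(examplefs_init);
    module_exit(examplefs_exit);
    MODULE_LICENSE("GPL");

Returning the dentry directly lets the VFS core own the error plumbing that each filesystem previously open-coded, which is exactly why the simplified xenfs_init() above no longer needs its err/goto dance.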