Diffstat (limited to 'Documentation')
-rw-r--r--  Documentation/DocBook/kgdb.tmpl                          |   8
-rw-r--r--  Documentation/filesystems/Locking                        |   2
-rw-r--r--  Documentation/filesystems/vfs.txt                        |   4
-rw-r--r--  Documentation/hwmon/w83l785ts                            |   3
-rw-r--r--  Documentation/kbuild/kconfig-language.txt                |  24
-rw-r--r--  Documentation/kdump/kdump.txt                            |   5
-rw-r--r--  Documentation/kernel-parameters.txt                      |   5
-rw-r--r--  Documentation/lguest/lguest.c                            |  62
-rw-r--r--  Documentation/powerpc/mpc52xx-device-tree-bindings.txt   |  11
-rw-r--r--  Documentation/s390/CommonIO                              |  11
-rw-r--r--  Documentation/scheduler/sched-design.txt                 | 165
-rw-r--r--  Documentation/scsi/ChangeLog.megaraid_sas                |  22
-rw-r--r--  Documentation/vm/slabinfo.c                              |  10
13 files changed, 93 insertions(+), 239 deletions(-)
diff --git a/Documentation/DocBook/kgdb.tmpl b/Documentation/DocBook/kgdb.tmpl
index 97618bed4d65..028a8444d95e 100644
--- a/Documentation/DocBook/kgdb.tmpl
+++ b/Documentation/DocBook/kgdb.tmpl
@@ -72,7 +72,7 @@
     kgdb is a source level debugger for linux kernel. It is used along
     with gdb to debug a linux kernel. The expectation is that gdb can
     be used to "break in" to the kernel to inspect memory, variables
-    and look through a cal stack information similar to what an
+    and look through call stack information similar to what an
     application developer would use gdb for. It is possible to place
     breakpoints in kernel code and perform some limited execution
     stepping.
@@ -93,8 +93,10 @@
   <chapter id="CompilingAKernel">
     <title>Compiling a kernel</title>
     <para>
-    To enable <symbol>CONFIG_KGDB</symbol>, look under the "Kernel debugging"
-    and then select "KGDB: kernel debugging with remote gdb".
+    To enable <symbol>CONFIG_KGDB</symbol> you should first turn on
+    "Prompt for development and/or incomplete code/drivers"
+    (CONFIG_EXPERIMENTAL) in "General setup", then under the
+    "Kernel debugging" select "KGDB: kernel debugging with remote gdb".
     </para>
     <para>
     Next you should choose one of more I/O drivers to interconnect debugging
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index c2992bc54f2f..8b22d7d8b991 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -92,7 +92,6 @@ prototypes:
 	void (*destroy_inode)(struct inode *);
 	void (*dirty_inode) (struct inode *);
 	int (*write_inode) (struct inode *, int);
-	void (*put_inode) (struct inode *);
 	void (*drop_inode) (struct inode *);
 	void (*delete_inode) (struct inode *);
 	void (*put_super) (struct super_block *);
@@ -115,7 +114,6 @@ alloc_inode:		no	no	no
 destroy_inode:		no
 dirty_inode:		no	(must not sleep)
 write_inode:		no
-put_inode:		no
 drop_inode:		no	!!!inode_lock!!!
 delete_inode:		no
 put_super:		yes	yes	no
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index 81e5be6e6e35..b7522c6cbae3 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -205,7 +205,6 @@ struct super_operations {
 
 	void (*dirty_inode) (struct inode *);
 	int (*write_inode) (struct inode *, int);
-	void (*put_inode) (struct inode *);
 	void (*drop_inode) (struct inode *);
 	void (*delete_inode) (struct inode *);
 	void (*put_super) (struct super_block *);
@@ -246,9 +245,6 @@ or bottom half).
 	inode to disc.  The second parameter indicates whether the write
 	should be synchronous or not, not all filesystems check this flag.
 
-  put_inode: called when the VFS inode is removed from the inode
-	cache.
-
   drop_inode: called when the last access to the inode is dropped,
 	with the inode_lock spinlock held.
diff --git a/Documentation/hwmon/w83l785ts b/Documentation/hwmon/w83l785ts
index 1841cedc25b2..bd1fa9d4468d 100644
--- a/Documentation/hwmon/w83l785ts
+++ b/Documentation/hwmon/w83l785ts
@@ -33,7 +33,8 @@ Known Issues
 ------------
 
 On some systems (Asus), the BIOS is known to interfere with the driver
-and cause read errors. The driver will retry a given number of times
+and cause read errors. Or maybe the W83L785TS-S chip is simply unreliable,
+we don't really know. The driver will retry a given number of times
 (5 by default) and then give up, returning the old value (or 0 if there
 is no old value). It seems to work well enough so that you should not
 notice anything. Thanks to James Bolt for helping test this feature.
diff --git a/Documentation/kbuild/kconfig-language.txt b/Documentation/kbuild/kconfig-language.txt
index 00b950d1c193..c412c245848f 100644
--- a/Documentation/kbuild/kconfig-language.txt
+++ b/Documentation/kbuild/kconfig-language.txt
@@ -377,27 +377,3 @@ config FOO
 
 limits FOO to module (=m) or disabled (=n).
 
-
-Build limited by a third config symbol which may be =y or =m
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-A common idiom that we see (and sometimes have problems with) is this:
-
-When option C in B (module or subsystem) uses interfaces from A (module
-or subsystem), and both A and B are tristate (could be =y or =m if they
-were independent of each other, but they aren't), then we need to limit
-C such that it cannot be built statically if A is built as a loadable
-module.  (C already depends on B, so there is no dependency issue to
-take care of here.)
-
-If A is linked statically into the kernel image, C can be built
-statically or as loadable module(s). However, if A is built as loadable
-module(s), then C must be restricted to loadable module(s) also. This
-can be expressed in kconfig language as:
-
-config C
-	depends on A = y || A = B
-
-or for real examples, use this command in a kernel tree:
-
-$ find . -name Kconfig\* | xargs grep -ns "depends on.*=.*||.*=" | grep -v orig
-
diff --git a/Documentation/kdump/kdump.txt b/Documentation/kdump/kdump.txt
index d0ac72cc19ff..b8e52c0355d3 100644
--- a/Documentation/kdump/kdump.txt
+++ b/Documentation/kdump/kdump.txt
@@ -245,6 +245,8 @@ The syntax is:
     crashkernel=<range1>:<size1>[,<range2>:<size2>,...][@offset]
     range=start-[end]
 
+    'start' is inclusive and 'end' is exclusive.
+
 For example:
 
     crashkernel=512M-2G:64M,2G-:128M
@@ -253,10 +255,11 @@ This would mean:
 
     1) if the RAM is smaller than 512M, then don't reserve anything
        (this is the "rescue" case)
-    2) if the RAM size is between 512M and 2G, then reserve 64M
+    2) if the RAM size is between 512M and 2G (exclusive), then reserve 64M
     3) if the RAM size is larger than 2G, then reserve 128M
 
+
 Boot into System Kernel
 =======================
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index a3c35446e755..cdd5b934f43e 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1094,9 +1094,6 @@ and is between 256 and 4096 characters. It is defined in the file
 	mac5380=	[HW,SCSI] Format:
 			<can_queue>,<cmd_per_lun>,<sg_tablesize>,<hostid>,<use_tags>
 
-	mac53c9x=	[HW,SCSI] Format:
-			<num_esps>,<disconnect>,<nosync>,<can_queue>,<cmd_per_lun>,<sg_tablesize>,<hostid>,<use_tags>
-
 	machvec=	[IA64] Force the use of a particular machine-vector
 			(machvec) in a generic kernel.
 			Example: machvec=hpzx1_swiotlb
@@ -1525,6 +1522,8 @@ and is between 256 and 4096 characters. It is defined in the file
 			This is normally done in pci_enable_device(),
 			so this option is a temporary workaround
 			for broken drivers that don't call it.
+		skip_isa_align	[X86] do not align io start addr, so can
+				handle more pci cards
 		firmware	[ARM] Do not re-enumerate the bus but instead
 			just use the configuration from the
 			bootloader. This is currently used on
diff --git a/Documentation/lguest/lguest.c b/Documentation/lguest/lguest.c
index 4c1fc65a8b3d..3be8ab2a886a 100644
--- a/Documentation/lguest/lguest.c
+++ b/Documentation/lguest/lguest.c
@@ -131,6 +131,9 @@ struct device
 	/* Any queues attached to this device */
 	struct virtqueue *vq;
 
+	/* Handle status being finalized (ie. feature bits stable). */
+	void (*ready)(struct device *me);
+
 	/* Device-specific data. */
 	void *priv;
 };
@@ -925,24 +928,40 @@ static void enable_fd(int fd, struct virtqueue *vq)
 	write(waker_fd, &vq->dev->fd, sizeof(vq->dev->fd));
 }
 
-/* When the Guest asks us to reset a device, it's is fairly easy. */
-static void reset_device(struct device *dev)
+/* When the Guest tells us they updated the status field, we handle it. */
+static void update_device_status(struct device *dev)
 {
 	struct virtqueue *vq;
 
-	verbose("Resetting device %s\n", dev->name);
-	/* Clear the status. */
-	dev->desc->status = 0;
+	/* This is a reset. */
+	if (dev->desc->status == 0) {
+		verbose("Resetting device %s\n", dev->name);
 
-	/* Clear any features they've acked. */
-	memset(get_feature_bits(dev) + dev->desc->feature_len, 0,
-	       dev->desc->feature_len);
+		/* Clear any features they've acked. */
+		memset(get_feature_bits(dev) + dev->desc->feature_len, 0,
+		       dev->desc->feature_len);
 
-	/* Zero out the virtqueues. */
-	for (vq = dev->vq; vq; vq = vq->next) {
-		memset(vq->vring.desc, 0,
-		       vring_size(vq->config.num, getpagesize()));
-		vq->last_avail_idx = 0;
+		/* Zero out the virtqueues. */
+		for (vq = dev->vq; vq; vq = vq->next) {
+			memset(vq->vring.desc, 0,
+			       vring_size(vq->config.num, getpagesize()));
+			vq->last_avail_idx = 0;
+		}
+	} else if (dev->desc->status & VIRTIO_CONFIG_S_FAILED) {
+		warnx("Device %s configuration FAILED", dev->name);
+	} else if (dev->desc->status & VIRTIO_CONFIG_S_DRIVER_OK) {
+		unsigned int i;
+
+		verbose("Device %s OK: offered", dev->name);
+		for (i = 0; i < dev->desc->feature_len; i++)
+			verbose(" %08x", get_feature_bits(dev)[i]);
+		verbose(", accepted");
+		for (i = 0; i < dev->desc->feature_len; i++)
+			verbose(" %08x", get_feature_bits(dev)
+				[dev->desc->feature_len+i]);
+
+		if (dev->ready)
+			dev->ready(dev);
 	}
 }
 
@@ -954,9 +973,9 @@ static void handle_output(int fd, unsigned long addr)
 
 	/* Check each device and virtqueue. */
 	for (i = devices.dev; i; i = i->next) {
-		/* Notifications to device descriptors reset the device. */
+		/* Notifications to device descriptors update device status. */
 		if (from_guest_phys(addr) == i->desc) {
-			reset_device(i);
+			update_device_status(i);
 			return;
 		}
 
@@ -1170,6 +1189,7 @@ static struct device *new_device(const char *name, u16 type, int fd,
 	dev->handle_input = handle_input;
 	dev->name = name;
 	dev->vq = NULL;
+	dev->ready = NULL;
 
 	/* Append to device list.  Prepending to a single-linked list is
 	 * easier, but the user expects the devices to be arranged on the bus
@@ -1398,7 +1418,7 @@ static bool service_io(struct device *dev)
 	struct vblk_info *vblk = dev->priv;
 	unsigned int head, out_num, in_num, wlen;
 	int ret;
-	struct virtio_blk_inhdr *in;
+	u8 *in;
 	struct virtio_blk_outhdr *out;
 	struct iovec iov[dev->vq->vring.num];
 	off64_t off;
@@ -1416,7 +1436,7 @@ static bool service_io(struct device *dev)
 		     head, out_num, in_num);
 
 	out = convert(&iov[0], struct virtio_blk_outhdr);
-	in = convert(&iov[out_num+in_num-1], struct virtio_blk_inhdr);
+	in = convert(&iov[out_num+in_num-1], u8);
 	off = out->sector * 512;
 
 	/* The block device implements "barriers", where the Guest indicates
@@ -1430,7 +1450,7 @@
 	 * It'd be nice if we supported eject, for example, but we don't. */
 	if (out->type & VIRTIO_BLK_T_SCSI_CMD) {
 		fprintf(stderr, "Scsi commands unsupported\n");
-		in->status = VIRTIO_BLK_S_UNSUPP;
+		*in = VIRTIO_BLK_S_UNSUPP;
 		wlen = sizeof(*in);
 	} else if (out->type & VIRTIO_BLK_T_OUT) {
 		/* Write */
@@ -1453,7 +1473,7 @@
 			errx(1, "Write past end %llu+%u", off, ret);
 		}
 		wlen = sizeof(*in);
-		in->status = (ret >= 0 ? VIRTIO_BLK_S_OK : VIRTIO_BLK_S_IOERR);
+		*in = (ret >= 0 ? VIRTIO_BLK_S_OK : VIRTIO_BLK_S_IOERR);
 	} else {
 		/* Read */
@@ -1466,10 +1486,10 @@
 		verbose("READ from sector %llu: %i\n", out->sector, ret);
 		if (ret >= 0) {
 			wlen = sizeof(*in) + ret;
-			in->status = VIRTIO_BLK_S_OK;
+			*in = VIRTIO_BLK_S_OK;
 		} else {
 			wlen = sizeof(*in);
-			in->status = VIRTIO_BLK_S_IOERR;
+			*in = VIRTIO_BLK_S_IOERR;
 		}
 	}
diff --git a/Documentation/powerpc/mpc52xx-device-tree-bindings.txt b/Documentation/powerpc/mpc52xx-device-tree-bindings.txt
index cda7a7dffa6d..6f12f1c79c0c 100644
--- a/Documentation/powerpc/mpc52xx-device-tree-bindings.txt
+++ b/Documentation/powerpc/mpc52xx-device-tree-bindings.txt
@@ -237,6 +237,17 @@ Each GPIO controller node should have the empty property gpio-controller and
 according to the bit numbers in the GPIO control registers. The second cell
 is for flags which is currently unsused.
 
+8) FEC nodes
+The FEC node can specify one of the following properties to configure
+the MII link:
+"fsl,7-wire-mode" - An empty property that specifies the link uses 7-wire
+                    mode instead of MII
+"current-speed"   - Specifies that the MII should be configured for a fixed
+                    speed.  This property should contain two cells.  The
+                    first cell specifies the speed in Mbps and the second
+                    should be '0' for half duplex and '1' for full duplex
+"phy-handle"      - Contains a phandle to an Ethernet PHY.
+
 IV - Extra Notes
 ================
diff --git a/Documentation/s390/CommonIO b/Documentation/s390/CommonIO
index 8fbc0a852870..bf0baa19ec24 100644
--- a/Documentation/s390/CommonIO
+++ b/Documentation/s390/CommonIO
@@ -8,17 +8,6 @@ Command line parameters
 
   Enable logging of debug information in case of ccw device timeouts.
 
-
-* cio_msg = yes | no
-
-  Determines whether information on found devices and sensed device
-  characteristics should be shown during startup or when new devices are
-  found, i. e. messages of the types "Detected device 0.0.4711 on subchannel
-  0.0.0042" and "SenseID: Device 0.0.4711 reports: ...".
-
-  Default is off.
-
-
 * cio_ignore = {all} |
 	       {<device> | <range of devices>} |
 	       {!<device> | !<range of devices>}
diff --git a/Documentation/scheduler/sched-design.txt b/Documentation/scheduler/sched-design.txt
deleted file mode 100644
index 1605bf0cba8b..000000000000
--- a/Documentation/scheduler/sched-design.txt
+++ /dev/null
@@ -1,165 +0,0 @@
-                   Goals, Design and Implementation of the
-                      new ultra-scalable O(1) scheduler
-
-
-  This is an edited version of an email Ingo Molnar sent to
-  lkml on 4 Jan 2002.  It describes the goals, design, and
-  implementation of Ingo's new ultra-scalable O(1) scheduler.
-  Last Updated: 18 April 2002.
-
-
-Goal
-====
-
-The main goal of the new scheduler is to keep all the good things we know
-and love about the current Linux scheduler:
-
- - good interactive performance even during high load: if the user
-   types or clicks then the system must react instantly and must execute
-   the user tasks smoothly, even during considerable background load.
-
- - good scheduling/wakeup performance with 1-2 runnable processes.
-
- - fairness: no process should stay without any timeslice for any
-   unreasonable amount of time. No process should get an unjustly high
-   amount of CPU time.
-
- - priorities: less important tasks can be started with lower priority,
-   more important tasks with higher priority.
-
- - SMP efficiency: no CPU should stay idle if there is work to do.
-
- - SMP affinity: processes which run on one CPU should stay affine to
-   that CPU. Processes should not bounce between CPUs too frequently.
-
- - plus additional scheduler features: RT scheduling, CPU binding.
-
-and the goal is also to add a few new things:
-
- - fully O(1) scheduling. Are you tired of the recalculation loop
-   blowing the L1 cache away every now and then? Do you think the goodness
-   loop is taking a bit too long to finish if there are lots of runnable
-   processes? This new scheduler takes no prisoners: wakeup(), schedule(),
-   the timer interrupt are all O(1) algorithms. There is no recalculation
-   loop. There is no goodness loop either.
-
- - 'perfect' SMP scalability. With the new scheduler there is no 'big'
-   runqueue_lock anymore - it's all per-CPU runqueues and locks - two
-   tasks on two separate CPUs can wake up, schedule and context-switch
-   completely in parallel, without any interlocking. All
-   scheduling-relevant data is structured for maximum scalability.
-
- - better SMP affinity. The old scheduler has a particular weakness that
-   causes the random bouncing of tasks between CPUs if/when higher
-   priority/interactive tasks, this was observed and reported by many
-   people. The reason is that the timeslice recalculation loop first needs
-   every currently running task to consume its timeslice. But when this
-   happens on eg. an 8-way system, then this property starves an
-   increasing number of CPUs from executing any process. Once the last
-   task that has a timeslice left has finished using up that timeslice,
-   the recalculation loop is triggered and other CPUs can start executing
-   tasks again - after having idled around for a number of timer ticks.
-   The more CPUs, the worse this effect.
-
-   Furthermore, this same effect causes the bouncing effect as well:
-   whenever there is such a 'timeslice squeeze' of the global runqueue,
-   idle processors start executing tasks which are not affine to that CPU.
-   (because the affine tasks have finished off their timeslices already.)
-
-   The new scheduler solves this problem by distributing timeslices on a
-   per-CPU basis, without having any global synchronization or
-   recalculation.
-
- - batch scheduling. A significant proportion of computing-intensive tasks
-   benefit from batch-scheduling, where timeslices are long and processes
-   are roundrobin scheduled. The new scheduler does such batch-scheduling
-   of the lowest priority tasks - so nice +19 jobs will get
-   'batch-scheduled' automatically. With this scheduler, nice +19 jobs are
-   in essence SCHED_IDLE, from an interactiveness point of view.
-
- - handle extreme loads more smoothly, without breakdown and scheduling
-   storms.
-
- - O(1) RT scheduling. For those RT folks who are paranoid about the
-   O(nr_running) property of the goodness loop and the recalculation loop.
-
- - run fork()ed children before the parent. Andrea has pointed out the
-   advantages of this a few months ago, but patches for this feature
-   do not work with the old scheduler as well as they should,
-   because idle processes often steal the new child before the fork()ing
-   CPU gets to execute it.
-
-
-Design
-======
-
-The core of the new scheduler contains the following mechanisms:
-
- - *two* priority-ordered 'priority arrays' per CPU. There is an 'active'
-   array and an 'expired' array. The active array contains all tasks that
-   are affine to this CPU and have timeslices left. The expired array
-   contains all tasks which have used up their timeslices - but this array
-   is kept sorted as well. The active and expired array is not accessed
-   directly, it's accessed through two pointers in the per-CPU runqueue
-   structure. If all active tasks are used up then we 'switch' the two
-   pointers and from now on the ready-to-go (former-) expired array is the
-   active array - and the empty active array serves as the new collector
-   for expired tasks.
-
- - there is a 64-bit bitmap cache for array indices. Finding the highest
-   priority task is thus a matter of two x86 BSFL bit-search instructions.
-
-the split-array solution enables us to have an arbitrary number of active
-and expired tasks, and the recalculation of timeslices can be done
-immediately when the timeslice expires. Because the arrays are always
-access through the pointers in the runqueue, switching the two arrays can
-be done very quickly.
-
-this is a hybride priority-list approach coupled with roundrobin
-scheduling and the array-switch method of distributing timeslices.
-
- - there is a per-task 'load estimator'.
-
-one of the toughest things to get right is good interactive feel during
-heavy system load. While playing with various scheduler variants i found
-that the best interactive feel is achieved not by 'boosting' interactive
-tasks, but by 'punishing' tasks that want to use more CPU time than there
-is available. This method is also much easier to do in an O(1) fashion.
-
-to establish the actual 'load' the task contributes to the system, a
-complex-looking but pretty accurate method is used: there is a 4-entry
-'history' ringbuffer of the task's activities during the last 4 seconds.
-This ringbuffer is operated without much overhead. The entries tell the
-scheduler a pretty accurate load-history of the task: has it used up more
-CPU time or less during the past N seconds. [the size '4' and the interval
-of 4x 1 seconds was found by lots of experimentation - this part is
-flexible and can be changed in both directions.]
-
-the penalty a task gets for generating more load than the CPU can handle
-is a priority decrease - there is a maximum amount to this penalty
-relative to their static priority, so even fully CPU-bound tasks will
-observe each other's priorities, and will share the CPU accordingly.
-
-the SMP load-balancer can be extended/switched with additional parallel
-computing and cache hierarchy concepts: NUMA scheduling, multi-core CPUs
-can be supported easily by changing the load-balancer. Right now it's
-tuned for my SMP systems.
-
-i skipped the prev->mm == next->mm advantage - no workload i know of shows
-any sensitivity to this. It can be added back by sacrificing O(1)
-schedule() [the current and one-lower priority list can be searched for a
-that->mm == current->mm condition], but costs a fair number of cycles
-during a number of important workloads, so i wanted to avoid this as much
-as possible.
-
-- the SMP idle-task startup code was still racy and the new scheduler
-triggered this. So i streamlined the idle-setup code a bit. We do not call
-into schedule() before all processors have started up fully and all idle
-threads are in place.
-
-- the patch also cleans up a number of aspects of sched.c - moves code
-into other areas of the kernel where it's appropriate, and simplifies
-certain code paths and data constructs. As a result, the new scheduler's
-code is smaller than the old one.
-
-	Ingo
diff --git a/Documentation/scsi/ChangeLog.megaraid_sas b/Documentation/scsi/ChangeLog.megaraid_sas
index 91c81db0ba71..716fcc1cafb5 100644
--- a/Documentation/scsi/ChangeLog.megaraid_sas
+++ b/Documentation/scsi/ChangeLog.megaraid_sas
@@ -1,3 +1,25 @@
+1 Release Date    : Mon. March 10 11:02:31 PDT 2008 -
+			(emaild-id:megaraidlinux@lsi.com)
+			Sumant Patro
+			Bo Yang
+
+2 Current Version : 00.00.03.20-RC1
+3 Older Version   : 00.00.03.16
+
+1. Rollback the sense info implementation
+	Sense buffer ptr data type in the ioctl path is reverted back
+	to u32 * as in previous versions of driver.
+
+2. Fixed the driver frame count.
+	When Driver sent wrong frame count to firmware.  As this
+	particular command is sent to drive, FW is seeing continuous
+	chip resets and so the command will timeout.
+
+3. Add the new controller(1078DE) support to the driver
+	and Increase the max_wait to 60 from 10 in the controller
+	operational status.  With this max_wait increase, driver will
+	make sure the FW will finish the pending cmd for KDUMP case.
+
 1 Release Date    : Thur. Nov. 07 16:30:43 PST 2007 -
 		(emaild-id:megaraidlinux@lsi.com)
 			Sumant Patro
diff --git a/Documentation/vm/slabinfo.c b/Documentation/vm/slabinfo.c
index d3ce295bffac..e4230ed16ee7 100644
--- a/Documentation/vm/slabinfo.c
+++ b/Documentation/vm/slabinfo.c
@@ -38,7 +38,7 @@ struct slabinfo {
 	unsigned long alloc_from_partial, alloc_slab, free_slab, alloc_refill;
 	unsigned long cpuslab_flush, deactivate_full, deactivate_empty;
 	unsigned long deactivate_to_head, deactivate_to_tail;
-	unsigned long deactivate_remote_frees;
+	unsigned long deactivate_remote_frees, order_fallback;
 	int numa[MAX_NODES];
 	int numa_partial[MAX_NODES];
 } slabinfo[MAX_SLABS];
@@ -293,7 +293,7 @@ int line = 0;
 void first_line(void)
 {
 	if (show_activity)
-		printf("Name Objects Alloc Free %%Fast\n");
+		printf("Name Objects Alloc Free %%Fast Fallb O\n");
 	else
 		printf("Name Objects Objsize Space "
 			"Slabs/Part/Cpu O/S O %%Fr %%Ef Flg\n");
@@ -573,11 +573,12 @@ void slabcache(struct slabinfo *s)
 		total_alloc = s->alloc_fastpath + s->alloc_slowpath;
 		total_free = s->free_fastpath + s->free_slowpath;
 
-		printf("%-21s %8ld %8ld %8ld %3ld %3ld \n",
+		printf("%-21s %8ld %10ld %10ld %3ld %3ld %5ld %1d\n",
 			s->name, s->objects,
 			total_alloc, total_free,
 			total_alloc ? (s->alloc_fastpath * 100 / total_alloc) : 0,
-			total_free ? (s->free_fastpath * 100 / total_free) : 0);
+			total_free ? (s->free_fastpath * 100 / total_free) : 0,
+			s->order_fallback, s->order);
 	}
 	else
 		printf("%-21s %8ld %7d %8s %14s %4d %1d %3ld %3ld %s\n",
@@ -1188,6 +1189,7 @@ void read_slab_dir(void)
 			slab->deactivate_to_head = get_obj("deactivate_to_head");
 			slab->deactivate_to_tail = get_obj("deactivate_to_tail");
 			slab->deactivate_remote_frees = get_obj("deactivate_remote_frees");
+			slab->order_fallback = get_obj("order_fallback");
 			chdir("..");
 			if (slab->name[0] == ':')
 				alias_targets++;