// SPDX-License-Identifier: GPL-2.0
/*
* Exceptions for specific devices. Usually work-arounds for fatal design flaws.
*/
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/pci.h>
#include <linux/suspend.h>
#include <linux/vgaarb.h>
#include <asm/amd_nb.h>
#include <asm/hpet.h>
#include <asm/pci_x86.h>
static void pci_fixup_i450nx(struct pci_dev *d)
{
/*
* i450NX -- Find and scan all secondary buses on all PXB's.
*/
int pxb, reg;
u8 busno, suba, subb;
dev_warn(&d->dev, "Searching for i450NX host bridges\n");
reg = 0xd0;
	for (pxb = 0; pxb < 2; pxb++) {
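		/* Each PXB reports its bus numbers in three consecutive config bytes */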
pci_read_config_byte(d, reg++, &busno);
pci_read_config_byte(d, reg++, &suba);
pci_read_config_byte(d, reg++, &subb);
dev_dbg(&d->dev, "i450NX PXB %d: %02x/%02x/%02x\n", pxb, busno,
suba, subb);
if (busno)
pcibios_scan_root(busno); /* Bus A */
if (suba < subb)
pcibios_scan_root(suba+1); /* Bus B */
}
pcibios_last_bus = -1;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82451NX, pci_fixup_i450nx);
static void pci_fixup_i450gx(struct pci_dev *d)
{
/*
* i450GX and i450KX -- Find and scan all secondary buses.
* (called separately for each PCI bridge found)
*/
u8 busno;
pci_read_config_byte(d, 0x4a, &busno);
	dev_info(&d->dev, "i450KX/GX host bridge; secondary bus %02x\n", busno);
pcibios_scan_root(busno);
pcibios_last_bus = -1;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454GX, pci_fixup_i450gx);
static void pci_fixup_umc_ide(struct pci_dev *d)
{
	/*
	 * The UM8886BF IDE controller sets the region type bits incorrectly,
	 * making its BARs look like memory even though they are I/O.
	 */
int i;
dev_warn(&d->dev, "Fixing base address flags\n");
	for (i = 0; i < 4; i++)
d->resource[i].flags |= PCI_BASE_ADDRESS_SPACE_IO;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_UMC, PCI_DEVICE_ID_UMC_UM8886BF, pci_fixup_umc_ide);
static void pci_fixup_latency(struct pci_dev *d)
{
	/*
	 * The SiS 5597 and 5598 chipsets require the latency timer to be
	 * set to at most 32 to avoid lockups.
	 */
dev_dbg(&d->dev, "Setting max latency to 32\n");
pcibios_max_latency = 32;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5597, pci_fixup_latency);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5598, pci_fixup_latency);
static void pci_fixup_piix4_acpi(struct pci_dev *d)
{
/*
* PIIX4 ACPI device: hardwired IRQ9
*/
d->irq = 9;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3, pci_fixup_piix4_acpi);
/*
 * Addresses problems with the memory write queue timer in certain VIA
 * Northbridges. This bugfix is per VIA's specifications,
* except for the KL133/KM133: clearing bit 5 on those Northbridges seems
* to trigger a bug in its integrated ProSavage video card, which
* causes screen corruption. We only clear bits 6 and 7 for that chipset,
* until VIA can provide us with definitive information on why screen
* corruption occurs, and what exactly those bits do.
*
* VIA 8363,8622,8361 Northbridges:
* - bits 5, 6, 7 at offset 0x55 need to be turned off
* VIA 8367 (KT266x) Northbridges:
* - bits 5, 6, 7 at offset 0x95 need to be turned off
* VIA 8363 rev 0x81/0x84 (KL133/KM133) Northbridges:
* - bits 6, 7 at offset 0x55 need to be turned off
*/
#define VIA_8363_KL133_REVISION_ID 0x81
#define VIA_8363_KM133_REVISION_ID 0x84
static void pci_fixup_via_northbridge_bug(struct pci_dev *d)
{
u8 v;
int where = 0x55;
int mask = 0x1f; /* clear bits 5, 6, 7 by default */
if (d->device == PCI_DEVICE_ID_VIA_8367_0) {
		/*
		 * Fix PCI bus latency issues caused by a northbridge BIOS
		 * error: the KT266x BIOS appears to force NB latency to zero.
		 */
pci_write_config_byte(d, PCI_LATENCY_TIMER, 0);
where = 0x95; /* the memory write queue timer register is
different for the KT266x's: 0x95 not 0x55 */
} else if (d->device == PCI_DEVICE_ID_VIA_8363_0 &&
(d->revision == VIA_8363_KL133_REVISION_ID ||
d->revision == VIA_8363_KM133_REVISION_ID)) {
mask = 0x3f; /* clear only bits 6 and 7; clearing bit 5
causes screen corruption on the KL133/KM133 */
}
pci_read_config_byte(d, where, &v);
if (v & ~mask) {
		dev_warn(&d->dev, "Disabling VIA memory write queue (PCI ID %04x, rev %02x): [%02x] %02x & %02x -> %02x\n",
d->device, d->revision, where, v, mask, v & mask);
v &= mask;
pci_write_config_byte(d, where, v);
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8363_0, pci_fixup_via_northbridge_bug);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8622, pci_fixup_via_northbridge_bug);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8361, pci_fixup_via_northbridge_bug);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8367_0, pci_fixup_via_northbridge_bug);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8363_0, pci_fixup_via_northbridge_bug);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8622, pci_fixup_via_northbridge_bug);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8361, pci_fixup_via_northbridge_bug);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8367_0, pci_fixup_via_northbridge_bug);
/*
 * For some reason Intel decided that certain parts of their 815, 845 and
 * some other chipsets must look like PCI-to-PCI bridges while they are
 * obviously not. The 82801 family (AA, AB, BAM/CAM, BA/CA/DB and E) PCI
 * bridges are actually Hub-to-PCI bridges, in Intel terminology. These
 * devices forward all addresses from the system to the PCI bus no matter
 * what their window settings are, so they are "transparent" (i.e.,
 * subtractive decoding) from the programmer's point of view.
*/
static void pci_fixup_transparent_bridge(struct pci_dev *dev)
{
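	/* All 82801 (ICH) hub-to-PCI bridges have device IDs of the form 0x24xx */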
if ((dev->device & 0xff00) == 0x2400)
dev->transparent = 1;
}
DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
PCI_CLASS_BRIDGE_PCI, 8, pci_fixup_transparent_bridge);
/*
* Fixup for C1 Halt Disconnect problem on nForce2 systems.
*
* From information provided by "Allen Martin" <AMartin@nvidia.com>:
*
* A hang is caused when the CPU generates a very fast CONNECT/HALT cycle
 * sequence. The workaround is to set the SYSTEM_IDLE_TIMEOUT to 80 ns.
* This allows the state-machine and timer to return to a proper state within
* 80 ns of the CONNECT and probe appearing together. Since the CPU will not
* issue another HALT within 80 ns of the initial HALT, the failure condition
* is avoided.
*/
static void pci_fixup_nforce2(struct pci_dev *dev)
{
u32 val;
/*
* Chip Old value New value
* C17 0x1F0FFF01 0x1F01FF01
* C18D 0x9F0FFF01 0x9F01FF01
*
* Northbridge chip version may be determined by
* reading the PCI revision ID (0xC1 or greater is C18D).
*/
pci_read_config_dword(dev, 0x6c, &val);
/*
* Apply fixup if needed, but don't touch disconnect state
*/
if ((val & 0x00FF0000) != 0x00010000) {
dev_warn(&dev->dev, "nForce2 C1 Halt Disconnect fixup\n");
pci_write_config_dword(dev, 0x6c, (val & 0xFF00FFFF) | 0x00010000);
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2, pci_fixup_nforce2);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2, pci_fixup_nforce2);
/* Max PCI Express root ports */
#define MAX_PCIEROOT 6
static int quirk_aspm_offset[MAX_PCIEROOT << 3];
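/*
 * One saved Link Control offset per (root port, child devfn) slot: six root
 * ports (MCH_PA..MCH_PC1) times eight slots, indexed by the root port's
 * device ID and the low three bits of the child's devfn.
 */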
#define GET_INDEX(a, b) ((((a) - PCI_DEVICE_ID_INTEL_MCH_PA) << 3) + ((b) & 7))
static int quirk_pcie_aspm_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value)
{
return raw_pci_read(pci_domain_nr(bus), bus->number,
devfn, where, size, value);
}
/*
 * Replace the original PCI bus ops for write with a new one that filters
 * the request to ensure ASPM cannot be enabled.
*/
static int quirk_pcie_aspm_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value)
{
u8 offset;
offset = quirk_aspm_offset[GET_INDEX(bus->self->device, devfn)];
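	/* Strip the ASPM enable bits from writes to the saved Link Control offset */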
if ((offset) && (where == offset))
value = value & ~PCI_EXP_LNKCTL_ASPMC;
return raw_pci_write(pci_domain_nr(bus), bus->number,
devfn, where, size, value);
}
static struct pci_ops quirk_pcie_aspm_ops = {
.read = quirk_pcie_aspm_read,
.write = quirk_pcie_aspm_write,
};
/*
 * Prevent PCI Express ASPM (Active State Power Management) from being enabled.
 *
 * Save the register offset where the ASPM control bits are located for each
 * PCI Express device in the root port's device list, in an array for fast
 * indexing. Replace the bus ops with the modified one.
*/
static void pcie_rootport_aspm_quirk(struct pci_dev *pdev)
{
int i;
struct pci_bus *pbus;
struct pci_dev *dev;
	pbus = pdev->subordinate;
	if (!pbus)
		return;
/*
* Check if the DID of pdev matches one of the six root ports. This
	 * check is needed in case this function is called directly by the
	 * hot-plug driver.
*/
if ((pdev->device < PCI_DEVICE_ID_INTEL_MCH_PA) ||
(pdev->device > PCI_DEVICE_ID_INTEL_MCH_PC1))
return;
if (list_empty(&pbus->devices)) {
/*
* If no device is attached to the root port at power-up or
* after hot-remove, the pbus->devices is empty and this code
* will set the offsets to zero and the bus ops to parent's bus
* ops, which is unmodified.
*/
for (i = GET_INDEX(pdev->device, 0); i <= GET_INDEX(pdev->device, 7); ++i)
quirk_aspm_offset[i] = 0;
pci_bus_set_ops(pbus, pbus->parent->ops);
} else {
/*
* If devices are attached to the root port at power-up or
* after hot-add, the code loops through the device list of
* each root port to save the register offsets and replace the
* bus ops.
*/
list_for_each_entry(dev, &pbus->devices, bus_list)
/* There are 0 to 8 devices attached to this bus */
quirk_aspm_offset[GET_INDEX(pdev->device, dev->devfn)] =
dev->pcie_cap + PCI_EXP_LNKCTL;
pci_bus_set_ops(pbus, &quirk_pcie_aspm_ops);
dev_info(&pbus->dev, "writes to ASPM control bits will be ignored\n");
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PA, pcie_rootport_aspm_quirk);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PA1, pcie_rootport_aspm_quirk);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PB, pcie_rootport_aspm_quirk);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PB1, pcie_rootport_aspm_quirk);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PC, pcie_rootport_aspm_quirk);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PC1, pcie_rootport_aspm_quirk);
/*
 * Fixup to mark the boot video device selected by the BIOS before it changes
*
* From information provided by "Jon Smirl" <jonsmirl@gmail.com>
*
* The standard boot ROM sequence for an x86 machine uses the BIOS
* to select an initial video card for boot display. This boot video
* card will have its BIOS copied to 0xC0000 in system RAM.
* IORESOURCE_ROM_SHADOW is used to associate the boot video
* card with this copy. On laptops this copy has to be used since
* the main ROM may be compressed or combined with another image.
 * See pci_map_rom() for use of this flag. Before marking the device
 * with IORESOURCE_ROM_SHADOW, check whether a vga_default_device has already
 * been set by either arch code or the VGA arbiter; if so, only apply the
 * fixup to that already-determined primary video card.
*/
static void pci_fixup_video(struct pci_dev *pdev)
{
struct pci_dev *bridge;
struct pci_bus *bus;
u16 config;
struct resource *res;
/* Is VGA routed to us? */
bus = pdev->bus;
while (bus) {
bridge = bus->self;
/*
* From information provided by
* "David Miller" <davem@davemloft.net>
* The bridge control register is valid for PCI header
* type BRIDGE, or CARDBUS. Host to PCI controllers use
* PCI header type NORMAL.
*/
if (bridge && (pci_is_bridge(bridge))) {
pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
&config);
if (!(config & PCI_BRIDGE_CTL_VGA))
return;
}
bus = bus->parent;
}
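	/* Only apply the shadow-ROM fixup to the primary display candidate */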
if (!vga_default_device() || pdev == vga_default_device()) {
pci_read_config_word(pdev, PCI_COMMAND, &config);
if (config & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
res = &pdev->resource[PCI_ROM_RESOURCE];
pci_disable_rom(pdev);
if (res->parent)
release_resource(res);
res->start = 0xC0000;
res->end = res->start + 0x20000 - 1;
res->flags = IORESOURCE_MEM | IORESOURCE_ROM_SHADOW |
IORESOURCE_PCI_FIXED;
dev_info(&pdev->dev, "Video device with shadowed ROM at %pR\n",
res);
}
}
}
DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_DISPLAY_VGA, 8, pci_fixup_video);
static const struct dmi_system_id msi_k8t_dmi_table[] = {
{
.ident = "MSI-K8T-Neo2Fir",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "MSI"),
DMI_MATCH(DMI_PRODUCT_NAME, "MS-6702E"),
},
},
{}
};
/*
 * The MSI "K8T Neo2-FIR" AMD Athlon 64 board disables the onboard sound
 * card if a PCI sound card is added.
 *
 * The BIOS only offers the options "DISABLED" and "AUTO"; this code writes
 * the register value needed to enable the sound card.
 *
 * The sound card is only enabled if the mainboard is identified via DMI
 * tables and the sound card is detected to be off.
*/
static void pci_fixup_msi_k8t_onboard_sound(struct pci_dev *dev)
{
unsigned char val;
if (!dmi_check_system(msi_k8t_dmi_table))
return; /* only applies to MSI K8T Neo2-FIR */
pci_read_config_byte(dev, 0x50, &val);
if (val & 0x40) {
pci_write_config_byte(dev, 0x50, val & (~0x40));
/* verify the change for status output */
pci_read_config_byte(dev, 0x50, &val);
if (val & 0x40)
dev_info(&dev->dev, "Detected MSI K8T Neo2-FIR; "
"can't enable onboard soundcard!\n");
else
dev_info(&dev->dev, "Detected MSI K8T Neo2-FIR; "
"enabled onboard soundcard\n");
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237,
pci_fixup_msi_k8t_onboard_sound);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237,
pci_fixup_msi_k8t_onboard_sound);
/*
* Some Toshiba laptops need extra code to enable their TI TSB43AB22/A.
*
* We pretend to bring them out of full D3 state, and restore the proper
* IRQ, PCI cache line size, and BARs, otherwise the device won't function
* properly. In some cases, the device will generate an interrupt on
* the wrong IRQ line, causing any devices sharing the line it's
* *supposed* to use to be disabled by the kernel's IRQ debug code.
*/
static u16 toshiba_line_size;
static const struct dmi_system_id toshiba_ohci1394_dmi_table[] = {
{
.ident = "Toshiba PS5 based laptop",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
DMI_MATCH(DMI_PRODUCT_VERSION, "PS5"),
},
},
{
.ident = "Toshiba PSM4 based laptop",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
DMI_MATCH(DMI_PRODUCT_VERSION, "PSM4"),
},
},
{
.ident = "Toshiba A40 based laptop",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
DMI_MATCH(DMI_PRODUCT_VERSION, "PSA40U"),
},
},
{ }
};
static void pci_pre_fixup_toshiba_ohci1394(struct pci_dev *dev)
{
if (!dmi_check_system(toshiba_ohci1394_dmi_table))
return; /* only applies to certain Toshibas (so far) */
dev->current_state = PCI_D3cold;
pci_read_config_word(dev, PCI_CACHE_LINE_SIZE, &toshiba_line_size);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TI, 0x8032,
pci_pre_fixup_toshiba_ohci1394);
static void pci_post_fixup_toshiba_ohci1394(struct pci_dev *dev)
{
if (!dmi_check_system(toshiba_ohci1394_dmi_table))
return; /* only applies to certain Toshibas (so far) */
/* Restore config space on Toshiba laptops */
pci_write_config_word(dev, PCI_CACHE_LINE_SIZE, toshiba_line_size);
pci_read_config_byte(dev, PCI_INTERRUPT_LINE, (u8 *)&dev->irq);
pci_write_config_dword(dev, PCI_BASE_ADDRESS_0,
pci_resource_start(dev, 0));
pci_write_config_dword(dev, PCI_BASE_ADDRESS_1,
pci_resource_start(dev, 1));
}
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_TI, 0x8032,
pci_post_fixup_toshiba_ohci1394);
/*
 * Prevent the BIOS from trapping accesses to the Cyrix CS5530A video device
* configuration space.
*/
static void pci_early_fixup_cyrix_5530(struct pci_dev *dev)
{
u8 r;
/* clear 'F4 Video Configuration Trap' bit */
pci_read_config_byte(dev, 0x42, &r);
r &= 0xfd;
pci_write_config_byte(dev, 0x42, r);
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5530_LEGACY,
pci_early_fixup_cyrix_5530);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5530_LEGACY,
pci_early_fixup_cyrix_5530);
/*
* Siemens Nixdorf AG FSC Multiprocessor Interrupt Controller:
* prevent update of the BAR0, which doesn't look like a normal BAR.
*/
static void pci_siemens_interrupt_controller(struct pci_dev *dev)
{
dev->resource[0].flags |= IORESOURCE_PCI_FIXED;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SIEMENS, 0x0015,
pci_siemens_interrupt_controller);
/*
 * SB600: Disable BAR1 on device 14.0 to keep the HPET resources from
 * confusing the PCI engine:
*/
static void sb600_disable_hpet_bar(struct pci_dev *dev)
{
u8 val;
/*
* The SB600 and SB700 both share the same device
* ID, but the PM register 0x55 does something different
* for the SB700, so make sure we are dealing with the
* SB600 before touching the bit:
*/
pci_read_config_byte(dev, 0x08, &val);
if (val < 0x2F) {
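		/* Read PM register 0x55 through the 0xCD6/0xCD7 index/data pair */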
outb(0x55, 0xCD6);
val = inb(0xCD7);
/* Set bit 7 in PM register 0x55 */
outb(0x55, 0xCD6);
outb(val | 0x80, 0xCD7);
}
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_ATI, 0x4385, sb600_disable_hpet_bar);
#ifdef CONFIG_HPET_TIMER
static void sb600_hpet_quirk(struct pci_dev *dev)
{
struct resource *r = &dev->resource[1];
if (r->flags & IORESOURCE_MEM && r->start == hpet_address) {
r->flags |= IORESOURCE_PCI_FIXED;
dev_info(&dev->dev, "reg 0x14 contains HPET; making it immovable\n");
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, 0x4385, sb600_hpet_quirk);
#endif
/*
 * The Twinhead H12Y needs us to block out a region, otherwise we map devices
* there and any access kills the box.
*
* See: https://bugzilla.kernel.org/show_bug.cgi?id=10231
*
* Match off the LPC and svid/sdid (older kernels lose the bridge subvendor)
*/
static void twinhead_reserve_killing_zone(struct pci_dev *dev)
{
if (dev->subsystem_vendor == 0x14FF && dev->subsystem_device == 0xA003) {
pr_info("Reserving memory on Twinhead H12Y\n");
request_mem_region(0xFFB00000, 0x100000, "twinhead");
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x27B9, twinhead_reserve_killing_zone);
/*
* Device [8086:2fc0]
* Erratum HSE43
* CONFIG_TDP_NOMINAL CSR Implemented at Incorrect Offset
* https://www.intel.com/content/www/us/en/processors/xeon/xeon-e5-v3-spec-update.html
*
* Devices [8086:6f60,6fa0,6fc0]
* Erratum BDF2
* PCI BARs in the Home Agent Will Return Non-Zero Values During Enumeration
* https://www.intel.com/content/www/us/en/processors/xeon/xeon-e5-v4-spec-update.html
*/
static void pci_invalid_bar(struct pci_dev *dev)
{
dev->non_compliant_bars = 1;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6f60, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fa0, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa1ec, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa1ed, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa26c, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa26d, pci_invalid_bar);
/*
* Device [1022:7808]
* 23. USB Wake on Connect/Disconnect with Low Speed Devices
* https://support.amd.com/TechDocs/46837.pdf
* Appendix A2
* https://support.amd.com/TechDocs/42413.pdf
*/
static void pci_fixup_amd_ehci_pme(struct pci_dev *dev)
{
dev_info(&dev->dev, "PME# does not work under D3, disabling it\n");
dev->pme_support &= ~((PCI_PM_CAP_PME_D3hot | PCI_PM_CAP_PME_D3cold)
>> PCI_PM_CAP_PME_SHIFT);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x7808, pci_fixup_amd_ehci_pme);
/*
* Device [1022:7914]
* When in D0, PME# doesn't get asserted when plugging USB 2.0 device.
*/
static void pci_fixup_amd_fch_xhci_pme(struct pci_dev *dev)
{
dev_info(&dev->dev, "PME# does not work under D0, disabling it\n");
dev->pme_support &= ~(PCI_PM_CAP_PME_D0 >> PCI_PM_CAP_PME_SHIFT);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x7914, pci_fixup_amd_fch_xhci_pme);
/*
* Apple MacBook Pro: Avoid [mem 0x7fa00000-0x7fbfffff]
*
* Using the [mem 0x7fa00000-0x7fbfffff] region, e.g., by assigning it to
* the 00:1c.0 Root Port, causes a conflict with [io 0x1804], which is used
* for soft poweroff and suspend-to-RAM.
*
* As far as we know, this is related to the address space, not to the Root
* Port itself. Attaching the quirk to the Root Port is a convenience, but
* it could probably also be a standalone DMI quirk.
*
* https://bugzilla.kernel.org/show_bug.cgi?id=103211
*/
static void quirk_apple_mbp_poweroff(struct pci_dev *pdev)
{
struct device *dev = &pdev->dev;
struct resource *res;
if ((!dmi_match(DMI_PRODUCT_NAME, "MacBookPro11,4") &&
!dmi_match(DMI_PRODUCT_NAME, "MacBookPro11,5")) ||
pdev->bus->number != 0 || pdev->devfn != PCI_DEVFN(0x1c, 0))
return;
res = request_mem_region(0x7fa00000, 0x200000,
"MacBook Pro poweroff workaround");
if (res)
dev_info(dev, "claimed %s %pR\n", res->name, res);
else
dev_info(dev, "can't work around MacBook Pro poweroff issue\n");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x8c10, quirk_apple_mbp_poweroff);
/*
* VMD-enabled root ports will change the source ID for all messages
* to the VMD device. Rather than doing device matching with the source
* ID, the AER driver should traverse the child device tree, reading
* AER registers to find the faulting device.
*/
static void quirk_no_aersid(struct pci_dev *pdev)
{
/* VMD Domain */
if (is_vmd(pdev->bus) && pci_is_root_bus(pdev->bus))
pdev->bus->bus_flags |= PCI_BUS_FLAGS_NO_AERSID;
}
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
PCI_CLASS_BRIDGE_PCI, 8, quirk_no_aersid);
static void quirk_intel_th_dnv(struct pci_dev *dev)
{
struct resource *r = &dev->resource[4];
/*
* Denverton reports 2k of RTIT_BAR (intel_th resource 4), which
* appears to be 4 MB in reality.
*/
if (r->end == r->start + 0x7ff) {
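		/* Resize to 4 MB and mark unset so the PCI core reassigns it */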
r->start = 0;
r->end = 0x3fffff;
r->flags |= IORESOURCE_UNSET;
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x19e1, quirk_intel_th_dnv);
#ifdef CONFIG_PHYS_ADDR_T_64BIT
#define AMD_141b_MMIO_BASE(x) (0x80 + (x) * 0x8)
#define AMD_141b_MMIO_BASE_RE_MASK BIT(0)
#define AMD_141b_MMIO_BASE_WE_MASK BIT(1)
#define AMD_141b_MMIO_BASE_MMIOBASE_MASK GENMASK(31,8)
#define AMD_141b_MMIO_LIMIT(x) (0x84 + (x) * 0x8)
#define AMD_141b_MMIO_LIMIT_MMIOLIMIT_MASK GENMASK(31,8)
#define AMD_141b_MMIO_HIGH(x) (0x180 + (x) * 0x4)
#define AMD_141b_MMIO_HIGH_MMIOBASE_MASK GENMASK(7,0)
#define AMD_141b_MMIO_HIGH_MMIOLIMIT_SHIFT 16
#define AMD_141b_MMIO_HIGH_MMIOLIMIT_MASK GENMASK(23,16)
/*
* The PCI Firmware Spec, rev 3.2, notes that ACPI should optionally allow
* configuring host bridge windows using the _PRS and _SRS methods.
*
* But this is rarely implemented, so we manually enable a large 64bit BAR for
 * PCIe devices on AMD Family 15h (Models 00h-1fh, 30h-3fh, 60h-7fh) Processors
* here.
*/
static void pci_amd_enable_64bit_bar(struct pci_dev *dev)
{
static const char *name = "PCI Bus 0000:00";
struct resource *res, *conflict;
u32 base, limit, high;
struct pci_dev *other;
	unsigned int i;
if (!(pci_probe & PCI_BIG_ROOT_WINDOW))
return;
/* Check that we are the only device of that type */
other = pci_get_device(dev->vendor, dev->device, NULL);
if (other != dev ||
(other = pci_get_device(dev->vendor, dev->device, other))) {
/* This is a multi-socket system, don't touch it for now */
pci_dev_put(other);
return;
}
for (i = 0; i < 8; i++) {
pci_read_config_dword(dev, AMD_141b_MMIO_BASE(i), &base);
pci_read_config_dword(dev, AMD_141b_MMIO_HIGH(i), &high);
/* Is this slot free? */
if (!(base & (AMD_141b_MMIO_BASE_RE_MASK |
AMD_141b_MMIO_BASE_WE_MASK)))
break;
base >>= 8;
base |= high << 24;
/* Abort if a slot already configures a 64bit BAR. */
if (base > 0x10000)
return;
}
if (i == 8)
return;
res = kzalloc(sizeof(*res), GFP_KERNEL);
if (!res)
return;
/*
* Allocate a 256GB window directly below the 0xfd00000000 hardware
* limit (see AMD Family 15h Models 30h-3Fh BKDG, sec 2.4.6).
*/
res->name = name;
res->flags = IORESOURCE_PREFETCH | IORESOURCE_MEM |
IORESOURCE_MEM_64 | IORESOURCE_WINDOW;
res->start = 0xbd00000000ull;
res->end = 0xfd00000000ull - 1;
conflict = request_resource_conflict(&iomem_resource, res);
if (conflict) {
kfree(res);
if (conflict->name != name)
return;
/* We are resuming from suspend; just reenable the window */
res = conflict;
} else {
dev_info(&dev->dev, "adding root bus resource %pR (tainting kernel)\n",
res);
add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
pci_bus_add_resource(dev->bus, res, 0);
}
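	/*
	 * Program the window: address bits [39:16] go into bits [31:8] of the
	 * low base/limit registers, bits [47:40] into the high register, and
	 * the RE/WE bits enable read/write response for the range.
	 */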
base = ((res->start >> 8) & AMD_141b_MMIO_BASE_MMIOBASE_MASK) |
AMD_141b_MMIO_BASE_RE_MASK | AMD_141b_MMIO_BASE_WE_MASK;
limit = ((res->end + 1) >> 8) & AMD_141b_MMIO_LIMIT_MMIOLIMIT_MASK;
high = ((res->start >> 40) & AMD_141b_MMIO_HIGH_MMIOBASE_MASK) |
((((res->end + 1) >> 40) << AMD_141b_MMIO_HIGH_MMIOLIMIT_SHIFT)
& AMD_141b_MMIO_HIGH_MMIOLIMIT_MASK);
pci_write_config_dword(dev, AMD_141b_MMIO_HIGH(i), high);
pci_write_config_dword(dev, AMD_141b_MMIO_LIMIT(i), limit);
pci_write_config_dword(dev, AMD_141b_MMIO_BASE(i), base);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1401, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x141b, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1571, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15b1, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1601, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1401, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x141b, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1571, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x15b1, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1601, pci_amd_enable_64bit_bar);
#define RS690_LOWER_TOP_OF_DRAM2 0x30
#define RS690_LOWER_TOP_OF_DRAM2_VALID 0x1
#define RS690_UPPER_TOP_OF_DRAM2 0x31
#define RS690_HTIU_NB_INDEX 0xA8
#define RS690_HTIU_NB_INDEX_WR_ENABLE 0x100
#define RS690_HTIU_NB_DATA 0xAC
/*
* Some BIOS implementations support RAM above 4GB, but do not configure the
* PCI host to respond to bus master accesses for these addresses. These
* implementations set the TOP_OF_DRAM_SLOT1 register correctly, so PCI DMA
* works as expected for addresses below 4GB.
*
* Reference: "AMD RS690 ASIC Family Register Reference Guide" (pg. 2-57)
* https://www.amd.com/system/files/TechDocs/43372_rs690_rrg_3.00o.pdf
*/
static void rs690_fix_64bit_dma(struct pci_dev *pdev)
{
u32 val = 0;
phys_addr_t top_of_dram = __pa(high_memory - 1) + 1;
if (top_of_dram <= (1ULL << 32))
return;
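	/*
	 * HTIU registers are accessed through an index/data pair at config
	 * offsets 0xA8/0xAC; setting bit 8 of the index enables writes.
	 */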
pci_write_config_dword(pdev, RS690_HTIU_NB_INDEX,
RS690_LOWER_TOP_OF_DRAM2);
pci_read_config_dword(pdev, RS690_HTIU_NB_DATA, &val);
if (val)
return;
pci_info(pdev, "Adjusting top of DRAM to %pa for 64-bit DMA support\n", &top_of_dram);
pci_write_config_dword(pdev, RS690_HTIU_NB_INDEX,
RS690_UPPER_TOP_OF_DRAM2 | RS690_HTIU_NB_INDEX_WR_ENABLE);
pci_write_config_dword(pdev, RS690_HTIU_NB_DATA, top_of_dram >> 32);
pci_write_config_dword(pdev, RS690_HTIU_NB_INDEX,
RS690_LOWER_TOP_OF_DRAM2 | RS690_HTIU_NB_INDEX_WR_ENABLE);
pci_write_config_dword(pdev, RS690_HTIU_NB_DATA,
top_of_dram | RS690_LOWER_TOP_OF_DRAM2_VALID);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7910, rs690_fix_64bit_dma);
#endif
#ifdef CONFIG_AMD_NB
#define AMD_15B8_RCC_DEV2_EPF0_STRAP2 0x10136008
#define AMD_15B8_RCC_DEV2_EPF0_STRAP2_NO_SOFT_RESET_DEV2_F0_MASK 0x00000080L
static void quirk_clear_strap_no_soft_reset_dev2_f0(struct pci_dev *dev)
{
u32 data;
if (!amd_smn_read(0, AMD_15B8_RCC_DEV2_EPF0_STRAP2, &data)) {
data &= ~AMD_15B8_RCC_DEV2_EPF0_STRAP2_NO_SOFT_RESET_DEV2_F0_MASK;
if (amd_smn_write(0, AMD_15B8_RCC_DEV2_EPF0_STRAP2, data))
pci_err(dev, "Failed to write data 0x%x\n", data);
} else {
pci_err(dev, "Failed to read data\n");
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15b8, quirk_clear_strap_no_soft_reset_dev2_f0);
#endif
/*
* When returning from D3cold to D0, firmware on some Google Coral and Reef
* family Chromebooks with Intel Apollo Lake SoC clobbers the headers of
* both the L1 PM Substates capability and the previous capability for the
* "Celeron N3350/Pentium N4200/Atom E3900 Series PCI Express Port B #1".
*
* Save those values at enumeration-time and restore them at resume.
*/
static u16 prev_cap, l1ss_cap;
static u32 prev_header, l1ss_header;
static void chromeos_save_apl_pci_l1ss_capability(struct pci_dev *dev)
{
int pos = PCI_CFG_SPACE_SIZE, prev = 0;
u32 header, pheader = 0;
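	/*
	 * Walk the extended capability list starting at offset 0x100,
	 * remembering the previous capability so its "next" pointer can be
	 * restored along with the L1SS header on resume.
	 */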
while (pos) {
pci_read_config_dword(dev, pos, &header);
if (PCI_EXT_CAP_ID(header) == PCI_EXT_CAP_ID_L1SS) {
prev_cap = prev;
prev_header = pheader;
l1ss_cap = pos;
l1ss_header = header;
return;
}
prev = pos;
pheader = header;
pos = PCI_EXT_CAP_NEXT(header);
}
}
static void chromeos_fixup_apl_pci_l1ss_capability(struct pci_dev *dev)
{
u32 header;
if (!prev_cap || !prev_header || !l1ss_cap || !l1ss_header)
return;
/* Fixup the header of L1SS Capability if missing */
pci_read_config_dword(dev, l1ss_cap, &header);
if (header != l1ss_header) {
pci_write_config_dword(dev, l1ss_cap, l1ss_header);
pci_info(dev, "restore L1SS Capability header (was %#010x now %#010x)\n",
header, l1ss_header);
}
/* Fixup the link to L1SS Capability if missing */
pci_read_config_dword(dev, prev_cap, &header);
if (header != prev_header) {
pci_write_config_dword(dev, prev_cap, prev_header);
pci_info(dev, "restore previous Capability header (was %#010x now %#010x)\n",
header, prev_header);
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x5ad6, chromeos_save_apl_pci_l1ss_capability);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x5ad6, chromeos_fixup_apl_pci_l1ss_capability);
/*
* Disable D3cold on Asus B1400 PCI-NVMe bridge
*
* On this platform with VMD off, the NVMe device cannot successfully power
* back on from D3cold. This appears to be an untested transition by the
* vendor: Windows leaves the NVMe and parent bridge in D0 during suspend.
*
 * We disable D3cold on the parent bridge for simplicity, and because the
 * parent bridge and the NVMe device share the same power resource.
*
* This is only needed on BIOS versions before 308; the newer versions flip
* StorageD3Enable from 1 to 0.
*/
static const struct dmi_system_id asus_nvme_broken_d3cold_table[] = {
{
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_BIOS_VERSION, "B1400CEAE.304"),
},
},
{
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_BIOS_VERSION, "B1400CEAE.305"),
},
},
{
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_BIOS_VERSION, "B1400CEAE.306"),
},
},
{
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_BIOS_VERSION, "B1400CEAE.307"),
},
},
{}
};
static void asus_disable_nvme_d3cold(struct pci_dev *pdev)
{
if (dmi_check_system(asus_nvme_broken_d3cold_table) > 0)
pci_d3cold_disable(pdev);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x9a09, asus_disable_nvme_d3cold);
#ifdef CONFIG_SUSPEND
/*
* Root Ports on some AMD SoCs advertise PME_Support for D3hot and D3cold, but
* if the SoC is put into a hardware sleep state by the amd-pmc driver, the
* Root Ports don't generate wakeup interrupts for USB devices.
*
* When suspending, remove D3hot and D3cold from the PME_Support advertised
* by the Root Port so we don't use those states if we're expecting wakeup
* interrupts. Restore the advertised PME_Support when resuming.
*/
static void amd_rp_pme_suspend(struct pci_dev *dev)
{
struct pci_dev *rp;
/*
* PM_SUSPEND_ON means we're doing runtime suspend, which means
* amd-pmc will not be involved so PMEs during D3 work as advertised.
*
* The PMEs *do* work if amd-pmc doesn't put the SoC in the hardware
* sleep state, but we assume amd-pmc is always present.
*/
if (pm_suspend_target_state == PM_SUSPEND_ON)
return;
rp = pcie_find_root_port(dev);
if (!rp || !rp->pm_cap)
return;
rp->pme_support &= ~((PCI_PM_CAP_PME_D3hot|PCI_PM_CAP_PME_D3cold) >>
PCI_PM_CAP_PME_SHIFT);
dev_info_once(&rp->dev, "quirk: disabling D3cold for suspend\n");
}
static void amd_rp_pme_resume(struct pci_dev *dev)
{
struct pci_dev *rp;
u16 pmc;
rp = pcie_find_root_port(dev);
if (!rp || !rp->pm_cap)
return;
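	/* Re-read PMC and restore the PME_Support mask advertised by hardware */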
pci_read_config_word(rp, rp->pm_cap + PCI_PM_PMC, &pmc);
rp->pme_support = FIELD_GET(PCI_PM_CAP_PME_MASK, pmc);
}
/* Rembrandt (yellow_carp) */
DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_AMD, 0x162e, amd_rp_pme_suspend);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x162e, amd_rp_pme_resume);
DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_AMD, 0x162f, amd_rp_pme_suspend);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x162f, amd_rp_pme_resume);
/* Phoenix (pink_sardine) */
DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_AMD, 0x1668, amd_rp_pme_suspend);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1668, amd_rp_pme_resume);
DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_AMD, 0x1669, amd_rp_pme_suspend);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1669, amd_rp_pme_resume);
#endif /* CONFIG_SUSPEND */