path: root/recipes-kernel/linux/linux-ti33x-psp-3.2/3.2.2/0116-x86-UV2-Work-around-BAU-bug.patch
Diffstat (limited to 'recipes-kernel/linux/linux-ti33x-psp-3.2/3.2.2/0116-x86-UV2-Work-around-BAU-bug.patch')
-rw-r--r--  recipes-kernel/linux/linux-ti33x-psp-3.2/3.2.2/0116-x86-UV2-Work-around-BAU-bug.patch  570
1 file changed, 570 insertions(+), 0 deletions(-)
diff --git a/recipes-kernel/linux/linux-ti33x-psp-3.2/3.2.2/0116-x86-UV2-Work-around-BAU-bug.patch b/recipes-kernel/linux/linux-ti33x-psp-3.2/3.2.2/0116-x86-UV2-Work-around-BAU-bug.patch
new file mode 100644
index 00000000..a729b180
--- /dev/null
+++ b/recipes-kernel/linux/linux-ti33x-psp-3.2/3.2.2/0116-x86-UV2-Work-around-BAU-bug.patch
@@ -0,0 +1,570 @@
From da205d30c924b3f41e37510f9a3727741ebfbc44 Mon Sep 17 00:00:00 2001
From: Cliff Wickman <cpw@sgi.com>
Date: Mon, 16 Jan 2012 15:19:47 -0600
Subject: [PATCH 116/130] x86/UV2: Work around BAU bug

commit c5d35d399e685acccc85a675e8765c26b2a9813a upstream.

This patch implements a workaround for a UV2 hardware bug.
The bug is a non-atomic update of a memory-mapped register. When
hardware message delivery and software message acknowledge occur
simultaneously the pending message acknowledge for the arriving
message may be lost. This causes the sender's message status to
stay busy.

Part of the workaround is to not acknowledge a completed message
until it is verified that no other message is actually using the
resource that is mistakenly recorded in the completed message.

Part of the workaround is to test for long elapsed time in such
a busy condition, then handle it by using a spare sending
descriptor. The stay-busy condition is eventually timed out by
hardware, and then the original sending descriptor can be
re-used. Most of that logic change is in keeping track of the
current descriptor and the state of the spares.

The occurrences of the workaround are added to the BAU
statistics.

Signed-off-by: Cliff Wickman <cpw@sgi.com>
Link: http://lkml.kernel.org/r/20120116211947.GC5767@sgi.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
---
 arch/x86/include/asm/uv/uv_bau.h | 13 ++-
 arch/x86/platform/uv/tlb_uv.c | 274 +++++++++++++++++++++++++++++++++-----
 2 files changed, 254 insertions(+), 33 deletions(-)

diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
index 4a46b27..1b82f7e 100644
--- a/arch/x86/include/asm/uv/uv_bau.h
+++ b/arch/x86/include/asm/uv/uv_bau.h
@@ -167,6 +167,7 @@
 #define FLUSH_RETRY_TIMEOUT 2
 #define FLUSH_GIVEUP 3
 #define FLUSH_COMPLETE 4
+#define FLUSH_RETRY_BUSYBUG 5

 /*
 * tuning the action when the numalink network is extremely delayed
@@ -463,7 +464,6 @@ struct bau_pq_entry {
 struct msg_desc {
 struct bau_pq_entry *msg;
 int msg_slot;
- int swack_slot;
 struct bau_pq_entry *queue_first;
 struct bau_pq_entry *queue_last;
 };
@@ -517,6 +517,9 @@ struct ptc_stats {
 unsigned long s_retry_messages; /* retry broadcasts */
 unsigned long s_bau_reenabled; /* for bau enable/disable */
 unsigned long s_bau_disabled; /* for bau enable/disable */
+ unsigned long s_uv2_wars; /* uv2 workaround, perm. busy */
+ unsigned long s_uv2_wars_hw; /* uv2 workaround, hiwater */
+ unsigned long s_uv2_war_waits; /* uv2 workaround, long waits */
 /* destination statistics */
 unsigned long d_alltlb; /* times all tlb's on this
 cpu were flushed */
@@ -593,6 +596,8 @@ struct bau_control {
 short cpus_in_socket;
 short cpus_in_uvhub;
 short partition_base_pnode;
+ short using_desc; /* an index, like uvhub_cpu */
+ unsigned int inuse_map;
 unsigned short message_number;
 unsigned short uvhub_quiesce;
 short socket_acknowledge_count[DEST_Q_SIZE];
@@ -610,6 +615,7 @@ struct bau_control {
 int cong_response_us;
 int cong_reps;
 int cong_period;
+ unsigned long clocks_per_100_usec;
 cycles_t period_time;
 long period_requests;
 struct hub_and_pnode *thp;
@@ -670,6 +676,11 @@ static inline void write_mmr_sw_ack(unsigned long mr)
 uv_write_local_mmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, mr);
 }

+static inline void write_gmmr_sw_ack(int pnode, unsigned long mr)
+{
+ write_gmmr(pnode, UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, mr);
+}
+
 static inline unsigned long read_mmr_sw_ack(void)
 {
 return read_lmmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index c425ff1..9010ca7 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -157,13 +157,14 @@ static int __init uvhub_to_first_apicid(int uvhub)
 * clear of the Timeout bit (as well) will free the resource. No reply will
 * be sent (the hardware will only do one reply per message).
 */
-static void reply_to_message(struct msg_desc *mdp, struct bau_control *bcp)
+static void reply_to_message(struct msg_desc *mdp, struct bau_control *bcp,
+ int do_acknowledge)
 {
 unsigned long dw;
 struct bau_pq_entry *msg;

 msg = mdp->msg;
- if (!msg->canceled) {
+ if (!msg->canceled && do_acknowledge) {
 dw = (msg->swack_vec << UV_SW_ACK_NPENDING) | msg->swack_vec;
 write_mmr_sw_ack(dw);
 }
@@ -212,8 +213,8 @@ static void bau_process_retry_msg(struct msg_desc *mdp,
 if (mmr & (msg_res << UV_SW_ACK_NPENDING)) {
 unsigned long mr;
 /*
- * is the resource timed out?
- * make everyone ignore the cancelled message.
+ * Is the resource timed out?
+ * Make everyone ignore the cancelled message.
 */
 msg2->canceled = 1;
 stat->d_canceled++;
@@ -231,8 +232,8 @@ static void bau_process_retry_msg(struct msg_desc *mdp,
 * Do all the things a cpu should do for a TLB shootdown message.
 * Other cpu's may come here at the same time for this message.
 */
-static void bau_process_message(struct msg_desc *mdp,
- struct bau_control *bcp)
+static void bau_process_message(struct msg_desc *mdp, struct bau_control *bcp,
+ int do_acknowledge)
 {
 short socket_ack_count = 0;
 short *sp;
@@ -284,8 +285,9 @@ static void bau_process_message(struct msg_desc *mdp,
 if (msg_ack_count == bcp->cpus_in_uvhub) {
 /*
 * All cpus in uvhub saw it; reply
+ * (unless we are in the UV2 workaround)
 */
- reply_to_message(mdp, bcp);
+ reply_to_message(mdp, bcp, do_acknowledge);
 }
 }

@@ -491,27 +493,138 @@ static int uv1_wait_completion(struct bau_desc *bau_desc,
 /*
 * UV2 has an extra bit of status in the ACTIVATION_STATUS_2 register.
 */
-static unsigned long uv2_read_status(unsigned long offset, int rshft, int cpu)
+static unsigned long uv2_read_status(unsigned long offset, int rshft, int desc)
 {
 unsigned long descriptor_status;
 unsigned long descriptor_status2;

 descriptor_status = ((read_lmmr(offset) >> rshft) & UV_ACT_STATUS_MASK);
- descriptor_status2 = (read_mmr_uv2_status() >> cpu) & 0x1UL;
+ descriptor_status2 = (read_mmr_uv2_status() >> desc) & 0x1UL;
 descriptor_status = (descriptor_status << 1) | descriptor_status2;
 return descriptor_status;
 }

+/*
+ * Return whether the status of the descriptor that is normally used for this
+ * cpu (the one indexed by its hub-relative cpu number) is busy.
+ * The status of the original 32 descriptors is always reflected in the 64
+ * bits of UVH_LB_BAU_SB_ACTIVATION_STATUS_0.
+ * The bit provided by the activation_status_2 register is irrelevant to
+ * the status if it is only being tested for busy or not busy.
+ */
+int normal_busy(struct bau_control *bcp)
+{
+ int cpu = bcp->uvhub_cpu;
+ int mmr_offset;
+ int right_shift;
+
+ mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
+ right_shift = cpu * UV_ACT_STATUS_SIZE;
+ return (((((read_lmmr(mmr_offset) >> right_shift) &
+ UV_ACT_STATUS_MASK)) << 1) == UV2H_DESC_BUSY);
+}
+
+/*
+ * Entered when a bau descriptor has gone into a permanent busy wait because
+ * of a hardware bug.
+ * Workaround the bug.
+ */
+int handle_uv2_busy(struct bau_control *bcp)
+{
+ int busy_one = bcp->using_desc;
+ int normal = bcp->uvhub_cpu;
+ int selected = -1;
+ int i;
+ unsigned long descriptor_status;
+ unsigned long status;
+ int mmr_offset;
+ struct bau_desc *bau_desc_old;
+ struct bau_desc *bau_desc_new;
+ struct bau_control *hmaster = bcp->uvhub_master;
+ struct ptc_stats *stat = bcp->statp;
+ cycles_t ttm;
+
+ stat->s_uv2_wars++;
+ spin_lock(&hmaster->uvhub_lock);
+ /* try for the original first */
+ if (busy_one != normal) {
+ if (!normal_busy(bcp))
+ selected = normal;
+ }
+ if (selected < 0) {
+ /* can't use the normal, select an alternate */
+ mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
+ descriptor_status = read_lmmr(mmr_offset);
+
+ /* scan available descriptors 32-63 */
+ for (i = 0; i < UV_CPUS_PER_AS; i++) {
+ if ((hmaster->inuse_map & (1 << i)) == 0) {
+ status = ((descriptor_status >>
+ (i * UV_ACT_STATUS_SIZE)) &
+ UV_ACT_STATUS_MASK) << 1;
+ if (status != UV2H_DESC_BUSY) {
+ selected = i + UV_CPUS_PER_AS;
+ break;
+ }
+ }
+ }
+ }
+
+ if (busy_one != normal)
+ /* mark the busy alternate as not in-use */
+ hmaster->inuse_map &= ~(1 << (busy_one - UV_CPUS_PER_AS));
+
+ if (selected >= 0) {
+ /* switch to the selected descriptor */
+ if (selected != normal) {
+ /* set the selected alternate as in-use */
+ hmaster->inuse_map |=
+ (1 << (selected - UV_CPUS_PER_AS));
+ if (selected > stat->s_uv2_wars_hw)
+ stat->s_uv2_wars_hw = selected;
+ }
+ bau_desc_old = bcp->descriptor_base;
+ bau_desc_old += (ITEMS_PER_DESC * busy_one);
+ bcp->using_desc = selected;
+ bau_desc_new = bcp->descriptor_base;
+ bau_desc_new += (ITEMS_PER_DESC * selected);
+ *bau_desc_new = *bau_desc_old;
+ } else {
+ /*
+ * All are busy. Wait for the normal one for this cpu to
+ * free up.
+ */
+ stat->s_uv2_war_waits++;
+ spin_unlock(&hmaster->uvhub_lock);
+ ttm = get_cycles();
+ do {
+ cpu_relax();
+ } while (normal_busy(bcp));
+ spin_lock(&hmaster->uvhub_lock);
+ /* switch to the original descriptor */
+ bcp->using_desc = normal;
+ bau_desc_old = bcp->descriptor_base;
+ bau_desc_old += (ITEMS_PER_DESC * bcp->using_desc);
+ bcp->using_desc = (ITEMS_PER_DESC * normal);
+ bau_desc_new = bcp->descriptor_base;
+ bau_desc_new += (ITEMS_PER_DESC * normal);
+ *bau_desc_new = *bau_desc_old; /* copy the entire descriptor */
+ }
+ spin_unlock(&hmaster->uvhub_lock);
+ return FLUSH_RETRY_BUSYBUG;
+}
+
 static int uv2_wait_completion(struct bau_desc *bau_desc,
 unsigned long mmr_offset, int right_shift,
 struct bau_control *bcp, long try)
 {
 unsigned long descriptor_stat;
 cycles_t ttm;
- int cpu = bcp->uvhub_cpu;
+ int desc = bcp->using_desc;
+ long busy_reps = 0;
 struct ptc_stats *stat = bcp->statp;

- descriptor_stat = uv2_read_status(mmr_offset, right_shift, cpu);
+ descriptor_stat = uv2_read_status(mmr_offset, right_shift, desc);

 /* spin on the status MMR, waiting for it to go idle */
 while (descriptor_stat != UV2H_DESC_IDLE) {
@@ -542,12 +655,23 @@ static int uv2_wait_completion(struct bau_desc *bau_desc,
 bcp->conseccompletes = 0;
 return FLUSH_RETRY_TIMEOUT;
 } else {
+ busy_reps++;
+ if (busy_reps > 1000000) {
+ /* not to hammer on the clock */
+ busy_reps = 0;
+ ttm = get_cycles();
+ if ((ttm - bcp->send_message) >
+ (bcp->clocks_per_100_usec)) {
+ return handle_uv2_busy(bcp);
+ }
+ }
 /*
 * descriptor_stat is still BUSY
 */
 cpu_relax();
 }
- descriptor_stat = uv2_read_status(mmr_offset, right_shift, cpu);
+ descriptor_stat = uv2_read_status(mmr_offset, right_shift,
+ desc);
 }
 bcp->conseccompletes++;
 return FLUSH_COMPLETE;
@@ -563,14 +687,14 @@ static int wait_completion(struct bau_desc *bau_desc,
 {
 int right_shift;
 unsigned long mmr_offset;
- int cpu = bcp->uvhub_cpu;
+ int desc = bcp->using_desc;

- if (cpu < UV_CPUS_PER_AS) {
+ if (desc < UV_CPUS_PER_AS) {
 mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
- right_shift = cpu * UV_ACT_STATUS_SIZE;
+ right_shift = desc * UV_ACT_STATUS_SIZE;
 } else {
 mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
- right_shift = ((cpu - UV_CPUS_PER_AS) * UV_ACT_STATUS_SIZE);
+ right_shift = ((desc - UV_CPUS_PER_AS) * UV_ACT_STATUS_SIZE);
 }

 if (bcp->uvhub_version == 1)
@@ -752,8 +876,7 @@ static void handle_cmplt(int completion_status, struct bau_desc *bau_desc,
 * Returns 1 if it gives up entirely and the original cpu mask is to be
 * returned to the kernel.
 */
-int uv_flush_send_and_wait(struct bau_desc *bau_desc,
- struct cpumask *flush_mask, struct bau_control *bcp)
+int uv_flush_send_and_wait(struct cpumask *flush_mask, struct bau_control *bcp)
 {
 int seq_number = 0;
 int completion_stat = 0;
@@ -766,20 +889,24 @@ int uv_flush_send_and_wait(struct bau_desc *bau_desc,
 struct bau_control *hmaster = bcp->uvhub_master;
 struct uv1_bau_msg_header *uv1_hdr = NULL;
 struct uv2_bau_msg_header *uv2_hdr = NULL;
+ struct bau_desc *bau_desc;

- if (bcp->uvhub_version == 1) {
- uv1 = 1;
+ if (bcp->uvhub_version == 1)
 uv1_throttle(hmaster, stat);
- uv1_hdr = &bau_desc->header.uv1_hdr;
- } else
- uv2_hdr = &bau_desc->header.uv2_hdr;

 while (hmaster->uvhub_quiesce)
 cpu_relax();

 time1 = get_cycles();
 do {
- if (try == 0) {
+ bau_desc = bcp->descriptor_base;
+ bau_desc += (ITEMS_PER_DESC * bcp->using_desc);
+ if (bcp->uvhub_version == 1) {
+ uv1 = 1;
+ uv1_hdr = &bau_desc->header.uv1_hdr;
+ } else
+ uv2_hdr = &bau_desc->header.uv2_hdr;
+ if ((try == 0) || (completion_stat == FLUSH_RETRY_BUSYBUG)) {
 if (uv1)
 uv1_hdr->msg_type = MSG_REGULAR;
 else
@@ -797,13 +924,14 @@ int uv_flush_send_and_wait(struct bau_desc *bau_desc,
 uv1_hdr->sequence = seq_number;
 else
 uv2_hdr->sequence = seq_number;
- index = (1UL << AS_PUSH_SHIFT) | bcp->uvhub_cpu;
+ index = (1UL << AS_PUSH_SHIFT) | bcp->using_desc;
 bcp->send_message = get_cycles();

 write_mmr_activation(index);

 try++;
 completion_stat = wait_completion(bau_desc, bcp, try);
+ /* UV2: wait_completion() may change the bcp->using_desc */

 handle_cmplt(completion_stat, bau_desc, bcp, hmaster, stat);

@@ -814,6 +942,7 @@ int uv_flush_send_and_wait(struct bau_desc *bau_desc,
 }
 cpu_relax();
 } while ((completion_stat == FLUSH_RETRY_PLUGGED) ||
+ (completion_stat == FLUSH_RETRY_BUSYBUG) ||
 (completion_stat == FLUSH_RETRY_TIMEOUT));

 time2 = get_cycles();
@@ -828,6 +957,7 @@ int uv_flush_send_and_wait(struct bau_desc *bau_desc,
 record_send_stats(time1, time2, bcp, stat, completion_stat, try);

 if (completion_stat == FLUSH_GIVEUP)
+ /* FLUSH_GIVEUP will fall back to using IPI's for tlb flush */
 return 1;
 return 0;
 }
@@ -983,7 +1113,7 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
 stat->s_ntargself++;

 bau_desc = bcp->descriptor_base;
- bau_desc += (ITEMS_PER_DESC * bcp->uvhub_cpu);
+ bau_desc += (ITEMS_PER_DESC * bcp->using_desc);
 bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
 if (set_distrib_bits(flush_mask, bcp, bau_desc, &locals, &remotes))
 return NULL;
@@ -996,13 +1126,86 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
 * uv_flush_send_and_wait returns 0 if all cpu's were messaged,
 * or 1 if it gave up and the original cpumask should be returned.
 */
- if (!uv_flush_send_and_wait(bau_desc, flush_mask, bcp))
+ if (!uv_flush_send_and_wait(flush_mask, bcp))
 return NULL;
 else
 return cpumask;
 }

 /*
+ * Search the message queue for any 'other' message with the same software
+ * acknowledge resource bit vector.
+ */
+struct bau_pq_entry *find_another_by_swack(struct bau_pq_entry *msg,
+ struct bau_control *bcp, unsigned char swack_vec)
+{
+ struct bau_pq_entry *msg_next = msg + 1;
+
+ if (msg_next > bcp->queue_last)
+ msg_next = bcp->queue_first;
+ while ((msg_next->swack_vec != 0) && (msg_next != msg)) {
+ if (msg_next->swack_vec == swack_vec)
+ return msg_next;
+ msg_next++;
+ if (msg_next > bcp->queue_last)
+ msg_next = bcp->queue_first;
+ }
+ return NULL;
+}
+
+/*
+ * UV2 needs to work around a bug in which an arriving message has not
+ * set a bit in the UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE register.
+ * Such a message must be ignored.
+ */
+void process_uv2_message(struct msg_desc *mdp, struct bau_control *bcp)
+{
+ unsigned long mmr_image;
+ unsigned char swack_vec;
+ struct bau_pq_entry *msg = mdp->msg;
+ struct bau_pq_entry *other_msg;
+
+ mmr_image = read_mmr_sw_ack();
+ swack_vec = msg->swack_vec;
+
+ if ((swack_vec & mmr_image) == 0) {
+ /*
+ * This message was assigned a swack resource, but no
+ * reserved acknowlegment is pending.
+ * The bug has prevented this message from setting the MMR.
+ * And no other message has used the same sw_ack resource.
+ * Do the requested shootdown but do not reply to the msg.
+ * (the 0 means make no acknowledge)
+ */
+ bau_process_message(mdp, bcp, 0);
+ return;
+ }
+
+ /*
+ * Some message has set the MMR 'pending' bit; it might have been
+ * another message. Look for that message.
+ */
+ other_msg = find_another_by_swack(msg, bcp, msg->swack_vec);
+ if (other_msg) {
+ /* There is another. Do not ack the current one. */
+ bau_process_message(mdp, bcp, 0);
+ /*
+ * Let the natural processing of that message acknowledge
+ * it. Don't get the processing of sw_ack's out of order.
+ */
+ return;
+ }
+
+ /*
+ * There is no other message using this sw_ack, so it is safe to
+ * acknowledge it.
+ */
+ bau_process_message(mdp, bcp, 1);
+
+ return;
+}
+
+/*
 * The BAU message interrupt comes here. (registered by set_intr_gate)
 * See entry_64.S
 *
@@ -1038,9 +1241,11 @@ void uv_bau_message_interrupt(struct pt_regs *regs)
 count++;

 msgdesc.msg_slot = msg - msgdesc.queue_first;
- msgdesc.swack_slot = ffs(msg->swack_vec) - 1;
 msgdesc.msg = msg;
- bau_process_message(&msgdesc, bcp);
+ if (bcp->uvhub_version == 2)
+ process_uv2_message(&msgdesc, bcp);
+ else
+ bau_process_message(&msgdesc, bcp, 1);

 msg++;
 if (msg > msgdesc.queue_last)
@@ -1158,7 +1363,7 @@ static int ptc_seq_show(struct seq_file *file, void *data)
 seq_printf(file,
 "all one mult none retry canc nocan reset rcan ");
 seq_printf(file,
- "disable enable\n");
+ "disable enable wars warshw warwaits\n");
 }
 if (cpu < num_possible_cpus() && cpu_online(cpu)) {
 stat = &per_cpu(ptcstats, cpu);
@@ -1189,8 +1394,10 @@ static int ptc_seq_show(struct seq_file *file, void *data)
 stat->d_nomsg, stat->d_retries, stat->d_canceled,
 stat->d_nocanceled, stat->d_resets,
 stat->d_rcanceled);
- seq_printf(file, "%ld %ld\n",
- stat->s_bau_disabled, stat->s_bau_reenabled);
+ seq_printf(file, "%ld %ld %ld %ld %ld\n",
+ stat->s_bau_disabled, stat->s_bau_reenabled,
+ stat->s_uv2_wars, stat->s_uv2_wars_hw,
+ stat->s_uv2_war_waits);
 }
 return 0;
 }
@@ -1564,6 +1771,7 @@ static void pq_init(int node, int pnode)
 write_mmr_payload_first(pnode, pn_first);
 write_mmr_payload_tail(pnode, first);
 write_mmr_payload_last(pnode, last);
+ write_gmmr_sw_ack(pnode, 0xffffUL);

 /* in effect, all msg_type's are set to MSG_NOOP */
 memset(pqp, 0, sizeof(struct bau_pq_entry) * DEST_Q_SIZE);
@@ -1651,6 +1859,7 @@ static void __init init_per_cpu_tunables(void)
 bcp->cong_response_us = congested_respns_us;
 bcp->cong_reps = congested_reps;
 bcp->cong_period = congested_period;
+ bcp->clocks_per_100_usec = usec_2_cycles(100);
 }
 }

@@ -1771,6 +1980,7 @@ static int scan_sock(struct socket_desc *sdp, struct uvhub_desc *bdp,
 }
 bcp->uvhub_master = *hmasterp;
 bcp->uvhub_cpu = uv_cpu_hub_info(cpu)->blade_processor_id;
+ bcp->using_desc = bcp->uvhub_cpu;
 if (bcp->uvhub_cpu >= MAX_CPUS_PER_UVHUB) {
 printk(KERN_EMERG "%d cpus per uvhub invalid\n",
 bcp->uvhub_cpu);
--
1.7.7.4
