Bullet Collision Detection & Physics Library
btDantzigLCP.cpp
1 /*************************************************************************
2 * *
3 * Open Dynamics Engine, Copyright (C) 2001,2002 Russell L. Smith. *
4 * All rights reserved. Email: russ@q12.org Web: www.q12.org *
5 * *
6 * This library is free software; you can redistribute it and/or *
7 * modify it under the terms of EITHER: *
8 * (1) The GNU Lesser General Public License as published by the Free *
9 * Software Foundation; either version 2.1 of the License, or (at *
10 * your option) any later version. The text of the GNU Lesser *
11 * General Public License is included with this library in the *
12 * file LICENSE.TXT. *
13 * (2) The BSD-style license that is included with this library in *
14 * the file LICENSE-BSD.TXT. *
15 * *
16 * This library is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the files *
19 * LICENSE.TXT and LICENSE-BSD.TXT for more details. *
20 * *
21 *************************************************************************/
22 
23 /*
24 
25 
26 THE ALGORITHM
27 -------------
28 
29 solve A*x = b+w, with x and w subject to certain LCP conditions.
30 each x(i),w(i) must lie on one of the three line segments in the following
31 diagram. each line segment corresponds to one index set :
32 
33      w(i)
34      /|\      |           :
35       |       |           :
36       |       |i in N     :
37  w>0  |       |state[i]=0 :
38       |       |           :
39       |       |           :  i in C
40  w=0  +       +-----------------------+
41       |                   :           |
42       |                   :           |
43  w<0  |                   :           |i in N
44       |                   :           |state[i]=1
45       |                   :           |
46       |                   :           |
47       +-------|-----------|-----------|----------> x(i)
48              lo           0           hi
49 
50 the Dantzig algorithm proceeds as follows:
51  for i=1:n
52  * if (x(i),w(i)) is not on the line, push x(i) and w(i) positive or
53  negative towards the line. as this is done, the other (x(j),w(j))
54  for j<i are constrained to be on the line. if any (x,w) reaches the
55  end of a line segment then it is switched between index sets.
56  * i is added to the appropriate index set depending on what line segment
57  it hits.
58 
59 we restrict lo(i) <= 0 and hi(i) >= 0. this makes the algorithm a bit
60 simpler, because the starting point for x(i),w(i) is always on the dotted
61 line x=0 and x will only ever increase in one direction, so it can only hit
62 two out of the three line segments.
63 
64 
65 NOTES
66 -----
67 
68 this is an implementation of "lcp_dantzig2_ldlt.m" and "lcp_dantzig_lohi.m".
69 the implementation is split into an LCP problem object (btLCP) and an LCP
70 driver function. most optimization occurs in the btLCP object.
71 
72 a naive implementation of the algorithm requires either a lot of data motion
73 or a lot of permutation-array lookup, because we are constantly re-ordering
74 rows and columns. to avoid this and make a more optimized algorithm, a
75 non-trivial data structure is used to represent the matrix A (this is
76 implemented in the fast version of the btLCP object).
77 
78 during execution of this algorithm, some indexes in A are clamped (set C),
79 some are non-clamped (set N), and some are "don't care" (where x=0).
80 A,x,b,w (and other problem vectors) are permuted such that the clamped
81 indexes are first, the unclamped indexes are next, and the don't-care
82 indexes are last. this permutation is recorded in the array `p'.
83 initially p = 0..n-1, and as the rows and columns of A,x,b,w are swapped,
84 the corresponding elements of p are swapped.
85 
86 because the C and N elements are grouped together in the rows of A, we can do
87 lots of work with a fast dot product function. if A,x,etc were not permuted
88 and we only had a permutation array, then those dot products would be much
89 slower as we would have a permutation array lookup in some inner loops.
90 
91 A is accessed through an array of row pointers, so that element (i,j) of the
92 permuted matrix is A[i][j]. this makes row swapping fast. for column swapping
93 we still have to actually move the data.
94 
95 during execution of this algorithm we maintain an L*D*L' factorization of
96 the clamped submatrix of A (call it `AC') which is the top left nC*nC
97 submatrix of A. there are two ways we could arrange the rows/columns in AC.
98 
99 (1) AC is always permuted such that L*D*L' = AC. this causes a problem
100 when a row/column is removed from C, because then all the rows/columns of A
101 between the deleted index and the end of C need to be rotated downward.
102 this results in a lot of data motion and slows things down.
103 (2) L*D*L' is actually a factorization of a *permutation* of AC (which is
104 itself a permutation of the underlying A). this is what we do - the
105 permutation is recorded in the vector C. call this permutation A[C,C].
106 when a row/column is removed from C, all we have to do is swap two
107 rows/columns and manipulate C.
108 
109 */
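
For readers new to the box LCP described above, the per-index conditions can be written down directly. The following is a minimal sketch (not part of btDantzigLCP.cpp) that classifies a single (x(i), w(i)) pair against the three line segments in the diagram, assuming lo <= 0 <= hi as required by this solver; a complete solution must additionally satisfy A*x = b+w row by row.

// sketch only: per-index box-LCP membership test for the three segments above
#include <cmath>

static bool satisfiesBoxLCP (double x, double w, double lo, double hi,
                             double tol = 1e-9)
{
  // i in N, state[i]=0 : x == lo and w >= 0
  if (std::fabs(x - lo) <= tol && w >= -tol) return true;
  // i in C             : lo <= x <= hi and w == 0
  if (x >= lo - tol && x <= hi + tol && std::fabs(w) <= tol) return true;
  // i in N, state[i]=1 : x == hi and w <= 0
  if (std::fabs(x - hi) <= tol && w <= tol) return true;
  return false;  // (x,w) is off all three segments
}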
110 
111 
112 #include "btDantzigLCP.h"
113 
114 #include <string.h>//memcpy
115 
116 bool s_error = false;
117 
118 //***************************************************************************
119 // code generation parameters
120 
121 
122 #define btLCP_FAST // use fast btLCP object
123 
124 // option 1 : matrix row pointers (less data copying)
125 #define BTROWPTRS
126 #define BTATYPE btScalar **
127 #define BTAROW(i) (m_A[i])
128 
129 // option 2 : no matrix row pointers (slightly faster inner loops)
130 //#define NOROWPTRS
131 //#define BTATYPE btScalar *
132 //#define BTAROW(i) (m_A+(i)*m_nskip)
133 
134 #define BTNUB_OPTIMIZATIONS
135 
136 
137 
138 /* solve L*X=B, with B containing 1 right hand side.
139  * L is an n*n lower triangular matrix with ones on the diagonal.
140  * L is stored by rows and its leading dimension is lskip.
141  * B is an n*1 matrix that contains the right hand sides.
142  * B is stored by columns and its leading dimension is also lskip.
143  * B is overwritten with X.
144  * this processes blocks of 2*2.
145  * if this is in the factorizer source file, n must be a multiple of 2.
146  */
147 
148 static void btSolveL1_1 (const btScalar *L, btScalar *B, int n, int lskip1)
149 {
150  /* declare variables - Z matrix, p and q vectors, etc */
151  btScalar Z11,m11,Z21,m21,p1,q1,p2,*ex;
152  const btScalar *ell;
153  int i,j;
154  /* compute all 2 x 1 blocks of X */
155  for (i=0; i < n; i+=2) {
156  /* compute all 2 x 1 block of X, from rows i..i+2-1 */
157  /* set the Z matrix to 0 */
158  Z11=0;
159  Z21=0;
160  ell = L + i*lskip1;
161  ex = B;
162  /* the inner loop that computes outer products and adds them to Z */
163  for (j=i-2; j >= 0; j -= 2) {
164  /* compute outer product and add it to the Z matrix */
165  p1=ell[0];
166  q1=ex[0];
167  m11 = p1 * q1;
168  p2=ell[lskip1];
169  m21 = p2 * q1;
170  Z11 += m11;
171  Z21 += m21;
172  /* compute outer product and add it to the Z matrix */
173  p1=ell[1];
174  q1=ex[1];
175  m11 = p1 * q1;
176  p2=ell[1+lskip1];
177  m21 = p2 * q1;
178  /* advance pointers */
179  ell += 2;
180  ex += 2;
181  Z11 += m11;
182  Z21 += m21;
183  /* end of inner loop */
184  }
185  /* compute left-over iterations */
186  j += 2;
187  for (; j > 0; j--) {
188  /* compute outer product and add it to the Z matrix */
189  p1=ell[0];
190  q1=ex[0];
191  m11 = p1 * q1;
192  p2=ell[lskip1];
193  m21 = p2 * q1;
194  /* advance pointers */
195  ell += 1;
196  ex += 1;
197  Z11 += m11;
198  Z21 += m21;
199  }
200  /* finish computing the X(i) block */
201  Z11 = ex[0] - Z11;
202  ex[0] = Z11;
203  p1 = ell[lskip1];
204  Z21 = ex[1] - Z21 - p1*Z11;
205  ex[1] = Z21;
206  /* end of outer loop */
207  }
208 }
209 
210 /* solve L*X=B, with B containing 2 right hand sides.
211  * L is an n*n lower triangular matrix with ones on the diagonal.
212  * L is stored by rows and its leading dimension is lskip.
213  * B is an n*2 matrix that contains the right hand sides.
214  * B is stored by columns and its leading dimension is also lskip.
215  * B is overwritten with X.
216  * this processes blocks of 2*2.
217  * if this is in the factorizer source file, n must be a multiple of 2.
218  */
219 
220 static void btSolveL1_2 (const btScalar *L, btScalar *B, int n, int lskip1)
221 {
222  /* declare variables - Z matrix, p and q vectors, etc */
223  btScalar Z11,m11,Z12,m12,Z21,m21,Z22,m22,p1,q1,p2,q2,*ex;
224  const btScalar *ell;
225  int i,j;
226  /* compute all 2 x 2 blocks of X */
227  for (i=0; i < n; i+=2) {
228  /* compute all 2 x 2 block of X, from rows i..i+2-1 */
229  /* set the Z matrix to 0 */
230  Z11=0;
231  Z12=0;
232  Z21=0;
233  Z22=0;
234  ell = L + i*lskip1;
235  ex = B;
236  /* the inner loop that computes outer products and adds them to Z */
237  for (j=i-2; j >= 0; j -= 2) {
238  /* compute outer product and add it to the Z matrix */
239  p1=ell[0];
240  q1=ex[0];
241  m11 = p1 * q1;
242  q2=ex[lskip1];
243  m12 = p1 * q2;
244  p2=ell[lskip1];
245  m21 = p2 * q1;
246  m22 = p2 * q2;
247  Z11 += m11;
248  Z12 += m12;
249  Z21 += m21;
250  Z22 += m22;
251  /* compute outer product and add it to the Z matrix */
252  p1=ell[1];
253  q1=ex[1];
254  m11 = p1 * q1;
255  q2=ex[1+lskip1];
256  m12 = p1 * q2;
257  p2=ell[1+lskip1];
258  m21 = p2 * q1;
259  m22 = p2 * q2;
260  /* advance pointers */
261  ell += 2;
262  ex += 2;
263  Z11 += m11;
264  Z12 += m12;
265  Z21 += m21;
266  Z22 += m22;
267  /* end of inner loop */
268  }
269  /* compute left-over iterations */
270  j += 2;
271  for (; j > 0; j--) {
272  /* compute outer product and add it to the Z matrix */
273  p1=ell[0];
274  q1=ex[0];
275  m11 = p1 * q1;
276  q2=ex[lskip1];
277  m12 = p1 * q2;
278  p2=ell[lskip1];
279  m21 = p2 * q1;
280  m22 = p2 * q2;
281  /* advance pointers */
282  ell += 1;
283  ex += 1;
284  Z11 += m11;
285  Z12 += m12;
286  Z21 += m21;
287  Z22 += m22;
288  }
289  /* finish computing the X(i) block */
290  Z11 = ex[0] - Z11;
291  ex[0] = Z11;
292  Z12 = ex[lskip1] - Z12;
293  ex[lskip1] = Z12;
294  p1 = ell[lskip1];
295  Z21 = ex[1] - Z21 - p1*Z11;
296  ex[1] = Z21;
297  Z22 = ex[1+lskip1] - Z22 - p1*Z12;
298  ex[1+lskip1] = Z22;
299  /* end of outer loop */
300  }
301 }
302 
303 
304 void btFactorLDLT (btScalar *A, btScalar *d, int n, int nskip1)
305 {
306  int i,j;
307  btScalar sum,*ell,*dee,dd,p1,p2,q1,q2,Z11,m11,Z21,m21,Z22,m22;
308  if (n < 1) return;
309 
310  for (i=0; i<=n-2; i += 2) {
311  /* solve L*(D*l)=a, l is scaled elements in 2 x i block at A(i,0) */
312  btSolveL1_2 (A,A+i*nskip1,i,nskip1);
313  /* scale the elements in a 2 x i block at A(i,0), and also */
314  /* compute Z = the outer product matrix that we'll need. */
315  Z11 = 0;
316  Z21 = 0;
317  Z22 = 0;
318  ell = A+i*nskip1;
319  dee = d;
320  for (j=i-6; j >= 0; j -= 6) {
321  p1 = ell[0];
322  p2 = ell[nskip1];
323  dd = dee[0];
324  q1 = p1*dd;
325  q2 = p2*dd;
326  ell[0] = q1;
327  ell[nskip1] = q2;
328  m11 = p1*q1;
329  m21 = p2*q1;
330  m22 = p2*q2;
331  Z11 += m11;
332  Z21 += m21;
333  Z22 += m22;
334  p1 = ell[1];
335  p2 = ell[1+nskip1];
336  dd = dee[1];
337  q1 = p1*dd;
338  q2 = p2*dd;
339  ell[1] = q1;
340  ell[1+nskip1] = q2;
341  m11 = p1*q1;
342  m21 = p2*q1;
343  m22 = p2*q2;
344  Z11 += m11;
345  Z21 += m21;
346  Z22 += m22;
347  p1 = ell[2];
348  p2 = ell[2+nskip1];
349  dd = dee[2];
350  q1 = p1*dd;
351  q2 = p2*dd;
352  ell[2] = q1;
353  ell[2+nskip1] = q2;
354  m11 = p1*q1;
355  m21 = p2*q1;
356  m22 = p2*q2;
357  Z11 += m11;
358  Z21 += m21;
359  Z22 += m22;
360  p1 = ell[3];
361  p2 = ell[3+nskip1];
362  dd = dee[3];
363  q1 = p1*dd;
364  q2 = p2*dd;
365  ell[3] = q1;
366  ell[3+nskip1] = q2;
367  m11 = p1*q1;
368  m21 = p2*q1;
369  m22 = p2*q2;
370  Z11 += m11;
371  Z21 += m21;
372  Z22 += m22;
373  p1 = ell[4];
374  p2 = ell[4+nskip1];
375  dd = dee[4];
376  q1 = p1*dd;
377  q2 = p2*dd;
378  ell[4] = q1;
379  ell[4+nskip1] = q2;
380  m11 = p1*q1;
381  m21 = p2*q1;
382  m22 = p2*q2;
383  Z11 += m11;
384  Z21 += m21;
385  Z22 += m22;
386  p1 = ell[5];
387  p2 = ell[5+nskip1];
388  dd = dee[5];
389  q1 = p1*dd;
390  q2 = p2*dd;
391  ell[5] = q1;
392  ell[5+nskip1] = q2;
393  m11 = p1*q1;
394  m21 = p2*q1;
395  m22 = p2*q2;
396  Z11 += m11;
397  Z21 += m21;
398  Z22 += m22;
399  ell += 6;
400  dee += 6;
401  }
402  /* compute left-over iterations */
403  j += 6;
404  for (; j > 0; j--) {
405  p1 = ell[0];
406  p2 = ell[nskip1];
407  dd = dee[0];
408  q1 = p1*dd;
409  q2 = p2*dd;
410  ell[0] = q1;
411  ell[nskip1] = q2;
412  m11 = p1*q1;
413  m21 = p2*q1;
414  m22 = p2*q2;
415  Z11 += m11;
416  Z21 += m21;
417  Z22 += m22;
418  ell++;
419  dee++;
420  }
421  /* solve for diagonal 2 x 2 block at A(i,i) */
422  Z11 = ell[0] - Z11;
423  Z21 = ell[nskip1] - Z21;
424  Z22 = ell[1+nskip1] - Z22;
425  dee = d + i;
426  /* factorize 2 x 2 block Z,dee */
427  /* factorize row 1 */
428  dee[0] = btRecip(Z11);
429  /* factorize row 2 */
430  sum = 0;
431  q1 = Z21;
432  q2 = q1 * dee[0];
433  Z21 = q2;
434  sum += q1*q2;
435  dee[1] = btRecip(Z22 - sum);
436  /* done factorizing 2 x 2 block */
437  ell[nskip1] = Z21;
438  }
439  /* compute the (less than 2) rows at the bottom */
440  switch (n-i) {
441  case 0:
442  break;
443 
444  case 1:
445  btSolveL1_1 (A,A+i*nskip1,i,nskip1);
446  /* scale the elements in a 1 x i block at A(i,0), and also */
447  /* compute Z = the outer product matrix that we'll need. */
448  Z11 = 0;
449  ell = A+i*nskip1;
450  dee = d;
451  for (j=i-6; j >= 0; j -= 6) {
452  p1 = ell[0];
453  dd = dee[0];
454  q1 = p1*dd;
455  ell[0] = q1;
456  m11 = p1*q1;
457  Z11 += m11;
458  p1 = ell[1];
459  dd = dee[1];
460  q1 = p1*dd;
461  ell[1] = q1;
462  m11 = p1*q1;
463  Z11 += m11;
464  p1 = ell[2];
465  dd = dee[2];
466  q1 = p1*dd;
467  ell[2] = q1;
468  m11 = p1*q1;
469  Z11 += m11;
470  p1 = ell[3];
471  dd = dee[3];
472  q1 = p1*dd;
473  ell[3] = q1;
474  m11 = p1*q1;
475  Z11 += m11;
476  p1 = ell[4];
477  dd = dee[4];
478  q1 = p1*dd;
479  ell[4] = q1;
480  m11 = p1*q1;
481  Z11 += m11;
482  p1 = ell[5];
483  dd = dee[5];
484  q1 = p1*dd;
485  ell[5] = q1;
486  m11 = p1*q1;
487  Z11 += m11;
488  ell += 6;
489  dee += 6;
490  }
491  /* compute left-over iterations */
492  j += 6;
493  for (; j > 0; j--) {
494  p1 = ell[0];
495  dd = dee[0];
496  q1 = p1*dd;
497  ell[0] = q1;
498  m11 = p1*q1;
499  Z11 += m11;
500  ell++;
501  dee++;
502  }
503  /* solve for diagonal 1 x 1 block at A(i,i) */
504  Z11 = ell[0] - Z11;
505  dee = d + i;
506  /* factorize 1 x 1 block Z,dee */
507  /* factorize row 1 */
508  dee[0] = btRecip(Z11);
509  /* done factorizing 1 x 1 block */
510  break;
511 
512  //default: *((char*)0)=0; /* this should never happen! */
513  }
514 }
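
A note on the storage convention, shown as a hedged sketch (not library code): btFactorLDLT overwrites the strict lower triangle of A with the unit lower triangular factor L (the unit diagonal is implicit and A's own diagonal entries are left untouched), and writes the *reciprocals* of the diagonal of D into d, which is why btSolveLDLT below can scale by d directly. Under those assumptions, an entry of the original matrix can be rebuilt as follows.

// sketch only: read back A(i,j) for i >= j from the in-place L and the reciprocal d[]
static btScalar reconstructLowerEntry (const btScalar *A, const btScalar *d,
                                       int nskip, int i, int j)
{
  btScalar sum = 0;
  for (int k = 0; k <= j; ++k) {
    btScalar Lik = (k == i) ? btScalar(1.0) : A[i*nskip + k];  // implicit unit diagonal
    btScalar Ljk = (k == j) ? btScalar(1.0) : A[j*nskip + k];
    sum += Lik * (btScalar(1.0) / d[k]) * Ljk;                 // D(k,k) = 1/d[k]
  }
  return sum;  // matches the original A(i,j) up to round-off
}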
515 
516 /* solve L*X=B, with B containing 1 right hand side.
517  * L is an n*n lower triangular matrix with ones on the diagonal.
518  * L is stored by rows and its leading dimension is lskip.
519  * B is an n*1 matrix that contains the right hand sides.
520  * B is stored by columns and its leading dimension is also lskip.
521  * B is overwritten with X.
522  * this processes blocks of 4*4.
523  * if this is in the factorizer source file, n must be a multiple of 4.
524  */
525 
526 void btSolveL1 (const btScalar *L, btScalar *B, int n, int lskip1)
527 {
528  /* declare variables - Z matrix, p and q vectors, etc */
529  btScalar Z11,Z21,Z31,Z41,p1,q1,p2,p3,p4,*ex;
530  const btScalar *ell;
531  int lskip2,lskip3,i,j;
532  /* compute lskip values */
533  lskip2 = 2*lskip1;
534  lskip3 = 3*lskip1;
535  /* compute all 4 x 1 blocks of X */
536  for (i=0; i <= n-4; i+=4) {
537  /* compute all 4 x 1 block of X, from rows i..i+4-1 */
538  /* set the Z matrix to 0 */
539  Z11=0;
540  Z21=0;
541  Z31=0;
542  Z41=0;
543  ell = L + i*lskip1;
544  ex = B;
545  /* the inner loop that computes outer products and adds them to Z */
546  for (j=i-12; j >= 0; j -= 12) {
547  /* load p and q values */
548  p1=ell[0];
549  q1=ex[0];
550  p2=ell[lskip1];
551  p3=ell[lskip2];
552  p4=ell[lskip3];
553  /* compute outer product and add it to the Z matrix */
554  Z11 += p1 * q1;
555  Z21 += p2 * q1;
556  Z31 += p3 * q1;
557  Z41 += p4 * q1;
558  /* load p and q values */
559  p1=ell[1];
560  q1=ex[1];
561  p2=ell[1+lskip1];
562  p3=ell[1+lskip2];
563  p4=ell[1+lskip3];
564  /* compute outer product and add it to the Z matrix */
565  Z11 += p1 * q1;
566  Z21 += p2 * q1;
567  Z31 += p3 * q1;
568  Z41 += p4 * q1;
569  /* load p and q values */
570  p1=ell[2];
571  q1=ex[2];
572  p2=ell[2+lskip1];
573  p3=ell[2+lskip2];
574  p4=ell[2+lskip3];
575  /* compute outer product and add it to the Z matrix */
576  Z11 += p1 * q1;
577  Z21 += p2 * q1;
578  Z31 += p3 * q1;
579  Z41 += p4 * q1;
580  /* load p and q values */
581  p1=ell[3];
582  q1=ex[3];
583  p2=ell[3+lskip1];
584  p3=ell[3+lskip2];
585  p4=ell[3+lskip3];
586  /* compute outer product and add it to the Z matrix */
587  Z11 += p1 * q1;
588  Z21 += p2 * q1;
589  Z31 += p3 * q1;
590  Z41 += p4 * q1;
591  /* load p and q values */
592  p1=ell[4];
593  q1=ex[4];
594  p2=ell[4+lskip1];
595  p3=ell[4+lskip2];
596  p4=ell[4+lskip3];
597  /* compute outer product and add it to the Z matrix */
598  Z11 += p1 * q1;
599  Z21 += p2 * q1;
600  Z31 += p3 * q1;
601  Z41 += p4 * q1;
602  /* load p and q values */
603  p1=ell[5];
604  q1=ex[5];
605  p2=ell[5+lskip1];
606  p3=ell[5+lskip2];
607  p4=ell[5+lskip3];
608  /* compute outer product and add it to the Z matrix */
609  Z11 += p1 * q1;
610  Z21 += p2 * q1;
611  Z31 += p3 * q1;
612  Z41 += p4 * q1;
613  /* load p and q values */
614  p1=ell[6];
615  q1=ex[6];
616  p2=ell[6+lskip1];
617  p3=ell[6+lskip2];
618  p4=ell[6+lskip3];
619  /* compute outer product and add it to the Z matrix */
620  Z11 += p1 * q1;
621  Z21 += p2 * q1;
622  Z31 += p3 * q1;
623  Z41 += p4 * q1;
624  /* load p and q values */
625  p1=ell[7];
626  q1=ex[7];
627  p2=ell[7+lskip1];
628  p3=ell[7+lskip2];
629  p4=ell[7+lskip3];
630  /* compute outer product and add it to the Z matrix */
631  Z11 += p1 * q1;
632  Z21 += p2 * q1;
633  Z31 += p3 * q1;
634  Z41 += p4 * q1;
635  /* load p and q values */
636  p1=ell[8];
637  q1=ex[8];
638  p2=ell[8+lskip1];
639  p3=ell[8+lskip2];
640  p4=ell[8+lskip3];
641  /* compute outer product and add it to the Z matrix */
642  Z11 += p1 * q1;
643  Z21 += p2 * q1;
644  Z31 += p3 * q1;
645  Z41 += p4 * q1;
646  /* load p and q values */
647  p1=ell[9];
648  q1=ex[9];
649  p2=ell[9+lskip1];
650  p3=ell[9+lskip2];
651  p4=ell[9+lskip3];
652  /* compute outer product and add it to the Z matrix */
653  Z11 += p1 * q1;
654  Z21 += p2 * q1;
655  Z31 += p3 * q1;
656  Z41 += p4 * q1;
657  /* load p and q values */
658  p1=ell[10];
659  q1=ex[10];
660  p2=ell[10+lskip1];
661  p3=ell[10+lskip2];
662  p4=ell[10+lskip3];
663  /* compute outer product and add it to the Z matrix */
664  Z11 += p1 * q1;
665  Z21 += p2 * q1;
666  Z31 += p3 * q1;
667  Z41 += p4 * q1;
668  /* load p and q values */
669  p1=ell[11];
670  q1=ex[11];
671  p2=ell[11+lskip1];
672  p3=ell[11+lskip2];
673  p4=ell[11+lskip3];
674  /* compute outer product and add it to the Z matrix */
675  Z11 += p1 * q1;
676  Z21 += p2 * q1;
677  Z31 += p3 * q1;
678  Z41 += p4 * q1;
679  /* advance pointers */
680  ell += 12;
681  ex += 12;
682  /* end of inner loop */
683  }
684  /* compute left-over iterations */
685  j += 12;
686  for (; j > 0; j--) {
687  /* load p and q values */
688  p1=ell[0];
689  q1=ex[0];
690  p2=ell[lskip1];
691  p3=ell[lskip2];
692  p4=ell[lskip3];
693  /* compute outer product and add it to the Z matrix */
694  Z11 += p1 * q1;
695  Z21 += p2 * q1;
696  Z31 += p3 * q1;
697  Z41 += p4 * q1;
698  /* advance pointers */
699  ell += 1;
700  ex += 1;
701  }
702  /* finish computing the X(i) block */
703  Z11 = ex[0] - Z11;
704  ex[0] = Z11;
705  p1 = ell[lskip1];
706  Z21 = ex[1] - Z21 - p1*Z11;
707  ex[1] = Z21;
708  p1 = ell[lskip2];
709  p2 = ell[1+lskip2];
710  Z31 = ex[2] - Z31 - p1*Z11 - p2*Z21;
711  ex[2] = Z31;
712  p1 = ell[lskip3];
713  p2 = ell[1+lskip3];
714  p3 = ell[2+lskip3];
715  Z41 = ex[3] - Z41 - p1*Z11 - p2*Z21 - p3*Z31;
716  ex[3] = Z41;
717  /* end of outer loop */
718  }
719  /* compute rows at end that are not a multiple of block size */
720  for (; i < n; i++) {
721  /* compute all 1 x 1 block of X, from rows i..i+1-1 */
722  /* set the Z matrix to 0 */
723  Z11=0;
724  ell = L + i*lskip1;
725  ex = B;
726  /* the inner loop that computes outer products and adds them to Z */
727  for (j=i-12; j >= 0; j -= 12) {
728  /* load p and q values */
729  p1=ell[0];
730  q1=ex[0];
731  /* compute outer product and add it to the Z matrix */
732  Z11 += p1 * q1;
733  /* load p and q values */
734  p1=ell[1];
735  q1=ex[1];
736  /* compute outer product and add it to the Z matrix */
737  Z11 += p1 * q1;
738  /* load p and q values */
739  p1=ell[2];
740  q1=ex[2];
741  /* compute outer product and add it to the Z matrix */
742  Z11 += p1 * q1;
743  /* load p and q values */
744  p1=ell[3];
745  q1=ex[3];
746  /* compute outer product and add it to the Z matrix */
747  Z11 += p1 * q1;
748  /* load p and q values */
749  p1=ell[4];
750  q1=ex[4];
751  /* compute outer product and add it to the Z matrix */
752  Z11 += p1 * q1;
753  /* load p and q values */
754  p1=ell[5];
755  q1=ex[5];
756  /* compute outer product and add it to the Z matrix */
757  Z11 += p1 * q1;
758  /* load p and q values */
759  p1=ell[6];
760  q1=ex[6];
761  /* compute outer product and add it to the Z matrix */
762  Z11 += p1 * q1;
763  /* load p and q values */
764  p1=ell[7];
765  q1=ex[7];
766  /* compute outer product and add it to the Z matrix */
767  Z11 += p1 * q1;
768  /* load p and q values */
769  p1=ell[8];
770  q1=ex[8];
771  /* compute outer product and add it to the Z matrix */
772  Z11 += p1 * q1;
773  /* load p and q values */
774  p1=ell[9];
775  q1=ex[9];
776  /* compute outer product and add it to the Z matrix */
777  Z11 += p1 * q1;
778  /* load p and q values */
779  p1=ell[10];
780  q1=ex[10];
781  /* compute outer product and add it to the Z matrix */
782  Z11 += p1 * q1;
783  /* load p and q values */
784  p1=ell[11];
785  q1=ex[11];
786  /* compute outer product and add it to the Z matrix */
787  Z11 += p1 * q1;
788  /* advance pointers */
789  ell += 12;
790  ex += 12;
791  /* end of inner loop */
792  }
793  /* compute left-over iterations */
794  j += 12;
795  for (; j > 0; j--) {
796  /* load p and q values */
797  p1=ell[0];
798  q1=ex[0];
799  /* compute outer product and add it to the Z matrix */
800  Z11 += p1 * q1;
801  /* advance pointers */
802  ell += 1;
803  ex += 1;
804  }
805  /* finish computing the X(i) block */
806  Z11 = ex[0] - Z11;
807  ex[0] = Z11;
808  }
809 }
810 
811 /* solve L^T * x=b, with b containing 1 right hand side.
812  * L is an n*n lower triangular matrix with ones on the diagonal.
813  * L is stored by rows and its leading dimension is lskip.
814  * b is an n*1 matrix that contains the right hand side.
815  * b is overwritten with x.
816  * this processes blocks of 4.
817  */
818 
819 void btSolveL1T (const btScalar *L, btScalar *B, int n, int lskip1)
820 {
821  /* declare variables - Z matrix, p and q vectors, etc */
822  btScalar Z11,m11,Z21,m21,Z31,m31,Z41,m41,p1,q1,p2,p3,p4,*ex;
823  const btScalar *ell;
824  int lskip2,lskip3,i,j;
825  /* special handling for L and B because we're solving L1 *transpose* */
826  L = L + (n-1)*(lskip1+1);
827  B = B + n-1;
828  lskip1 = -lskip1;
829  /* compute lskip values */
830  lskip2 = 2*lskip1;
831  lskip3 = 3*lskip1;
832  /* compute all 4 x 1 blocks of X */
833  for (i=0; i <= n-4; i+=4) {
834  /* compute all 4 x 1 block of X, from rows i..i+4-1 */
835  /* set the Z matrix to 0 */
836  Z11=0;
837  Z21=0;
838  Z31=0;
839  Z41=0;
840  ell = L - i;
841  ex = B;
842  /* the inner loop that computes outer products and adds them to Z */
843  for (j=i-4; j >= 0; j -= 4) {
844  /* load p and q values */
845  p1=ell[0];
846  q1=ex[0];
847  p2=ell[-1];
848  p3=ell[-2];
849  p4=ell[-3];
850  /* compute outer product and add it to the Z matrix */
851  m11 = p1 * q1;
852  m21 = p2 * q1;
853  m31 = p3 * q1;
854  m41 = p4 * q1;
855  ell += lskip1;
856  Z11 += m11;
857  Z21 += m21;
858  Z31 += m31;
859  Z41 += m41;
860  /* load p and q values */
861  p1=ell[0];
862  q1=ex[-1];
863  p2=ell[-1];
864  p3=ell[-2];
865  p4=ell[-3];
866  /* compute outer product and add it to the Z matrix */
867  m11 = p1 * q1;
868  m21 = p2 * q1;
869  m31 = p3 * q1;
870  m41 = p4 * q1;
871  ell += lskip1;
872  Z11 += m11;
873  Z21 += m21;
874  Z31 += m31;
875  Z41 += m41;
876  /* load p and q values */
877  p1=ell[0];
878  q1=ex[-2];
879  p2=ell[-1];
880  p3=ell[-2];
881  p4=ell[-3];
882  /* compute outer product and add it to the Z matrix */
883  m11 = p1 * q1;
884  m21 = p2 * q1;
885  m31 = p3 * q1;
886  m41 = p4 * q1;
887  ell += lskip1;
888  Z11 += m11;
889  Z21 += m21;
890  Z31 += m31;
891  Z41 += m41;
892  /* load p and q values */
893  p1=ell[0];
894  q1=ex[-3];
895  p2=ell[-1];
896  p3=ell[-2];
897  p4=ell[-3];
898  /* compute outer product and add it to the Z matrix */
899  m11 = p1 * q1;
900  m21 = p2 * q1;
901  m31 = p3 * q1;
902  m41 = p4 * q1;
903  ell += lskip1;
904  ex -= 4;
905  Z11 += m11;
906  Z21 += m21;
907  Z31 += m31;
908  Z41 += m41;
909  /* end of inner loop */
910  }
911  /* compute left-over iterations */
912  j += 4;
913  for (; j > 0; j--) {
914  /* load p and q values */
915  p1=ell[0];
916  q1=ex[0];
917  p2=ell[-1];
918  p3=ell[-2];
919  p4=ell[-3];
920  /* compute outer product and add it to the Z matrix */
921  m11 = p1 * q1;
922  m21 = p2 * q1;
923  m31 = p3 * q1;
924  m41 = p4 * q1;
925  ell += lskip1;
926  ex -= 1;
927  Z11 += m11;
928  Z21 += m21;
929  Z31 += m31;
930  Z41 += m41;
931  }
932  /* finish computing the X(i) block */
933  Z11 = ex[0] - Z11;
934  ex[0] = Z11;
935  p1 = ell[-1];
936  Z21 = ex[-1] - Z21 - p1*Z11;
937  ex[-1] = Z21;
938  p1 = ell[-2];
939  p2 = ell[-2+lskip1];
940  Z31 = ex[-2] - Z31 - p1*Z11 - p2*Z21;
941  ex[-2] = Z31;
942  p1 = ell[-3];
943  p2 = ell[-3+lskip1];
944  p3 = ell[-3+lskip2];
945  Z41 = ex[-3] - Z41 - p1*Z11 - p2*Z21 - p3*Z31;
946  ex[-3] = Z41;
947  /* end of outer loop */
948  }
949  /* compute rows at end that are not a multiple of block size */
950  for (; i < n; i++) {
951  /* compute all 1 x 1 block of X, from rows i..i+1-1 */
952  /* set the Z matrix to 0 */
953  Z11=0;
954  ell = L - i;
955  ex = B;
956  /* the inner loop that computes outer products and adds them to Z */
957  for (j=i-4; j >= 0; j -= 4) {
958  /* load p and q values */
959  p1=ell[0];
960  q1=ex[0];
961  /* compute outer product and add it to the Z matrix */
962  m11 = p1 * q1;
963  ell += lskip1;
964  Z11 += m11;
965  /* load p and q values */
966  p1=ell[0];
967  q1=ex[-1];
968  /* compute outer product and add it to the Z matrix */
969  m11 = p1 * q1;
970  ell += lskip1;
971  Z11 += m11;
972  /* load p and q values */
973  p1=ell[0];
974  q1=ex[-2];
975  /* compute outer product and add it to the Z matrix */
976  m11 = p1 * q1;
977  ell += lskip1;
978  Z11 += m11;
979  /* load p and q values */
980  p1=ell[0];
981  q1=ex[-3];
982  /* compute outer product and add it to the Z matrix */
983  m11 = p1 * q1;
984  ell += lskip1;
985  ex -= 4;
986  Z11 += m11;
987  /* end of inner loop */
988  }
989  /* compute left-over iterations */
990  j += 4;
991  for (; j > 0; j--) {
992  /* load p and q values */
993  p1=ell[0];
994  q1=ex[0];
995  /* compute outer product and add it to the Z matrix */
996  m11 = p1 * q1;
997  ell += lskip1;
998  ex -= 1;
999  Z11 += m11;
1000  }
1001  /* finish computing the X(i) block */
1002  Z11 = ex[0] - Z11;
1003  ex[0] = Z11;
1004  }
1005 }
1006 
1007 
1008 
1009 void btVectorScale (btScalar *a, const btScalar *d, int n)
1010 {
1011  btAssert (a && d && n >= 0);
1012  for (int i=0; i<n; i++) {
1013  a[i] *= d[i];
1014  }
1015 }
1016 
1017 void btSolveLDLT (const btScalar *L, const btScalar *d, btScalar *b, int n, int nskip)
1018 {
1019  btAssert (L && d && b && n > 0 && nskip >= n);
1020  btSolveL1 (L,b,n,nskip);
1021  btVectorScale (b,d,n);
1022  btSolveL1T (L,b,n,nskip);
1023 }
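
Putting the helpers together: a small usage sketch (assuming a symmetric positive definite A stored with leading dimension nskip == n, and that these two routines are visible via btDantzigLCP.h). This mirrors what the driver at the end of this file does when every variable is unbounded: factor in place, then solve.

// sketch only: solve A*x = b for a 3x3 symmetric positive definite A
static void exampleFactorAndSolve ()
{
  const int n = 3, nskip = 3;
  btScalar A[n*nskip] = { 4, 0, 0,      // only the lower triangle is read
                          1, 3, 0,
                          0, 1, 2 };
  btScalar d[n];                        // receives reciprocals of D's diagonal
  btScalar b[n] = { 1, 2, 3 };          // right hand side, overwritten with x

  btFactorLDLT (A, d, n, nskip);        // A now holds L (unit diagonal implicit)
  btSolveLDLT  (A, d, b, n, nskip);     // b now holds the solution x
}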
1024 
1025 
1026 
1027 //***************************************************************************
1028 
1029 // swap row/column i1 with i2 in the n*n matrix A. the leading dimension of
1030 // A is nskip. this only references and swaps the lower triangle.
1031 // if `do_fast_row_swaps' is nonzero and row pointers are being used, then
1032 // rows will be swapped by exchanging row pointers. otherwise the data will
1033 // be copied.
1034 
1035 static void btSwapRowsAndCols (BTATYPE A, int n, int i1, int i2, int nskip,
1036  int do_fast_row_swaps)
1037 {
1038  btAssert (A && n > 0 && i1 >= 0 && i2 >= 0 && i1 < n && i2 < n &&
1039  nskip >= n && i1 < i2);
1040 
1041 # ifdef BTROWPTRS
1042  btScalar *A_i1 = A[i1];
1043  btScalar *A_i2 = A[i2];
1044  for (int i=i1+1; i<i2; ++i) {
1045  btScalar *A_i_i1 = A[i] + i1;
1046  A_i1[i] = *A_i_i1;
1047  *A_i_i1 = A_i2[i];
1048  }
1049  A_i1[i2] = A_i1[i1];
1050  A_i1[i1] = A_i2[i1];
1051  A_i2[i1] = A_i2[i2];
1052  // swap rows, by swapping row pointers
1053  if (do_fast_row_swaps) {
1054  A[i1] = A_i2;
1055  A[i2] = A_i1;
1056  }
1057  else {
1058  // Only swap till i2 column to match A plain storage variant.
1059  for (int k = 0; k <= i2; ++k) {
1060  btScalar tmp = A_i1[k];
1061  A_i1[k] = A_i2[k];
1062  A_i2[k] = tmp;
1063  }
1064  }
1065  // swap columns the hard way
1066  for (int j=i2+1; j<n; ++j) {
1067  btScalar *A_j = A[j];
1068  btScalar tmp = A_j[i1];
1069  A_j[i1] = A_j[i2];
1070  A_j[i2] = tmp;
1071  }
1072 # else
1073  btScalar *A_i1 = A+i1*nskip;
1074  btScalar *A_i2 = A+i2*nskip;
1075  for (int k = 0; k < i1; ++k) {
1076  btScalar tmp = A_i1[k];
1077  A_i1[k] = A_i2[k];
1078  A_i2[k] = tmp;
1079  }
1080  btScalar *A_i = A_i1 + nskip;
1081  for (int i=i1+1; i<i2; A_i+=nskip, ++i) {
1082  btScalar tmp = A_i2[i];
1083  A_i2[i] = A_i[i1];
1084  A_i[i1] = tmp;
1085  }
1086  {
1087  btScalar tmp = A_i1[i1];
1088  A_i1[i1] = A_i2[i2];
1089  A_i2[i2] = tmp;
1090  }
1091  btScalar *A_j = A_i2 + nskip;
1092  for (int j=i2+1; j<n; A_j+=nskip, ++j) {
1093  btScalar tmp = A_j[i1];
1094  A_j[i1] = A_j[i2];
1095  A_j[i2] = tmp;
1096  }
1097 # endif
1098 }
1099 
1100 
1101 // swap two indexes in the n*n LCP problem. i1 must be <= i2.
1102 
1103 static void btSwapProblem (BTATYPE A, btScalar *x, btScalar *b, btScalar *w, btScalar *lo,
1104  btScalar *hi, int *p, bool *state, int *findex,
1105  int n, int i1, int i2, int nskip,
1106  int do_fast_row_swaps)
1107 {
1108  btScalar tmpr;
1109  int tmpi;
1110  bool tmpb;
1111  btAssert (n>0 && i1 >=0 && i2 >= 0 && i1 < n && i2 < n && nskip >= n && i1 <= i2);
1112  if (i1==i2) return;
1113 
1114  btSwapRowsAndCols (A,n,i1,i2,nskip,do_fast_row_swaps);
1115 
1116  tmpr = x[i1];
1117  x[i1] = x[i2];
1118  x[i2] = tmpr;
1119 
1120  tmpr = b[i1];
1121  b[i1] = b[i2];
1122  b[i2] = tmpr;
1123 
1124  tmpr = w[i1];
1125  w[i1] = w[i2];
1126  w[i2] = tmpr;
1127 
1128  tmpr = lo[i1];
1129  lo[i1] = lo[i2];
1130  lo[i2] = tmpr;
1131 
1132  tmpr = hi[i1];
1133  hi[i1] = hi[i2];
1134  hi[i2] = tmpr;
1135 
1136  tmpi = p[i1];
1137  p[i1] = p[i2];
1138  p[i2] = tmpi;
1139 
1140  tmpb = state[i1];
1141  state[i1] = state[i2];
1142  state[i2] = tmpb;
1143 
1144  if (findex) {
1145  tmpi = findex[i1];
1146  findex[i1] = findex[i2];
1147  findex[i2] = tmpi;
1148  }
1149 }
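
The permutation bookkeeping described in the notes at the top of the file, shown as a standalone sketch (not library code): every swap of two problem indexes also swaps the corresponding entries of `p', and at the end the original ordering is restored exactly the way btLCP::unpermute() does it.

// sketch only: maintain p under swaps, then un-permute a result vector
#include <algorithm>
#include <vector>

static void examplePermutationBookkeeping ()
{
  const int n = 4;
  std::vector<int> p(n);
  for (int k=0; k<n; ++k) p[k] = k;              // initially unpermuted

  std::vector<double> x = { 10, 11, 12, 13 };    // stands in for the permuted x

  // suppose the solver swapped problem indexes 1 and 3 at some point
  std::swap (p[1], p[3]);
  std::swap (x[1], x[3]);

  // un-permute: element j of the permuted vector belongs at original index p[j]
  std::vector<double> x_original(n);
  for (int j=0; j<n; ++j) x_original[p[j]] = x[j];
  // x_original is { 10, 11, 12, 13 } again
}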
1150 
1151 
1152 
1153 
1154 //***************************************************************************
1155 // btLCP manipulator object. this represents an n*n LCP problem.
1156 //
1157 // two index sets C and N are kept. each set holds a subset of
1158 // the variable indexes 0..n-1. an index can only be in one set.
1159 // initially both sets are empty.
1160 //
1161 // the index set C is special: solutions to A(C,C)\A(C,i) can be generated.
1162 
1163 //***************************************************************************
1164 // fast implementation of btLCP. see the above definition of btLCP for
1165 // interface comments.
1166 //
1167 // `p' records the permutation of A,x,b,w,etc. p is initially 1:n and is
1168 // permuted as the other vectors/matrices are permuted.
1169 //
1170 // A,x,b,w,lo,hi,state,findex,p,c are permuted such that sets C,N have
1171 // contiguous indexes. the don't-care indexes follow N.
1172 //
1173 // an L*D*L' factorization is maintained of A(C,C), and whenever indexes are
1174 // added or removed from the set C the factorization is updated.
1175 // thus L*D*L'=A[C,C], i.e. a permuted top left nC*nC submatrix of A.
1176 // the leading dimension of the matrix L is always `nskip'.
1177 //
1178 // at the start there may be other indexes that are unbounded but are not
1179 // included in `nub'. btLCP will permute the matrix so that absolutely all
1180 // unbounded vectors are at the start. thus there may be some initial
1181 // permutation.
1182 //
1183 // the algorithms here assume certain patterns, particularly with respect to
1184 // index transfer.
1185 
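The contiguous layout this comment describes can be summarized in one small helper. This is only an illustrative sketch of the invariant (it is not used by the solver): with nC clamped and nN non-clamped variables, a permuted index k falls into one of three ranges, matching indexC(i)==i and indexN(i)==i+nC below.

// sketch only: which set a permuted index k belongs to
static const char* exampleIndexSet (int k, int nC, int nN)
{
  if (k < nC)     return "C (clamped: w==0, lo<=x<=hi, covered by the L*D*L' factor)";
  if (k < nC+nN)  return "N (at a bound: x==lo or x==hi, depending on state[k])";
  return "don't care (not yet driven, x==0)";
}
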
1186 #ifdef btLCP_FAST
1187 
1188 struct btLCP
1189 {
1190  const int m_n;
1191  const int m_nskip;
1192  int m_nub;
1193  int m_nC, m_nN; // size of each index set
1194  BTATYPE const m_A; // A rows
1195  btScalar *const m_x, * const m_b, *const m_w, *const m_lo,* const m_hi; // permuted LCP problem data
1196  btScalar *const m_L, *const m_d; // L*D*L' factorization of set C
1197  btScalar *const m_Dell, *const m_ell, *const m_tmp;
1198  bool *const m_state;
1199  int *const m_findex, *const m_p, *const m_C;
1200 
1201  btLCP (int _n, int _nskip, int _nub, btScalar *_Adata, btScalar *_x, btScalar *_b, btScalar *_w,
1202  btScalar *_lo, btScalar *_hi, btScalar *_L, btScalar *_d,
1203  btScalar *_Dell, btScalar *_ell, btScalar *_tmp,
1204  bool *_state, int *_findex, int *_p, int *_C, btScalar **Arows);
1205  int getNub() const { return m_nub; }
1206  void transfer_i_to_C (int i);
1207  void transfer_i_to_N (int i) { m_nN++; } // because we can assume C and N span 1:i-1
1208  void transfer_i_from_N_to_C (int i);
1209  void transfer_i_from_C_to_N (int i, btAlignedObjectArray<btScalar>& scratch);
1210  int numC() const { return m_nC; }
1211  int numN() const { return m_nN; }
1212  int indexC (int i) const { return i; }
1213  int indexN (int i) const { return i+m_nC; }
1214  btScalar Aii (int i) const { return BTAROW(i)[i]; }
1215  btScalar AiC_times_qC (int i, btScalar *q) const { return btLargeDot (BTAROW(i), q, m_nC); }
1216  btScalar AiN_times_qN (int i, btScalar *q) const { return btLargeDot (BTAROW(i)+m_nC, q+m_nC, m_nN); }
1217  void pN_equals_ANC_times_qC (btScalar *p, btScalar *q);
1218  void pN_plusequals_ANi (btScalar *p, int i, int sign=1);
1219  void pC_plusequals_s_times_qC (btScalar *p, btScalar s, btScalar *q);
1220  void pN_plusequals_s_times_qN (btScalar *p, btScalar s, btScalar *q);
1221  void solve1 (btScalar *a, int i, int dir=1, int only_transfer=0);
1222  void unpermute();
1223 };
1224 
1225 
1226 btLCP::btLCP (int _n, int _nskip, int _nub, btScalar *_Adata, btScalar *_x, btScalar *_b, btScalar *_w,
1227  btScalar *_lo, btScalar *_hi, btScalar *_L, btScalar *_d,
1228  btScalar *_Dell, btScalar *_ell, btScalar *_tmp,
1229  bool *_state, int *_findex, int *_p, int *_C, btScalar **Arows):
1230  m_n(_n), m_nskip(_nskip), m_nub(_nub), m_nC(0), m_nN(0),
1231 # ifdef BTROWPTRS
1232  m_A(Arows),
1233 #else
1234  m_A(_Adata),
1235 #endif
1236  m_x(_x), m_b(_b), m_w(_w), m_lo(_lo), m_hi(_hi),
1237  m_L(_L), m_d(_d), m_Dell(_Dell), m_ell(_ell), m_tmp(_tmp),
1238  m_state(_state), m_findex(_findex), m_p(_p), m_C(_C)
1239 {
1240  {
1241  btSetZero (m_x,m_n);
1242  }
1243 
1244  {
1245 # ifdef BTROWPTRS
1246  // make matrix row pointers
1247  btScalar *aptr = _Adata;
1248  BTATYPE A = m_A;
1249  const int n = m_n, nskip = m_nskip;
1250  for (int k=0; k<n; aptr+=nskip, ++k) A[k] = aptr;
1251 # endif
1252  }
1253 
1254  {
1255  int *p = m_p;
1256  const int n = m_n;
1257  for (int k=0; k<n; ++k) p[k]=k; // initially unpermuted
1258  }
1259 
1260  /*
1261  // for testing, we can do some random swaps in the area i > nub
1262  {
1263  const int n = m_n;
1264  const int nub = m_nub;
1265  if (nub < n) {
1266  for (int k=0; k<100; k++) {
1267  int i1,i2;
1268  do {
1269  i1 = dRandInt(n-nub)+nub;
1270  i2 = dRandInt(n-nub)+nub;
1271  }
1272  while (i1 > i2);
1273  //printf ("--> %d %d\n",i1,i2);
1274  btSwapProblem (m_A,m_x,m_b,m_w,m_lo,m_hi,m_p,m_state,m_findex,n,i1,i2,m_nskip,0);
1275  }
1276  }
1277  */
1278 
1279  // permute the problem so that *all* the unbounded variables are at the
1280  // start, i.e. look for unbounded variables not included in `nub'. we can
1281  // potentially push up `nub' this way and get a bigger initial factorization.
1282  // note that when we swap rows/cols here we must not just swap row pointers,
1283  // as the initial factorization relies on the data being all in one chunk.
1284  // variables that have findex >= 0 are *not* considered to be unbounded even
1285  // if lo=-inf and hi=inf - this is because these limits may change during the
1286  // solution process.
1287 
1288  {
1289  int *findex = m_findex;
1290  btScalar *lo = m_lo, *hi = m_hi;
1291  const int n = m_n;
1292  for (int k = m_nub; k<n; ++k) {
1293  if (findex && findex[k] >= 0) continue;
1294  if (lo[k]==-BT_INFINITY && hi[k]==BT_INFINITY) {
1295  btSwapProblem (m_A,m_x,m_b,m_w,lo,hi,m_p,m_state,findex,n,m_nub,k,m_nskip,0);
1296  m_nub++;
1297  }
1298  }
1299  }
1300 
1301  // if there are unbounded variables at the start, factorize A up to that
1302  // point and solve for x. this puts all indexes 0..nub-1 into C.
1303  if (m_nub > 0) {
1304  const int nub = m_nub;
1305  {
1306  btScalar *Lrow = m_L;
1307  const int nskip = m_nskip;
1308  for (int j=0; j<nub; Lrow+=nskip, ++j) memcpy(Lrow,BTAROW(j),(j+1)*sizeof(btScalar));
1309  }
1310  btFactorLDLT (m_L,m_d,nub,m_nskip);
1311  memcpy (m_x,m_b,nub*sizeof(btScalar));
1312  btSolveLDLT (m_L,m_d,m_x,nub,m_nskip);
1313  btSetZero (m_w,nub);
1314  {
1315  int *C = m_C;
1316  for (int k=0; k<nub; ++k) C[k] = k;
1317  }
1318  m_nC = nub;
1319  }
1320 
1321  // permute the indexes > nub such that all findex variables are at the end
1322  if (m_findex) {
1323  const int nub = m_nub;
1324  int *findex = m_findex;
1325  int num_at_end = 0;
1326  for (int k=m_n-1; k >= nub; k--) {
1327  if (findex[k] >= 0) {
1328  btSwapProblem (m_A,m_x,m_b,m_w,m_lo,m_hi,m_p,m_state,findex,m_n,k,m_n-1-num_at_end,m_nskip,1);
1329  num_at_end++;
1330  }
1331  }
1332  }
1333 
1334  // print info about indexes
1335  /*
1336  {
1337  const int n = m_n;
1338  const int nub = m_nub;
1339  for (int k=0; k<n; k++) {
1340  if (k<nub) printf ("C");
1341  else if (m_lo[k]==-BT_INFINITY && m_hi[k]==BT_INFINITY) printf ("c");
1342  else printf (".");
1343  }
1344  printf ("\n");
1345  }
1346  */
1347 }
1348 
1349 
1350 void btLCP::transfer_i_to_C (int i)
1351 {
1352  {
1353  if (m_nC > 0) {
1354  // ell,Dell were computed by solve1(). note, ell = D \ L1solve (L,A(i,C))
1355  {
1356  const int nC = m_nC;
1357  btScalar *const Ltgt = m_L + nC*m_nskip, *ell = m_ell;
1358  for (int j=0; j<nC; ++j) Ltgt[j] = ell[j];
1359  }
1360  const int nC = m_nC;
1361  m_d[nC] = btRecip (BTAROW(i)[i] - btLargeDot(m_ell,m_Dell,nC));
1362  }
1363  else {
1364  m_d[0] = btRecip (BTAROW(i)[i]);
1365  }
1366 
1367  btSwapProblem (m_A,m_x,m_b,m_w,m_lo,m_hi,m_p,m_state,m_findex,m_n,m_nC,i,m_nskip,1);
1368 
1369  const int nC = m_nC;
1370  m_C[nC] = nC;
1371  m_nC = nC + 1; // nC value is outdated after this line
1372  }
1373 
1374 }
1375 
1376 
1377 void btLCP::transfer_i_from_N_to_C (int i)
1378 {
1379  {
1380  if (m_nC > 0) {
1381  {
1382  btScalar *const aptr = BTAROW(i);
1383  btScalar *Dell = m_Dell;
1384  const int *C = m_C;
1385 # ifdef BTNUB_OPTIMIZATIONS
1386  // if nub>0, initial part of aptr unpermuted
1387  const int nub = m_nub;
1388  int j=0;
1389  for ( ; j<nub; ++j) Dell[j] = aptr[j];
1390  const int nC = m_nC;
1391  for ( ; j<nC; ++j) Dell[j] = aptr[C[j]];
1392 # else
1393  const int nC = m_nC;
1394  for (int j=0; j<nC; ++j) Dell[j] = aptr[C[j]];
1395 # endif
1396  }
1397  btSolveL1 (m_L,m_Dell,m_nC,m_nskip);
1398  {
1399  const int nC = m_nC;
1400  btScalar *const Ltgt = m_L + nC*m_nskip;
1401  btScalar *ell = m_ell, *Dell = m_Dell, *d = m_d;
1402  for (int j=0; j<nC; ++j) Ltgt[j] = ell[j] = Dell[j] * d[j];
1403  }
1404  const int nC = m_nC;
1405  m_d[nC] = btRecip (BTAROW(i)[i] - btLargeDot(m_ell,m_Dell,nC));
1406  }
1407  else {
1408  m_d[0] = btRecip (BTAROW(i)[i]);
1409  }
1410 
1411  btSwapProblem (m_A,m_x,m_b,m_w,m_lo,m_hi,m_p,m_state,m_findex,m_n,m_nC,i,m_nskip,1);
1412 
1413  const int nC = m_nC;
1414  m_C[nC] = nC;
1415  m_nN--;
1416  m_nC = nC + 1; // nC value is outdated after this line
1417  }
1418 
1419  // @@@ TO DO LATER
1420  // if we just finish here then we'll go back and re-solve for
1421  // delta_x. but actually we can be more efficient and incrementally
1422 // update delta_x here. but if we do this, we won't have ell and Dell
1423  // to use in updating the factorization later.
1424 
1425 }
1426 
1427 void btRemoveRowCol (btScalar *A, int n, int nskip, int r)
1428 {
1429  btAssert(A && n > 0 && nskip >= n && r >= 0 && r < n);
1430  if (r >= n-1) return;
1431  if (r > 0) {
1432  {
1433  const size_t move_size = (n-r-1)*sizeof(btScalar);
1434  btScalar *Adst = A + r;
1435  for (int i=0; i<r; Adst+=nskip,++i) {
1436  btScalar *Asrc = Adst + 1;
1437  memmove (Adst,Asrc,move_size);
1438  }
1439  }
1440  {
1441  const size_t cpy_size = r*sizeof(btScalar);
1442  btScalar *Adst = A + r * nskip;
1443  for (int i=r; i<(n-1); ++i) {
1444  btScalar *Asrc = Adst + nskip;
1445  memcpy (Adst,Asrc,cpy_size);
1446  Adst = Asrc;
1447  }
1448  }
1449  }
1450  {
1451  const size_t cpy_size = (n-r-1)*sizeof(btScalar);
1452  btScalar *Adst = A + r * (nskip + 1);
1453  for (int i=r; i<(n-1); ++i) {
1454  btScalar *Asrc = Adst + (nskip + 1);
1455  memcpy (Adst,Asrc,cpy_size);
1456  Adst = Asrc - 1;
1457  }
1458  }
1459 }
1460 
1461 
1462 
1463 
1464 void btLDLTAddTL (btScalar *L, btScalar *d, const btScalar *a, int n, int nskip, btAlignedObjectArray<btScalar>& scratch)
1465 {
1466  btAssert (L && d && a && n > 0 && nskip >= n);
1467 
1468  if (n < 2) return;
1469  scratch.resize(2*nskip);
1470  btScalar *W1 = &scratch[0];
1471 
1472  btScalar *W2 = W1 + nskip;
1473 
1474  W1[0] = btScalar(0.0);
1475  W2[0] = btScalar(0.0);
1476  for (int j=1; j<n; ++j) {
1477  W1[j] = W2[j] = (btScalar) (a[j] * SIMDSQRT12);
1478  }
1479  btScalar W11 = (btScalar) ((btScalar(0.5)*a[0]+1)*SIMDSQRT12);
1480  btScalar W21 = (btScalar) ((btScalar(0.5)*a[0]-1)*SIMDSQRT12);
1481 
1482  btScalar alpha1 = btScalar(1.0);
1483  btScalar alpha2 = btScalar(1.0);
1484 
1485  {
1486  btScalar dee = d[0];
1487  btScalar alphanew = alpha1 + (W11*W11)*dee;
1488  btAssert(alphanew != btScalar(0.0));
1489  dee /= alphanew;
1490  btScalar gamma1 = W11 * dee;
1491  dee *= alpha1;
1492  alpha1 = alphanew;
1493  alphanew = alpha2 - (W21*W21)*dee;
1494  dee /= alphanew;
1495  //btScalar gamma2 = W21 * dee;
1496  alpha2 = alphanew;
1497  btScalar k1 = btScalar(1.0) - W21*gamma1;
1498  btScalar k2 = W21*gamma1*W11 - W21;
1499  btScalar *ll = L + nskip;
1500  for (int p=1; p<n; ll+=nskip, ++p) {
1501  btScalar Wp = W1[p];
1502  btScalar ell = *ll;
1503  W1[p] = Wp - W11*ell;
1504  W2[p] = k1*Wp + k2*ell;
1505  }
1506  }
1507 
1508  btScalar *ll = L + (nskip + 1);
1509  for (int j=1; j<n; ll+=nskip+1, ++j) {
1510  btScalar k1 = W1[j];
1511  btScalar k2 = W2[j];
1512 
1513  btScalar dee = d[j];
1514  btScalar alphanew = alpha1 + (k1*k1)*dee;
1515  btAssert(alphanew != btScalar(0.0));
1516  dee /= alphanew;
1517  btScalar gamma1 = k1 * dee;
1518  dee *= alpha1;
1519  alpha1 = alphanew;
1520  alphanew = alpha2 - (k2*k2)*dee;
1521  dee /= alphanew;
1522  btScalar gamma2 = k2 * dee;
1523  dee *= alpha2;
1524  d[j] = dee;
1525  alpha2 = alphanew;
1526 
1527  btScalar *l = ll + nskip;
1528  for (int p=j+1; p<n; l+=nskip, ++p) {
1529  btScalar ell = *l;
1530  btScalar Wp = W1[p] - k1 * ell;
1531  ell += gamma1 * Wp;
1532  W1[p] = Wp;
1533  Wp = W2[p] - k2 * ell;
1534  ell -= gamma2 * Wp;
1535  W2[p] = Wp;
1536  *l = ell;
1537  }
1538  }
1539 }
1540 
1541 
1542 #define _BTGETA(i,j) (A[i][j])
1543 //#define _GETA(i,j) (A[(i)*nskip+(j)])
1544 #define BTGETA(i,j) ((i > j) ? _BTGETA(i,j) : _BTGETA(j,i))
1545 
1546 inline size_t btEstimateLDLTAddTLTmpbufSize(int nskip)
1547 {
1548  return nskip * 2 * sizeof(btScalar);
1549 }
1550 
1551 
1552 void btLDLTRemove (btScalar **A, const int *p, btScalar *L, btScalar *d,
1553  int n1, int n2, int r, int nskip, btAlignedObjectArray<btScalar>& scratch)
1554 {
1555  btAssert(A && p && L && d && n1 > 0 && n2 > 0 && r >= 0 && r < n2 &&
1556  n1 >= n2 && nskip >= n1);
1557  #ifdef BT_DEBUG
1558  for (int i=0; i<n2; ++i)
1559  btAssert(p[i] >= 0 && p[i] < n1);
1560  #endif
1561 
1562  if (r==n2-1) {
1563  return; // deleting last row/col is easy
1564  }
1565  else {
1566  size_t LDLTAddTL_size = btEstimateLDLTAddTLTmpbufSize(nskip);
1567  btAssert(LDLTAddTL_size % sizeof(btScalar) == 0);
1568  scratch.resize(nskip * 2+n2);
1569  btScalar *tmp = &scratch[0];
1570  if (r==0) {
1571  btScalar *a = (btScalar *)((char *)tmp + LDLTAddTL_size);
1572  const int p_0 = p[0];
1573  for (int i=0; i<n2; ++i) {
1574  a[i] = -BTGETA(p[i],p_0);
1575  }
1576  a[0] += btScalar(1.0);
1577  btLDLTAddTL (L,d,a,n2,nskip,scratch);
1578  }
1579  else {
1580  btScalar *t = (btScalar *)((char *)tmp + LDLTAddTL_size);
1581  {
1582  btScalar *Lcurr = L + r*nskip;
1583  for (int i=0; i<r; ++Lcurr, ++i) {
1584  btAssert(d[i] != btScalar(0.0));
1585  t[i] = *Lcurr / d[i];
1586  }
1587  }
1588  btScalar *a = t + r;
1589  {
1590  btScalar *Lcurr = L + r*nskip;
1591  const int *pp_r = p + r, p_r = *pp_r;
1592  const int n2_minus_r = n2-r;
1593  for (int i=0; i<n2_minus_r; Lcurr+=nskip,++i) {
1594  a[i] = btLargeDot(Lcurr,t,r) - BTGETA(pp_r[i],p_r);
1595  }
1596  }
1597  a[0] += btScalar(1.0);
1598  btLDLTAddTL (L + r*nskip+r, d+r, a, n2-r, nskip, scratch);
1599  }
1600  }
1601 
1602  // snip out row/column r from L and d
1603  btRemoveRowCol (L,n2,nskip,r);
1604  if (r < (n2-1)) memmove (d+r,d+r+1,(n2-r-1)*sizeof(btScalar));
1605 }
1606 
1607 
1608 void btLCP::transfer_i_from_C_to_N (int i, btAlignedObjectArray<btScalar>& scratch)
1609 {
1610  {
1611  int *C = m_C;
1612  // remove a row/column from the factorization, and adjust the
1613  // indexes (black magic!)
1614  int last_idx = -1;
1615  const int nC = m_nC;
1616  int j = 0;
1617  for ( ; j<nC; ++j) {
1618  if (C[j]==nC-1) {
1619  last_idx = j;
1620  }
1621  if (C[j]==i) {
1622  btLDLTRemove (m_A,C,m_L,m_d,m_n,nC,j,m_nskip,scratch);
1623  int k;
1624  if (last_idx == -1) {
1625  for (k=j+1 ; k<nC; ++k) {
1626  if (C[k]==nC-1) {
1627  break;
1628  }
1629  }
1630  btAssert (k < nC);
1631  }
1632  else {
1633  k = last_idx;
1634  }
1635  C[k] = C[j];
1636  if (j < (nC-1)) memmove (C+j,C+j+1,(nC-j-1)*sizeof(int));
1637  break;
1638  }
1639  }
1640  btAssert (j < nC);
1641 
1642  btSwapProblem (m_A,m_x,m_b,m_w,m_lo,m_hi,m_p,m_state,m_findex,m_n,i,nC-1,m_nskip,1);
1643 
1644  m_nN++;
1645  m_nC = nC - 1; // nC value is outdated after this line
1646  }
1647 
1648 }
1649 
1650 
1651 void btLCP::pN_equals_ANC_times_qC (btScalar *p, btScalar *q)
1652 {
1653  // we could try to make this matrix-vector multiplication faster using
1654  // outer product matrix tricks, e.g. with the dMultidotX() functions.
1655  // but i tried it and it actually made things slower on random 100x100
1656  // problems because of the overhead involved. so we'll stick with the
1657  // simple method for now.
1658  const int nC = m_nC;
1659  btScalar *ptgt = p + nC;
1660  const int nN = m_nN;
1661  for (int i=0; i<nN; ++i) {
1662  ptgt[i] = btLargeDot (BTAROW(i+nC),q,nC);
1663  }
1664 }
1665 
1666 
1667 void btLCP::pN_plusequals_ANi (btScalar *p, int i, int sign)
1668 {
1669  const int nC = m_nC;
1670  btScalar *aptr = BTAROW(i) + nC;
1671  btScalar *ptgt = p + nC;
1672  if (sign > 0) {
1673  const int nN = m_nN;
1674  for (int j=0; j<nN; ++j) ptgt[j] += aptr[j];
1675  }
1676  else {
1677  const int nN = m_nN;
1678  for (int j=0; j<nN; ++j) ptgt[j] -= aptr[j];
1679  }
1680 }
1681 
1682 void btLCP::pC_plusequals_s_times_qC (btScalar *p, btScalar s, btScalar *q)
1683 {
1684  const int nC = m_nC;
1685  for (int i=0; i<nC; ++i) {
1686  p[i] += s*q[i];
1687  }
1688 }
1689 
1690 void btLCP::pN_plusequals_s_times_qN (btScalar *p, btScalar s, btScalar *q)
1691 {
1692  const int nC = m_nC;
1693  btScalar *ptgt = p + nC, *qsrc = q + nC;
1694  const int nN = m_nN;
1695  for (int i=0; i<nN; ++i) {
1696  ptgt[i] += s*qsrc[i];
1697  }
1698 }
1699 
1700 void btLCP::solve1 (btScalar *a, int i, int dir, int only_transfer)
1701 {
1702  // the `Dell' and `ell' that are computed here are saved. if index i is
1703  // later added to the factorization then they can be reused.
1704  //
1705  // @@@ question: do we need to solve for entire delta_x??? yes, but
1706  // only if an x goes below 0 during the step.
1707 
1708  if (m_nC > 0) {
1709  {
1710  btScalar *Dell = m_Dell;
1711  int *C = m_C;
1712  btScalar *aptr = BTAROW(i);
1713 # ifdef BTNUB_OPTIMIZATIONS
1714  // if nub>0, initial part of aptr[] is guaranteed unpermuted
1715  const int nub = m_nub;
1716  int j=0;
1717  for ( ; j<nub; ++j) Dell[j] = aptr[j];
1718  const int nC = m_nC;
1719  for ( ; j<nC; ++j) Dell[j] = aptr[C[j]];
1720 # else
1721  const int nC = m_nC;
1722  for (int j=0; j<nC; ++j) Dell[j] = aptr[C[j]];
1723 # endif
1724  }
1725  btSolveL1 (m_L,m_Dell,m_nC,m_nskip);
1726  {
1727  btScalar *ell = m_ell, *Dell = m_Dell, *d = m_d;
1728  const int nC = m_nC;
1729  for (int j=0; j<nC; ++j) ell[j] = Dell[j] * d[j];
1730  }
1731 
1732  if (!only_transfer) {
1733  btScalar *tmp = m_tmp, *ell = m_ell;
1734  {
1735  const int nC = m_nC;
1736  for (int j=0; j<nC; ++j) tmp[j] = ell[j];
1737  }
1738  btSolveL1T (m_L,tmp,m_nC,m_nskip);
1739  if (dir > 0) {
1740  int *C = m_C;
1741  btScalar *tmp = m_tmp;
1742  const int nC = m_nC;
1743  for (int j=0; j<nC; ++j) a[C[j]] = -tmp[j];
1744  } else {
1745  int *C = m_C;
1746  btScalar *tmp = m_tmp;
1747  const int nC = m_nC;
1748  for (int j=0; j<nC; ++j) a[C[j]] = tmp[j];
1749  }
1750  }
1751  }
1752 }
1753 
1754 
1755 void btLCP::unpermute()
1756 {
1757  // now we have to un-permute x and w
1758  {
1759  memcpy (m_tmp,m_x,m_n*sizeof(btScalar));
1760  btScalar *x = m_x, *tmp = m_tmp;
1761  const int *p = m_p;
1762  const int n = m_n;
1763  for (int j=0; j<n; ++j) x[p[j]] = tmp[j];
1764  }
1765  {
1766  memcpy (m_tmp,m_w,m_n*sizeof(btScalar));
1767  btScalar *w = m_w, *tmp = m_tmp;
1768  const int *p = m_p;
1769  const int n = m_n;
1770  for (int j=0; j<n; ++j) w[p[j]] = tmp[j];
1771  }
1772 }
1773 
1774 #endif // btLCP_FAST
1775 
1776 
1777 //***************************************************************************
1778 // an optimized Dantzig LCP driver routine for the lo-hi LCP problem.
1779 
1779 
1780 bool btSolveDantzigLCP (int n, btScalar *A, btScalar *x, btScalar *b,
1781  btScalar* outer_w, int nub, btScalar *lo, btScalar *hi, int *findex, btDantzigScratchMemory& scratchMem)
1782 {
1783  s_error = false;
1784 
1785 // printf("btSolveDantzigLCP n=%d\n",n);
1786  btAssert (n>0 && A && x && b && lo && hi && nub >= 0 && nub <= n);
1787  btAssert(outer_w);
1788 
1789 #ifdef BT_DEBUG
1790  {
1791  // check restrictions on lo and hi
1792  for (int k=0; k<n; ++k)
1793  btAssert (lo[k] <= 0 && hi[k] >= 0);
1794  }
1795 # endif
1796 
1797 
1798  // if all the variables are unbounded then we can just factor, solve,
1799  // and return
1800  if (nub >= n)
1801  {
1802 
1803 
1804  int nskip = (n);
1805  btFactorLDLT (A, outer_w, n, nskip);
1806  btSolveLDLT (A, outer_w, b, n, nskip);
1807  memcpy (x, b, n*sizeof(btScalar));
1808 
1809  return !s_error;
1810  }
1811 
1812  const int nskip = (n);
1813  scratchMem.L.resize(n*nskip);
1814 
1815  scratchMem.d.resize(n);
1816 
1817  btScalar *w = outer_w;
1818  scratchMem.delta_w.resize(n);
1819  scratchMem.delta_x.resize(n);
1820  scratchMem.Dell.resize(n);
1821  scratchMem.ell.resize(n);
1822  scratchMem.Arows.resize(n);
1823  scratchMem.p.resize(n);
1824  scratchMem.C.resize(n);
1825 
1826  // for i in N, state[i] is 0 if x(i)==lo(i) or 1 if x(i)==hi(i)
1827  scratchMem.state.resize(n);
1828 
1829 
1830  // create LCP object. note that tmp is set to delta_w to save space, this
1831  // optimization relies on knowledge of how tmp is used, so be careful!
1832  btLCP lcp(n,nskip,nub,A,x,b,w,lo,hi,&scratchMem.L[0],&scratchMem.d[0],&scratchMem.Dell[0],&scratchMem.ell[0],&scratchMem.delta_w[0],&scratchMem.state[0],findex,&scratchMem.p[0],&scratchMem.C[0],&scratchMem.Arows[0]);
1833  int adj_nub = lcp.getNub();
1834 
1835  // loop over all indexes adj_nub..n-1. for index i, if x(i),w(i) satisfy the
1836  // LCP conditions then i is added to the appropriate index set. otherwise
1837  // x(i),w(i) is driven either +ve or -ve to force it to the valid region.
1838  // as we drive x(i), x(C) is also adjusted to keep w(C) at zero.
1839  // while driving x(i) we maintain the LCP conditions on the other variables
1840  // 0..i-1. we do this by watching out for other x(i),w(i) values going
1841  // outside the valid region, and then switching them between index sets
1842  // when that happens.
1843 
1844  bool hit_first_friction_index = false;
1845  for (int i=adj_nub; i<n; ++i)
1846  {
1847  s_error = false;
1848 // the index i is the driving index and indexes i+1..n-1 are "don't care",
1849  // i.e. when we make changes to the system those x's will be zero and we
1850  // don't care what happens to those w's. in other words, we only consider
1851  // an (i+1)*(i+1) sub-problem of A*x=b+w.
1852 
1853  // if we've hit the first friction index, we have to compute the lo and
1854  // hi values based on the values of x already computed. we have been
1855  // permuting the indexes, so the values stored in the findex vector are
1856  // no longer valid. thus we have to temporarily unpermute the x vector.
1857  // for the purposes of this computation, 0*infinity = 0 ... so if the
1858  // contact constraint's normal force is 0, there should be no tangential
1859  // force applied.
1860 
1861  if (!hit_first_friction_index && findex && findex[i] >= 0) {
1862  // un-permute x into delta_w, which is not being used at the moment
1863  for (int j=0; j<n; ++j) scratchMem.delta_w[scratchMem.p[j]] = x[j];
1864 
1865  // set lo and hi values
1866  for (int k=i; k<n; ++k) {
1867  btScalar wfk = scratchMem.delta_w[findex[k]];
1868  if (wfk == 0) {
1869  hi[k] = 0;
1870  lo[k] = 0;
1871  }
1872  else {
1873  hi[k] = btFabs (hi[k] * wfk);
1874  lo[k] = -hi[k];
1875  }
1876  }
1877  hit_first_friction_index = true;
1878  }
1879 
1880  // thus far we have not even been computing the w values for indexes
1881  // greater than i, so compute w[i] now.
1882  w[i] = lcp.AiC_times_qC (i,x) + lcp.AiN_times_qN (i,x) - b[i];
1883 
1884  // if lo=hi=0 (which can happen for tangential friction when normals are
1885  // 0) then the index will be assigned to set N with some state. however,
1886  // set C's line has zero size, so the index will always remain in set N.
1887  // with the "normal" switching logic, if w changed sign then the index
1888  // would have to switch to set C and then back to set N with an inverted
1889  // state. this is pointless, and also computationally expensive. to
1890  // prevent this from happening, we use the rule that indexes with lo=hi=0
1891  // will never be checked for set changes. this means that the state for
1892  // these indexes may be incorrect, but that doesn't matter.
1893 
1894  // see if x(i),w(i) is in a valid region
1895  if (lo[i]==0 && w[i] >= 0) {
1896  lcp.transfer_i_to_N (i);
1897  scratchMem.state[i] = false;
1898  }
1899  else if (hi[i]==0 && w[i] <= 0) {
1900  lcp.transfer_i_to_N (i);
1901  scratchMem.state[i] = true;
1902  }
1903  else if (w[i]==0) {
1904  // this is a degenerate case. by the time we get to this test we know
1905  // that lo != 0, which means that lo < 0 as lo is not allowed to be +ve,
1906  // and similarly that hi > 0. this means that the line segment
1907  // corresponding to set C is at least finite in extent, and we are on it.
1908  // NOTE: we must call lcp.solve1() before lcp.transfer_i_to_C()
1909  lcp.solve1 (&scratchMem.delta_x[0],i,0,1);
1910 
1911  lcp.transfer_i_to_C (i);
1912  }
1913  else {
1914  // we must push x(i) and w(i)
1915  for (;;) {
1916  int dir;
1917  btScalar dirf;
1918  // find direction to push on x(i)
1919  if (w[i] <= 0) {
1920  dir = 1;
1921  dirf = btScalar(1.0);
1922  }
1923  else {
1924  dir = -1;
1925  dirf = btScalar(-1.0);
1926  }
1927 
1928  // compute: delta_x(C) = -dir*A(C,C)\A(C,i)
1929  lcp.solve1 (&scratchMem.delta_x[0],i,dir);
1930 
1931 // note that delta_x[i] = dirf, but we won't bother to set it
1932 
1933  // compute: delta_w = A*delta_x ... note we only care about
1934  // delta_w(N) and delta_w(i), the rest is ignored
1935  lcp.pN_equals_ANC_times_qC (&scratchMem.delta_w[0],&scratchMem.delta_x[0]);
1936  lcp.pN_plusequals_ANi (&scratchMem.delta_w[0],i,dir);
1937  scratchMem.delta_w[i] = lcp.AiC_times_qC (i,&scratchMem.delta_x[0]) + lcp.Aii(i)*dirf;
1938 
1939  // find largest step we can take (size=s), either to drive x(i),w(i)
1940  // to the valid LCP region or to drive an already-valid variable
1941  // outside the valid region.
1942 
1943  int cmd = 1; // index switching command
1944  int si = 0; // si = index to switch if cmd>3
1945  btScalar s = -w[i]/scratchMem.delta_w[i];
1946  if (dir > 0) {
1947  if (hi[i] < BT_INFINITY) {
1948  btScalar s2 = (hi[i]-x[i])*dirf; // was (hi[i]-x[i])/dirf // step to x(i)=hi(i)
1949  if (s2 < s) {
1950  s = s2;
1951  cmd = 3;
1952  }
1953  }
1954  }
1955  else {
1956  if (lo[i] > -BT_INFINITY) {
1957  btScalar s2 = (lo[i]-x[i])*dirf; // was (lo[i]-x[i])/dirf // step to x(i)=lo(i)
1958  if (s2 < s) {
1959  s = s2;
1960  cmd = 2;
1961  }
1962  }
1963  }
1964 
1965  {
1966  const int numN = lcp.numN();
1967  for (int k=0; k < numN; ++k) {
1968  const int indexN_k = lcp.indexN(k);
1969  if (!scratchMem.state[indexN_k] ? scratchMem.delta_w[indexN_k] < 0 : scratchMem.delta_w[indexN_k] > 0) {
1970  // don't bother checking if lo=hi=0
1971  if (lo[indexN_k] == 0 && hi[indexN_k] == 0) continue;
1972  btScalar s2 = -w[indexN_k] / scratchMem.delta_w[indexN_k];
1973  if (s2 < s) {
1974  s = s2;
1975  cmd = 4;
1976  si = indexN_k;
1977  }
1978  }
1979  }
1980  }
1981 
1982  {
1983  const int numC = lcp.numC();
1984  for (int k=adj_nub; k < numC; ++k) {
1985  const int indexC_k = lcp.indexC(k);
1986  if (scratchMem.delta_x[indexC_k] < 0 && lo[indexC_k] > -BT_INFINITY) {
1987  btScalar s2 = (lo[indexC_k]-x[indexC_k]) / scratchMem.delta_x[indexC_k];
1988  if (s2 < s) {
1989  s = s2;
1990  cmd = 5;
1991  si = indexC_k;
1992  }
1993  }
1994  if (scratchMem.delta_x[indexC_k] > 0 && hi[indexC_k] < BT_INFINITY) {
1995  btScalar s2 = (hi[indexC_k]-x[indexC_k]) / scratchMem.delta_x[indexC_k];
1996  if (s2 < s) {
1997  s = s2;
1998  cmd = 6;
1999  si = indexC_k;
2000  }
2001  }
2002  }
2003  }
2004 
2005  //static char* cmdstring[8] = {0,"->C","->NL","->NH","N->C",
2006  // "C->NL","C->NH"};
2007  //printf ("cmd=%d (%s), si=%d\n",cmd,cmdstring[cmd],(cmd>3) ? si : i);
2008 
2009  // if s <= 0 then we've got a problem. if we just keep going then
2010  // we're going to get stuck in an infinite loop. instead, just cross
2011  // our fingers and exit with the current solution.
2012  if (s <= btScalar(0.0))
2013  {
2014 // printf("LCP internal error, s <= 0 (s=%.4e)",(double)s);
2015  if (i < n) {
2016  btSetZero (x+i,n-i);
2017  btSetZero (w+i,n-i);
2018  }
2019  s_error = true;
2020  break;
2021  }
2022 
2023  // apply x = x + s * delta_x
2024  lcp.pC_plusequals_s_times_qC (x, s, &scratchMem.delta_x[0]);
2025  x[i] += s * dirf;
2026 
2027  // apply w = w + s * delta_w
2028  lcp.pN_plusequals_s_times_qN (w, s, &scratchMem.delta_w[0]);
2029  w[i] += s * scratchMem.delta_w[i];
2030 
2031 // void *tmpbuf;
2032  // switch indexes between sets if necessary
2033  switch (cmd) {
2034  case 1: // done
2035  w[i] = 0;
2036  lcp.transfer_i_to_C (i);
2037  break;
2038  case 2: // done
2039  x[i] = lo[i];
2040  scratchMem.state[i] = false;
2041  lcp.transfer_i_to_N (i);
2042  break;
2043  case 3: // done
2044  x[i] = hi[i];
2045  scratchMem.state[i] = true;
2046  lcp.transfer_i_to_N (i);
2047  break;
2048  case 4: // keep going
2049  w[si] = 0;
2050  lcp.transfer_i_from_N_to_C (si);
2051  break;
2052  case 5: // keep going
2053  x[si] = lo[si];
2054  scratchMem.state[si] = false;
2055  lcp.transfer_i_from_C_to_N (si, scratchMem.m_scratch);
2056  break;
2057  case 6: // keep going
2058  x[si] = hi[si];
2059  scratchMem.state[si] = true;
2060  lcp.transfer_i_from_C_to_N (si, scratchMem.m_scratch);
2061  break;
2062  }
2063 
2064  if (cmd <= 3) break;
2065  } // for (;;)
2066  } // else
2067 
2068  if (s_error)
2069  {
2070  break;
2071  }
2072  } // for (int i=adj_nub; i<n; ++i)
2073 
2074  lcp.unpermute();
2075 
2076 
2077  return !s_error;
2078 }
2079 
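As a minimal sketch of how the driver above can be exercised on its own (independent of Bullet's MLCP wrapper), the following hypothetical test sets up a two-row problem: one contact-normal row and one friction row coupled to it through findex. It assumes A is stored row-major with a row stride of n, that findex[i] = -1 marks a non-friction row, and that hi on a friction row initially holds the friction coefficient, so that its effective bound becomes |hi[i] * x[findex[i]]| as in the rescaling loop near the top of this section.

 #include "btDantzigLCP.h"   // declares btSolveDantzigLCP and btDantzigScratchMemory
 #include <stdio.h>

 int main()
 {
     const int n = 2;

     // Solve A*x = b + w subject to the LCP box conditions.  A must be
     // symmetric positive semi-definite; here it is simply 2*I.
     btScalar A[n * n] = { 2, 0,
                           0, 2 };
     btScalar b[n]  = { 1, btScalar(-0.5) };
     btScalar x[n]  = { 0, 0 };
     btScalar w[n]  = { 0, 0 };

     // Row 0 acts like a contact normal (0 <= x0, w0 >= 0).  Row 1 is its
     // friction row: findex[1] = 0 couples it to row 0, and hi[1] = 0.3 plays
     // the role of the friction coefficient.
     btScalar lo[n] = { 0, btScalar(-0.3) };
     btScalar hi[n] = { BT_INFINITY, btScalar(0.3) };
     int findex[n]  = { -1, 0 };

     btDantzigScratchMemory scratch;   // working arrays are sized by the driver
     bool ok = btSolveDantzigLCP(n, A, x, b, w, 0 /*nub*/, lo, hi, findex, scratch);

     // With these numbers x should come out near (0.5, -0.15): the normal row
     // solves to b0/A00 = 0.5 and the friction row clamps at -0.3*0.5.
     // A, b, lo and hi are working storage and may be left permuted on return.
     printf("ok=%d  x=(%f, %f)  w=(%f, %f)\n", (int)ok,
            (double)x[0], (double)x[1], (double)w[0], (double)w[1]);
     return 0;
 }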