Nav2 Navigation Stack - humble
ROS 2 Navigation Stack
pf.c
/*
 * Player - One Hell of a Robot Server
 * Copyright (C) 2000 Brian Gerkey & Kasper Stoy
 * gerkey@usc.edu kaspers@robotics.usc.edu
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */
/**************************************************************************
 * Desc: Simple particle filter for localization.
 * Author: Andrew Howard
 * Date: 10 Dec 2002
 * CVS: $Id: pf.c 6345 2008-04-17 01:36:39Z gerkey $
 *************************************************************************/

#include <float.h>
#include <assert.h>
#include <math.h>
#include <stdlib.h>
#include <time.h>

#include "nav2_amcl/pf/pf.hpp"
#include "nav2_amcl/pf/pf_pdf.hpp"
#include "nav2_amcl/pf/pf_kdtree.hpp"

#include "nav2_amcl/portable_utils.hpp"


// Compute the required number of samples, given that there are k bins
// with samples in them.
static int pf_resample_limit(pf_t * pf, int k);


// Create a new filter
pf_t * pf_alloc(
  int min_samples, int max_samples,
  double alpha_slow, double alpha_fast,
  pf_init_model_fn_t random_pose_fn)
{
  int i, j;
  pf_t * pf;
  pf_sample_set_t * set;
  pf_sample_t * sample;

  srand48(time(NULL));

  pf = calloc(1, sizeof(pf_t));

  pf->random_pose_fn = random_pose_fn;

  pf->min_samples = min_samples;
  pf->max_samples = max_samples;

  // Control parameters for the population size calculation. [err] is
  // the max error between the true distribution and the estimated
  // distribution. [z] is the upper standard normal quantile for (1 -
  // p), where p is the probability that the error on the estimated
  // distribution will be less than [err].
  pf->pop_err = 0.01;
  pf->pop_z = 3;
  pf->dist_threshold = 0.5;

  pf->current_set = 0;
  for (j = 0; j < 2; j++) {
    set = pf->sets + j;

    set->sample_count = max_samples;
    set->samples = calloc(max_samples, sizeof(pf_sample_t));

    for (i = 0; i < set->sample_count; i++) {
      sample = set->samples + i;
      sample->pose.v[0] = 0.0;
      sample->pose.v[1] = 0.0;
      sample->pose.v[2] = 0.0;
      sample->weight = 1.0 / max_samples;
    }

    // HACK: is 3 times max_samples enough?
    set->kdtree = pf_kdtree_alloc(3 * max_samples);

    set->cluster_count = 0;
    set->cluster_max_count = max_samples;
    set->clusters = calloc(set->cluster_max_count, sizeof(pf_cluster_t));

    set->mean = pf_vector_zero();
    set->cov = pf_matrix_zero();
  }

  pf->w_slow = 0.0;
  pf->w_fast = 0.0;

  pf->alpha_slow = alpha_slow;
  pf->alpha_fast = alpha_fast;

  // set converged to 0
  pf_init_converged(pf);

  return pf;
}
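
// A minimal usage sketch (not part of this file), assuming hypothetical
// caller-supplied callbacks my_random_pose() and my_laser_model(); the
// typical AMCL cycle allocates a filter, seeds it around an initial pose
// estimate, then alternates sensor updates with resampling:
//
//   pf_t * pf = pf_alloc(500, 5000, 0.001, 0.1, my_random_pose);
//   pf_init(pf, initial_mean, initial_cov);
//   for (;;) {
//     pf_update_sensor(pf, my_laser_model, scan_data);
//     pf_update_resample(pf, map_data);
//   }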

// Free an existing filter
void pf_free(pf_t * pf)
{
  int i;

  for (i = 0; i < 2; i++) {
    free(pf->sets[i].clusters);
    pf_kdtree_free(pf->sets[i].kdtree);
    free(pf->sets[i].samples);
  }
  free(pf);
}

// Initialize the filter using a Gaussian
void pf_init(pf_t * pf, pf_vector_t mean, pf_matrix_t cov)
{
  int i;
  pf_sample_set_t * set;
  pf_sample_t * sample;
  pf_pdf_gaussian_t * pdf;

  set = pf->sets + pf->current_set;

  // Create the kd tree for adaptive sampling
  pf_kdtree_clear(set->kdtree);

  set->sample_count = pf->max_samples;

  pdf = pf_pdf_gaussian_alloc(mean, cov);

  // Compute the new sample poses
  for (i = 0; i < set->sample_count; i++) {
    sample = set->samples + i;
    sample->weight = 1.0 / pf->max_samples;
    sample->pose = pf_pdf_gaussian_sample(pdf);

    // Add sample to histogram
    pf_kdtree_insert(set->kdtree, sample->pose, sample->weight);
  }

  pf->w_slow = pf->w_fast = 0.0;

  pf_pdf_gaussian_free(pdf);

  // Re-compute cluster statistics
  pf_cluster_stats(pf, set);

  // set converged to 0
  pf_init_converged(pf);
}


// Initialize the filter using some model
void pf_init_model(pf_t * pf, pf_init_model_fn_t init_fn, void * init_data)
{
  int i;
  pf_sample_set_t * set;
  pf_sample_t * sample;

  set = pf->sets + pf->current_set;

  // Create the kd tree for adaptive sampling
  pf_kdtree_clear(set->kdtree);

  set->sample_count = pf->max_samples;

  // Compute the new sample poses
  for (i = 0; i < set->sample_count; i++) {
    sample = set->samples + i;
    sample->weight = 1.0 / pf->max_samples;
    sample->pose = (*init_fn)(init_data);

    // Add sample to histogram
    pf_kdtree_insert(set->kdtree, sample->pose, sample->weight);
  }

  pf->w_slow = pf->w_fast = 0.0;

  // Re-compute cluster statistics
  pf_cluster_stats(pf, set);

  // set converged to 0
  pf_init_converged(pf);
}

void pf_init_converged(pf_t * pf)
{
  pf_sample_set_t * set;
  set = pf->sets + pf->current_set;
  set->converged = 0;
  pf->converged = 0;
}

int pf_update_converged(pf_t * pf)
{
  int i;
  pf_sample_set_t * set;
  pf_sample_t * sample;

  set = pf->sets + pf->current_set;
  double mean_x = 0, mean_y = 0;

  for (i = 0; i < set->sample_count; i++) {
    sample = set->samples + i;

    mean_x += sample->pose.v[0];
    mean_y += sample->pose.v[1];
  }
  mean_x /= set->sample_count;
  mean_y /= set->sample_count;

  for (i = 0; i < set->sample_count; i++) {
    sample = set->samples + i;
    if (fabs(sample->pose.v[0] - mean_x) > pf->dist_threshold ||
      fabs(sample->pose.v[1] - mean_y) > pf->dist_threshold)
    {
      set->converged = 0;
      pf->converged = 0;
      return 0;
    }
  }
  set->converged = 1;
  pf->converged = 1;
  return 1;
}

// Update the filter with some new action
// void pf_update_action(pf_t * pf, pf_action_model_fn_t action_fn, void * action_data)
// {
// pf_sample_set_t * set;

// set = pf->sets + pf->current_set;

// (*action_fn)(action_data, set);
// }

// Update the filter with some new sensor observation
void pf_update_sensor(pf_t * pf, pf_sensor_model_fn_t sensor_fn, void * sensor_data)
{
  int i;
  pf_sample_set_t * set;
  pf_sample_t * sample;
  double total;

  set = pf->sets + pf->current_set;

  // Compute the sample weights
  total = (*sensor_fn)(sensor_data, set);

  if (total > 0.0) {
    // Normalize weights
    double w_avg = 0.0;
    for (i = 0; i < set->sample_count; i++) {
      sample = set->samples + i;
      w_avg += sample->weight;
      sample->weight /= total;
    }
    // Update running averages of likelihood of samples (Prob Rob p258)
    w_avg /= set->sample_count;
    if (pf->w_slow == 0.0) {
      pf->w_slow = w_avg;
    } else {
      pf->w_slow += pf->alpha_slow * (w_avg - pf->w_slow);
    }
    if (pf->w_fast == 0.0) {
      pf->w_fast = w_avg;
    } else {
      pf->w_fast += pf->alpha_fast * (w_avg - pf->w_fast);
    }
  } else {
    // Handle zero total
    for (i = 0; i < set->sample_count; i++) {
      sample = set->samples + i;
      sample->weight = 1.0 / set->sample_count;
    }
  }
}
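
// The two branches above are first-order low-pass filters over the average
// measurement likelihood w_avg,
//
//   w_slow += alpha_slow * (w_avg - w_slow)
//   w_fast += alpha_fast * (w_avg - w_fast),  with alpha_slow << alpha_fast,
//
// so w_slow tracks the long-term average likelihood and w_fast the
// short-term one. pf_update_resample() below injects a random pose with
// probability max(0, 1 - w_fast / w_slow): when recent likelihoods fall
// well below the long-term average (e.g. after a kidnapping), random
// particles are mixed in (Probabilistic Robotics, p. 258).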


// Resample the distribution
void pf_update_resample(pf_t * pf, void * random_pose_data)
{
  int i;
  double total;
  pf_sample_set_t * set_a, * set_b;
  pf_sample_t * sample_a, * sample_b;

  // double r,c,U;
  // int m;
  // double count_inv;
  double * c;

  double w_diff;

  set_a = pf->sets + pf->current_set;
  set_b = pf->sets + (pf->current_set + 1) % 2;

  // Build up cumulative probability table for resampling.
  // TODO(?): Replace this with a more efficient procedure
  // (e.g., http://www.network-theory.co.uk/docs/gslref/GeneralDiscreteDistributions.html)
  c = (double *)malloc(sizeof(double) * (set_a->sample_count + 1));
  c[0] = 0.0;
  for (i = 0; i < set_a->sample_count; i++) {
    c[i + 1] = c[i] + set_a->samples[i].weight;
  }

  // Create the kd tree for adaptive sampling
  pf_kdtree_clear(set_b->kdtree);

  // Draw samples from set a to create set b.
  total = 0;
  set_b->sample_count = 0;

  w_diff = 1.0 - pf->w_fast / pf->w_slow;
  if (w_diff < 0.0) {
    w_diff = 0.0;
  }
  // printf("w_diff: %9.6f\n", w_diff);

  // Can't (easily) combine low-variance sampler with KLD adaptive
  // sampling, so we'll take the more traditional route.
  /*
  // Low-variance resampler, taken from Probabilistic Robotics, p110
  count_inv = 1.0/set_a->sample_count;
  r = drand48() * count_inv;
  c = set_a->samples[0].weight;
  i = 0;
  m = 0;
  */
  while (set_b->sample_count < pf->max_samples) {
    sample_b = set_b->samples + set_b->sample_count++;

    if (drand48() < w_diff) {
      sample_b->pose = (pf->random_pose_fn)(random_pose_data);
    } else {
      // Can't (easily) combine low-variance sampler with KLD adaptive
      // sampling, so we'll take the more traditional route.
      /*
      // Low-variance resampler, taken from Probabilistic Robotics, p110
      U = r + m * count_inv;
      while(U>c)
      {
        i++;
        // Handle wrap-around by resetting counters and picking a new random
        // number
        if(i >= set_a->sample_count)
        {
          r = drand48() * count_inv;
          c = set_a->samples[0].weight;
          i = 0;
          m = 0;
          U = r + m * count_inv;
          continue;
        }
        c += set_a->samples[i].weight;
      }
      m++;
      */

      // Naive discrete event sampler
      double r;
      r = drand48();
      for (i = 0; i < set_a->sample_count; i++) {
        if ((c[i] <= r) && (r < c[i + 1])) {
          break;
        }
      }
      assert(i < set_a->sample_count);

      sample_a = set_a->samples + i;

      assert(sample_a->weight > 0);

      // Add sample to list
      sample_b->pose = sample_a->pose;
    }

    sample_b->weight = 1.0;
    total += sample_b->weight;

    // Add sample to histogram
    pf_kdtree_insert(set_b->kdtree, sample_b->pose, sample_b->weight);

    // See if we have enough samples yet
    if (set_b->sample_count > pf_resample_limit(pf, set_b->kdtree->leaf_count)) {
      break;
    }
  }

  // Reset averages, to avoid spiraling off into complete randomness.
  if (w_diff > 0.0) {
    pf->w_slow = pf->w_fast = 0.0;
  }

  // fprintf(stderr, "\n\n");

  // Normalize weights
  for (i = 0; i < set_b->sample_count; i++) {
    sample_b = set_b->samples + i;
    sample_b->weight /= total;
  }

  // Re-compute cluster statistics
  pf_cluster_stats(pf, set_b);

  // Use the newly created sample set
  pf->current_set = (pf->current_set + 1) % 2;

  pf_update_converged(pf);

  free(c);
}
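
// The naive discrete event sampler above scans the cumulative table c[]
// linearly, so each draw is O(n). Because c[] is nondecreasing, the same
// draw could instead use an O(log n) binary search, in the spirit of the
// TODO above. A minimal sketch, not part of this file; pf_cdf_pick() is a
// hypothetical helper taking the (n + 1)-entry table built in
// pf_update_resample():
/*
static int pf_cdf_pick(const double * c, int n, double r)
{
  int lo = 0, hi = n;           // invariant: c[lo] <= r < c[hi], c[0] == 0
  while (hi - lo > 1) {
    int mid = lo + (hi - lo) / 2;
    if (c[mid] <= r) {
      lo = mid;
    } else {
      hi = mid;
    }
  }
  return lo;                    // index with c[lo] <= r < c[lo + 1]
}
*/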


// Compute the required number of samples, given that there are k bins
// with samples in them. This is taken directly from Fox et al.
int pf_resample_limit(pf_t * pf, int k)
{
  double a, b, c, x;
  int n;

  if (k <= 1) {
    return pf->max_samples;
  }

  a = 1;
  b = 2 / (9 * ((double) k - 1));
  c = sqrt(2 / (9 * ((double) k - 1))) * pf->pop_z;
  x = a - b + c;

  n = (int) ceil((k - 1) / (2 * pf->pop_err) * x * x * x);

  if (n < pf->min_samples) {
    return pf->min_samples;
  }
  if (n > pf->max_samples) {
    return pf->max_samples;
  }

  return n;
}
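
// For reference, the closed form evaluated above is the KLD-sampling bound
// (Fox, "Adapting the Sample Size in Particle Filters Through KLD-Sampling",
// IJRR 2003):
//
//   n = ceil( (k - 1) / (2 * err) *
//             (1 - 2 / (9 (k - 1)) + sqrt(2 / (9 (k - 1))) * z)^3 )
//
// i.e. the Wilson-Hilferty approximation of the chi-square quantile
// chi^2_{k-1, 1-p} divided by 2 * err, with err = pf->pop_err and
// z = pf->pop_z. For example, with the defaults err = 0.01, z = 3 and
// k = 100 occupied bins, this gives n = 7332 (before clamping to
// [min_samples, max_samples]).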


// Re-compute the cluster statistics for a sample set
void pf_cluster_stats(pf_t * pf, pf_sample_set_t * set)
{
  (void)pf;
  int i, j, k, cidx;
  pf_sample_t * sample;
  pf_cluster_t * cluster;

  // Workspace
  double m[4], c[2][2];
  double weight;

  // Cluster the samples
  pf_kdtree_cluster(set->kdtree);

  // Initialize cluster stats
  set->cluster_count = 0;

  for (i = 0; i < set->cluster_max_count; i++) {
    cluster = set->clusters + i;
    cluster->weight = 0;
    cluster->mean = pf_vector_zero();
    cluster->cov = pf_matrix_zero();

    for (j = 0; j < 4; j++) {
      cluster->m[j] = 0.0;
    }
    for (j = 0; j < 2; j++) {
      for (k = 0; k < 2; k++) {
        cluster->c[j][k] = 0.0;
      }
    }
  }

  // Initialize overall filter stats
  weight = 0.0;
  set->mean = pf_vector_zero();
  set->cov = pf_matrix_zero();
  for (j = 0; j < 4; j++) {
    m[j] = 0.0;
  }
  for (j = 0; j < 2; j++) {
    for (k = 0; k < 2; k++) {
      c[j][k] = 0.0;
    }
  }

  // Compute cluster stats
  for (i = 0; i < set->sample_count; i++) {
    sample = set->samples + i;

    // printf("%d %f %f %f\n", i, sample->pose.v[0], sample->pose.v[1], sample->pose.v[2]);

    // Get the cluster label for this sample
    cidx = pf_kdtree_get_cluster(set->kdtree, sample->pose);
    assert(cidx >= 0);
    if (cidx >= set->cluster_max_count) {
      continue;
    }
    if (cidx + 1 > set->cluster_count) {
      set->cluster_count = cidx + 1;
    }

    cluster = set->clusters + cidx;

    cluster->weight += sample->weight;

    weight += sample->weight;

    // Compute mean
    cluster->m[0] += sample->weight * sample->pose.v[0];
    cluster->m[1] += sample->weight * sample->pose.v[1];
    cluster->m[2] += sample->weight * cos(sample->pose.v[2]);
    cluster->m[3] += sample->weight * sin(sample->pose.v[2]);

    m[0] += sample->weight * sample->pose.v[0];
    m[1] += sample->weight * sample->pose.v[1];
    m[2] += sample->weight * cos(sample->pose.v[2]);
    m[3] += sample->weight * sin(sample->pose.v[2]);

    // Compute covariance in linear components
    for (j = 0; j < 2; j++) {
      for (k = 0; k < 2; k++) {
        cluster->c[j][k] += sample->weight * sample->pose.v[j] * sample->pose.v[k];
        c[j][k] += sample->weight * sample->pose.v[j] * sample->pose.v[k];
      }
    }
  }

  // Normalize
  for (i = 0; i < set->cluster_count; i++) {
    cluster = set->clusters + i;

    cluster->mean.v[0] = cluster->m[0] / cluster->weight;
    cluster->mean.v[1] = cluster->m[1] / cluster->weight;
    cluster->mean.v[2] = atan2(cluster->m[3], cluster->m[2]);

    cluster->cov = pf_matrix_zero();

    // Covariance in linear components
    for (j = 0; j < 2; j++) {
      for (k = 0; k < 2; k++) {
        cluster->cov.m[j][k] = cluster->c[j][k] / cluster->weight -
          cluster->mean.v[j] * cluster->mean.v[k];
      }
    }

    // Covariance in angular components; I think this is the correct
    // formula for circular statistics.
    cluster->cov.m[2][2] = -2 * log(
      sqrt(
        cluster->m[2] * cluster->m[2] +
        cluster->m[3] * cluster->m[3]));

    // printf("cluster %d %d %f (%f %f %f)\n", i, cluster->count, cluster->weight,
    //   cluster->mean.v[0], cluster->mean.v[1], cluster->mean.v[2]);
    // pf_matrix_fprintf(cluster->cov, stdout, "%e");
  }

  // Compute overall filter stats
  set->mean.v[0] = m[0] / weight;
  set->mean.v[1] = m[1] / weight;
  set->mean.v[2] = atan2(m[3], m[2]);

  // Covariance in linear components
  for (j = 0; j < 2; j++) {
    for (k = 0; k < 2; k++) {
      set->cov.m[j][k] = c[j][k] / weight - set->mean.v[j] * set->mean.v[k];
    }
  }

  // Covariance in angular components; I think this is the correct
  // formula for circular statistics.
  set->cov.m[2][2] = -2 * log(sqrt(m[2] * m[2] + m[3] * m[3]));
}
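
// Note on the angular term: with the sample weights normalized so that
// `weight` sums to 1, R = sqrt(m[2]^2 + m[3]^2) is the mean resultant
// length of the heading angles, and -2 ln(R) is the standard
// circular-statistics variance analogue (it tends to 0 as the headings
// concentrate and grows without bound as they spread). Strictly, the
// per-cluster entry above would use the weight-normalized sums
// cluster->m[2] / cluster->weight and cluster->m[3] / cluster->weight;
// the code keeps the raw sums, matching the original AMCL implementation.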


// Compute the CEP statistics (mean and variance).
// void pf_get_cep_stats(pf_t * pf, pf_vector_t * mean, double * var)
// {
// int i;
// double mn, mx, my, mrr;
// pf_sample_set_t * set;
// pf_sample_t * sample;

// set = pf->sets + pf->current_set;

// mn = 0.0;
// mx = 0.0;
// my = 0.0;
// mrr = 0.0;

// for (i = 0; i < set->sample_count; i++) {
// sample = set->samples + i;

// mn += sample->weight;
// mx += sample->weight * sample->pose.v[0];
// my += sample->weight * sample->pose.v[1];
// mrr += sample->weight * sample->pose.v[0] * sample->pose.v[0];
// mrr += sample->weight * sample->pose.v[1] * sample->pose.v[1];
// }

// mean->v[0] = mx / mn;
// mean->v[1] = my / mn;
// mean->v[2] = 0.0;

// *var = mrr / mn - (mx * mx / (mn * mn) + my * my / (mn * mn));
// }


// Get the statistics for a particular cluster.
int pf_get_cluster_stats(
  pf_t * pf, int clabel, double * weight,
  pf_vector_t * mean, pf_matrix_t * cov)
{
  pf_sample_set_t * set;
  pf_cluster_t * cluster;

  set = pf->sets + pf->current_set;

  if (clabel >= set->cluster_count) {
    return 0;
  }
  cluster = set->clusters + clabel;

  *weight = cluster->weight;
  *mean = cluster->mean;
  *cov = cluster->cov;

  return 1;
}