Anti-Grain Geometry Tutorial
agg_array.h
1 //----------------------------------------------------------------------------
2 // Anti-Grain Geometry - Version 2.4
3 // Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
4 //
5 // Permission to copy, use, modify, sell and distribute this software
6 // is granted provided this copyright notice appears in all copies.
7 // This software is provided "as is" without express or implied
8 // warranty, and with no claim as to its suitability for any purpose.
9 //
10 //----------------------------------------------------------------------------
11 // Contact: mcseem@antigrain.com
12 // mcseemagg@yahoo.com
13 // http://www.antigrain.com
14 //----------------------------------------------------------------------------
15 #ifndef AGG_ARRAY_INCLUDED
16 #define AGG_ARRAY_INCLUDED
17 
18 #include <cstddef>
19 #include <cstring>
20 #include "agg_basics.h"
21 
22 namespace agg
23 {
24 
25  //-------------------------------------------------------pod_array_adaptor
26  template<class T> class pod_array_adaptor
27  {
28  public:
29  typedef T value_type;
30  pod_array_adaptor(T* array, unsigned size) :
31  m_array(array), m_size(size) {}
32 
33  unsigned size() const { return m_size; }
34  const T& operator [] (unsigned i) const { return m_array[i]; }
35  T& operator [] (unsigned i) { return m_array[i]; }
36  const T& at(unsigned i) const { return m_array[i]; }
37  T& at(unsigned i) { return m_array[i]; }
38  T value_at(unsigned i) const { return m_array[i]; }
39 
40  private:
41  T* m_array;
42  unsigned m_size;
43  };
44 
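pod_array_adaptor wraps an existing C array without owning or copying it. A minimal usage sketch (not part of agg_array.h):

    #include "agg_array.h"

    void adaptor_demo()
    {
        int raw[4] = { 3, 1, 4, 1 };
        agg::pod_array_adaptor<int> a(raw, 4); // view over raw, no allocation
        a[0] = 7;                              // writes through to raw[0]
        unsigned n = a.size();                 // 4
        (void)n;
    }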
45 
46  //---------------------------------------------------------pod_auto_array
47  template<class T, unsigned Size> class pod_auto_array
48  {
49  public:
50  typedef T value_type;
51  typedef pod_auto_array<T, Size> self_type;
52 
53  pod_auto_array() {}
54  explicit pod_auto_array(const T* c)
55  {
56  std::memcpy(m_array, c, sizeof(T) * Size);
57  }
58 
59  const self_type& operator = (const T* c)
60  {
61  std::memcpy(m_array, c, sizeof(T) * Size);
62  return *this;
63  }
64 
65  static unsigned size() { return Size; }
66  const T& operator [] (unsigned i) const { return m_array[i]; }
67  T& operator [] (unsigned i) { return m_array[i]; }
68  const T& at(unsigned i) const { return m_array[i]; }
69  T& at(unsigned i) { return m_array[i]; }
70  T value_at(unsigned i) const { return m_array[i]; }
71 
72  private:
73  T m_array[Size];
74  };
75 
76 
77  //--------------------------------------------------------pod_auto_vector
78  template<class T, unsigned Size> class pod_auto_vector
79  {
80  public:
81  typedef T value_type;
82  typedef pod_auto_vector<T, Size> self_type;
83 
84  pod_auto_vector() : m_size(0) {}
85 
86  void remove_all() { m_size = 0; }
87  void clear() { m_size = 0; }
88  void add(const T& v) { m_array[m_size++] = v; }
89  void push_back(const T& v) { m_array[m_size++] = v; }
90  void inc_size(unsigned size) { m_size += size; }
91 
92  unsigned size() const { return m_size; }
93  const T& operator [] (unsigned i) const { return m_array[i]; }
94  T& operator [] (unsigned i) { return m_array[i]; }
95  const T& at(unsigned i) const { return m_array[i]; }
96  T& at(unsigned i) { return m_array[i]; }
97  T value_at(unsigned i) const { return m_array[i]; }
98 
99  private:
100  T m_array[Size];
101  unsigned m_size;
102  };
103 
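Both pod_auto_array and pod_auto_vector keep their storage on the stack; pod_auto_vector additionally tracks a current size but performs no bounds checking, so the caller must stay within Size. A minimal sketch (illustrative only):

    #include "agg_array.h"

    void auto_containers_demo()
    {
        double coeff[3] = { 0.25, 0.5, 0.25 };
        agg::pod_auto_array<double, 3> k(coeff); // copies 3 doubles into fixed storage

        agg::pod_auto_vector<int, 8> v;          // capacity fixed at 8, size starts at 0
        v.add(10);
        v.push_back(20);                         // same as add(); no overflow check
        double mid = k[1];                       // 0.5
        (void)mid; (void)v;
    }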
104 
105  //---------------------------------------------------------------pod_array
106  template<class T> class pod_array
107  {
108  public:
109  typedef T value_type;
110  typedef pod_array<T> self_type;
111 
112  ~pod_array() { pod_allocator<T>::deallocate(m_array, m_size); }
113  pod_array() : m_array(0), m_size(0) {}
114 
115  pod_array(unsigned size) :
116  m_array(pod_allocator<T>::allocate(size)),
117  m_size(size)
118  {}
119 
120  pod_array(const self_type& v) :
121  m_array(pod_allocator<T>::allocate(v.m_size)),
122  m_size(v.m_size)
123  {
124  std::memcpy(m_array, v.m_array, sizeof(T) * m_size);
125  }
126 
127  void resize(unsigned size)
128  {
129  if(size != m_size)
130  {
131  pod_allocator<T>::deallocate(m_array, m_size);
132  m_array = pod_allocator<T>::allocate(m_size = size);
133  }
134  }
135  const self_type& operator = (const self_type& v)
136  {
137  resize(v.size());
138  std::memcpy(m_array, v.m_array, sizeof(T) * m_size);
139  return *this;
140  }
141 
142  unsigned size() const { return m_size; }
143  const T& operator [] (unsigned i) const { return m_array[i]; }
144  T& operator [] (unsigned i) { return m_array[i]; }
145  const T& at(unsigned i) const { return m_array[i]; }
146  T& at(unsigned i) { return m_array[i]; }
147  T value_at(unsigned i) const { return m_array[i]; }
148 
149  const T* data() const { return m_array; }
150  T* data() { return m_array; }
151  private:
152  T* m_array;
153  unsigned m_size;
154  };
155 
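pod_array owns a single heap block of exactly size elements; resize() reallocates and discards the previous contents. A minimal sketch (illustrative only):

    #include "agg_array.h"

    void pod_array_demo()
    {
        agg::pod_array<float> buf(256);  // 256 uninitialized floats on the heap
        buf[0] = 1.0f;
        buf.resize(512);                 // new uninitialized block; old data is lost
        float* p = buf.data();           // raw pointer for interop
        (void)p;
    }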
156 
157 
158  //--------------------------------------------------------------pod_vector
159  // A simple class template to store Plain Old Data, a vector
160  // of a fixed size. The data is contiguous in memory
161  //------------------------------------------------------------------------
162  template<class T> class pod_vector
163  {
164  public:
165  typedef T value_type;
166 
167  ~pod_vector() { pod_allocator<T>::deallocate(m_array, m_capacity); }
168  pod_vector() : m_size(0), m_capacity(0), m_array(0) {}
169  pod_vector(unsigned cap, unsigned extra_tail=0);
170 
171  // Copying
172  pod_vector(const pod_vector<T>&);
173  const pod_vector<T>& operator = (const pod_vector<T>&);
174 
175  // Set new capacity. All data is lost, size is set to zero.
176  void capacity(unsigned cap, unsigned extra_tail=0);
177  unsigned capacity() const { return m_capacity; }
178 
179  // Allocate n elements. All data is lost,
180  // but elements can be accessed in range 0...size-1.
181  void allocate(unsigned size, unsigned extra_tail=0);
182 
183  // Resize keeping the content.
184  void resize(unsigned new_size);
185 
186  void zero()
187  {
188  std::memset(m_array, 0, sizeof(T) * m_size);
189  }
190 
191  void add(const T& v) { m_array[m_size++] = v; }
192  void push_back(const T& v) { m_array[m_size++] = v; }
193  void insert_at(unsigned pos, const T& val);
194  void inc_size(unsigned size) { m_size += size; }
195  unsigned size() const { return m_size; }
196  unsigned byte_size() const { return m_size * sizeof(T); }
197  void serialize(int8u* ptr) const;
198  void deserialize(const int8u* data, unsigned byte_size);
199  const T& operator [] (unsigned i) const { return m_array[i]; }
200  T& operator [] (unsigned i) { return m_array[i]; }
201  const T& at(unsigned i) const { return m_array[i]; }
202  T& at(unsigned i) { return m_array[i]; }
203  T value_at(unsigned i) const { return m_array[i]; }
204 
205  const T* data() const { return m_array; }
206  T* data() { return m_array; }
207 
208  void remove_all() { m_size = 0; }
209  void clear() { m_size = 0; }
210  void cut_at(unsigned num) { if(num < m_size) m_size = num; }
211 
212  private:
213  unsigned m_size;
214  unsigned m_capacity;
215  T* m_array;
216  };
217 
218  //------------------------------------------------------------------------
219  template<class T>
220  void pod_vector<T>::capacity(unsigned cap, unsigned extra_tail)
221  {
222  m_size = 0;
223  if(cap > m_capacity)
224  {
225  pod_allocator<T>::deallocate(m_array, m_capacity);
226  m_capacity = cap + extra_tail;
227  m_array = m_capacity ? pod_allocator<T>::allocate(m_capacity) : 0;
228  }
229  }
230 
231  //------------------------------------------------------------------------
232  template<class T>
233  void pod_vector<T>::allocate(unsigned size, unsigned extra_tail)
234  {
235  capacity(size, extra_tail);
236  m_size = size;
237  }
238 
239 
240  //------------------------------------------------------------------------
241  template<class T>
242  void pod_vector<T>::resize(unsigned new_size)
243  {
244  if(new_size > m_size)
245  {
246  if(new_size > m_capacity)
247  {
248  T* data = pod_allocator<T>::allocate(new_size);
249  std::memcpy(data, m_array, m_size * sizeof(T));
250  pod_allocator<T>::deallocate(m_array, m_capacity);
251  m_array = data;
252  }
253  }
254  else
255  {
256  m_size = new_size;
257  }
258  }
259 
260  //------------------------------------------------------------------------
261  template<class T> pod_vector<T>::pod_vector(unsigned cap, unsigned extra_tail) :
262  m_size(0),
263  m_capacity(cap + extra_tail),
264  m_array(pod_allocator<T>::allocate(m_capacity)) {}
265 
266  //------------------------------------------------------------------------
267  template<class T> pod_vector<T>::pod_vector(const pod_vector<T>& v) :
268  m_size(v.m_size),
269  m_capacity(v.m_capacity),
270  m_array(v.m_capacity ? pod_allocator<T>::allocate(v.m_capacity) : 0)
271  {
272  std::memcpy(m_array, v.m_array, sizeof(T) * v.m_size);
273  }
274 
275  //------------------------------------------------------------------------
276  template<class T> const pod_vector<T>&
277  pod_vector<T>::operator = (const pod_vector<T>& v)
278  {
279  allocate(v.m_size);
280  if(v.m_size) std::memcpy(m_array, v.m_array, sizeof(T) * v.m_size);
281  return *this;
282  }
283 
284  //------------------------------------------------------------------------
285  template<class T> void pod_vector<T>::serialize(int8u* ptr) const
286  {
287  if(m_size) std::memcpy(ptr, m_array, m_size * sizeof(T));
288  }
289 
290  //------------------------------------------------------------------------
291  template<class T>
292  void pod_vector<T>::deserialize(const int8u* data, unsigned byte_size)
293  {
294  byte_size /= sizeof(T);
295  allocate(byte_size);
296  if(byte_size) std::memcpy(m_array, data, byte_size * sizeof(T));
297  }
298 
299  //------------------------------------------------------------------------
300  template<class T>
301  void pod_vector<T>::insert_at(unsigned pos, const T& val)
302  {
303  if(pos >= m_size)
304  {
305  m_array[m_size] = val;
306  }
307  else
308  {
309  std::memmove(m_array + pos + 1, m_array + pos, (m_size - pos) * sizeof(T));
310  m_array[pos] = val;
311  }
312  ++m_size;
313  }
314 
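Unlike std::vector, pod_vector::add() and push_back() never grow the buffer; capacity must be reserved up front with the constructor, capacity() or allocate(). A minimal sketch (illustrative only):

    #include "agg_array.h"

    void pod_vector_demo()
    {
        agg::pod_vector<int> v(16);               // capacity 16, size 0
        for(int i = 0; i < 10; ++i) v.add(i);     // caller must stay within capacity
        v.cut_at(5);                              // keep elements 0..4
        v.zero();                                 // memset the current elements to zero
    }
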
315  //---------------------------------------------------------------pod_bvector
316  // A simple class template to store Plain Old Data, similar to std::deque
317  // It doesn't reallocate memory but instead uses blocks of data of size
318  // (1 << S), that is, a power of two. The data is NOT contiguous in memory,
319  // so the only valid access method is operator [] or curr(), prev(), next()
320  //
321  // Reallocs occur only when the pool of pointers to blocks needs
322  // to be extended (it happens very rarely). You can control the value
323  // of increment to reallocate the pointer buffer. See the second constructor.
324  // By default, the increment value equals (1 << S), i.e., the block size.
325  //------------------------------------------------------------------------
326  template<class T, unsigned S=6> class pod_bvector
327  {
328  public:
329  enum block_scale_e
330  {
331  block_shift = S,
332  block_size = 1 << block_shift,
333  block_mask = block_size - 1
334  };
335 
336  typedef T value_type;
337 
338  ~pod_bvector();
339  pod_bvector();
340  pod_bvector(unsigned block_ptr_inc);
341 
342  // Copying
343  pod_bvector(const pod_bvector<T, S>& v);
344  const pod_bvector<T, S>& operator = (const pod_bvector<T, S>& v);
345 
346  void remove_all() { m_size = 0; }
347  void clear() { m_size = 0; }
348  void free_all() { free_tail(0); }
349  void free_tail(unsigned size);
350  void add(const T& val);
351  void push_back(const T& val) { add(val); }
352  void modify_last(const T& val);
353  void remove_last();
354 
355  int allocate_continuous_block(unsigned num_elements);
356 
357  void add_array(const T* ptr, unsigned num_elem)
358  {
359  while(num_elem--)
360  {
361  add(*ptr++);
362  }
363  }
364 
365  template<class DataAccessor> void add_data(DataAccessor& data)
366  {
367  while(data.size())
368  {
369  add(*data);
370  ++data;
371  }
372  }
373 
374  void cut_at(unsigned size)
375  {
376  if(size < m_size) m_size = size;
377  }
378 
379  unsigned size() const { return m_size; }
380 
381  const T& operator [] (unsigned i) const
382  {
383  return m_blocks[i >> block_shift][i & block_mask];
384  }
385 
386  T& operator [] (unsigned i)
387  {
388  return m_blocks[i >> block_shift][i & block_mask];
389  }
390 
391  const T& at(unsigned i) const
392  {
393  return m_blocks[i >> block_shift][i & block_mask];
394  }
395 
396  T& at(unsigned i)
397  {
398  return m_blocks[i >> block_shift][i & block_mask];
399  }
400 
401  T value_at(unsigned i) const
402  {
403  return m_blocks[i >> block_shift][i & block_mask];
404  }
405 
406  const T& curr(unsigned idx) const
407  {
408  return (*this)[idx];
409  }
410 
411  T& curr(unsigned idx)
412  {
413  return (*this)[idx];
414  }
415 
416  const T& prev(unsigned idx) const
417  {
418  return (*this)[(idx + m_size - 1) % m_size];
419  }
420 
421  T& prev(unsigned idx)
422  {
423  return (*this)[(idx + m_size - 1) % m_size];
424  }
425 
426  const T& next(unsigned idx) const
427  {
428  return (*this)[(idx + 1) % m_size];
429  }
430 
431  T& next(unsigned idx)
432  {
433  return (*this)[(idx + 1) % m_size];
434  }
435 
436  const T& last() const
437  {
438  return (*this)[m_size - 1];
439  }
440 
441  T& last()
442  {
443  return (*this)[m_size - 1];
444  }
445 
446  unsigned byte_size() const;
447  void serialize(int8u* ptr) const;
448  void deserialize(const int8u* data, unsigned byte_size);
449  void deserialize(unsigned start, const T& empty_val,
450  const int8u* data, unsigned byte_size);
451 
452  template<class ByteAccessor>
453  void deserialize(ByteAccessor data)
454  {
455  remove_all();
456  unsigned elem_size = data.size() / sizeof(T);
457 
458  for(unsigned i = 0; i < elem_size; ++i)
459  {
460  int8u* ptr = (int8u*)data_ptr();
461  for(unsigned j = 0; j < sizeof(T); ++j)
462  {
463  *ptr++ = *data;
464  ++data;
465  }
466  ++m_size;
467  }
468  }
469 
470  template<class ByteAccessor>
471  void deserialize(unsigned start, const T& empty_val, ByteAccessor data)
472  {
473  while(m_size < start)
474  {
475  add(empty_val);
476  }
477 
478  unsigned elem_size = data.size() / sizeof(T);
479  for(unsigned i = 0; i < elem_size; ++i)
480  {
481  int8u* ptr;
482  if(start + i < m_size)
483  {
484  ptr = (int8u*)(&((*this)[start + i]));
485  }
486  else
487  {
488  ptr = (int8u*)data_ptr();
489  ++m_size;
490  }
491  for(unsigned j = 0; j < sizeof(T); ++j)
492  {
493  *ptr++ = *data;
494  ++data;
495  }
496  }
497  }
498 
499  const T* block(unsigned nb) const { return m_blocks[nb]; }
500 
501  private:
502  void allocate_block(unsigned nb);
503  T* data_ptr();
504 
505  unsigned m_size;
506  unsigned m_num_blocks;
507  unsigned m_max_blocks;
508  T** m_blocks;
509  unsigned m_block_ptr_inc;
510  };
511 
512 
513  //------------------------------------------------------------------------
514  template<class T, unsigned S> pod_bvector<T, S>::~pod_bvector()
515  {
516  if(m_num_blocks)
517  {
518  T** blk = m_blocks + m_num_blocks - 1;
519  while(m_num_blocks--)
520  {
521  pod_allocator<T>::deallocate(*blk, block_size);
522  --blk;
523  }
524  }
525  pod_allocator<T*>::deallocate(m_blocks, m_max_blocks);
526  }
527 
528 
529  //------------------------------------------------------------------------
530  template<class T, unsigned S>
531  void pod_bvector<T, S>::free_tail(unsigned size)
532  {
533  if(size < m_size)
534  {
535  unsigned nb = (size + block_mask) >> block_shift;
536  while(m_num_blocks > nb)
537  {
538  pod_allocator<T>::deallocate(m_blocks[--m_num_blocks], block_size);
539  }
540  if(m_num_blocks == 0)
541  {
542  pod_allocator<T*>::deallocate(m_blocks, m_max_blocks);
543  m_blocks = 0;
544  m_max_blocks = 0;
545  }
546  m_size = size;
547  }
548  }
549 
550 
551  //------------------------------------------------------------------------
552  template<class T, unsigned S> pod_bvector<T, S>::pod_bvector() :
553  m_size(0),
554  m_num_blocks(0),
555  m_max_blocks(0),
556  m_blocks(0),
557  m_block_ptr_inc(block_size)
558  {
559  }
560 
561 
562  //------------------------------------------------------------------------
563  template<class T, unsigned S>
564  pod_bvector<T, S>::pod_bvector(unsigned block_ptr_inc) :
565  m_size(0),
566  m_num_blocks(0),
567  m_max_blocks(0),
568  m_blocks(0),
569  m_block_ptr_inc(block_ptr_inc)
570  {
571  }
572 
573 
574  //------------------------------------------------------------------------
575  template<class T, unsigned S>
576  pod_bvector<T, S>::pod_bvector(const pod_bvector<T, S>& v) :
577  m_size(v.m_size),
578  m_num_blocks(v.m_num_blocks),
579  m_max_blocks(v.m_max_blocks),
580  m_blocks(v.m_max_blocks ?
581  pod_allocator<T*>::allocate(v.m_max_blocks) :
582  0),
583  m_block_ptr_inc(v.m_block_ptr_inc)
584  {
585  unsigned i;
586  for(i = 0; i < v.m_num_blocks; ++i)
587  {
588  m_blocks[i] = pod_allocator<T>::allocate(block_size);
589  std::memcpy(m_blocks[i], v.m_blocks[i], block_size * sizeof(T));
590  }
591  }
592 
593 
594  //------------------------------------------------------------------------
595  template<class T, unsigned S>
596  const pod_bvector<T, S>&
597  pod_bvector<T, S>::operator = (const pod_bvector<T, S>& v)
598  {
599  unsigned i;
600  for(i = m_num_blocks; i < v.m_num_blocks; ++i)
601  {
602  allocate_block(i);
603  }
604  for(i = 0; i < v.m_num_blocks; ++i)
605  {
606  std::memcpy(m_blocks[i], v.m_blocks[i], block_size * sizeof(T));
607  }
608  m_size = v.m_size;
609  return *this;
610  }
611 
612 
613  //------------------------------------------------------------------------
614  template<class T, unsigned S>
615  void pod_bvector<T, S>::allocate_block(unsigned nb)
616  {
617  if(nb >= m_max_blocks)
618  {
619  T** new_blocks = pod_allocator<T*>::allocate(m_max_blocks + m_block_ptr_inc);
620 
621  if(m_blocks)
622  {
623  std::memcpy(new_blocks,
624  m_blocks,
625  m_num_blocks * sizeof(T*));
626 
627  pod_allocator<T*>::deallocate(m_blocks, m_max_blocks);
628  }
629  m_blocks = new_blocks;
630  m_max_blocks += m_block_ptr_inc;
631  }
632  m_blocks[nb] = pod_allocator<T>::allocate(block_size);
633  m_num_blocks++;
634  }
635 
636 
637 
638  //------------------------------------------------------------------------
639  template<class T, unsigned S>
640  inline T* pod_bvector<T, S>::data_ptr()
641  {
642  unsigned nb = m_size >> block_shift;
643  if(nb >= m_num_blocks)
644  {
645  allocate_block(nb);
646  }
647  return m_blocks[nb] + (m_size & block_mask);
648  }
649 
650 
651 
652  //------------------------------------------------------------------------
653  template<class T, unsigned S>
654  inline void pod_bvector<T, S>::add(const T& val)
655  {
656  *data_ptr() = val;
657  ++m_size;
658  }
659 
660 
661  //------------------------------------------------------------------------
662  template<class T, unsigned S>
663  inline void pod_bvector<T, S>::remove_last()
664  {
665  if(m_size) --m_size;
666  }
667 
668 
669  //------------------------------------------------------------------------
670  template<class T, unsigned S>
671  void pod_bvector<T, S>::modify_last(const T& val)
672  {
673  remove_last();
674  add(val);
675  }
676 
677 
678  //------------------------------------------------------------------------
679  template<class T, unsigned S>
680  int pod_bvector<T, S>::allocate_continuous_block(unsigned num_elements)
681  {
682  if(num_elements < block_size)
683  {
684  data_ptr(); // Allocate initial block if necessary
685  unsigned rest = block_size - (m_size & block_mask);
686  unsigned index;
687  if(num_elements <= rest)
688  {
689  // The rest of the block is good, we can use it
690  //-----------------
691  index = m_size;
692  m_size += num_elements;
693  return index;
694  }
695 
696  // New block
697  //---------------
698  m_size += rest;
699  data_ptr();
700  index = m_size;
701  m_size += num_elements;
702  return index;
703  }
704  return -1; // Impossible to allocate
705  }
706 
707 
708  //------------------------------------------------------------------------
709  template<class T, unsigned S>
710  unsigned pod_bvector<T, S>::byte_size() const
711  {
712  return m_size * sizeof(T);
713  }
714 
715 
716  //------------------------------------------------------------------------
717  template<class T, unsigned S>
718  void pod_bvector<T, S>::serialize(int8u* ptr) const
719  {
720  unsigned i;
721  for(i = 0; i < m_size; i++)
722  {
723  std::memcpy(ptr, &(*this)[i], sizeof(T));
724  ptr += sizeof(T);
725  }
726  }
727 
728  //------------------------------------------------------------------------
729  template<class T, unsigned S>
730  void pod_bvector<T, S>::deserialize(const int8u* data, unsigned byte_size)
731  {
732  remove_all();
733  byte_size /= sizeof(T);
734  for(unsigned i = 0; i < byte_size; ++i)
735  {
736  T* ptr = data_ptr();
737  std::memcpy(ptr, data, sizeof(T));
738  ++m_size;
739  data += sizeof(T);
740  }
741  }
742 
743 
744  // Replace or add a number of elements starting from "start" position
745  //------------------------------------------------------------------------
746  template<class T, unsigned S>
747  void pod_bvector<T, S>::deserialize(unsigned start, const T& empty_val,
748  const int8u* data, unsigned byte_size)
749  {
750  while(m_size < start)
751  {
752  add(empty_val);
753  }
754 
755  byte_size /= sizeof(T);
756  for(unsigned i = 0; i < byte_size; ++i)
757  {
758  if(start + i < m_size)
759  {
760  std::memcpy(&((*this)[start + i]), data, sizeof(T));
761  }
762  else
763  {
764  T* ptr = data_ptr();
765  std::memcpy(ptr, data, sizeof(T));
766  ++m_size;
767  }
768  data += sizeof(T);
769  }
770  }
771 
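pod_bvector never moves existing elements: element i lives in block i >> block_shift at offset i & block_mask, so pointers and references stay valid as the container grows. A minimal sketch (illustrative only):

    #include "agg_array.h"

    void pod_bvector_demo()
    {
        agg::pod_bvector<double, 6> pts;          // 1 << 6 = 64 elements per block
        for(unsigned i = 0; i < 1000; ++i) pts.add(i * 0.5);
        double mid  = pts[500];                   // block 500 >> 6 = 7, offset 500 & 63 = 52
        double back = pts.last();
        (void)mid; (void)back;
    }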
772 
773  //---------------------------------------------------------block_allocator
774  // Allocator for arbitrary POD data. Most useful in different cache
775  // systems for efficient memory allocations.
776  // Memory is allocated with blocks of fixed size ("block_size" in
777  // the constructor). If required size exceeds the block size the allocator
778  // creates a new block of the required size. However, the most efficient
779  // use is when the average required size is much less than the block size.
780  //------------------------------------------------------------------------
781  class block_allocator
782  {
783  struct block_type
784  {
785  int8u* data;
786  unsigned size;
787  };
788 
789  public:
790  void remove_all()
791  {
792  if(m_num_blocks)
793  {
794  block_type* blk = m_blocks + m_num_blocks - 1;
795  while(m_num_blocks--)
796  {
797  pod_allocator<int8u>::deallocate(blk->data, blk->size);
798  --blk;
799  }
800  pod_allocator<block_type>::deallocate(m_blocks, m_max_blocks);
801  }
802  m_num_blocks = 0;
803  m_max_blocks = 0;
804  m_blocks = 0;
805  m_buf_ptr = 0;
806  m_rest = 0;
807  }
808 
809  ~block_allocator()
810  {
811  remove_all();
812  }
813 
814  block_allocator(unsigned block_size, unsigned block_ptr_inc=256-8) :
815  m_block_size(block_size),
816  m_block_ptr_inc(block_ptr_inc),
817  m_num_blocks(0),
818  m_max_blocks(0),
819  m_blocks(0),
820  m_buf_ptr(0),
821  m_rest(0)
822  {
823  }
824 
825 
826  int8u* allocate(unsigned size, unsigned alignment=1)
827  {
828  if(size == 0) return 0;
829  if(size <= m_rest)
830  {
831  int8u* ptr = m_buf_ptr;
832  if(alignment > 1)
833  {
834  unsigned align =
835  (alignment - unsigned((std::size_t)ptr) % alignment) % alignment;
836 
837  size += align;
838  ptr += align;
839  if(size <= m_rest)
840  {
841  m_rest -= size;
842  m_buf_ptr += size;
843  return ptr;
844  }
845  allocate_block(size);
846  return allocate(size - align, alignment);
847  }
848  m_rest -= size;
849  m_buf_ptr += size;
850  return ptr;
851  }
852  allocate_block(size + alignment - 1);
853  return allocate(size, alignment);
854  }
855 
856 
857  private:
858  void allocate_block(unsigned size)
859  {
860  if(size < m_block_size) size = m_block_size;
861  if(m_num_blocks >= m_max_blocks)
862  {
863  block_type* new_blocks =
864  pod_allocator<block_type>::allocate(m_max_blocks + m_block_ptr_inc);
865 
866  if(m_blocks)
867  {
868  std::memcpy(new_blocks,
869  m_blocks,
870  m_num_blocks * sizeof(block_type));
871  pod_allocator<block_type>::deallocate(m_blocks, m_max_blocks);
872  }
873  m_blocks = new_blocks;
874  m_max_blocks += m_block_ptr_inc;
875  }
876 
877  m_blocks[m_num_blocks].size = size;
878  m_blocks[m_num_blocks].data =
879  m_buf_ptr =
880  pod_allocator<int8u>::allocate(size);
881 
882  m_num_blocks++;
883  m_rest = size;
884  }
885 
886  unsigned m_block_size;
887  unsigned m_block_ptr_inc;
888  unsigned m_num_blocks;
889  unsigned m_max_blocks;
890  block_type* m_blocks;
891  int8u* m_buf_ptr;
892  unsigned m_rest;
893  };
894 
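block_allocator hands out pointers carved from large pooled blocks; individual allocations cannot be freed, only the whole pool via remove_all() or the destructor. A minimal sketch (illustrative only):

    #include "agg_array.h"

    void block_allocator_demo()
    {
        agg::block_allocator pool(16384);         // 16 KB per pooled block
        agg::int8u* p1 = pool.allocate(100);      // carved from the current block
        agg::int8u* p2 = pool.allocate(64, 8);    // 8-byte aligned request
        (void)p1; (void)p2;
        pool.remove_all();                        // releases every block at once
    }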
895 
896 
897 
898 
899 
900 
901 
902  //------------------------------------------------------------------------
903  enum quick_sort_threshold_e
904  {
905  quick_sort_threshold = 9
906  };
907 
908 
909  //-----------------------------------------------------------swap_elements
910  template<class T> inline void swap_elements(T& a, T& b)
911  {
912  T temp = a;
913  a = b;
914  b = temp;
915  }
916 
917 
918  //--------------------------------------------------------------quick_sort
919  template<class Array, class Less>
920  void quick_sort(Array& arr, Less less)
921  {
922  if(arr.size() < 2) return;
923 
924  typename Array::value_type* e1;
925  typename Array::value_type* e2;
926 
927  int stack[80];
928  int* top = stack;
929  int limit = arr.size();
930  int base = 0;
931 
932  for(;;)
933  {
934  int len = limit - base;
935 
936  int i;
937  int j;
938  int pivot;
939 
940  if(len > quick_sort_threshold)
941  {
942  // we use base + len/2 as the pivot
943  pivot = base + len / 2;
944  swap_elements(arr[base], arr[pivot]);
945 
946  i = base + 1;
947  j = limit - 1;
948 
949  // now ensure that *i <= *base <= *j
950  e1 = &(arr[j]);
951  e2 = &(arr[i]);
952  if(less(*e1, *e2)) swap_elements(*e1, *e2);
953 
954  e1 = &(arr[base]);
955  e2 = &(arr[i]);
956  if(less(*e1, *e2)) swap_elements(*e1, *e2);
957 
958  e1 = &(arr[j]);
959  e2 = &(arr[base]);
960  if(less(*e1, *e2)) swap_elements(*e1, *e2);
961 
962  for(;;)
963  {
964  do i++; while( less(arr[i], arr[base]) );
965  do j--; while( less(arr[base], arr[j]) );
966 
967  if( i > j )
968  {
969  break;
970  }
971 
972  swap_elements(arr[i], arr[j]);
973  }
974 
975  swap_elements(arr[base], arr[j]);
976 
977  // now, push the largest sub-array
978  if(j - base > limit - i)
979  {
980  top[0] = base;
981  top[1] = j;
982  base = i;
983  }
984  else
985  {
986  top[0] = i;
987  top[1] = limit;
988  limit = j;
989  }
990  top += 2;
991  }
992  else
993  {
994  // the sub-array is small, perform insertion sort
995  j = base;
996  i = j + 1;
997 
998  for(; i < limit; j = i, i++)
999  {
1000  for(; less(*(e1 = &(arr[j + 1])), *(e2 = &(arr[j]))); j--)
1001  {
1002  swap_elements(*e1, *e2);
1003  if(j == base)
1004  {
1005  break;
1006  }
1007  }
1008  }
1009  if(top > stack)
1010  {
1011  top -= 2;
1012  base = top[0];
1013  limit = top[1];
1014  }
1015  else
1016  {
1017  break;
1018  }
1019  }
1020  }
1021  }
1022 
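quick_sort works with any of the containers above that expose size(), operator[] and value_type; small sub-ranges (quick_sort_threshold and below) fall back to insertion sort. A minimal sketch (illustrative only):

    #include "agg_array.h"

    void quick_sort_demo()
    {
        agg::pod_vector<int> v(8);
        v.add(5); v.add(2); v.add(9); v.add(2);
        agg::quick_sort(v, agg::int_less);        // ascending: 2 2 5 9
    }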
1023 
1024 
1025 
1026  //------------------------------------------------------remove_duplicates
1027  // Remove duplicates from a sorted array. It doesn't cut the
1028  // tail of the array; it just returns the number of remaining elements.
1029  //-----------------------------------------------------------------------
1030  template<class Array, class Equal>
1031  unsigned remove_duplicates(Array& arr, Equal equal)
1032  {
1033  if(arr.size() < 2) return arr.size();
1034 
1035  unsigned i, j;
1036  for(i = 1, j = 1; i < arr.size(); i++)
1037  {
1038  typename Array::value_type& e = arr[i];
1039  if(!equal(e, arr[i - 1]))
1040  {
1041  arr[j++] = e;
1042  }
1043  }
1044  return j;
1045  }
1046 
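remove_duplicates expects sorted input and compacts the unique elements to the front, returning their count; trimming the tail is left to the caller. A minimal sketch; the equality helper is illustrative, not part of the header:

    #include "agg_array.h"

    inline bool int_equal(int a, int b) { return a == b; }  // hypothetical helper

    void remove_duplicates_demo()
    {
        agg::pod_vector<int> v(8);
        v.add(2); v.add(2); v.add(5); v.add(9);             // already sorted
        unsigned n = agg::remove_duplicates(v, int_equal);  // n == 3, v starts with 2 5 9
        v.cut_at(n);                                        // drop the stale tail
    }
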
1047  //--------------------------------------------------------invert_container
1048  template<class Array> void invert_container(Array& arr)
1049  {
1050  int i = 0;
1051  int j = arr.size() - 1;
1052  while(i < j)
1053  {
1054  swap_elements(arr[i++], arr[j--]);
1055  }
1056  }
1057 
1058  //------------------------------------------------------binary_search_pos
1059  template<class Array, class Value, class Less>
1060  unsigned binary_search_pos(const Array& arr, const Value& val, Less less)
1061  {
1062  if(arr.size() == 0) return 0;
1063 
1064  unsigned beg = 0;
1065  unsigned end = arr.size() - 1;
1066 
1067  if(less(val, arr[0])) return 0;
1068  if(less(arr[end], val)) return end + 1;
1069 
1070  while(end - beg > 1)
1071  {
1072  unsigned mid = (end + beg) >> 1;
1073  if(less(val, arr[mid])) end = mid;
1074  else beg = mid;
1075  }
1076 
1077  //if(beg <= 0 && less(val, arr[0])) return 0;
1078  //if(end >= arr.size() - 1 && less(arr[end], val)) ++end;
1079 
1080  return end;
1081  }
1082 
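binary_search_pos returns the index at which val can be inserted to keep the array sorted (after any elements equal to val). A minimal sketch (illustrative only):

    #include "agg_array.h"

    void binary_search_demo()
    {
        agg::pod_vector<int> v(4);
        v.add(1); v.add(3); v.add(5);                               // sorted ascending
        unsigned pos = agg::binary_search_pos(v, 4, agg::int_less); // pos == 2
        (void)pos;
    }
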
1083  //----------------------------------------------------------range_adaptor
1084  template<class Array> class range_adaptor
1085  {
1086  public:
1087  typedef typename Array::value_type value_type;
1088 
1089  range_adaptor(Array& array, unsigned start, unsigned size) :
1090  m_array(array), m_start(start), m_size(size)
1091  {}
1092 
1093  unsigned size() const { return m_size; }
1094  const value_type& operator [] (unsigned i) const { return m_array[m_start + i]; }
1095  value_type& operator [] (unsigned i) { return m_array[m_start + i]; }
1096  const value_type& at(unsigned i) const { return m_array[m_start + i]; }
1097  value_type& at(unsigned i) { return m_array[m_start + i]; }
1098  value_type value_at(unsigned i) const { return m_array[m_start + i]; }
1099 
1100  private:
1101  Array& m_array;
1102  unsigned m_start;
1103  unsigned m_size;
1104  };
1105 
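range_adaptor exposes a window [start, start + size) of another container through the same interface, so the algorithms above can operate on a slice in place. A minimal sketch (illustrative only):

    #include "agg_array.h"

    void range_adaptor_demo()
    {
        agg::pod_array<int> a(10);
        for(unsigned i = 0; i < 10; ++i) a[i] = int(10 - i);   // 10 9 8 ... 1
        agg::range_adaptor<agg::pod_array<int> > mid(a, 2, 5); // view of a[2..6]
        agg::quick_sort(mid, agg::int_less);                   // sorts only that slice
    }
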
1106  //---------------------------------------------------------------int_less
1107  inline bool int_less(int a, int b) { return a < b; }
1108 
1109  //------------------------------------------------------------int_greater
1110  inline bool int_greater(int a, int b) { return a > b; }
1111 
1112  //----------------------------------------------------------unsigned_less
1113  inline bool unsigned_less(unsigned a, unsigned b) { return a < b; }
1114 
1115  //-------------------------------------------------------unsigned_greater
1116  inline bool unsigned_greater(unsigned a, unsigned b) { return a > b; }
1117 }
1118 
1119 #endif