lrde_olena.bib
@comment{{This file has been generated by bib2bib 1.97}}
@comment{{Command line: /usr/bin/bib2bib -s plain -oc lrde_olena.cite -ob lrde_olena.bib -c 'project = "Image" or project = "Olena"' -s year -r ../../doc/lrde.bib}}
@inproceedings{lazzara.11.icdar,
author = {Guillaume Lazzara and Roland Levillain and {\relax
Th}ierry G\'eraud and Yann Jacquelet and Julien Marquegnies
and Arthur Cr\'epin-Leblond},
title = {The {SCRIBO} Module of the {Olena} Platform: a Free
Software Framework for Document Image Analysis},
booktitle = {Proceedings of the 11th International Conference on
Document Analysis and Recognition (ICDAR)},
year = 2011,
address = {Beijing, China},
month = sep,
organization = {International Association for Pattern Recognition (IAPR)},
project = {Olena},
urllrde = {201109-ICDAR},
abstract = {Electronic documents are becoming more and more usable thanks
to better and more affordable network, storage and
computational facilities. But in order to benefit from
computer-aided document management, paper documents must be
digitized and analyzed. This task may be challenging at
several levels. Data may be of multiple types thus
requiring different adapted processing chains. The tools to
be developed should also take into account the needs and
knowledge of users, ranging from a simple graphical
application to a complete programming framework. Finally,
the data sets to process may be large. In this paper, we
expose a set of features that a Document Image Analysis
framework should provide to handle the previous issues. In
particular, a good strategy to address both flexibility and
efficiency issues is the Generic Programming (GP) paradigm.
These ideas are implemented as an open source module,
SCRIBO, built on top of Olena, a generic and efficient
image processing platform. Our solution features services
such as preprocessing filters, text detection, page
segmentation and document reconstruction (as XML, PDF or
HTML documents). This framework, composed of reusable
software components, can be used to create full-fledged
graphical applications, small utilities, or processing
chains to be integrated into third-party projects.},
keywords = {Document Image Analysis, Software Design, Reusability,
Free Software}
}
@inproceedings{levillain.11.gretsi,
author = {Roland Levillain and {\relax Th}ierry G\'eraud and Laurent
Najman},
title = {Une approche g\'en\'erique du logiciel pour le traitement
d'images pr\'eservant les performances},
booktitle = {Proceedings of the 23rd Symposium on Signal and Image
Processing (GRETSI)},
category = {national},
year = 2011,
address = {Bordeaux, France},
month = sep,
note = {In French.},
project = {Olena},
urllrde = {201109-GRETSI},
abstract = {De plus en plus d'outils logiciels modernes pour le
traitement d'images sont con\c{c}us en prenant en compte le
probl\`eme de la g\'en\'ericit\'e du code, c'est-\`a-dire
la possibilit\'e d'\'ecrire des algorithmes
r\'eutilisables, compatibles avec de nombreux types
d'entr\'ees. Cependant, ce choix de conception se fait
souvent au d\'etriment des performances du code
ex\'ecut\'e. Du fait de la grande vari\'et\'e des types
d'images existants et de la n\'ecessit\'e d'avoir des
impl\'ementations rapides, g\'en\'ericit\'e et performance
apparaissent comme des qualit\'es essentielles du logiciel
en traitement d'images. Cet article pr\'esente une approche
pr\'eservant les performances dans un framework logiciel
g\'en\'erique tirant parti des caract\'eristiques des types
de donn\'ees utilis\'es. Gr\^ace \`a celles-ci, il est
possible d'\'ecrire des variantes d'algorithmes
g\'en\'eriques offrant un compromis entre g\'en\'ericit\'e
et performance. Ces alternatives sont capables de
pr\'eserver une partie des aspects g\'en\'eriques d'origine
tout en apportant des gains substantiels \`a l'ex\'ecution.
D'apr\`es nos essais, ces optimisations g\'en\'eriques
fournissent des performances supportant la comparaison avec
du code d\'edi\'e, allant parfois m\^eme jusqu'\`a surpasser des routines optimis\'ees manuellement.}
}
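A minimal sketch of the generic-variant idea described in this abstract: a
compile-time property of the image type selects either a generic per-pixel
loop or a fast specialized path. All names below are invented for
illustration, and C++17 syntax is used for brevity; the framework discussed
in the paper predates it.

  // Illustration only: invented types, not the paper's actual framework.
  #include <cstddef>
  #include <cstring>
  #include <vector>

  struct image2d {                          // raster image, contiguous buffer
    std::vector<unsigned char> buf;
    static constexpr bool contiguous = true;
    unsigned char* data() { return buf.data(); }
    std::size_t npixels() const { return buf.size(); }
    unsigned char& operator[](std::size_t i) { return buf[i]; }
  };

  template <typename I>
  void fill(I& ima, unsigned char v) {
    if constexpr (I::contiguous)            // fast variant: a single memset
      std::memset(ima.data(), v, ima.npixels());
    else                                    // generic variant: per-pixel loop
      for (std::size_t i = 0; i < ima.npixels(); ++i)
        ima[i] = v;
  }

  int main() {
    image2d ima{std::vector<unsigned char>(16, 0)};
    fill(ima, 255);                         // dispatches to the memset variant
  }

The fast path is chosen from a declared property of the type, so the generic
loop remains available for image types without contiguous storage, which is
the compromise between genericity and performance the abstract describes.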
@inproceedings{levillain.10.icip,
author = {Roland Levillain and {\relax Th}ierry G\'eraud and Laurent
Najman},
title = {Why and How to Design a Generic and Efficient Image
Processing Framework: The Case of the {Milena} Library},
booktitle = {Proceedings of the IEEE International Conference on Image
Processing (ICIP)},
pages = {1941--1944},
year = 2010,
address = {Hong Kong},
month = sep,
project = {Olena},
urllrde = {201009-ICIP},
abstract = {Most image processing frameworks are not generic enough to
provide true reusability of data structures and algorithms.
In fact, genericity allows users to write and experiment with
virtually any method on any compatible input(s). In this
paper, we advocate the use of generic programming in the
design of image processing software, while preserving
performance close to that of dedicated code. The implementation of
our proposal, Milena, a generic and efficient library,
illustrates the benefits of our approach.},
keywords = {Genericity, Image Processing, Software Design,
Reusability, Efficiency}
}
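The claim that one can write and experiment with virtually any method on any
compatible input can be illustrated with a minimal sketch; the structures
below are made up, and Milena's real interfaces are richer.

  // Illustration only; not Milena's actual API.
  #include <vector>

  template <typename I>                     // I exposes value_type and its values
  void threshold(I& ima, typename I::value_type t,
                 typename I::value_type lo, typename I::value_type hi) {
    for (auto& v : ima.values)
      v = (v < t) ? lo : hi;
  }

  struct image2d      { using value_type = int; std::vector<int> values; };
  struct vertex_image { using value_type = int; std::vector<int> values; };

  int main() {
    image2d a{{10, 200, 30}};               // pixels of a raster image
    vertex_image g{{5, 120}};               // values carried by graph vertices
    threshold(a, 100, 0, 255);              // one implementation,
    threshold(g, 100, 0, 255);              // two unrelated input types
  }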
@inproceedings{levillain.10.wadgmm,
author = {Roland Levillain and {\relax Th}ierry G\'eraud and Laurent
Najman},
title = {Writing Reusable Digital Geometry Algorithms in a Generic
Image Processing Framework},
booktitle = {Proceedings of the Workshop on Applications of Digital
Geometry and Mathematical Morphology (WADGMM)},
pages = {96--100},
year = 2010,
address = {Istanbul, Turkey},
month = aug,
url = {http://mdigest.jrc.ec.europa.eu/wadgmm2010/},
project = {Olena},
urllrde = {201008-WADGMM},
abstract = {Digital Geometry software should reflect the generality of
the underlying mathematics: mapping the latter to the
former requires genericity. By designing generic solutions,
one can effectively reuse digital geometry data structures
and algorithms. We propose an image processing framework
centered on the Generic Programming paradigm in which an
algorithm on paper can be turned into a single piece of code,
written once and usable with various input types. This
approach enables users to design and implement new methods
at a lower cost, try cross-domain experiments and help
generalize results.},
keywords = {Generic Programming, Interface, Skeleton, Complex}
}
@inproceedings{levillain.09.ismm,
author = {Roland Levillain and {\relax Th}ierry G\'eraud and Laurent
Najman},
title = {{Milena}: Write Generic Morphological Algorithms Once, Run
on Many Kinds of Images},
booktitle = {Mathematical Morphology and Its Application to Signal and
Image Processing -- Proceedings of the Ninth International
Symposium on Mathematical Morphology (ISMM)},
pages = {295--306},
year = 2009,
editor = {Michael H. F. Wilkinson and Jos B. T. M. Roerdink},
series = {Lecture Notes in Computer Science},
address = {Groningen, The Netherlands},
month = aug,
publisher = {Springer Berlin / Heidelberg},
volume = 5720,
project = {Olena},
urllrde = {200908-ISMM},
abstract = {We present a programming framework for discrete
mathematical morphology centered on the concept of
genericity. We show that formal definitions of
morphological algorithms can be translated into actual
code, usable on virtually any kind of compatible images,
provided a general definition of the concept of image is
given. This work is implemented in Milena, a generic,
efficient, and user-friendly image processing library.},
keywords = {mathematical morphology, image processing operator,
genericity, programming}
}
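The abstract's point, that a formal definition translates into code reusable
on any compatible image, can be sketched for dilation,
$\delta(u)(p) = \max_{q \in \mathcal{N}(p)} u(q)$; the interfaces below are
hypothetical stand-ins for Milena's.

  // Illustration only: a 1D "image" and a segment-shaped neighborhood.
  #include <algorithm>
  #include <cstddef>
  #include <vector>

  struct signal {                           // sites are indices, values are ints
    std::vector<int> v;
    std::vector<std::size_t> domain() const {
      std::vector<std::size_t> d(v.size());
      for (std::size_t i = 0; i < d.size(); ++i) d[i] = i;
      return d;
    }
    int  operator()(std::size_t p) const { return v[p]; }
    int& operator()(std::size_t p)       { return v[p]; }
  };

  struct segment {                          // sites within radius r, clipped
    std::size_t r, n;
    std::vector<std::size_t> operator()(std::size_t p) const {
      std::vector<std::size_t> q;
      for (std::size_t i = (p > r ? p - r : 0); i <= p + r && i < n; ++i)
        q.push_back(i);
      return q;
    }
  };

  // delta(u)(p) = max over q in N(p) of u(q), transcribed line for line.
  template <typename I, typename N>
  I dilation(const I& u, const N& nbh) {
    I out = u;
    for (auto p : u.domain())
      for (auto q : nbh(p))
        out(p) = std::max(out(p), u(q));
    return out;
  }

  int main() {
    signal u{{0, 3, 1, 0, 2}};
    signal d = dilation(u, segment{1, u.v.size()});  // d.v == {3,3,3,2,2}
  }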
@inproceedings{darbon.08.iwcia,
author = {J\'er\^ome Darbon},
title = {Global Optimization for First Order {Markov} Random Fields
with Submodular Priors},
booktitle = {Proceedings of the 12th International Workshop on
Combinatorial Image Analysis (IWCIA'08)},
year = 2008,
address = {Buffalo, New York, USA},
month = apr,
project = {Image},
urllrde = {200801-IWCIA},
abstract = {This paper copes with the optimization of Markov Random
Fields with pairwise interactions defined on arbitrary
graphs. The set of labels is assumed to be linearly ordered
and the priors are supposed to be submodular. Under these
assumptions we propose an algorithm which computes an exact
minimizer of the Markovian energy. Our approach relies on
mapping the original problem into a combinatorial one which
involves only binary variables. The latter is shown to be
exactly solvable via computing a maximum flow. The
restatement into a binary combinatorial problem is done by
considering the level-sets of the labels instead of the
label values themselves. The submodularity of the priors is
shown to be a necessary and sufficient condition for the
applicability of the proposed approach.}
}
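In standard notation (ours, chosen to match the abstract), the energy and
its binary restatement read:

  \[
  E(u) = \sum_i f_i(u_i) + \sum_{(i,j)\in\mathcal{E}} g_{ij}(u_i, u_j),
  \qquad u_i \in \{0, \dots, L-1\},
  \]

with level sets $u_i^\lambda = \mathbb{1}[u_i \le \lambda]$, so that
$u_i = \sum_{\lambda=0}^{L-2} (1 - u_i^\lambda)$. Each level yields a binary
problem solvable by a maximum flow, and the construction applies exactly
when every pairwise prior is submodular:

  \[
  g_{ij}(a, b) + g_{ij}(a+1, b+1) \;\le\; g_{ij}(a, b+1) + g_{ij}(a+1, b).
  \]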
@inproceedings{geraud.08.mpool,
author = {{\relax Th}ierry G\'eraud and Roland Levillain},
title = {Semantics-Driven Genericity: A Sequel to the Static {C++}
Object-Oriented Programming Paradigm ({SCOOP 2})},
booktitle = {Proceedings of the 6th International Workshop on
Multiparadigm Programming with Object-Oriented Languages
(MPOOL)},
year = 2008,
address = {Paphos, Cyprus},
month = jul,
project = {Olena},
urllrde = {200807-MPOOL},
abstract = {Classical (unbounded) genericity in \Cxx{}03 defines the
interactions between generic data types and algorithms in
terms of concepts. Concepts define the requirements over a
type (or a parameter) by expressing constraints on its
methods and dependent types (typedefs). The upcoming
\Cxx{}0x standard will promote concepts from abstract
entities (not directly enforced by the tools) to language
constructs, enabling compilers and tools to perform
additional checks on generic constructs as well as enabling
new features (e.g., concept-based overloading). Most modern
languages support this notion of signature on generic
types. However, generic types built on other types and
relying on concepts to both ensure type conformance and
drive code specialization, restrain the interface and the
implementation of the newly created type: specific methods
and associated types not mentioned in the concept will not
be part of the new type. The paradigm of concept-based
genericity lacks the required semantics to transform types
while retaining or adapting their intrinsic capabilities.
We present a new form of semantically-enriched genericity
allowing static generic type transformations through a
simple form of type introspection based on type metadata
called properties. This approach relies on a new Static
\Cxx Object-Oriented Programming (SCOOP) paradigm, and is
adapted to the creation of generic and efficient libraries,
especially in the field of scientific computing. Our
proposal uses a metaprogramming facility built into a \Cxx
library called Static, and requires neither a language
extension nor additional processing (preprocessor,
transformation tool).}
}
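The "properties" mentioned here, type metadata queried at compile time, can
be reconstructed in toy form; the Static library's actual machinery is much
richer than this sketch.

  // Toy version of compile-time properties; not the Static library itself.
  #include <cstddef>

  struct image2d { unsigned char* buf; };   // some concrete image type

  template <typename T> struct props;       // per-type metadata, specialized below

  template <> struct props<image2d> {
    static constexpr bool writable = true;
    using value_type = unsigned char;
  };

  // A generic type transformation driven by properties: wrap any image whose
  // properties say it is writable into a read-only view keeping its value type.
  template <typename I>
  struct read_only {
    static_assert(props<I>::writable, "only writable images need wrapping");
    using value_type = typename props<I>::value_type;
    const I& ref;
    value_type operator()(std::size_t p) const { return ref.buf[p]; }
  };

  int main() {
    unsigned char px[4] = {1, 2, 3, 4};
    image2d ima{px};
    read_only<image2d> view{ima};           // introspection happens at compile time
    return view(2);                         // reads the value 3
  }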
@inproceedings{baillard.07.gretsi,
author = {Anthony Baillard and {\relax Ch}ristophe Berger and
Emmanuel Bertin and {\relax Th}ierry G\'eraud and Roland
Levillain and Nicolas Widynski},
title = {Algorithme de calcul de l'arbre des composantes avec
applications \`a la reconnaissance des formes en imagerie
satellitaire},
booktitle = {Proceedings of the 21st Symposium on Signal and Image
Processing (GRETSI)},
category = {national},
year = 2007,
address = {Troyes, France},
month = sep,
project = {Image},
urllrde = {200705-GRETSI},
abstract = {In this paper a new algorithm to compute the component
tree is presented. As compared to the state-of-the-art,
this algorithm does not use excessive memory and is able to
work efficiently on images whose values are highly
quantized or even on images having floating-point values. We
also describe how it can be applied to astronomical data to
identify relevant objects.}
}
@inproceedings{berger.07.icip,
author = {{\relax Ch}ristophe Berger and {\relax Th}ierry G\'eraud
and Roland Levillain and Nicolas Widynski and Anthony
Baillard and Emmanuel Bertin},
title = {Effective Component Tree Computation with Application to
Pattern Recognition in Astronomical Imaging},
booktitle = {Proceedings of the IEEE International Conference on Image
Processing (ICIP)},
pages = {IV-41--IV-44},
volume = 4,
year = 2007,
address = {San Antonio, TX, USA},
month = sep,
project = {Image},
urllrde = {200705-ICIP},
abstract = {In this paper a new algorithm to compute the component
tree is presented. As compared to the state of the art,
this algorithm does not use excessive memory and is able to
work efficiently on images whose values are highly
quantized or even on images having floating-point values. We
also describe how it can be applied to astronomical data to
identify relevant objects.}
}
@inproceedings{darbon.07.ei,
author = {J\'er\^ome Darbon and Marc Sigelle and Florence Tupin},
title = {The use of levelable regularization functions for {MRF}
restoration of {SAR} images},
booktitle = {Proceedings of the 19th SPIE Symposium on Electronic
Imaging},
year = 2007,
address = {San Jose, CA, USA},
month = jan,
project = {Image},
urllrde = {200701-SPIE},
abstract = {It is well-known that Total Variation (TV) minimization
with $L^2$ data fidelity terms (which corresponds to white
Gaussian additive noise) yields a restored image which
presents some loss of contrast. The same behavior occurs
for TV models with non-convex data fidelity terms that
represent speckle noise. In this note we propose a new
approach to cope with the restoration of Synthetic Aperture
Radar images while preserving the contrast.}
}
@inproceedings{darbon.07.mirage,
author = {J\'er\^ome Darbon},
title = {A Note on the Discrete Binary {Mumford-Shah} Model},
booktitle = {Proceedings of the international Computer Vision /
Computer Graphics Collaboration Techniques and Applications
(MIRAGE 2007)},
year = 2007,
address = {Paris, France},
month = mar,
project = {Image},
urllrde = {200703-MIRAGE},
abstract = {This paper concerns itself with the analysis of the
two-phase Mumford-Shah model also known as the active
contour without edges model introduced by Chan and Vese. It
consists of approximating an observed image by a piecewise
constant image which can take only two values. First we
show that this model with the $L^1$-norm as data fidelity
yields a contrast invariant filter which is a well known
property of morphological filters. Then we consider a
discrete version of the original problem. We show that an
inclusion property holds for the minimizers. The latter is
used to design an efficient graph-cut based algorithm which
computes an exact minimizer. Some preliminary results are
presented.}
}
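In the discrete two-phase setting, the energy analyzed here takes the
standard Chan--Vese-like form (our notation, consistent with the abstract):

  \[
  E(\chi, c_0, c_1) =
  \sum_p \bigl( (1 - \chi_p)\,|v_p - c_0| + \chi_p\,|v_p - c_1| \bigr)
  + \beta \sum_{(p,q)} |\chi_p - \chi_q|,
  \qquad \chi_p \in \{0, 1\},
  \]

where $v$ is the observed image, $c_0, c_1$ are the two gray levels, the
pairwise sum runs over adjacent pixels, and the $L^1$ data term is the one
shown to yield a contrast invariant filter.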
@inproceedings{chekroun.06.iciar,
author = {Mickael Chekroun and J\'er\^ome Darbon and Igor Ciril},
title = {On a Polynomial Vector Field Model for Shape
Representation},
booktitle = {Proceedings of the International Conference on Image
Analysis and Recognition (ICIAR)},
publisher = {Springer-Verlag},
year = 2006,
address = {Povoa de Varzim, Portugal},
month = sep,
project = {Image},
urllrde = {200609-ICIAR},
abstract = {In this paper we propose an efficient algorithm to perform
a polynomial approximation of the vector field derived from
the usual distance mapping method. The main ingredients
consist of minimizing a quadratic functional and
transforming this problem in an appropriate setting for
implementation. With this approach, we reduce the problem
of obtaining an approximating polynomial vector field to
the resolution of an inexpensive linear algebraic system.
By this procedure, we obtain an analytical shape
representation that relies only on some coefficients.
Fidelity and numerical efficiency of our approach are
presented on illustrative examples.}
}
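The reduction from a quadratic functional to a linear system is the usual
least-squares mechanics; in generic form (not the paper's exact notation),
writing the sought field on a polynomial basis $(\Phi_k)$:

  \[
  \min_c \Bigl\| V - \sum_k c_k \Phi_k \Bigr\|^2
  \quad\Longrightarrow\quad
  A\,c = b, \qquad
  A_{k\ell} = \langle \Phi_k, \Phi_\ell \rangle, \qquad
  b_k = \langle V, \Phi_k \rangle,
  \]

where $V$ is the vector field derived from the distance mapping and the
coefficients $c_k$ form the analytical shape representation.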
@article{darbon.06.jmiv,
author = {J\'er\^ome Darbon and Marc Sigelle},
title = {Image restoration with discrete constrained {T}otal
{Variation}---Part~{I}: Fast and exact optimization},
journal = {Journal of Mathematical Imaging and Vision},
year = 2006,
volume = 26,
number = 3,
month = dec,
pages = {261--276},
project = {Image},
urllrde = {2006XXX-JMIVa},
abstract = {This paper deals with the total variation minimization
problem in image restoration for convex data fidelity
functionals. We propose a new and fast algorithm which
computes an exact solution in the discrete framework. Our
method relies on the decomposition of an image into its
level sets. It maps the original problems into independent
binary Markov Random Field optimization problems at each
level. Exact solutions of these binary problems are found
thanks to minimum cost cut techniques in graphs. These
binary solutions are proved to be monotone increasing with
levels and thus yield an exact solution of the discrete
original problem. Furthermore we show that minimization of
total variation under $L^1$ data fidelity term yields a
self-dual contrast invariant filter. Finally we present
some results.}
}
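The decomposition at the heart of the method rests on a coarea-style
identity; in standard notation consistent with the abstract:

  \[
  TV(u) = \sum_\lambda TV(u^\lambda),
  \qquad u^\lambda_p = \mathbb{1}[u_p \le \lambda],
  \]

so the posterior energy splits into one binary MRF per level, each solved
exactly by a minimum cut. Monotonicity of the binary minimizers,
$\hat u^\lambda \le \hat u^\mu$ for $\lambda \le \mu$, then lets the
gray-level solution be rebuilt as
$\hat u_p = \min \{ \lambda : \hat u^\lambda_p = 1 \}$.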
@article{darbon.06.jmivb,
author = {J\'er\^ome Darbon and Marc Sigelle},
title = {Image restoration with discrete constrained {T}otal
{Variation}---Part~{II}: Levelable functions, convex priors
and non-convex case},
journal = {Journal of Mathematical Imaging and Vision},
year = 2006,
volume = 26,
number = 3,
month = dec,
pages = {277--291},
project = {Image},
urllrde = {2006XXX-JMIVb},
abstract = {In Part II of this paper we extend the results obtained in
Part I for total variation minimization in image
restoration towards the following directions: first we
investigate the decomposability property of energies on
levels, which leads us to introduce the concept of
levelable regularization functions (which TV is the
paradigm of). We show that convex levelable posterior
energies can be minimized exactly using the
level-independent cut optimization scheme seen in Part I.
Next we extend this graph-cut optimization scheme to
the case of non-convex levelable energies. We present
convincing restoration results for images corrupted with
impulsive noise. We also provide a minimum-cost based
algorithm which computes a global minimizer for Markov
Random Fields with convex priors. Last we show that
non-levelable models with convex local conditional
posterior energies such as the class of generalized
Gaussian models can be exactly minimized with a generalized
coupled Simulated Annealing.}
}
@inproceedings{darbon.06.siam,
author = {J\'er\^ome Darbon and Marc Sigelle},
title = {Fast and Exact Discrete Image Restoration Based on Total
Variation and on Its Extensions to Levelable Potentials},
booktitle = {SIAM Conference on Imaging Sciences},
year = 2006,
address = {Minneapolis, USA},
month = may,
project = {Image},
urllrde = {200605-SIAM},
abstract = {We investigate the decomposition property of posterior
restoration energies on level sets in a discrete Markov
Random Field framework. This leads us to the concept of
'levelable' potentials (which TV is shown to be the
paradigm of). We prove that convex levelable posterior
energies can be minimized exactly with level-independent
binary graph cuts. We extend this scheme to the case of
non-convex levelable energies, and present convincing
restoration results for images degraded by impulsive
noise.}
}
@techreport{darbon.06.tr,
author = {J\'er\^ome Darbon and Marc Sigelle and Florence Tupin},
title = {A note on nice-levelable {MRFs} for {SAR} image denoising
with contrast preservation},
institution = {Signal and Image Processing Group, Ecole Nationale
Sup\'erieure des T\'el\'ecommunications},
year = 2006,
number = {2006D006},
address = {Paris, France},
month = sep,
project = {Image},
annote = {The publication darbon.07.ei (200701-SPIE) is based on
this technical report.},
urllrde = {200701-SPIE}
}
@inproceedings{lesage.06.isvc,
author = {David Lesage and J\'er\^ome Darbon and Ceyhun Burak Akg\"ul},
title = {An Efficient Algorithm for Connected Attribute Thinnings
and Thickenings},
booktitle = {Proceedings of the second International Conference on
Visual Computing},
year = 2006,
address = {Lake Tahoe, Nevada, USA},
month = nov,
project = {Image},
pages = {393--404},
volume = 4292,
series = {Lecture Notes in Computer Science},
publisher = {Springer-Verlag},
urllrde = {200611-ISVC},
abstract = {Connected attribute filters are anti-extensive
morphological operators widely used for their ability to
simplify an image without moving its contours. In this
paper, we present a fast, versatile and easy-to-implement
algorithm for grayscale connected attribute thinnings and
thickenings, a subclass of connected filters for the wide
range of non-increasing attributes. We show that our
algorithm consumes less memory and is computationally more
efficient than other available methods on natural images.}
}
@article{yoruk.06.itip,
author = {Erdem Y\"or\"uk and Ender Konukoglu and B\"ulent Sankur
and J\'er\^ome Darbon},
title = {Shape-based hand recognition},
journal = {IEEE Transactions on Image Processing},
year = 2006,
volume = 15,
number = 7,
pages = {1803--1815},
month = jul,
project = {Image},
urllrde = {2006XX-ITIP},
abstract = {The problem of person recognition and verification based
on hand images has been addressed. The system is
based on the images of the right hands of the subjects,
captured by a flatbed scanner in an unconstrained pose at
45 dpi. In a preprocessing stage of the algorithm, the
silhouettes of hand images are registered to a fixed pose,
which involves both rotation and translation of the hand
and, separately, of the individual fingers. Two feature
sets have been comparatively assessed, Hausdorff distance
of the hand contours and independent component features of
the hand silhouette images. Both the classification and the
verification performances are found to be very satisfactory
as it was shown that, at least for groups of about five
hundred subjects, hand-based recognition is a viable secure
access control scheme.}
}
@inproceedings{baillard.05.adass,
author = {Anthony Baillard and Emmanuel Bertin and Yannick Mellier
and Henry Joy {McCracken} and {\relax Th}ierry G\'eraud and
Roser Pell\'o and Jean-Fran\c{c}ois {LeBorgne} and Pascal
Fouqu\'e},
title = {Project {EFIGI}: Automatic classification of galaxies},
year = 2005,
booktitle = {Astronomical Data Analysis Software and Systems XV},
volume = 351,
pages = {236--239},
publisher = {Astronomical Society of the Pacific},
series = {Conference},
url = {http://www.aspbooks.org/custom/publications/paper/index.phtml?paper_id=3398},
editor = {Carlos Gabriel and {\relax Ch}ristophe Arviset and Daniel
Ponz and Enrique Solano},
isbn = {1-58381-219-9},
project = {Image},
urllrde = {200512-ADASS},
abstract = {We propose an automatic system to classify images of
galaxies with varying resolution. Morphologically typing
galaxies is a difficult task in particular for distant
galaxies convolved by a point-spread function and suffering
from a poor signal-to-noise ratio. In the context of the
first phase of the project EFIGI (extraction of the
idealized shapes of galaxies in imagery), we present the
three steps of our software: cleaning, dimensionality
reduction and supervised learning. We present preliminary
results derived from a subset of 774 galaxies from the
Principal Galaxies Catalog and compare them to human
classifications made by astronomers. We use g-band images
from the Sloan Digital Sky Survey. Finally, we discuss
future improvements which we intend to implement before
releasing our tool to the community.}
}
@article{bloch.05.prl,
author = {Isabelle Bloch and Olivier Colliot and Oscar Camara and
{\relax Th}ierry G\'eraud},
title = {Fusion of spatial relationships for guiding recognition,
example of brain structure recognition in {3D} {MRI}},
journal = {Pattern Recognition Letters},
year = 2005,
volume = 26,
number = 4,
month = mar,
pages = {449--457},
project = {Image},
urllrde = {200407-PRL},
abstract = {Spatial relations play an important role in recognition of
structures embedded in a complex environment and for
reasoning under imprecision. Several types of relationships
can be modeled in a unified way using fuzzy mathematical
morphology. Their combination benefits from the powerful
framework of fuzzy set theory for fusion tasks and decision
making. This paper presents several methods of fusion of
information about spatial relationships and illustrates
them on the example of model-based recognition of brain
structures in 3D magnetic resonance imaging.}
}
@inproceedings{darbon.05.eusipco,
author = {J\'er\^ome Darbon and Ceyhun Burak Akg\"ul},
title = {An efficient algorithm for attribute openings and
closings},
booktitle = {Proceedings of the 13th European Signal Processing
Conference (EUSIPCO)},
year = 2005,
address = {Antalya, Turkey},
month = sep,
project = {Image},
urllrde = {200509-Eusipco},
abstract = {In this paper, we present fast algorithms for area opening
and closing on grayscale images. Salembier's max-tree based
algorithm is one of the well-known methods to perform area
opening. It makes use of a special representation where
each node in the tree stands for a flat region and the tree
itself is oriented towards the maxima of the grayscale
image. Pruning the tree with respect to some attribute,
e.g., the area, boils down to attribute opening. Following
the same approach, we propose an algorithm for area opening
(closing) without building the max-tree (min-tree). Our
algorithm exhibits considerable performance compared to the
state of the art in this domain.}
}
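For intuition, here is the binary special case, removing connected
components of area below $\lambda$, written as a plain flood fill. This is
illustration code, not the paper's algorithm: the grayscale method above is
an efficient alternative to applying such a filter at every threshold.

  // Binary area opening on a W x H image stored row-major (4-connectivity):
  // connected components with fewer than lambda pixels are removed.
  #include <cstddef>
  #include <vector>

  std::vector<int> area_opening(const std::vector<int>& in,
                                int W, int H, std::size_t lambda) {
    std::vector<int> out(in.size(), 0), comp;
    std::vector<char> seen(in.size(), 0);
    for (int s = 0; s < W * H; ++s) {
      if (!in[s] || seen[s]) continue;
      comp.assign(1, s);                    // flood-fill one component from s
      seen[s] = 1;
      for (std::size_t k = 0; k < comp.size(); ++k) {
        int p = comp[k], x = p % W, y = p / W;
        const int nb[4] = {x > 0 ? p - 1 : -1, x < W - 1 ? p + 1 : -1,
                           y > 0 ? p - W : -1, y < H - 1 ? p + W : -1};
        for (int q : nb)
          if (q >= 0 && in[q] && !seen[q]) { seen[q] = 1; comp.push_back(q); }
      }
      if (comp.size() >= lambda)            // keep only large enough components
        for (int p : comp) out[p] = 1;
    }
    return out;
  }

  int main() {
    std::vector<int> in = {1, 1, 0, 0,      // 4x2 image: a pair and a singleton
                           0, 0, 0, 1};
    std::vector<int> out = area_opening(in, 4, 2, 2);  // the singleton vanishes
  }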
@inproceedings{darbon.05.ibpria,
author = {J\'er\^ome Darbon and Marc Sigelle},
title = {A Fast and Exact Algorithm for Total Variation
Minimization},
booktitle = {Proceedings of the 2nd Iberian Conference on Pattern
Recognition and Image Analysis (IbPRIA)},
publisher = {Springer-Verlag},
volume = 3522,
pages = {351--359},
year = 2005,
address = {Estoril, Portugal},
month = jun,
project = {Image},
urllrde = {200506-IbPria},
abstract = {This paper deals with the minimization of the total
variation under a convex data fidelity term. We propose an
algorithm which computes an exact minimizer of this
problem. The method relies on the decomposition of an image
into its level sets. Using these level sets, we map the
problem into optimizations of independent binary Markov
Random Fields. Binary solutions are found thanks to
graph-cut techniques and we show how to derive a fast
algorithm. We also study the special case when the fidelity
term is the $L^1$-norm. Finally we provide some
experiments.}
}
@inproceedings{darbon.05.ispa,
author = {J\'er\^ome Darbon},
title = {Total Variation Minimization with $L^1$ Data Fidelity as a
Contrast Invariant Filter},
booktitle = {Proceedings of the 4th International Symposium on Image
and Signal Processing and Analysis (ISPA 2005)},
year = 2005,
address = {Zagreb, Croatia},
month = sep,
pages = {221--226},
project = {Image},
urllrde = {200509-Ispa},
abstract = {This paper sheds new light on minimization of the total
variation under the $L^1$-norm as data fidelity term
($L^1+TV$) and its link with mathematical morphology. It is
well known that morphological filters enjoy the property of
being invariant with respect to any change of contrast.
First, we show that minimization of $L^1+TV$ yields a
self-dual and contrast invariant filter. Then, we further
constrain the minimization process by only optimizing the
grey levels of level sets of the image while keeping their
boundaries fixed. This new constraint is maintained thanks
to the Fast Level Set Transform which yields a complete
representation of the image as a tree. We show that this
filter can be expressed as a Markov Random Field on this
tree. Finally, we present some results which demonstrate
that these new filters can be particularly useful as a
preprocessing stage before segmentation.}
}
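The two morphological properties invoked here have compact standard
statements: for a filter $T$,

  \[
  T(g \circ u) = g \circ T(u)
  \quad \text{for every increasing contrast change } g
  \qquad \text{(contrast invariance)},
  \]
  \[
  T(-u) = -T(u) \qquad \text{(self-duality)},
  \]

and the paper shows that minimizing $L^1 + TV$ yields a filter satisfying
both.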
@inproceedings{darbon.05.isvc,
author = {J\'er\^ome Darbon and Sylvain Peyronnet},
title = {A Vectorial Self-Dual Morphological Filter based on Total
Variation Minimization},
booktitle = {Proceedings of the First International Conference on
Visual Computing},
year = 2005,
address = {Lake Tahoe, Nevada, USA},
month = dec,
project = {Image},
pages = {388--395},
series = {Lecture Notes in Computer Science},
publisher = {Springer-Verlag},
volume = 3804,
urllrde = {200512-Isvc},
abstract = {We present a vectorial self-dual morphological filter.
Contrary to many methods, our approach does not require the
use of an ordering on vectors. It relies on the
minimization of the total variation with $L^1$ norm as data
fidelity on each channel. We further constrain this
minimization in order not to create new values. It is shown
that this minimization yields a self-dual and contrast
invariant filter. Although the above minimization is not a
convex problem, we propose an algorithm which computes a
global minimizer. This algorithm relies on minimum cost
cut-based optimizations.}
}
@techreport{darbon.05.tr,
author = {J\'er\^ome Darbon and Marc Sigelle},
title = {A fast and exact algorithm for total variation
minimization},
institution = {ENST},
year = 2005,
number = {2005D002},
address = {Paris, France},
month = jan,
project = {Image},
annote = {This technical report corresponds to the publication
darbon.05.ibpria.},
urllrde = {200501-TR},
abstract = {This paper deals with the minimization of the total
variation under a convex data fidelity term. We propose an
algorithm which computes an exact minimizer of this
problem. The method relies on the decomposition of an image
into its level sets. Using these level sets, we map the
problem into optimizations of independent binary Markov
Random Fields. Binary solutions are found thanks to
graph-cut techniques and we show how to derive a fast
algorithm. We also study the special case when the fidelity
term is the $L^1$-norm. Finally we provide some
experiments.}
}
@article{dehak.05.pami,
author = {R\'eda Dehak and Isabelle Bloch and Henri Ma{\^\i}tre},
title = {Spatial reasoning with incomplete information on
relative positioning},
journal = {IEEE Transactions on Pattern Analysis and Machine
Intelligence},
year = 2005,
pages = {1473--1484},
volume = 27,
month = sep,
number = 9,
project = {Image},
urllrde = {200509-PAMI},
abstract = {This paper describes a probabilistic method of inferring
the position of a point with respect to a reference point
knowing their relative spatial position to a third point.
We address this problem in the case of incomplete
information where only the angular spatial relationships
are known. The use of probabilistic representations allows
us to model prior knowledge. We derive exact formulae
expressing the conditional probability of the position
given the two known angles, in typical cases: uniform or
Gaussian random prior distributions within rectangular or
circular regions. This result is illustrated with respect
to two different simulations: The first is devoted to the
localization of a mobile phone using only angular
relationships, the second, to geopositioning within a city.
This last example uses angular relationships and some
additional knowledge about the position.}
}
@inproceedings{geraud.05.ismm,
author = {{\relax Th}ierry G\'eraud},
title = {Ruminations on {T}arjan's {U}nion-{F}ind algorithm and
connected operators},
booktitle = {Mathematical Morphology: 40 Years On -- Proceedings of
the 7th International Symposium on Mathematical Morphology
(ISMM)},
year = 2005,
address = {Paris, France},
month = apr,
pages = {105--116},
publisher = {Springer},
series = {Computational Imaging and Vision},
volume = 30,
project = {Image},
urllrde = {200504-ISMM},
abstract = {This paper presents a comprehensive and general form of
Tarjan's union-find algorithm dedicated to connected
operators. An interesting feature of this form is to
introduce the notion of separated domains. The properties
of this form and its flexibility are discussed and
highlighted with examples. In particular, we give clues to
handle correctly the constraint of domain-disjointness
preservation and, as a consequence, we show how we can rely
on ``union-find'' to obtain algorithms for self-dual
filter approaches and levelings with a marker function.}
}
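The union-find core that this paper generalizes is short enough to quote in
textbook form, with path halving and union by rank; the paper's actual
contributions, separated domains and the connected-operator specializations,
are not shown here.

  // Textbook union-find, the primitive behind the algorithms above.
  #include <cstddef>
  #include <numeric>
  #include <utility>
  #include <vector>

  struct union_find {
    std::vector<std::size_t> parent, rank_;
    explicit union_find(std::size_t n) : parent(n), rank_(n, 0) {
      std::iota(parent.begin(), parent.end(), 0);  // each site is its own root
    }
    std::size_t find(std::size_t x) {              // path halving on the way up
      while (parent[x] != x) x = parent[x] = parent[parent[x]];
      return x;
    }
    void unite(std::size_t a, std::size_t b) {     // union by rank
      a = find(a); b = find(b);
      if (a == b) return;
      if (rank_[a] < rank_[b]) std::swap(a, b);
      parent[b] = a;
      if (rank_[a] == rank_[b]) ++rank_[a];
    }
  };

  int main() {
    union_find uf(4);
    uf.unite(0, 1); uf.unite(1, 2);
    return uf.find(2) == uf.find(0) ? 0 : 1;       // exits 0: same component
  }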
@inproceedings{darbon.04.ecoopphd,
author = {J\'er\^ome Darbon and {\relax Th}ierry G\'eraud and
Patrick Bellot},
title = {Generic algorithmic blocks dedicated to image processing},
booktitle = {Proceedings of the ECOOP Workshop for PhD Students},
year = 2004,
address = {Oslo, Norway},
month = jun,
project = {Olena},
urllrde = {200406-ECOOPPHD},
abstract = {This paper deals with the implementation of algorithms in
the specific domain of image processing. Although many
image processing libraries are available, they generally
lack genericity and flexibility. Many image processing
algorithms can be expressed as compositions of elementary
algorithmic operations referred to as blocks. Implementing
these compositions is achieved using generic programming.
Our solution is compared to previous ones and we
demonstrate it on a class of image processing algorithms.}
}
@inproceedings{darbon.04.iwcia,
author = {J\'er\^ome Darbon and Marc Sigelle},
title = {Exact optimization of discrete constrained total variation
minimization problems},
booktitle = {Proceedings of the 10th International Workshop on
Combinatorial Image Analysis (IWCIA)},
year = 2004,
address = {Auckland, New Zealand},
month = dec,
pages = {548--557},
editor = {R. Klette and J. Zunic},
series = {Lecture Notes in Computer Science},
publisher = {Springer-Verlag},
volume = 3322,
project = {Image},
urllrde = {200412-IWCIA},
abstract = {This paper deals with the total variation minimization
problem when the fidelity is either the $L^2$-norm or the
$L^1$-norm. We propose an algorithm which computes the
exact solution of these two problems after discretization.
Our method relies on the decomposition of an image into its
level sets. It maps the original problems into independent
binary Markov Random Field optimization problems associated
with each level set. Exact solutions of these binary
problems are found thanks to minimum-cut techniques. We
prove that these binary solutions are increasing and thus
allow us to reconstruct the solution of the original
problems.}
}
@techreport{darbon.04.tr,
author = {J\'er\^ome Darbon and Marc Sigelle},
title = {Exact optimization of discrete constrained total variation
minimization problems},
institution = {ENST},
year = 2004,
number = {2004C004},
address = {Paris, France},
month = oct,
project = {Image},
annote = {This technical report corresponds to the publication
darbon.04.iwcia. ; 200412-IWCIA},
urllrde = {200410-TR},
abstract = {This paper deals with the total variation minimization
problem when the fidelity is either the $L^2$-norm or the
$L^1$-norm. We propose an algorithm which computes the
exact solution of these two problems after discretization.
Our method relies on the decomposition of an image into its
level sets. It maps the original problems into independent
binary Markov Random Field optimization problems associated
with each level set. Exact solutions of these binary
problems are found thanks to minimum-cut techniques. We
prove that these binary solutions are increasing and thus
allow us to reconstruct the solution of the original
problems.}
}
@inproceedings{geraud.04.iccvg,
author = {{\relax Th}ierry G\'eraud and Giovanni Palma and Niels
{Van Vliet}},
title = {Fast color image segmentation based on levellings in
feature space},
booktitle = {Computer Vision and Graphics---International Conference on
Computer Vision and Graphics (ICCVG), Warsaw, Poland,
September 2004},
year = 2004,
series = {Computational Imaging and Vision},
volume = 32,
publisher = {Kluwer Academic Publishers},
pages = {800--807},
note = {On CD.},
project = {Image},
urllrde = {200408-ICCVG},
abstract = {This paper presents a morphological classifier with
application to color image segmentation. The basic idea of
a morphological classifier is to consider that a color
histogram is a 3D gray-level image and that morphological
operators can be applied to modify this image. The final
objective is to extract clusters in color space, that is,
identify regions in the 3D image. In this paper, we
particularly focus on a powerful class of morphology-based
filters called levellings to transform the 3D
histogram-image to identify clusters. We also show that our
method gives better results than those of
state-of-the-art methods.}
}
@article{geraud.04.jasp,
author = {{\relax Th}ierry G\'eraud and Jean-Baptiste Mouret},
title = {Fast road network extraction in satellite images using
mathematical morphology and {M}arkov random fields},
journal = {EURASIP Journal on Applied Signal Processing},
year = 2004,
number = 16,
volume = 2004,
pages = {2503--2514},
month = nov,
note = {Special issue on Nonlinear Signal and Image Processing -
Part II},
project = {Image},
doi = {http://doi.acm.org/10.1155/S1110865704409093},
urllrde = {200409-JASP},
abstract = {This paper presents a fast method for road network
extraction in satellite images. It can be seen as a
transposition of the segmentation scheme ``watershed
transform + region adjacency graph + Markov random fields''
to the extraction of curvilinear objects. Many road
extractors found in the literature are composed of two
stages. The first one acts like a filter
that can decide from a local analysis, at every image
point, if there is a road or not. The second stage aims at
obtaining the road network structure. In the method we
propose, we rely on a ``potential'' image, that is,
unstructured image data that can be derived from any road
extractor filter. In such a potential image, the value
assigned to a point is a measure of its likelihood to be
located in the middle of a road. A filtering step applied
on the potential image relies on the area closing operator
followed by the watershed transform to obtain a connected
line which encloses the road network. Then a graph
describing adjacency relationships between watershed lines
is built. Defining Markov random fields upon this graph,
associated with an energetic model of road networks, leads
to the expression of road network extraction as a global
energy minimization problem. This method can easily be
adapted to other image processing fields where the
recognition of curvilinear structures is involved.}
}
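The final extraction step is a standard labeling energy on the curve
adjacency graph $(\mathcal{V}, \mathcal{E})$; in generic form (our
notation):

  \[
  E(x) = \sum_{i \in \mathcal{V}} V_i(x_i)
       + \sum_{(i,j) \in \mathcal{E}} V_{ij}(x_i, x_j),
  \qquad x_i \in \{\text{road}, \text{non-road}\},
  \]

where the unary terms score each watershed line against the potential image
and the pairwise terms encode the road-network model between adjacent
lines.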
@inproceedings{grosicki.04.icc,
author = {Emmanuel Grosicki and Karim Abed-Meraim and R\'eda Dehak},
title = {A novel method to fight the non line of sight error in
{AOA} measurements for mobile location},
booktitle = {Proceedings of the IEEE International Conference on
Communications (ICC)},
year = 2004,
volume = 5,
pages = {2794--2798},
address = {Paris, France},
month = jun,
project = {Image},
urllrde = {200406-ICC},
abstract = {In this contribution, a mobile location method is provided
using measurements from two different Base-Stations.
Although computationally simple, the method is based on a
simple trilateration and takes into account measurement
errors caused by Non-Line-Of-Sight (NLOS) propagation and
the near-far effect. The new method attributes an index of
confidence to each measure, in order to allow the mobile to
select the two most reliable measures and not to use all
measures equally.}
}
@techreport{lefebvre.04.tr,
author = {Sylvain Lefebvre and J\'er\^ome Darbon and Fabrice Neyret},
title = {Unified texture management for arbitrary meshes},
institution = {INRIA Rh\^one-Alpes},
year = 2004,
number = {RR-5210},
address = {France},
month = may,
project = {Image},
urllrde = {200405-RRinria},
abstract = {Video games and simulators commonly use very detailed
textures, whose cumulative size is often larger than the
GPU memory. Textures may be loaded progressively, but
dynamically loading and transferring this large amount of
data in GPU memory results in loading delays and poor
performance. Therefore, managing texture memory has become
an important issue. While this problem has been (partly)
addressed early for the specific case of terrain rendering,
there is no generic texture management system for arbitrary
meshes. We propose such a system, implemented on today's
GPUs, which unifies classical solutions aimed at reducing
memory transfer: progressive loading, texture compression,
and caching strategies. For this, we introduce a new
algorithm -- running on GPU -- to solve the major
difficulty of detecting which parts of the texture are
required for rendering. Our system is based on three
components manipulating a tile pool which stores texture
data in GPU memory. First, the Texture Load Map determines
at every frame the appropriate list of texture tiles (i.e.
location and MIP-map level) to render from the current
viewpoint. Second, the Texture Cache manages the tile pool.
Finally, the Texture Producer loads and decodes required
texture tiles asynchronously in the tile pool. Decoding of
compressed texture data is implemented on GPU to minimize
texture transfer. The Texture Producer can also generate
procedural textures. Our system is transparent to the user,
and the only parameter that must be supplied at runtime is
the current viewpoint. No modifications of the mesh are
required. We demonstrate our system on large scenes
displayed in real time. We show that it achieves
interactive frame rates even in low-memory low-bandwidth
situations.}
}
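The three components around the tile pool can be outlined as follows; the
component names come from the abstract, every signature is invented for
illustration.

  // Structural outline only: names from the abstract, signatures invented.
  #include <cstdint>
  #include <vector>

  struct Tile { std::uint32_t x, y, mip_level; };  // a texture tile address

  struct TilePool {                        // texture data resident in GPU memory
    std::vector<Tile> slots;
  };

  struct TextureLoadMap {                  // per frame: tiles the viewpoint needs
    std::vector<Tile> required_tiles();    // computed on the GPU in the paper
  };

  struct TextureCache {                    // manages residency in the tile pool
    void touch(const Tile&);
    Tile evict();
  };

  struct TextureProducer {                 // asynchronous load and GPU-side decode
    void request(const Tile&);
  };

  int main() {}                            // outline only, no behavior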
@inproceedings{yoruk.04.eusipco,
author = {Erdem Y\"or\"uk and Ender Konukoglu and B\"ulent Sankur and
J\'er\^ome Darbon},
title = {Person authentication based on hand shape},
booktitle = {Proceedings of 12th European Signal Processing Conference
(EUSIPCO)},
year = 2004,
address = {Vienna, Austria},
month = sep,
project = {Image},
urllrde = {200409-EUSIPCO},
abstract = {The problem of person identification based on hand
images has been addressed. The system is based on the
images of the right hands of the subjects, captured by a
flatbed scanner in an unconstrained pose. In a
preprocessing stage of the algorithm, the silhouettes of
hand images are registered to a fixed pose, which involves
both rotation and translation of the hand and, separately,
of the individual fingers. Independent component features
of the hand silhouette images are used for recognition. The
classification performance is found to be very satisfactory
and it was shown that, at least for groups of one hundred
subjects, hand-based recognition is a viable secure access
control scheme.}
}
@article{bloch.03.ai,
author = {Isabelle Bloch and {\relax Th}ierry G\'eraud and Henri
Ma\^{\i}tre},
title = {Representation and fusion of heterogeneous fuzzy
information in the {3D} space for model-based structural
recognition---application to {3D} brain imaging},
journal = {Artificial Intelligence},
month = aug,
year = 2003,
volume = 148,
number = {1-2},
pages = {141--175},
project = {Image},
urllrde = {200308-AI},
abstract = {We present a novel approach to model-based pattern
recognition where structural information and spatial
relationships have a most important role. It is illustrated
in the domain of 3D brain structure recognition using an
anatomical atlas. Our approach performs simultaneously
segmentation and recognition of the scene and the solution
of the recognition task is progressive, processing
successively different objects, using different of
knowledge about the object and about relationships between
objects. Therefore the core of the approach is the
representation part, and constitutes the main contribution
of this paper. We make use of a spatial representation of
each piece of information, as a spatial set representing a
constraint to be satisfied by the searched object, thanks
in particular to fuzzy mathematical operations. Fusion of
these constraints allows us to segment and recognize the
desired object.}
}
@inproceedings{burrus.03.mpool,
author = {Nicolas Burrus and Alexandre Duret-Lutz and {\relax
Th}ierry G\'eraud and David Lesage and Rapha\"el Poss},
title = {A static {C++} object-oriented programming ({SCOOP})
paradigm mixing benefits of traditional {OOP} and generic
programming},
booktitle = {Proceedings of the Workshop on Multiple Paradigm with
Object-Oriented Languages (MPOOL)},
year = 2003,
address = {Anaheim, CA, USA},
month = oct,
project = {Olena},
urllrde = {200310-MPOOL},
abstract = {Object-oriented and generic programming are both supported
in C++. OOP provides high expressiveness whereas GP leads
to more efficient programs by avoiding dynamic typing. This
paper presents SCOOP, a new paradigm which enables both
classical OO design and high performance in C++ by mixing
OOP and GP. We show how classical and advanced OO features
such as virtual methods, multiple inheritance, argument
covariance, virtual types and multimethods can be
implemented in a fully statically typed model, hence
without run-time overhead.}
}
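The usual trick behind such statically typed object-oriented designs is the
curiously recurring template pattern: the base class is parameterized by the
exact type of its subclass, so a "virtual" call resolves at compile time.
This illustrates the general technique, not SCOOP's actual hierarchy.

  // Static dispatch: the base class knows the exact type of its subclass.
  #include <iostream>

  template <typename Exact>
  struct Image {
    int value_at(int p) const {            // the "virtual" method
      return static_cast<const Exact&>(*this).value_at_impl(p);
    }
  };

  struct Constant : Image<Constant> {
    int v;
    int value_at_impl(int) const { return v; }
  };

  template <typename E>
  int sample(const Image<E>& ima) {        // generic code, no vtable lookup
    return ima.value_at(42);
  }

  int main() {
    Constant c;
    c.v = 7;
    std::cout << sample(c) << '\n';        // prints 7
  }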
@inproceedings{geraud.03.grec,
author = {{\relax Th}ierry G\'eraud and Geoffroy Fouquier and Quoc
Peyrot and Nicolas Lucas and Franck Signorile},
title = {Document type recognition using evidence theory},
booktitle = {Proceedings of the 5th IAPR International Workshop on
Graphics Recognition (GREC)},
year = 2003,
pages = {212--221},
editor = {Josep Llad\`os},
address = {Computer Vision Center, UAB, Barcelona, Spain},
month = jul,
project = {Image},
urllrde = {200307-Grec},
abstract = {This paper presents a method to recognize the type of a
document when a database of models (document types) is
given. For instance, when all documents are forms and
we know all the different types of forms, we want to be
able to assign to an input document its form type. To
that aim, we define each model by a set of characteristics
whose nature can vary from one to another. For instance, a
characteristic can be having a flower-shaped logo on
top-left as well as having about 12pt fonts. This paper
does not intend to explain how to extract such knowledge
from documents but it describes how to use such information
to decide what the type of a given document is when
different document types are described by
characteristics.}
}
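Evidence theory combines the per-characteristic mass functions with
Dempster's rule; in its standard statement (the paper's exact modeling may
differ), for $A \neq \emptyset$:

  \[
  (m_1 \oplus m_2)(A) = \frac{1}{1 - K} \sum_{B \cap C = A} m_1(B)\, m_2(C),
  \qquad
  K = \sum_{B \cap C = \emptyset} m_1(B)\, m_2(C),
  \]

where each characteristic contributes a mass function over the set of
document types and $K$ measures the conflict between the sources.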
@inproceedings{geraud.03.ibpria,
author = {{\relax Th}ierry G\'eraud},
title = {Segmentation of curvilinear objects using a
watershed-based curve adjacency graph},
booktitle = {Proceedings of the 1st Iberian Conference on Pattern
Recognition and Image Analysis (IbPRIA)},
pages = {279--286},
year = 2003,
volume = 2652,
series = {Lecture Notes in Computer Science},
address = {Mallorca, Spain},
month = jun,
publisher = {Springer-Verlag},
project = {Image},
urllrde = {200306-Ibpria},
abstract = {This paper presents a general framework to segment
curvilinear objects in 2D images. A pre-processing step
relies on mathematical morphology to obtain a connected
line which encloses curvilinear objects. Then, a graph is
constructed from this line and a Markovian Random Field is
defined to perform objects segmentation. Applications of
our framework are numerous: they go from simple curve
segmentation to complex road network extraction in
satellite images.}
}
@inproceedings{geraud.03.icisp,
author = {{\relax Th}ierry G\'eraud},
title = {Segmentation d'objets curvilignes \`a l'aide des champs de
Markov sur un graphe d'adjacence de courbes issu de
l'algorithme de la ligne de partage des eaux},
booktitle = {Proceedings of the International Conference on Image and
Signal Processing (ICISP)},
year = 2003,
volume = 2,
pages = {404--411},
address = {Agadir, Morocco},
month = jun,
publisher = {Faculty of Sciences at Ibn Zohr University, Morocco},
note = {In French},
project = {Image},
urllrde = {200306-Icisp},
abstract = {This paper presents a general framework to segment
curvilinear objects in 2D images. A pre-processing step
relies on mathematical morphology to obtain a connected
line which encloses curvilinear objects. Then, a graph is
constructed from this line and a Markovian Random Field is
defined to perform objects segmentation. Applications of
our framework are numerous: they go from simple curve
segmentation to complex road network extraction in
satellite images.}
}
@inproceedings{geraud.03.nsip,
author = {{\relax Th}ierry G\'eraud},
title = {Fast Road Network Extraction in Satellite Images using
Mathematical Morphology and {MRF}},
booktitle = {Proceedings of the EURASIP Workshop on Nonlinear Signal
and Image Processing (NSIP)},
year = 2003,
address = {Trieste, Italy},
month = jun,
project = {Image},
urllrde = {200306-Nsip},
abstract = {This paper presents a fast method to extract road network
in satellite images. A pre-processing stage relies on
mathematical morphology to obtain a connected line which
encloses road network. Then, a graph is constructed from
this line and a Markovian Random Field is defined to
perform road extraction.}
}
@inproceedings{xue.03.icip,
author = {Heru Xue and {\relax Th}ierry G\'eraud and Alexandre
Duret-Lutz},
title = {Multi-band segmentation using morphological clustering and
fusion application to color image segmentation},
booktitle = {Proceedings of the IEEE International Conference on Image
Processing (ICIP)},
year = 2003,
pages = {353--356},
volume = 1,
address = {Barcelona, Spain},
month = sep,
project = {Image},
urllrde = {200309-Icip},
abstract = {In this paper we propose a novel approach for color image
segmentation. Our approach is based on segmentation of
subsets of bands using mathematical morphology followed by
the fusion of the resulting segmentation channels. For
color images the band subsets are chosen as RG, RB and GB
pairs, whose 2D histograms are processed as projections of
a 3D histogram. The segmentations in 2D color spaces are
obtained using the watershed algorithm. These 2D
segmentations are then combined to obtain a final result
using a region split-and-merge process. The CIE $L^*a^*b^*$
color space is used to measure the color distance. Our
approach results in improved performance and can be
generalized for multi-band segmentation of images such as
multi-spectral satellite images.}
}
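The 2D histograms the method starts from are straightforward to build; a
minimal sketch for the RG pair with 8-bit values (illustration, not the
paper's code):

  // 2D histogram of the (R, G) pair: one projection of the 3D color
  // histogram on which the watershed is then run. Assumes r and g have
  // the same length.
  #include <array>
  #include <cstddef>
  #include <cstdint>
  #include <vector>

  std::array<std::array<std::uint32_t, 256>, 256>
  histogram_rg(const std::vector<std::uint8_t>& r,
               const std::vector<std::uint8_t>& g) {
    std::array<std::array<std::uint32_t, 256>, 256> h{};  // zero-initialized
    for (std::size_t i = 0; i < r.size(); ++i)
      ++h[r[i]][g[i]];
    return h;
  }

  int main() {
    std::vector<std::uint8_t> r = {255, 255, 0}, g = {0, 0, 128};
    auto h = histogram_rg(r, g);            // h[255][0] == 2, h[0][128] == 1
  }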
@inproceedings{darbon.02.ismm,
author = {J\'er\^ome Darbon and {\relax Th}ierry G\'eraud and
Alexandre Duret-Lutz},
title = {Generic implementation of morphological image operators},
booktitle = {Mathematical Morphology, Proceedings of the 6th
International Symposium (ISMM)},
pages = {175--184},
year = 2002,
address = {Sydney, Australia},
month = apr,
publisher = {CSIRO Publishing},
project = {Olena},
urllrde = {200204-Ismm},
abstract = {Several libraries dedicated to mathematical morphology
exist. But they lack genericity, that is to say, the
ability for operators to accept input of different natures
---2D binary images, graphs enclosing floating values, etc.
We describe solutions which are integrated in Olena, a
library providing morphological operators. We demonstrate
with some examples that translating mathematical formulas
and algorithms into source code is made easy and safe with
Olena. Moreover, experimental results show that no extra
costs at run-time are induced.}
}
@inproceedings{darbon.01.ei,
author = {J\'er\^ome Darbon and Bulent Sankur and Henri Ma\^{\i}tre},
title = {Error correcting code performance for watermark
protection},
booktitle = {Proceedings of the 13th SPIE Symposium on Electronic
Imaging---Security and Watermarking of Multimedia Contents
III (EI27)},
year = 2001,
address = {San Jose, CA, USA},
month = jan,
volume = 4314,
editor = {P.W. Wong and E.J. Delp III},
pages = {663--672},
project = {Image},
urllrde = {200101-Ei},
abstract = {The watermark signals are weakly inserted in images due to
imperceptibility constraints which makes them prone to
errors in the extraction stage. Although the error
correcting codes can potentially improve their performance
one must pay attention to the fact that the watermarking
channel is in general very noisy. We have considered the
trade-off of the BCH codes and repetition codes in various
concatenation modes. At the higher rates that can be
encountered in watermarking channels, such as those due to
low-quality JPEG compression, codes like the BCH codes
cease being useful. Repetition coding seems to be the last
resort at these error rates of 25\% and beyond. It has been
observed that there is a zone of bit error rate where their
concatenation turns out to be more useful. In fact the
concatenation of repetition and BCH codes judiciously
dimensioned, given the available number of insertion sites
and the payload size, achieves a higher reliability level.}
}
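The abstract's point about repetition codes at high error rates can be
checked with one line of arithmetic: a rate-$1/3$ repetition code with
majority decoding, at a bit error rate $p = 0.25$, fails with probability

  \[
  P_e = 3 p^2 (1 - p) + p^3
      = 3 (0.0625)(0.75) + 0.015625 \approx 0.156,
  \]

so three repetitions already cut a raw 25\% error rate roughly in half, in a
regime where algebraic codes designed for much lower channel error rates
break down.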
@inproceedings{geraud.01.ai,
author = {{\relax Th}ierry G\'eraud and Yoann Fabre and Alexandre
Duret-Lutz},
title = {Applying generic programming to image processing},
booktitle = {Proceedings of the IASTED International Conference on
Applied Informatics (AI)---Symposium on Advances in
Computer Applications},
year = 2001,
publisher = {ACTA Press},
editor = {M.H.~Hamza},
address = {Innsbruck, Austria},
pages = {577--581},
month = feb,
project = {Olena},
urllrde = {200102-Ai},
abstract = {This paper presents the evolution of algorithms
implementation in image processing libraries and discusses
the limits of these implementations in terms of
reusability. In particular, we show that in C++, an
algorithm can have a general implementation; said
differently, an implementation can be generic, i.e.,
independent of both the input aggregate type and the type
of the data contained in the input aggregate. A total
reusability of algorithms can therefore be obtained;
moreover, a generic implementation is more natural and does
not introduce a meaningful additional cost in execution
time as compared to an implementation dedicated to a
particular input type.}
}
@inproceedings{geraud.01.icip,
author = {{\relax Th}ierry G\'eraud and Pierre-Yves Strub and
J\'er\^ome Darbon},
title = {Color image segmentation based on automatic morphological
clustering},
booktitle = {Proceedings of the IEEE International Conference on Image
Processing (ICIP)},
year = 2001,
volume = 3,
pages = {70--73},
address = {Thessaloniki, Greece},
month = oct,
project = {Image},
urllrde = {200110-Icip},
abstract = {We present an original method to segment color images
using a classification in the 3-D color space. In the case
of ordinary images, clusters that appear in 3-D histograms
usually do not fit a well-known statistical model. For that
reason, we propose a classifier that relies on mathematical
morphology, and more precisely on the watershed algorithm.
We show on various images that the expected color clusters
are correctly identified by our method. Last, to segment
color images into coherent regions, we perform a Markovian
labeling that takes advantage of the morphological
classification results.}
}
@inproceedings{geraud.01.icisp,
author = {{\relax Th}ierry G\'eraud and Pierre-Yves Strub and
J\'er\^ome Darbon},
title = {Segmentation d'images en couleur par classification
morphologique non supervis\'ee},
booktitle = {Proceedings of the International Conference on Image and
Signal Processing (ICISP)},
year = 2001,
pages = {387--394},
address = {Agadir, Morocco},
month = may,
publisher = {Faculty of Sciences at Ibn Zohr University, Morocco},
note = {In French},
project = {Image},
urllrde = {200105-Icisp},
abstract = {In this paper, we present an original method to segment
color images using a classification of the image histogram
in the 3D color space. As color modes in natural images
usually do not fit a well-known statistical model, we
propose a classifier that relies on mathematical morphology
and, more particularly, on the watershed algorithm. We show
on various images that the expected color modes are
correctly identified and, in order to obtain coherent
regions, we extend the method to make the segmentation
contextual.}
}
@inproceedings{duret.00.gcse,
author = {Alexandre Duret-Lutz},
title = {Olena: a component-based platform for image processing,
mixing generic, generative and {OO} programming},
booktitle = {Proceedings of the 2nd International Symposium on
Generative and Component-Based Software Engineering
(GCSE)---Young Researchers Workshop; published in
``Net.ObjectDays2000''},
pages = {653--659},
year = 2000,
address = {Erfurt, Germany},
month = oct,
isbn = {3-89683-932-2},
project = {Olena},
urllrde = {200010-NetObjectDays},
abstract = {This paper presents Olena, a toolkit for programming and
designing image processing chains in which each processing
is a component. But since there exist many image types
(different structures such as 2D images, 3D images or
graphs, as well as different value types) the platform has
been designed with genericity and reusability in mind: each
component is written as a generic C++ procedure, \`a la
STL. Other libraries, such as Khoros [Kon94], have a
different approach where a processing component contains an
implementation for each type supported by the library. This
makes code maintenance hard and prevents easy addition of
new image types. Still, Olena is not only a generic
component library [Jaz95]; it shall also contain additional
tools such as a visual programming environment (VPE). Those
tools may be programmed in a classical object-oriented
fashion (using operation and inclusion polymorphism) which
may seems antagonist with the generic programming paradigm
used in the library. Section 2 outlines the architecture of
Olena and elaborates more on the design problems resulting
from the use of generic components. Section 3 presents the
solution chosen to address these problems.}
}
@inproceedings{geraud.00.icpr,
author = {{\relax Th}ierry G\'eraud and Yoann Fabre and Alexandre
Duret-Lutz and Dimitri Papadopoulos-Orfanos and
Jean-Fran\c{c}ois Mangin},
title = {Obtaining genericity for image processing and pattern
recognition algorithms},
booktitle = {Proceedings of the 15th International Conference on
Pattern Recognition (ICPR)},
year = 2000,
month = sep,
address = {Barcelona, Spain},
volume = 4,
pages = {816--819},
publisher = {IEEE Computer Society},
project = {Olena},
urllrde = {200009-Icpr},
abstract = {Algorithm libraries dedicated to image processing and
pattern recognition are not reusable; to run an algorithm
on particular data, one usually has either to rewrite the
algorithm or to manually ``copy, paste, and modify''. This
is due to the lack of genericity of the programming
paradigm used to implement the libraries. In this paper, we
present a recent paradigm that allows algorithms to be
written once and for all and to accept input of various
types. Moreover, this total reusability can be obtained
with very comprehensible code and without significant
cost at execution, compared to a dedicated algorithm. This
new paradigm is called ``generic programming'' and is fully
supported by the C++ language. We show how this paradigm
can be applied to image processing and pattern recognition
routines. The perspective of our work is the creation of a
generic library.}
}
@inproceedings{geraud.00.rfia,
author = {{\relax Th}ierry G\'eraud and Isabelle Bloch and Henri
Ma{\^\i}tre},
title = {Reconnaissance de structures c\'er\'ebrales \`a l'aide
d'un atlas et par fusion d'informations structurelles floues},
booktitle = {Actes du 12\`eme Congr\`es Francophone AFRIF-AFIA de
Reconnaissance des Formes et Intelligence Artificielle
(RFIA)},
year = 2000,
address = {Paris, France},
month = feb,
volume = 1,
pages = {287--295},
note = {EPITA as current address.},
category = {national},
project = {Image},
urllrde = {200002-RFIA},
abstract = {Nous proposons une proc\'edure automatique de
reconnaissance progressive des structures internes du
cerveau guid\'ee par un atlas anatomique. L'originalit\'e
de notre proc\'edure est multiple. D'une part, elle prend
en compte des informations structurelles sous la forme de
contraintes spatiales flexibles, en utilisant les
formalismes de la th\'eorie des ensembles flous et de la
fusion d'informations. D'autre part, le calcul de la
correspondance entre volume IRM et atlas que nous proposons
permet d'inf\'erer un champ de d\'eformations discret,
respectant des contraintes sur la surface des objets.
Enfin, le caract\`ere s\'equentiel de la proc\'edure permet
de s'appuyer sur la connaissance des objets d\'ej\`a
segment\'es pour acc\'eder \`a des objets dont l'obtention
est a priori de plus en plus difficile.}
}
@techreport{clouard.99.tr,
author = {R\'egis Clouard and Abderrahim Elmoataz and Fran\c{c}ois
Angot and Olivier Lezoray and Alexandre Duret-Lutz},
title = {Une biblioth\`eque et un environnement de programmation
d'op\'erateurs de traitement d'images},
institution = {GREYC-ISMRA},
year = 1999,
number = 99008,
address = {Caen, France},
month = nov,
url = {http://www.greyc.ismra.fr/~regis/Pandore/},
project = {Olena},
urllrde = {199911-TR}
}
@inproceedings{geraud.99.cimaf,
author = {{\relax Th}ierry G\'eraud and Isabelle Bloch and Henri
Ma\^{\i}tre},
title = {Atlas-guided recognition of cerebral structures in {MRI}
using fusion of fuzzy structural information},
booktitle = {Proceedings of the CIMAF Symposium on Artificial Intelligence},
pages = {99--106},
year = 1999,
address = {La Havana, Cuba},
note = {EPITA as current address.},
project = {Image},
urllrde = {199900-CIMAF}
}
@inproceedings{geraud.99.gretsi,
author = {{\relax Th}ierry G\'eraud and Yoann Fabre and Dimitri
Papadopoulos-Orfanos and Jean-Fran\c{c}ois Mangin},
title = {Vers une r\'eutilisabilit\'e totale des algorithmes de
traitement d'images},
booktitle = {Proceedings of the 17th Symposium on Signal and Image
Processing (GRETSI)},
category = {national},
pages = {331--334},
volume = 2,
year = 1999,
address = {Vannes, France},
month = sep,
note = {In French},
project = {Olena},
urllrde = {199909-Gretsi},
abstract = {Cet article pr\'esente l'\'evolution des techniques de
programmation d'algorithmes de traitement d'images et
discute des limites de la r\'eutilisabilit\'e de ces
algorithmes. En particulier, nous montrons qu'en C++ un
algorithme peut s'\'ecrire sous une forme g\'en\'erale,
ind\'ependante aussi bien du type des donn\'ees que du type
des structures de donn\'ees sur lesquelles il peut
s'appliquer. Une r\'eutilisabilit\'e totale des algorithmes
peut donc \^etre obtenue ; mieux, leur \'ecriture est plus
naturelle et elle n'introduit pas de surco\^ut significatif
en temps d'ex\'ecution.}
}