<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE article
  PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.0 20120330//EN" "http://jats.nlm.nih.gov/publishing/1.0/JATS-journalpublishing1.dtd">
<article article-type="research-article" dtd-version="1.0" specific-use="sps-1.6" xml:lang="en" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink">
	<front>
		<journal-meta>
			<journal-id journal-id-type="publisher-id">dyna</journal-id>
			<journal-title-group>
				<journal-title>DYNA</journal-title>
				<abbrev-journal-title abbrev-type="publisher">Dyna rev.fac.nac.minas</abbrev-journal-title>
			</journal-title-group>
			<issn pub-type="ppub">0012-7353</issn>
			<publisher>
				<publisher-name>Universidad Nacional de Colombia</publisher-name>
			</publisher>
		</journal-meta>
		<article-meta>
			<article-id pub-id-type="doi">10.15446/dyna.v84n202.63389</article-id>
			<article-categories>
				<subj-group subj-group-type="heading">
					<subject>Artículos</subject>
				</subj-group>
			</article-categories>
			<title-group>
				<article-title>Evaluating supervised learning approaches for spatial-domain multi-focus image fusion</article-title>
				<trans-title-group xml:lang="es">
					<trans-title>Evaluando aproximaciones basadas en aprendizaje supervisado para la fusión en el dominio espacial de imágenes multi-foco</trans-title>
				</trans-title-group>
			</title-group>
			<contrib-group>
				<contrib contrib-type="author">
					<name>
						<surname>Atencio-Ortiz</surname>
						<given-names>Pedro</given-names>
					</name>
					<xref ref-type="aff" rid="aff1"><sup><italic>a</italic></sup></xref>
				</contrib>
				<contrib contrib-type="author">
					<name>
						<surname>Sanchez-Torres</surname>
						<given-names>German</given-names>
					</name>
					<xref ref-type="aff" rid="aff2"><sup><italic>b</italic></sup></xref>
				</contrib>
				<contrib contrib-type="author">
					<name>
						<surname>Branch-Bedoya</surname>
						<given-names>John William</given-names>
					</name>
					<xref ref-type="aff" rid="aff3"><sup><italic>c</italic></sup></xref>
				</contrib>
			</contrib-group>
			<aff id="aff1">
				<label>a</label>
				<institution content-type="original"> Facultad de Ingenierías, Instituto Tecnológico Metropolitano, Medellín, Colombia. pedroatencio@itm.edu.co</institution>
				<institution content-type="normalized">Instituto Tecnológico Metropolitano</institution>
				<institution content-type="orgdiv1">Facultad de Ingenierías</institution>
				<institution content-type="orgname">Instituto Tecnológico Metropolitano</institution>
				<addr-line>
					<named-content content-type="city">Medellín</named-content>
				</addr-line>
				<country country="CO">Colombia</country>
				<email>pedroatencio@itm.edu.co</email>
			</aff>
			<aff id="aff2">
				<label>b</label>
				<institution content-type="original"> Facultad de Ingenierías, Universidad del Magdalena, Santa Marta, Colombia. gsanchez@unimagdalena.edu.co</institution>
				<institution content-type="normalized">Universidad del Magdalena</institution>
				<institution content-type="orgdiv1">Facultad de Ingenierías</institution>
				<institution content-type="orgname">Universidad del Magdalena</institution>
				<addr-line>
					<named-content content-type="city">Santa Marta</named-content>
				</addr-line>
				<country country="CO">Colombia</country>
				<email>gsanchez@unimagdalena.edu.co</email>
			</aff>
			<aff id="aff3">
				<label>c</label>
				<institution content-type="original"> Facultad de Minas, Universidad Nacional de Colombia, Medellín, Colombia. jwbranch@unal.edu.co</institution>
				<institution content-type="normalized">Universidad Nacional de Colombia</institution>
				<institution content-type="orgdiv1">Facultad de Minas</institution>
				<institution content-type="orgname">Universidad Nacional de Colombia</institution>
				<addr-line>
					<named-content content-type="city">Medellín</named-content>
				</addr-line>
				<country country="CO">Colombia</country>
				<email>jwbranch@unal.edu.co</email>
			</aff>
			<pub-date pub-type="epub-ppub">
				<season>Jul-Sep</season>
				<year>2017</year>
			</pub-date>
			<volume>84</volume>
			<issue>202</issue>
			<fpage>137</fpage>
			<lpage>146</lpage>
			<history>
				<date date-type="received">
					<day>20</day>
					<month>03</month>
					<year>2017</year>
				</date>
				<date date-type="rev-recd">
					<day>05</day>
					<month>07</month>
					<year>2017</year>
				</date>
				<date date-type="accepted">
					<day>25</day>
					<month>07</month>
					<year>2017</year>
				</date>
			</history>
			<permissions>
				<license license-type="open-access" xlink:href="http://creativecommons.org/licenses/by-nc-nd/4.0/" xml:lang="en">
					<license-p>This is an open-access article distributed under the terms of the Creative Commons Attribution License</license-p>
				</license>
			</permissions>
			<abstract>
				<title>Abstract</title>
				<p>Image fusion is the generation of an image <italic>f</italic> that combines the most relevant information from a set of images of the same scene, acquired with different cameras or camera settings. Multi-Focus Image Fusion (MFIF) aims to generate an image <italic>f</italic><sub><italic>e</italic></sub> with extended depth-of-field from a set of images taken at different focal distances or focal planes, and it proposes a solution to the typical limited depth-of-field problem in an optical system configuration. A broad variety of works presented in the literature address this problem. The primary approaches found there are domain transformations and block-of-pixels analysis. In this work, we evaluate different systems of supervised machine learning applied to MFIF, including k-nearest neighbors, linear discriminant analysis, neural networks, and support vector machines. We started from two images at different focal distances and divided them into rectangular regions. The main objective of the machine-learning-based classification system is to choose the parts of both images that must be in the fused image in order to obtain a completely focused image. For focus quantification, we used the most popular metrics proposed in the literature, such as: Laplacian energy, sum-modified Laplacian, and gradient energy, among others. The evaluation of the proposed method considered classifier testing and fusion quality metrics commonly used in research, such as visual information fidelity and mutual information feature. Our results strongly suggest that the automatic classification concept satisfactorily addresses the MFIF problem.</p>
			</abstract>
			<trans-abstract xml:lang="es">
				<title>Resumen</title>
				<p>La fusión de imágenes genera una imagen <italic>f</italic> que combina las características más relevantes de un conjunto de imágenes de la misma escena adquiridas con diferentes cámaras o configuraciones. La Fusión de Imágenes Multifoco (MFIF) parte de un conjunto de imágenes con diferente distancia focal para generar una imagen <italic>f</italic><sub><italic>e</italic></sub> con una profundidad de campo extendida. Lo que constituye una solución al problema de la profundidad de campo limitada en la configuración de un sistema óptico. La literatura muestra una amplia variedad de trabajos que abordan este problema. Las transformaciones de dominios y el análisis de bloques de píxeles son la base de los principales enfoques propuestos. En este trabajo se presenta una evaluación de diferentes sistemas de aprendizaje supervisado aplicados a MFIF, incluyendo k-vecinos más cercanos, análisis discriminante lineal, redes neuronales y máquinas de soporte vectorial. El método inicia con dos imágenes de la misma escena, pero con diferentes distancias focales que se dividen en regiones rectangulares. El objetivo principal del sistema de clasificación, que está basado en aprendizaje de máquina, es elegir las partes de ambas imágenes que deben estar en la imagen fusionada para obtener una imagen completamente enfocada. Para la cuantificación del enfoque se utilizaron las métricas más populares propuestas en la literatura como: la Energía Laplaciana, el Laplaciano Modificado por Suma y el Gradiente de Energía, entre otras. La evaluación del método propuesto incluye la fase de prueba de los clasificadores y las métricas de calidad de fusión utilizadas comúnmente en la investigación, tales como la fidelidad de la información visual y la característica de información mutua. Los resultados muestran que el concepto de clasificación automática puede abordar satisfactoriamente el problema MFIF.</p>
			</trans-abstract>
			<kwd-group xml:lang="en">
				<title>Keywords:</title>
				<kwd>Multi-focus image fusion</kwd>
				<kwd>image processing</kwd>
				<kwd>supervised learning</kwd>
				<kwd>machine learning.</kwd>
			</kwd-group>
			<kwd-group xml:lang="es">
				<title>Palabras clave:</title>
				<kwd>Fusión de imágenes multifoco</kwd>
				<kwd>procesamiento de imágenes</kwd>
				<kwd>aprendizaje supervisado</kwd>
				<kwd>aprendizaje de máquina.</kwd>
			</kwd-group>
			<counts>
				<fig-count count="8"/>
				<table-count count="3"/>
				<equation-count count="26"/>
				<ref-count count="40"/>
				<page-count count="10"/>
			</counts>
		</article-meta>
	</front>
	<body>
		<sec sec-type="intro">
			<title>1. Introduction</title>
			<p>Traditional optical systems are limited by focus range, which means that not all objects in a scene appear clearly defined [<xref ref-type="bibr" rid="B1">1</xref>], so only the objects within the field of depth of the camera are focused and are perceived clearly, while the rest of the scene is blurred [<xref ref-type="bibr" rid="B2">2</xref>]. Thus, to generate an image with all objects adequately focused, the academic community used the idea of forming a synthetic image by fusing a set of images of the same scene. This is called Multi-Focus Image Fusion (MFIF). The MFIF process consists of merging multiple images with different focal planes to generate an image in which all objects appear sharp [<xref ref-type="bibr" rid="B3">3</xref>] without the introduction of any artifacts.</p>
			<p>The application of MFIF is widespread and used to solve problems related to three-dimensional reconstruction [<xref ref-type="bibr" rid="B2">2</xref>,<xref ref-type="bibr" rid="B4">4</xref>], mobile image processing, microscopic imaging [<xref ref-type="bibr" rid="B5">5</xref>], and computer vision [<xref ref-type="bibr" rid="B6">6</xref>], among others.</p>
			<p>To generate an extended-focus image from a sequence of partially focused images of the same scene, digital image descriptors called focus measures [<xref ref-type="bibr" rid="B7">7</xref>] are used in different approaches. </p>
			<p>MFIF is generally divided into two major types of methods [<xref ref-type="bibr" rid="B1">1</xref>,<xref ref-type="bibr" rid="B8">8</xref>-<xref ref-type="bibr" rid="B9">9</xref>]:</p>
			<p>
				<list list-type="bullet">
					<list-item>
						<p>spatial-domain image fusion methods;</p>
					</list-item>
					<list-item>
						<p>frequency-domain image fusion methods.</p>
					</list-item>
				</list>
			</p>
			<p>The spatial-domain methods use measurable characteristics of spatial information of image pixels. They estimate these measures pixel by pixel or by using pixel sets [<xref ref-type="bibr" rid="B10">10</xref>,<xref ref-type="bibr" rid="B11">11</xref>]. The principal advantages of these methods are that they are easy to implement and require low computational complexity. However, they do require rich texture information in an image to generate good results and are usually weak in smooth image regions. Furthermore, grouping of pixels presents difficulties related to the correct determination of group size for quality maximization and to the presence of artifacts generated on border blocks [<xref ref-type="bibr" rid="B3">3</xref>]. </p>
			<p>Various approaches within the spatial-domain methods of MFIF have been applied. For example, K.L. Hua et al. [<xref ref-type="bibr" rid="B11">11</xref>] used random walks on graphs created from several feature sets of focus measures and color consistency to model local and global characteristics. This approach estimated the measures in each pixel locally for each input image and used them to maximize the global focus score and color consistency. On the other hand, a method based on sparse feature matrix decomposition using morphological filtering to extract salient features of original input images, was proposed in [<xref ref-type="bibr" rid="B12">12</xref>]. They used a pixel-wise methodology to fuse each sparse feature matrix estimation based on morphological filtering to generate the fused image.</p>
			<p>Some works use hierarchical structures called QuadTree for recursively partitioning the pixel space of the image and decompose the input images into blocks of variable size [<xref ref-type="bibr" rid="B13">13</xref>,<xref ref-type="bibr" rid="B14">14</xref>]. Using a focus measure based on the sum-modified-Laplacian (SML), the method detects the focused regions. So, the resulting image is generated using the focused regions of input images. The main problem with partitioned methods is determination of the correct block size. Avoiding defocused regions in large regions or small blocks with low-contrast variance is the main challenge of QuadTree [<xref ref-type="bibr" rid="B14">14</xref>]. Similarly, the accuracy of the partitioning depends on searching deep in the tree, which has a direct impact on the computational cost. Other graph-based works are found in [<xref ref-type="bibr" rid="B15">15</xref>,<xref ref-type="bibr" rid="B16">16</xref>].</p>
			<p>Another approach for dealing with the selection of focused regions in images is the segmentation approach. S. Li et al. [<xref ref-type="bibr" rid="B17">17</xref>] proposed a method that uses morphological filtering for rough segmentation of the images based on an initially estimated focus map. The method then uses the <italic>image matting</italic> technique to refine the segmentation results and a merging process to generate the final image. Another segmentation approach was proposed by M. Nejati et al. [<xref ref-type="bibr" rid="B18">18</xref>], based on a training and a testing phase. The training phase constructs a dictionary using focus information maps from local patches of source images. Each pixel from each input image is classified as in focus or not in focus. The final image results from pixel fusion according to a decision map that indicates which source image must be used to obtain the pixel intensity value. Overall, although the segmentation/optimization-based methods applied to the problem of image fusion generate adequate results, they involve a high computational cost. </p>
			<p>Frequency-domain image fusion methods transform input images into a frequency domain representation where they are combined. One approach within this category is based on multi-scale decomposition. This is the most commonly reported approach to MFIF. For example, a shift and rotation invariant pyramid representation called Steerable Pyramid was applied by Z. Lin et al. [<xref ref-type="bibr" rid="B22">22</xref>]. Other frequency representations reported are Discrete Wavelet Transform [<xref ref-type="bibr" rid="B7">7</xref>,<xref ref-type="bibr" rid="B23">23</xref>] and Robust Principal Component Analysis [<xref ref-type="bibr" rid="B6">6</xref>]. Frequency-domain image fusion methods can be applied to multi-focus and multi-modal images with acceptable behavior. However, they add some noise level and cannot guarantee fidelity of the input image in the final image. </p>
			<p>The MFIF problem can be stated as a classification problem where the classifier must decide in which source image the pixel or region has a high focus measure. J. Saeedi and K. Faez proposed a wavelet-based MFIF method which used a two-class Fisher classifier to group the regions into focused and defocused ones [<xref ref-type="bibr" rid="B19">19</xref>]. To reduce the number of misclassified regions due to uncertainty, they included a fuzzy logic algorithm. On the other hand, some works reported the use of other types of classifiers, such as neural networks, to tackle the MFIF problem [<xref ref-type="bibr" rid="B20">20</xref>,<xref ref-type="bibr" rid="B21">21</xref>].</p>
			<p>The aim of this study is to compare various classification approaches to the MFIF problem. We selected four popular focus measures and proposed a new one based on morphological features; then we used these as classifier inputs.</p>
			<p>This paper is organized as follows. In Section 2, we present the methodology used in this study, including the data and vector feature selection, training, and fusion stages. Section 3 presents the results obtained and comparisons. Finally, Section 4 concludes with a summary and future works.</p>
		</sec>
		<sec sec-type="methods">
			<title>2. Methodology</title>
			<p>We establish the MFIF as a classification problem where the aim is to process a pair of input images, labeling their regions as focused and defocused, in order to build up a final image that merges the focused regions. The methodology begins by selecting the image set used for the training and testing stages. After the image set was defined, we selected focus measures reported in the literature and machine-learning-based classifiers. The training process and the image fusion were the next methodological stages. Finally, we carried out an evaluation step to compare the behaviors of the selected classifiers. <xref ref-type="fig" rid="f1">Figure 1</xref> shows a block diagram of the methodology used.</p>
			<p>
				<fig id="f1">
					<label>Figure 1</label>
					<caption>
						<title>Blocks diagram of used methodology. </title>
					</caption>
					<graphic xlink:href="0012-7353-dyna-84-202-00137-gf1.png"/>
					<attrib><bold>Source:</bold> The authors.</attrib>
				</fig>
			</p>
			<sec>
				<title>2.1. Image-set selection and rectangular segmentation</title>
				<p>Our initial set of images consisted of 30 pairs of multifocus images taken from public datasets [<xref ref-type="bibr" rid="B18">18</xref>,<xref ref-type="bibr" rid="B24">24</xref>] and a set of images of our own acquired in the laboratory. Every pair consisted of two images: a near-focused one and a far-focused one. Then, using rectangular cropping, a subset of 830 images was constructed and associated with a binary tag (<italic>focused:1, defocused:0</italic>) by human judgment. <xref ref-type="fig" rid="f2">Figure 2</xref> illustrates this process.</p>
				<p>
					<fig id="f2">
						<label>Figure 2</label>
						<caption>
							<title>The scheme used to form the subset of images. </title>
						</caption>
						<graphic xlink:href="0012-7353-dyna-84-202-00137-gf2.png"/>
					</fig>
				</p>
			</sec>
			<sec>
				<title>2.2. Focus measures and feature vector formation</title>
				<p>In the MFIF context, a focus image operator is a local metric that quantifies the quality of focus in an image region. Ideally, when the region is perfectly focused, these operators must generate a maximum value [<xref ref-type="bibr" rid="B25">25</xref>] that decreases in a similar way when the image becomes blurred. Many focus metrics have been proposed by the scientific community. A typical focus metric should satisfy the following requirements [<xref ref-type="bibr" rid="B7">7</xref>]:</p>
				<p>
					<list list-type="bullet">
						<list-item>
							<p>independence of image content</p>
						</list-item>
						<list-item>
							<p>monotonicity of blur</p>
						</list-item>
						<list-item>
							<p>unimodality (only one maximum value)</p>
						</list-item>
						<list-item>
							<p>value variation according to degree of blurring </p>
						</list-item>
						<list-item>
							<p>minimal computation complexity</p>
						</list-item>
						<list-item>
							<p>robustness to noise</p>
						</list-item>
					</list>
				</p>
				<p>We selected four focus measures frequently used in reported works [<xref ref-type="bibr" rid="B7">7</xref>] and proposed a new focus measure based on morphological features. </p>
				<p>Let us consider 𝑓(𝑥,𝑦), the intensity level of a pixel (𝑥,𝑦). The selected focus measures are defined below:</p>
				<sec>
					<title>2.2.1. Energy of Laplacian</title>
					<p>The Energy of Laplacian (EOL) of an image 𝑓 is computed as:</p>
					<p>
						<disp-formula id="e1">
							<graphic xlink:href="0012-7353-dyna-84-202-00137-e1.png"/>
						</disp-formula>
					</p>
					<p>Where</p>
					<p>
						<disp-formula id="e2">
							<graphic xlink:href="0012-7353-dyna-84-202-00137-e2.jpg"/>
						</disp-formula>
					</p>
				</sec>
				<sec>
					<title>2.2.2. Sum-modified Laplacian</title>
					<p>The modified Laplacian is a proposal to avoid the cancellation trend of the second derivative in the EOL basic definition [<xref ref-type="bibr" rid="B25">25</xref>]. So, the SML is defined by equations (3) and (4):</p>
					<p>
						<disp-formula id="e3">
							<graphic xlink:href="0012-7353-dyna-84-202-00137-e3.jpg"/>
						</disp-formula>
					</p>
					<p>where 𝛽 is a spacing parameter addressing the accommodation of texture variation in the image and set to 𝛽=1. </p>
					<p>
						<disp-formula id="e4">
							<graphic xlink:href="0012-7353-dyna-84-202-00137-e4.jpg"/>
						</disp-formula>
					</p>
					<p>where 𝑇 is a discrimination threshold value and 𝑁 is the window size.</p>
				</sec>
				<sec>
					<title>2.2.3. Energy of image gradient</title>
					<p>This operator is based on the concept of determining the local high-frequency variations. The Energy of Gradient (EOG) can be computed as:</p>
					<p>
						<disp-formula id="e5">
							<graphic xlink:href="0012-7353-dyna-84-202-00137-e5.png"/>
						</disp-formula>
					</p>
					<p>Where</p>
					<p>
						<disp-formula id="e6">
							<graphic xlink:href="0012-7353-dyna-84-202-00137-e6.png"/>
						</disp-formula>
					</p>
					<p>
						<disp-formula id="e7">
							<graphic xlink:href="0012-7353-dyna-84-202-00137-e7.png"/>
						</disp-formula>
					</p>
				</sec>
				<sec>
					<title>2.2.4. Spatial Frequency</title>
					<p>Spatial frequency (SF) is a modification of the energy of image gradient operator. It is defined as:</p>
					<p>
						<disp-formula id="e8">
							<graphic xlink:href="0012-7353-dyna-84-202-00137-e8.png"/>
						</disp-formula>
					</p>
					<p>where RF and CF are the row and column frequency respectively.</p>
					<p>
						<disp-formula id="e9">
							<graphic xlink:href="0012-7353-dyna-84-202-00137-e9.jpg"/>
						</disp-formula>
					</p>
					<p>
						<disp-formula id="e10">
							<graphic xlink:href="0012-7353-dyna-84-202-00137-e10.jpg"/>
						</disp-formula>
					</p>
				</sec>
				<sec>
					<title>2.2.5. Energy of morphological features</title>
					<p>We propose a new focus measure based on the metric defined in [<xref ref-type="bibr" rid="B17">17</xref>] and the concept of the energy of the image. We named this focus measure as the Energy of Morphological Features (EMF). We used a combination of bottom-hat and top-hat operations to first extract salient local features and then calculate the sum of maximum values for a window.</p>
					<p>
						<disp-formula id="e11">
							<graphic xlink:href="0012-7353-dyna-84-202-00137-e11.jpg"/>
						</disp-formula>
					</p>
					<p>Let T and B be top-hat and bottom-hat morphological operations, respectively, centered at the pixel with coordinates (i,j). We describe the EMF of a local window W of an input image I. Finally, we build up a feature vector 𝛾 composed of the ratio of each focus measure of input images A and B. <xref ref-type="fig" rid="f3">Figure 3</xref> shows the structure of the feature vector.</p>
					<p>
						<fig id="f3">
							<label>Figure 3</label>
							<caption>
								<title>Vector feature structure. </title>
							</caption>
							<graphic xlink:href="0012-7353-dyna-84-202-00137-gf3.png"/>
							<attrib><bold>Source:</bold> The authors.</attrib>
						</fig>
					</p>
				</sec>
			</sec>
			<sec>
				<title>2.3. Classifier selection</title>
				<p>Because the MFIF has been established as a classification problem, we selected the most representative classification approaches in the literature: Linear Discriminant Analysis (LDA) [<xref ref-type="bibr" rid="B26">26</xref>], naïve Bayes [<xref ref-type="bibr" rid="B27">27</xref>-<xref ref-type="bibr" rid="B29">29</xref>], k-nearest neighbors (k-NN) [<xref ref-type="bibr" rid="B30">30</xref>], random forest, multilayer perceptron (MLP) [<xref ref-type="bibr" rid="B31">31</xref>], and support vector machine (SVM) [<xref ref-type="bibr" rid="B32">32</xref>]. The two classes for region classification were focused and defocused regions, represented as a binary tag.</p>
			</sec>
			<sec>
				<title>2.4. Training procedure</title>
				<p>We created a binary classification dataset using the previously created subset of images with binary tags and the feature vector formation scheme described in Section 2.2.</p>
				<p>With this dataset, each binary classifier was trained and tested using cross-validation. Then, the best score classifier of each classification technique was saved for the posterior fusion scheme.</p>
				<p>For this purpose, the Python with Scikit-learn [<xref ref-type="bibr" rid="B33">33</xref>] and PyBrain [<xref ref-type="bibr" rid="B34">34</xref>] libraries were used to configure, train, and test different classifiers.</p>
			</sec>
			<sec>
				<title>2.5. Fusion scheme</title>
				<p>The fusion scheme proposed in this work is composed of two main stages. In the first stage, a binary mask is generated with information about which regions of both images have high focus (<xref ref-type="fig" rid="f4">Fig. 4</xref>). In the second stage, the fused image is obtained using input images 𝐴 and 𝐵 and the binary mask obtained in the first stage (<xref ref-type="fig" rid="f5">Fig. 5</xref>).</p>
				<p>
					<fig id="f4">
						<label>Figure 4</label>
						<caption>
							<title>Scheme of the generation of binary mask 𝑍 in stage 1. </title>
						</caption>
						<graphic xlink:href="0012-7353-dyna-84-202-00137-gf4.png"/>
						<attrib><bold>Source:</bold> The authors.</attrib>
					</fig>
				</p>
				<p>
					<fig id="f5">
						<label>Figure 5</label>
						<caption>
							<title>Scheme of stage 2 for the generation of the fused image F. </title>
						</caption>
						<graphic xlink:href="0012-7353-dyna-84-202-00137-gf5.jpg"/>
						<attrib><bold>Source:</bold> The authors.</attrib>
					</fig>
				</p>
				<sec>
					<title>2.5.1. Stage 1 - Binary mask generation</title>
					<p>The first stage begins by moving a rectangular window over a pair of input images and iteratively describing each window as a feature vector. Then, the ratio 𝛾 of these two vectors (<xref ref-type="fig" rid="f3">Fig. 3</xref>) is used as an input for a trained classifier which returns a binary label (focused:1, defocused:0) for the highest focus in image 𝐴 or 𝐵 respectively. This binary label is used to build a binary mask 𝑍 of resolution equal to the moving window size. This mask contains information about the highly focused regions in both input images. </p>
					<p>
						<disp-formula id="e12">
							<graphic xlink:href="0012-7353-dyna-84-202-00137-e12.jpg"/>
						</disp-formula>
					</p>
					<p>Equation (21) describes stage 1, where 𝑍 is the binary mask matrix, the range ( 𝑥 0 : 𝑥 1 , 𝑦 0 : 𝑦 1 ) is the image portion of the sliding window, 𝐶 is the binary classifier, and 𝛾 𝑥 0 : 𝑥 1 , 𝑦 0 : 𝑦 1 is the ratio of the feature vector of input images 𝐴 and 𝐵 for the sliding window in the range ( 𝑥 0 : 𝑥 1 , 𝑦 0 : 𝑦 1 ).</p>
					<p>A scheme of the generation of the binary mask 𝑍 in stage 1 is shown in <xref ref-type="fig" rid="f4">Figure 4</xref>.</p>
				</sec>
				<sec>
					<title>2.5.2. Stage 2 - Image fusion</title>
					<p>It is possible to generate a fused image using the binary mask obtained in stage 1, but because of the rectangular nature of the moving window, a high number of artifacts can be generated in regions containing borders of objects (see the rough behavior of borders and holes in 𝑍 in <xref ref-type="fig" rid="f4">Figure 4</xref>). An improved result can be obtained by smoothing the binary mask.</p>
					<p>This stage begins by applying the morphological operation close to group individual pixels and fill holes in the binary mask. Then a low-pass filter (median blur) is applied to delete random noise and smooth the borders of the binary mask. Both operations used rectangular kernels of the same size. Finally, using the smoothed binary mask 𝑍 ′ , input images 𝐴 and 𝐵 are cropped to generate the fused image 𝐹. This process is illustrated in <xref ref-type="fig" rid="f5">Figure 5</xref>.</p>
					<p>
						<disp-formula id="e13">
							<graphic xlink:href="0012-7353-dyna-84-202-00137-e13.png"/>
						</disp-formula>
					</p>
					<p>Equation (22) describes stage 2, where 𝐹 is the image fused using the proposed scheme, 𝐴 and 𝐵 are the input images, 𝑍 𝐴 ′ and 𝑍 𝐵 ′ are the pixels of the smoothed mask belonging to images 𝐴 and 𝐵 respectively, and × is an element-wise multiplication of two matrices.</p>
				</sec>
			</sec>
			<sec>
				<title>2.6. Fusion quality measure</title>
				<p>The fusion quality metric reflects the quality of visual information of a fused image obtained from a set of input images [<xref ref-type="bibr" rid="B35">35</xref>]. Evaluation of image fusion algorithms has become an important issue due to the different complexity characteristics used by several proposed approaches. Typically, the way in which the quality of the fused images is measured is by the mean of experts who score them. This approach does not offer a general way to evaluate approaches automatically and implies a costly effort. However, for objective evaluation of fusion results, we use three fusion-quality metrics including Visual Information Fidelity for Fusion (VIFF) [<xref ref-type="bibr" rid="B36">36</xref>], Petrovic’s metric based on edge information ( 𝑄 𝐴𝐵/𝐹 ) [<xref ref-type="bibr" rid="B35">35</xref>], and Feature Mutual Information (FMI) [<xref ref-type="bibr" rid="B37">37</xref>].</p>
				<sec>
					<title>2.6.1. Visual information fidelity for fusion</title>
					<p>VIFF is founded on the Visual Information Fidelity (VIF) quality metric based on Natural Scene Statistics theory, which measures the visual information by computing mutual information between different models estimated from images. These models are in the wavelet domain and include Gaussian Scale Mixture, the distortion model, and the Human Visual System.</p>
					<p>The common procedure for VIF estimation is to divide the images into 𝑘 sub-bands, each of which is divided in turn into 𝑏 blocks. The mutual information between the different models is estimated and VIF can be stated as:</p>
					<p>
						<disp-formula id="e14">
							<graphic xlink:href="0012-7353-dyna-84-202-00137-e14.jpg"/>
						</disp-formula>
					</p>
				</sec>
				<sec>
					<title>2.6.2. Edge-based fusion performance</title>
					<p>This metric is based on the idea of quantifying the important information preserved in the fused image. The important information is associated with edge information, and therefore the metric measures the amount of edge information transferred from input images to the fused image, using a Sobel operator to get the relative edge strength and orientation between the input and fused images [<xref ref-type="bibr" rid="B35">35</xref>]. </p>
					<p>Let 𝐴 be an input image and let the pixel 𝐴(𝑖,𝑗) have an edge strength 𝑔 𝐴 (𝑖,𝑗) and orientation 𝛼 𝐴 (𝑖,𝑗) defined as:</p>
					<p>
						<disp-formula id="e15">
							<graphic xlink:href="0012-7353-dyna-84-202-00137-e15.jpg"/>
						</disp-formula>
					</p>
					<p>
						<disp-formula id="e16">
							<graphic xlink:href="0012-7353-dyna-84-202-00137-e16.jpg"/>
						</disp-formula>
					</p>
					<p>where 𝑆 𝐴 𝑥 (𝑖,𝑗) and 𝑆 𝐴 𝑦 (𝑖,𝑗) are the result of applying a horizontal and vertical Sobel template centered on 𝐴(𝑖,𝑗). The relative edge strength (𝐺 𝐴𝐹 ) and orientation ( ∆ 𝐴𝐹 ) of the image A with respect to a fused image are defined as:</p>
					<p>
						<disp-formula id="e17">
							<graphic xlink:href="0012-7353-dyna-84-202-00137-e17.jpg"/>
						</disp-formula>
					</p>
					<p>
						<disp-formula id="e18">
							<graphic xlink:href="0012-7353-dyna-84-202-00137-e18.png"/>
						</disp-formula>
					</p>
					<p>The edge strength and orientation preservation values can be derived by:</p>
					<p>
						<disp-formula id="e19">
							<graphic xlink:href="0012-7353-dyna-84-202-00137-e19.jpg"/>
						</disp-formula>
					</p>
					<p>
						<disp-formula id="e20">
							<graphic xlink:href="0012-7353-dyna-84-202-00137-e20.jpg"/>
						</disp-formula>
					</p>
					<p>where the constants Γ 𝑔 , 𝑘 𝑔 , 𝜎 𝑔 , Γ 𝛼 , 𝑘 𝛼 , and 𝜎 𝛼 determine the shape of sigmoid functions used to form the edge strength and orientation preservation value [<xref ref-type="bibr" rid="B35">35</xref>].</p>
					<p>The edge preservation value is defined as:</p>
					<p>
						<disp-formula id="e21">
							<graphic xlink:href="0012-7353-dyna-84-202-00137-e21.png"/>
						</disp-formula>
					</p>
					<p>Finally, for input images 𝐴 and 𝐵, the final weighted performance measure 𝑄 𝐴𝐵/𝐹 with respect to the fused image 𝐹 is estimated as:</p>
					<p>
						<disp-formula id="e22">
							<graphic xlink:href="0012-7353-dyna-84-202-00137-e22.png"/>
						</disp-formula>
					</p>
				</sec>
				<sec>
					<title>2.6.3. Feature mutual information</title>
					<p>Mutual Information (MI) is derived from information theory and quantifies the amount of information obtained about one variable from another variable. Thus, FMI quantifies the amount of image features transferred from the source images into the fused image. Gradient maps represent the images’ features because they contain information about edges, directions, texture, contrast, and pixel neighborhoods [<xref ref-type="bibr" rid="B37">37</xref>]. Like classic approaches used to estimate MI, FMI estimation is based on the calculation of the joint probability distribution functions. Assuming the intensity pixels of the fused image 𝐹 𝑥,𝑦 and input images 𝐴(𝑧,𝑤) and 𝐵(𝑧,𝑤), the methods use the normalized values of gradient magnitude image features, like marginal distributions, and therefore the amount of feature information in the fused image 𝐹 from input images 𝐴 and 𝐵 is given by [<xref ref-type="bibr" rid="B37">37</xref>]:</p>
					<p>
						<disp-formula id="e23">
							<graphic xlink:href="0012-7353-dyna-84-202-00137-e23.jpg"/>
						</disp-formula>
					</p>
					<p>
						<disp-formula id="e24">
							<graphic xlink:href="0012-7353-dyna-84-202-00137-e24.jpg"/>
						</disp-formula>
					</p>
					<p>where 𝑃 𝐹𝐴 and 𝑃 𝐹𝐵 are the joint distributions between the fused image and each input image. 𝑝 𝐹 , 𝑝 𝐴 , and 𝑝 𝐵 are the marginal distributions. The FMI is defined as:</p>
					<p>
						<disp-formula id="e25">
							<graphic xlink:href="0012-7353-dyna-84-202-00137-e25.png"/>
						</disp-formula>
					</p>
					<p>Based on [<xref ref-type="bibr" rid="B37">37</xref>-<xref ref-type="bibr" rid="B39">39</xref>], the normalized FMI can be obtained as:</p>
					<p>
						<disp-formula id="e26">
							<graphic xlink:href="0012-7353-dyna-84-202-00137-e26.png"/>
						</disp-formula>
					</p>
					<p>where 𝐻 𝐹 , 𝐻 𝐴 , and 𝐻 𝐵 are the histogram-based entropies of the images 𝐹, 𝐴, and 𝐵.</p>
				</sec>
			</sec>
		</sec>
		<sec sec-type="results">
			<title>3. Experiments and results</title>
			<p>In this section, we present detailed experimental settings used to evaluate the performance of the classification approach for MFIF from two perspectives: classifier performance and fusion quality. The first evaluates the learning ability of a classifier to decide whether or not an image is focused. The second measures the fusion result generated by a classifier of the information contained in the input images. Finally, we discuss the results exhibited.</p>
			<sec>
				<title>3.1. Classifier performance</title>
				<p>We selected five binary classifiers and trained them with the parameters shown in <xref ref-type="table" rid="t1">Table 1</xref> and a dataset of 830 images labeled as binary for focused or defocused cases.</p>
				<p>
					<table-wrap id="t1">
						<label>Table 1</label>
						<caption>
							<title>Parameters used for training binary classifiers </title>
						</caption>
						<graphic xlink:href="0012-7353-dyna-84-202-00137-gt1.jpg"/>
						<table-wrap-foot>
							<fn id="TFN1">
								<p><bold>Source:</bold> The authors</p>
							</fn>
						</table-wrap-foot>
					</table-wrap>
				</p>
				<p>Training parameters were obtained experimentally, selecting the set of parameters that allowed us to obtain the best results.</p>
				<p>For each classifier, we progressively changed the dataset size and used the k-fold cross-validation technique [<xref ref-type="bibr" rid="B40">40</xref>] with 𝑘=10 in each iteration, to measure the learning and test precision/error. This allowed us to evaluate the performance variation (standard deviation) of each classifier. <xref ref-type="table" rid="t2">Table 2</xref> shows the mean and standard deviation of precision of training and testing for each classifier. The highest scores are obtained by k-NN for training precision and by MLP for test precision. In contrast, the lowest precision is obtained by using Naïve Bayes in both the training and test stages.</p>
				<p>
					<table-wrap id="t2">
						<label>Table 2</label>
						<caption>
							<title>Mean and standard deviation of classifier precision for training and test stages.</title>
						</caption>
						<graphic xlink:href="0012-7353-dyna-84-202-00137-gt2.jpg"/>
						<table-wrap-foot>
							<fn id="TFN2">
								<p><bold>Source:</bold> The authors.</p>
							</fn>
						</table-wrap-foot>
					</table-wrap>
				</p>
				<p>Learning curves obtained for each classifier are shown in <xref ref-type="fig" rid="f6">Figure 6</xref>. The shaded area around the learning curves indicates the standard deviation of training error for 10 iterations per step.</p>
				<p>
					<fig id="f6">
						<label>Figure 6</label>
						<caption>
							<title>Learning curves for the binary classifiers used. Red and green lines, respectively, represent the training and cross-validation scores obtained when varying the training sample size.</title>
						</caption>
						<graphic xlink:href="0012-7353-dyna-84-202-00137-gf6.png"/>
						<attrib><bold>Source:</bold> The authors.</attrib>
					</fig>
				</p>
				<p>As shown in <xref ref-type="fig" rid="f6">Figure 6</xref>c and <xref ref-type="fig" rid="f6">Figure 6</xref>d, MLP and SVM are the most stable classifiers, as the learning and test curves do not show large differences when the training sample size is varied. MLP and k-NN achieve the highest scores in the training and test stages, but MLP presents high variation when using cross-validation (green shadow).</p>
				<p>LDA (<xref ref-type="fig" rid="f6">Figure 6</xref>a) and Naïve Bayes (<xref ref-type="fig" rid="f6">Figure 6</xref>e) have the highest variations in training and test scores, which means that more iterations are required to train a good classifier.</p>
			</sec>
			<sec>
				<title>3.2. Fusion quality</title>
				<p>A total of 30 pairs (near and far focused) of input images were fused using the proposed scheme. Fused images were evaluated using the fusion quality metrics detailed in Section 2.6. The mean fusion quality for each classifier over 30 cases is shown in <xref ref-type="table" rid="t3">Table 3</xref>. </p>
				<p>
					<table-wrap id="t3">
						<label>Table 3</label>
						<caption>
							<title>Average fusion quality obtained with different classification methods</title>
						</caption>
						<graphic xlink:href="0012-7353-dyna-84-202-00137-gt3.jpg"/>
						<table-wrap-foot>
							<fn id="TFN3">
								<p><bold>Source:</bold> The authors.</p>
							</fn>
						</table-wrap-foot>
					</table-wrap>
				</p>
				<p>The Naïve Bayes classifier obtained the highest quality score for the VIFF and Qabf metrics, and MLP obtained the highest quality score for the FMI metric among all classifiers. </p>
				<p>The high mean fusion quality achieved by MLP is expected because this classifier obtained the highest test score, as shown in <xref ref-type="table" rid="t2">Table 2</xref>. On the contrary, Naïve Bayes achieved the lowest training and test scores (<xref ref-type="table" rid="t2">Table 2</xref>) out of all classifiers, but the highest fusion quality according to two out of three metrics among all classifiers (<xref ref-type="table" rid="t3">Table 3</xref>).</p>
				<p>Some examples of the fusion results of the classifiers with the best results (Naive Bayes and MLP) are presented in <xref ref-type="fig" rid="f7">Figure 7</xref>. Subtle differences in the masks generated from both classifiers can be observed. A qualitative judgment about this result is that MLP generates fewer artifacts than Naive Bayes.</p>
				<p>
					<fig id="f7">
						<label>Figure 7</label>
						<caption>
							<title>Results of image fusion scheme proposed for classifiers with the best fusion results: Naive Bayes and MLP. Column 1) input image 1; column 2) input image 2; column 3) Naive Bayes: near focus plane mask; column 4) MLP: near-focus plane mask.</title>
						</caption>
						<graphic xlink:href="0012-7353-dyna-84-202-00137-gf7.jpg"/>
						<attrib><bold>Source:</bold> The authors.</attrib>
					</fig>
				</p>
				<p>Differences in fusion quality can be observed near the borders of objects, where artifacts are present due to the ambiguous nature of focus of the borders of objects in a natural scene (<xref ref-type="fig" rid="f8">Fig. 8</xref>).</p>
				<p>
					<fig id="f8">
						<label>Figure 8</label>
						<caption>
							<title>Magnified regions of fusion results obtained by a) FFN-MLP; b) k-NN; c) LDA; d) Naïve-Bayes; and e) RBF-SVM.</title>
						</caption>
						<graphic xlink:href="0012-7353-dyna-84-202-00137-gf8.jpg"/>
						<attrib><bold>Source:</bold> The authors.</attrib>
					</fig>
				</p>
			</sec>
		</sec>
		<sec sec-type="conclusions">
			<title>4. Conclusions</title>
			<p>The results obtained in the previous section show that the automatic classification concept can satisfactorily address the MFIF problem; that is, a classification scheme can be used to decide whether an image is focused or defocused based on local features. Thus, using this classification scheme, a fused image can be generated in which most regions or pixels are focused. The main contribution of this work is that it compares different classifiers in an MFIF scheme, thus evaluating which one obtains the best results in both learning and fusion stages. However, this work only evaluates the main classifiers found in the literature, and other classifiers not evaluated in this work may obtain better results.</p>
			<p>The most important conclusions obtained from this work are as follows:</p>
			<p>
				<list list-type="bullet">
					<list-item>
						<p>Classification precision does not guarantee fusion quality; that is, a classifier can have a low training and testing score but a high fusion quality. </p>
					</list-item>
					<list-item>
						<p>Naïve Bayes is an example of the latter. It is well known that Naïve Bayes is a robust classifier and, thus, less subject to overfitting problems. Thus, it can perform well with small amounts of training data, which is the case found in this work. It is possible that this can be related to the number of artifacts a classifier generates and therefore the fusion quality.</p>
					</list-item>
					<list-item>
						<p>The MLP classifier shows good results in both the classification stage (training and test) and the fusion stage (fusion quality). </p>
					</list-item>
					<list-item>
						<p>Artifacts are generated near the borders of objects in the scene due to the spatial nature of the scheme used and the rectangular shape of the moving window. A good classifier generates fewer artifacts in these regions.</p>
					</list-item>
					<list-item>
						<p>MFIF is not a trivial problem, and a specific classifier can directly affect the fusion quality obtained using the proposed scheme in this work.</p>
					</list-item>
					<list-item>
						<p>Both the size of the moving window and kernels for morphological operations directly affect the results. Thus, future work should focus on evaluating the impact of these parameters on fusion quality obtained by this scheme.</p>
					</list-item>
				</list>
			</p>
			<p>Furthermore, the scheme proposed in this work could be adapted to address multi-focus scenes that present more than two (near and far) focal depths. An optimization technique based on region-growing could thus be used to generate a better smoothed mask 𝑍 ′ that encloses the objects that appear in the scene more precisely.</p>
		</sec>
	</body>
	<back>
		<ref-list>
			<title>References</title>
			<ref id="B1">
				<label>[1]</label>
				<mixed-citation>[1]  Zhang, B., Lu, X., Pei, H., Liu, H., Zhao, Y. and Zhou, W., Multi-focus image fusion algorithm based on focused region extraction. Neurocomputing, 174, pp. 733-748, Jan. 2016. DOI: 10.1016/j.neucom.2015.09.092.</mixed-citation>
				<element-citation publication-type="journal">
					<person-group person-group-type="author">
						<name>
							<surname>Zhang</surname>
							<given-names>B.</given-names>
						</name>
						<name>
							<surname>Lu</surname>
							<given-names>X.</given-names>
						</name>
						<name>
							<surname>Pei</surname>
							<given-names>H.</given-names>
						</name>
						<name>
							<surname>Liu</surname>
							<given-names>H.</given-names>
						</name>
						<name>
							<surname>Zhao</surname>
							<given-names>Y.</given-names>
						</name>
						<name>
							<surname>Zhou</surname>
							<given-names>W.</given-names>
						</name>
					</person-group>
					<article-title>Multi-focus image fusion algorithm based on focused region extraction</article-title>
					<source>Neurocomputing</source>
					<issue>174</issue>
					<fpage>733</fpage>
					<lpage>748</lpage>
					<month>01</month>
					<year>2016</year>
					<pub-id pub-id-type="doi">10.1016/j.neucom.2015.09.092</pub-id>
				</element-citation>
			</ref>
			<ref id="B2">
				<label>[2]</label>
				<mixed-citation>[2]  Favaro, P., Mennucci, A. and Soatto, S., Observing shape from defocused images. Int. J. Comput. Vis., 52(1), pp. 25-43, 2003. DOI: 10.1023/A:1022366408068.</mixed-citation>
				<element-citation publication-type="journal">
					<person-group person-group-type="author">
						<name>
							<surname>Favaro</surname>
							<given-names>P.</given-names>
						</name>
						<name>
							<surname>Mennucci</surname>
							<given-names>A.</given-names>
						</name>
						<name>
							<surname>Soatto</surname>
							<given-names>S.</given-names>
						</name>
					</person-group>
					<article-title>Observing shape from defocused images</article-title>
					<source>Int. J. Comput. Vis.</source>
					<volume>52</volume>
					<issue>1</issue>
					<fpage>25</fpage>
					<lpage>43</lpage>
					<year>2003</year>
					<pub-id pub-id-type="doi">10.1023/A:1022366408068</pub-id>
				</element-citation>
			</ref>
			<ref id="B3">
				<label>[3]</label>
				<mixed-citation>[3]  Xiao, J., Liu, T., Zhang, Y., Zou, B., Lei, J. and Li, Q., Multi-focus image fusion based on depth extraction with inhomogeneous diffusion equation. Signal Process., 125, pp. 171-186, Aug. 2016. DOI: 10.1016/j.sigpro.2016.01.014.</mixed-citation>
				<element-citation publication-type="journal">
					<person-group person-group-type="author">
						<name>
							<surname>Xiao</surname>
							<given-names>J.</given-names>
						</name>
						<name>
							<surname>Liu</surname>
							<given-names>T.</given-names>
						</name>
						<name>
							<surname>Zhang</surname>
							<given-names>Y.</given-names>
						</name>
						<name>
							<surname>Zou</surname>
							<given-names>B.</given-names>
						</name>
						<name>
							<surname>Lei</surname>
							<given-names>J.</given-names>
						</name>
						<name>
							<surname>Li</surname>
							<given-names>Q.</given-names>
						</name>
					</person-group>
					<article-title>Multi-focus image fusion based on depth extraction with inhomogeneous diffusion equation.</article-title>
					<source>Signal Process</source>
					<issue>125</issue>
					<fpage>171</fpage>
					<lpage>186</lpage>
					<month>08</month>
					<year>2016</year>
					<pub-id pub-id-type="doi">10.1016/j.sigpro.2016.01.014</pub-id>
				</element-citation>
			</ref>
			<ref id="B4">
				<label>[4]</label>
				<mixed-citation>[4]  Saeed, A. and Choi, T.-S., A novel algorithm for estimation of depth map using image focus for 3D shape recovery in the presence of noise. Pattern Recognit., 41(6), pp. 2200-2225, 2008. DOI: 10.1016/j.patcog.2007.12.014.</mixed-citation>
				<element-citation publication-type="journal">
					<person-group person-group-type="author">
						<name>
							<surname>Saeed</surname>
							<given-names>A.</given-names>
						</name>
						<name>
							<surname>Choi</surname>
							<given-names>T.-S.</given-names>
						</name>
					</person-group>
					<article-title>A novel algorithm for estimation of depth map using image focus for 3D shape recovery in the presence of noise</article-title>
					<source>Pattern Recognit</source>
					<volume>41</volume>
					<issue>6</issue>
					<fpage>2200</fpage>
					<lpage>2225</lpage>
					<year>2008</year>
					<pub-id pub-id-type="doi">10.1016/j.patcog.2007.12.014</pub-id>
				</element-citation>
			</ref>
			<ref id="B5">
				<label>[5]</label>
				<mixed-citation>[5]  Song, Y., Li, M., Li, Q. and Sun, L., A new wavelet based multi-focus image fusion scheme and its application on optical microscopy, in: Robotics and Biomimetics, 2006. ROBIO ’06. IEEE Int. Conf., 2006, pp. 401-405. DOI: 10.1109/ROBIO.2006.340210.</mixed-citation>
				<element-citation publication-type="confproc">
					<person-group person-group-type="author">
						<name>
							<surname>Song</surname>
							<given-names>Y.</given-names>
						</name>
						<name>
							<surname>Li</surname>
							<given-names>M.</given-names>
						</name>
						<name>
							<surname>Li</surname>
							<given-names>Q.</given-names>
						</name>
						<name>
							<surname>Sun</surname>
							<given-names>L.</given-names>
						</name>
					</person-group>
					<source>A new wavelet based multi-focus image fusion scheme and its application on optical microscopy</source>
					<conf-name>Robotics and Biomimetics</conf-name>
					<year>2006</year>
					<fpage>401</fpage>
					<lpage>405</lpage>
					<pub-id pub-id-type="doi">10.1109/ROBIO.2006.340210</pub-id>
				</element-citation>
			</ref>
			<ref id="B6">
				<label>[6]</label>
				<mixed-citation>[6]  Wan, T., Zhu, C. and Qin, Z., Multifocus image fusion based on robust principal component analysis. Pattern Recognit. Lett., 34(9), pp. 1001-1008, 2013. DOI: 10.1016/j.patrec.2013.03.003.</mixed-citation>
				<element-citation publication-type="journal">
					<person-group person-group-type="author">
						<name>
							<surname>Wan</surname>
							<given-names>T.</given-names>
						</name>
						<name>
							<surname>Zhu</surname>
							<given-names>C.</given-names>
						</name>
						<name>
							<surname>Qin</surname>
							<given-names>Z.</given-names>
						</name>
					</person-group>
					<article-title>Multifocus image fusion based on robust principal component analysis</article-title>
					<source>Pattern Recognit. Lett.</source>
					<volume>34</volume>
					<issue>9</issue>
					<fpage>1001</fpage>
					<lpage>1008</lpage>
					<year>2013</year>
					<pub-id pub-id-type="doi">10.1016/j.patrec.2013.03.003</pub-id>
				</element-citation>
			</ref>
			<ref id="B7">
				<label>[7]</label>
				<mixed-citation>[7]  Huang, W. and Jing, Z., Evaluation of focus measures in multi-focus image fusion. Pattern Recognit. Lett. , 28(4), 2007. DOI: 10.1016/j.patrec.2006.09.005.</mixed-citation>
				<element-citation publication-type="journal">
					<person-group person-group-type="author">
						<name>
							<surname>Huang</surname>
							<given-names>W.</given-names>
						</name>
						<name>
							<surname>Jing</surname>
							<given-names>Z.</given-names>
						</name>
					</person-group>
					<article-title>Evaluation of focus measures in multi-focus image fusion</article-title>
					<source>Pattern Recognit. Lett.</source>
					<volume>28</volume>
					<issue>4</issue>
					<year>2007</year>
					<pub-id pub-id-type="doi">10.1016/j.patrec.2006.09.005</pub-id>
				</element-citation>
			</ref>
			<ref id="B8">
				<label>[8]</label>
				<mixed-citation>[8]  Xin, W., You-Li, W., and Fu, L., A New Multi-source image sequence fusion algorithm based on SIDWT, in: 2013 Seventh International Conference on Image and Graphics, 2013, pp. 568-571. DOI: 10.1109/ICIG.2013.119.</mixed-citation>
				<element-citation publication-type="confproc">
					<person-group person-group-type="author">
						<name>
							<surname>Xin</surname>
							<given-names>W.</given-names>
						</name>
						<name>
							<surname>You-Li</surname>
							<given-names>W.</given-names>
						</name>
						<name>
							<surname>Fu</surname>
							<given-names>L.</given-names>
						</name>
					</person-group>
					<source>A New Multi-source image sequence fusion algorithm based on SIDWT</source>
					<conf-date>2013</conf-date>
					<conf-name>Seventh International Conference on Image and Graphics</conf-name>
					<year>2013</year>
					<fpage>568</fpage>
					<lpage>571</lpage>
					<pub-id pub-id-type="doi">10.1109/ICIG.2013.119</pub-id>
				</element-citation>
			</ref>
			<ref id="B9">
				<label>[9]</label>
				<mixed-citation>[9]  Yu, B. et al., Hybrid dual-tree complex wavelet transform and support vector machine for digital multi-focus image fusion, Neurocomputing , 182, pp. 1-9, Mar. 2016. DOI: 10.1016/j.neucom.2015.10.084.</mixed-citation>
				<element-citation publication-type="journal">
					<person-group person-group-type="author">
						<name>
							<surname>Yu</surname>
							<given-names>B.</given-names>
						</name>
						<etal/>
					</person-group>
					<article-title>Hybrid dual-tree complex wavelet transform and support vector machine for digital multi-focus image fusion</article-title>
					<source>Neurocomputing</source>
					<issue>182</issue>
					<fpage>1</fpage>
					<lpage>9</lpage>
					<month>03</month>
					<year>2016</year>
					<pub-id pub-id-type="doi">10.1016/j.neucom.2015.10.084</pub-id>
				</element-citation>
			</ref>
			<ref id="B10">
				<label>[10]</label>
				<mixed-citation>[10]  Haghighat, M.B.A., Aghagolzadeh, A. and Seyedarabi, H., Multi-focus image fusion for visual sensor networks in DCT domain. Comput. Electr. Eng., 37(5), pp. 789-797, Sep. 2011. DOI: 10.1016/j.compeleceng.2011.04.016.</mixed-citation>
				<element-citation publication-type="journal">
					<person-group person-group-type="author">
						<name>
							<surname>Haghighat</surname>
							<given-names>M.B.A.</given-names>
						</name>
						<name>
							<surname>Aghagolzadeh</surname>
							<given-names>A.</given-names>
						</name>
						<name>
							<surname>Seyedarabi</surname>
							<given-names>H.</given-names>
						</name>
					</person-group>
					<article-title>Multi-focus image fusion for visual sensor networks in DCT domain.</article-title>
					<source>Comput. Electr. Eng.</source>
					<volume>37</volume>
					<issue>5</issue>
					<fpage>789</fpage>
					<lpage>797</lpage>
					<month>09</month>
					<year>2011</year>
					<pub-id pub-id-type="doi">10.1016/j.compeleceng.2011.04.016</pub-id>
				</element-citation>
			</ref>
			<ref id="B11">
				<label>[11]</label>
				<mixed-citation>[11]  Hua, K.-L., Wang, H.-C., Rusdi, A.H. and Jiang, S.-Y., A novel multi-focus image fusion algorithm based on random walks, J. Vis. Commun. Image Represent., 25(5), pp. 951-962, Jul. 2014. DOI: 10.1016/j.jvcir.2014.02.009.</mixed-citation>
				<element-citation publication-type="journal">
					<person-group person-group-type="author">
						<name>
							<surname>Hua</surname>
							<given-names>K.-L.</given-names>
						</name>
						<name>
							<surname>Wang</surname>
							<given-names>H.-C.</given-names>
						</name>
						<name>
							<surname>Rusdi</surname>
							<given-names>A.H.</given-names>
						</name>
						<name>
							<surname>Jiang</surname>
							<given-names>S.-Y.</given-names>
						</name>
					</person-group>
					<article-title>A novel multi-focus image fusion algorithm based on random walks</article-title>
					<source>J. Vis. Commun. Image Represent.</source>
					<volume>25</volume>
					<issue>5</issue>
					<fpage>951</fpage>
					<lpage>962</lpage>
					<month>07</month>
					<year>2014</year>
					<pub-id pub-id-type="doi">10.1016/j.jvcir.2014.02.009</pub-id>
				</element-citation>
			</ref>
			<ref id="B12">
				<label>[12]</label>
				<mixed-citation>[12]  Li, H., Li, L. and Zhang, J., Multi-focus image fusion based on sparse feature matrix decomposition and morphological filtering, Opt. Commun., 342, pp. 1-11, May 2015. DOI: 10.1016/j.optcom.2014.12.048.</mixed-citation>
				<element-citation publication-type="journal">
					<person-group person-group-type="author">
						<name>
							<surname>Li</surname>
							<given-names>H.</given-names>
						</name>
						<name>
							<surname>Li</surname>
							<given-names>L.</given-names>
						</name>
						<name>
							<surname>Zhang</surname>
							<given-names>J.</given-names>
						</name>
					</person-group>
					<article-title>Multi-focus image fusion based on sparse feature matrix decomposition and morphological filtering</article-title>
					<source>Opt. Commun</source>
					<issue>342</issue>
					<fpage>1</fpage>
					<lpage>11</lpage>
					<month>05</month>
					<year>2015</year>
					<pub-id pub-id-type="doi">10.1016/j.optcom.2014.12.048</pub-id>
				</element-citation>
			</ref>
			<ref id="B13">
				<label>[13]</label>
				<mixed-citation>[13]  Bai, X., Zhang, Y., Zhou, F. and Xu, B., Quadtree-based multi-focus image fusion using a weighted focus-measure. Inf. Fusion, 22, pp. 105-118, Mar. 2015. DOI: 10.1016/j.inffus.2014.05.003.</mixed-citation>
				<element-citation publication-type="journal">
					<person-group person-group-type="author">
						<name>
							<surname>Bai</surname>
							<given-names>X.</given-names>
						</name>
						<name>
							<surname>Zhang</surname>
							<given-names>Y.</given-names>
						</name>
						<name>
							<surname>Zhou</surname>
							<given-names>F.</given-names>
						</name>
						<name>
							<surname>Xu</surname>
							<given-names>B.</given-names>
						</name>
					</person-group>
					<article-title>Quadtree-based multi-focus image fusion using a weighted focus-measure.</article-title>
					<source>Inf. Fusion</source>
					<issue>22</issue>
					<fpage>105</fpage>
					<lpage>118</lpage>
					<month>03</month>
					<year>2015</year>
					<pub-id pub-id-type="doi">10.1016/j.inffus.2014.05.003</pub-id>
				</element-citation>
			</ref>
			<ref id="B14">
				<label>[14]</label>
				<mixed-citation>[14]  De, I. and Chanda, B., Multi-focus image fusion using a morphology-based focus measure in a quad-tree structure, Inf. Fusion , 14, pp. 136-146, 2015. DOI: 10.1016/j.inffus.2012.01.007.</mixed-citation>
				<element-citation publication-type="journal">
					<person-group person-group-type="author">
						<name>
							<surname>De</surname>
							<given-names>I.</given-names>
						</name>
						<name>
							<surname>Chanda</surname>
							<given-names>B.</given-names>
						</name>
					</person-group>
					<article-title>Multi-focus image fusion using a morphology-based focus measure in a quad-tree structure</article-title>
					<source>Inf. Fusion</source>
					<issue>14</issue>
					<fpage>136</fpage>
					<lpage>146</lpage>
					<year>2015</year>
					<pub-id pub-id-type="doi">10.1016/j.inffus.2012.01.007</pub-id>
				</element-citation>
			</ref>
			<ref id="B15">
				<label>[15]</label>
				<mixed-citation>[15]  Rajagopalan, A.N. and Chaudhuri, S., An MRF model-based approach to simultaneous recovery of depth and restoration from defocused images. IEEE Trans. Pattern Anal. Mach. Intell., 21(7), pp. 577-589, Jul. 1999. DOI: 10.1109/34.777369.</mixed-citation>
				<element-citation publication-type="journal">
					<person-group person-group-type="author">
						<name>
							<surname>Rajagopalan</surname>
							<given-names>A.N.</given-names>
						</name>
						<name>
							<surname>Chaudhuri</surname>
							<given-names>S.</given-names>
						</name>
					</person-group>
					<article-title>An MRF model-based approach to simultaneous recovery of depth and restoration from defocused images.</article-title>
					<source>IEEE Trans. Pattern Anal. Mach. Intell.</source>
					<volume>21</volume>
					<issue>7</issue>
					<fpage>577</fpage>
					<lpage>589</lpage>
					<month>07</month>
					<year>1999</year>
					<pub-id pub-id-type="doi">10.1109/34.777369</pub-id>
				</element-citation>
			</ref>
			<ref id="B16">
				<label>[16]</label>
				<mixed-citation>[16]  Xu, N., Tan, K., Arora, H. and Ahuja, N., Generating omnifocus images using graph cuts and a new focus measure, in: Proc. 17th Int. Conf. Pattern Recognition, 2004. ICPR 2004, 4, pp. 697-700, 2004. DOI: 10.1109/ICPR.2004.1333868. </mixed-citation>
				<element-citation publication-type="confproc">
					<person-group person-group-type="author">
						<name>
							<surname>Xu</surname>
							<given-names>N.</given-names>
						</name>
						<name>
							<surname>Tan</surname>
							<given-names>K.</given-names>
						</name>
						<name>
							<surname>Arora</surname>
							<given-names>H.</given-names>
						</name>
						<name>
							<surname>Ahuja</surname>
							<given-names>N.</given-names>
						</name>
					</person-group>
					<source>Generating omnifocus images using graph cuts and a new focus measure</source>
					<conf-name>17th Int. Conf. Pattern Recognition</conf-name>
					<conf-date>2004</conf-date>
					<year>2004</year>
					<edition>4</edition>
					<fpage>697</fpage>
					<lpage>700</lpage>
					<year>2004</year>
					<pub-id pub-id-type="doi">10.1109/ICPR.2004.1333868</pub-id>
				</element-citation>
			</ref>
			<ref id="B17">
				<label>[17]</label>
				<mixed-citation>[17]  Li, S., Kang, X., Hu, J. and Yang, B., Image matting for fusion of multi-focus images in dynamic scenes. Inf. Fusion , 14, pp. 147-162, 2013. DOI: 10.1016/j.inffus.2011.07.001.</mixed-citation>
				<element-citation publication-type="journal">
					<person-group person-group-type="author">
						<name>
							<surname>Li</surname>
							<given-names>S.</given-names>
						</name>
						<name>
							<surname>Kang</surname>
							<given-names>X.</given-names>
						</name>
						<name>
							<surname>Hu</surname>
							<given-names>J.</given-names>
						</name>
						<name>
							<surname>Yang</surname>
							<given-names>B.</given-names>
						</name>
					</person-group>
					<article-title>Image matting for fusion of multi-focus images in dynamic scenes.</article-title>
					<source>Inf. Fusion</source>
					<issue>14</issue>
					<fpage>147</fpage>
					<lpage>162</lpage>
					<year>2013</year>
					<pub-id pub-id-type="doi">10.1016/j.inffus.2011.07.001</pub-id>
				</element-citation>
			</ref>
			<ref id="B18">
				<label>[18]</label>
				<mixed-citation>[18]  Nejati, M., Samavi, S. and Shirani, S., Multi-focus image fusion using dictionary-based sparse representation. Inf. Fusion , 25, pp. 72-84, 2015. DOI: 10.1016/j.inffus.2014.10.004.</mixed-citation>
				<element-citation publication-type="journal">
					<person-group person-group-type="author">
						<name>
							<surname>Nejati</surname>
							<given-names>M.</given-names>
						</name>
						<name>
							<surname>Samavi</surname>
							<given-names>S.</given-names>
						</name>
						<name>
							<surname>Shirani</surname>
							<given-names>S.</given-names>
						</name>
					</person-group>
					<article-title>Multi-focus image fusion using dictionary-based sparse representation</article-title>
					<source>Inf. Fusion</source>
					<issue>25</issue>
					<fpage>72</fpage>
					<lpage>84</lpage>
					<year>2015</year>
					<pub-id pub-id-type="doi">10.1016/j.inffus.2014.10.004</pub-id>
				</element-citation>
			</ref>
			<ref id="B19">
				<label>[19]</label>
				<mixed-citation>[19]  Saeedi, J. and Faez, K., Fisher classifier and fuzzy logic based multi-focus image fusion. In: Intelligent Computing and Intelligent Systems, 2009. ICIS 2009. IEEE Int. Conf. , pp. 420-425, 2009. DOI: 10.1109/ICICISYS.2009.5357648.</mixed-citation>
				<element-citation publication-type="confproc">
					<person-group person-group-type="author">
						<name>
							<surname>Saeedi</surname>
							<given-names>J.</given-names>
						</name>
						<name>
							<surname>Faez</surname>
							<given-names>K.</given-names>
						</name>
					</person-group>
					<source>Fisher classifier and fuzzy logic based multi-focus image fusion</source>
					<conf-name>Intelligent Computing and Intelligent Systems</conf-name>
					<conf-date>2009</conf-date>
					<fpage>420</fpage>
					<lpage>425</lpage>
					<year>2009</year>
					<pub-id pub-id-type="doi">10.1109/ICICISYS.2009.5357648</pub-id>
				</element-citation>
			</ref>
			<ref id="B20">
				<label>[20]</label>
				<mixed-citation>[20]  Li, S., Kwok, J. and Wang, Y., Multifocus image fusion using artificial neural networks. Pattern Recognit. Lett. , 23, pp. 985-997, 2002. DOI: 10.1016/S0167-8655(02)00029-6.</mixed-citation>
				<element-citation publication-type="journal">
					<person-group person-group-type="author">
						<name>
							<surname>Li</surname>
							<given-names>S.</given-names>
						</name>
						<name>
							<surname>Kwok</surname>
							<given-names>J.</given-names>
						</name>
						<name>
							<surname>Wang</surname>
							<given-names>Y.</given-names>
						</name>
					</person-group>
					<article-title>Multifocus image fusion using artificial neural networks</article-title>
					<source>Pattern Recognit. Lett.</source>
					<issue>23</issue>
					<fpage>985</fpage>
					<lpage>997</lpage>
					<year>2002</year>
					<pub-id pub-id-type="doi">10.1016/S0167-8655(02)00029-6</pub-id>
				</element-citation>
			</ref>
			<ref id="B21">
				<label>[21]</label>
				<mixed-citation>[21]  Wang, Z., Ma, Y. and Gu, J., Multi-focus image fusion using PCNN. Pattern Recognit., 43(6), pp. 2003-2016, Jun. 2010. DOI: 10.1016/j.patcog.2010.01.011.</mixed-citation>
				<element-citation publication-type="journal">
					<person-group person-group-type="author">
						<name>
							<surname>Wang</surname>
							<given-names>Z.</given-names>
						</name>
						<name>
							<surname>Ma</surname>
							<given-names>Y.</given-names>
						</name>
						<name>
							<surname>Gu</surname>
							<given-names>J.</given-names>
						</name>
					</person-group>
					<article-title>Multi-focus image fusion using PCNN</article-title>
					<source>Pattern Recognit.</source>
					<volume>43</volume>
					<issue>6</issue>
					<fpage>2003</fpage>
					<lpage>2016</lpage>
					<month>06</month>
					<year>2010</year>
					<pub-id pub-id-type="doi">10.1016/j.patcog.2010.01.011</pub-id>
				</element-citation>
			</ref>
			<ref id="B22">
				<label>[22]</label>
				<mixed-citation>[22]  Liu, Z., Tsukada, K., Hanasaki, K., Ho, Y.K. and Dai, Y.P., Image fusion by using steerable pyramid. Pattern Recognit. Lett. , 22(9), pp. 929-939, 2001. DOI: 10.1016/S0167-8655(01)00047-2.</mixed-citation>
				<element-citation publication-type="journal">
					<person-group person-group-type="author">
						<name>
							<surname>Liu</surname>
							<given-names>Z.</given-names>
						</name>
						<name>
							<surname>Tsukada</surname>
							<given-names>K.</given-names>
						</name>
						<name>
							<surname>Hanasaki</surname>
							<given-names>K.</given-names>
						</name>
						<name>
							<surname>Ho</surname>
							<given-names>Y.K.</given-names>
						</name>
						<name>
							<surname>Dai</surname>
							<given-names>Y.P.</given-names>
						</name>
					</person-group>
					<article-title>Image fusion by using steerable pyramid</article-title>
					<source>Pattern Recognit. Lett.</source>
					<volume>22</volume>
					<issue>9</issue>
					<fpage>929</fpage>
					<lpage>939</lpage>
					<year>2001</year>
					<pub-id pub-id-type="doi">10.1016/S0167-8655(01)00047-2</pub-id>
				</element-citation>
			</ref>
			<ref id="B23">
				<label>[23]</label>
				<mixed-citation>[23]  Santhosh, J., Ketan, B. and Anand, S., Application of SiDWT with extended PCA for multi-focus images, in Medical Imaging, m-Health and Emerging Communication Systems (MedCom), 2014. Int. Conf., pp. 55-59, 2014. DOI: 10.1109/MedCom.2014.7005975.</mixed-citation>
				<element-citation publication-type="confproc">
					<person-group person-group-type="author">
						<name>
							<surname>Santhosh</surname>
							<given-names>J.</given-names>
						</name>
						<name>
							<surname>Ketan</surname>
							<given-names>B.</given-names>
						</name>
						<name>
							<surname>Anand</surname>
							<given-names>S.</given-names>
						</name>
					</person-group>
					<source>Application of SiDWT with extended PCA for multi-focus images</source>
					<conf-name>Medical Imaging, m-Health and Emerging Communication Systems (MedCom)</conf-name>
					<conf-date>2014</conf-date>
					<fpage>55</fpage>
					<lpage>59</lpage>
					<year>2014</year>
					<pub-id pub-id-type="doi">10.1109/MedCom.2014.7005975</pub-id>
				</element-citation>
			</ref>
			<ref id="B24">
				<label>[24]</label>
				<mixed-citation>[24]  Savic, S., Multifocus image fusion based on empirical mode decomposition, in: 20th Int. Electrotechnical and Computer Science Conf., 2011, pp. 91-94.</mixed-citation>
				<element-citation publication-type="confproc">
					<person-group person-group-type="author">
						<name>
							<surname>Savic</surname>
							<given-names>S.</given-names>
						</name>
					</person-group>
					<source>Multifocus image fusion based on empirical mode decomposition</source>
					<conf-name>20th Int. Electrotechnical and Computer Science Conf.</conf-name>
					<conf-date>2011</conf-date>
					<fpage>91</fpage>
					<lpage>94</lpage>
				</element-citation>
			</ref>
			<ref id="B25">
				<label>[25]</label>
				<mixed-citation>[25]  Nayar, S.K. and Nakagawa, Y., Shape from focus, IEEE Trans. Pattern Anal. Mach. Intell. , 16(8), pp. 824-831, 1994. DOI: 10.1109/34.308479.</mixed-citation>
				<element-citation publication-type="journal">
					<person-group person-group-type="author">
						<name>
							<surname>Nayar</surname>
							<given-names>S.K.</given-names>
						</name>
						<name>
							<surname>Nakagawa</surname>
							<given-names>Y.</given-names>
						</name>
					</person-group>
					<article-title>Shape from focus</article-title>
					<source>IEEE Trans. Pattern Anal. Mach. Intell.</source>
					<volume>16</volume>
					<issue>8</issue>
					<fpage>824</fpage>
					<lpage>831</lpage>
					<year>1994</year>
					<pub-id pub-id-type="doi">10.1109/34.308479</pub-id>
				</element-citation>
			</ref>
			<ref id="B26">
				<label>[26]</label>
				<mixed-citation>[26]  Balakrishnama, G., Linear discriminant analysis - A brief tutorial, 1998.</mixed-citation>
				<element-citation publication-type="book">
					<person-group person-group-type="author">
						<name>
							<surname>Balakrishnama</surname>
							<given-names>G.</given-names>
						</name>
					</person-group>
					<source>Linear discriminant analysis - A brief tutorial</source>
					<year>1998</year>
				</element-citation>
			</ref>
			<ref id="B27">
				<label>[27]</label>
				<mixed-citation>[27]  Cheeseman, P. and Stutz, J., Bayesian classification (autoclass): Theory and results, in: Advances in knowledge discovery and data mining, Fayyad, U.M., Piatetsky-Shapiro, G., Smyth, P. and Uthurusamy, R., Eds., Menlo Park, CA, USA: American Association for Artificial Intelligence, 1996, pp. 153-180.</mixed-citation>
				<element-citation publication-type="book">
					<person-group person-group-type="author">
						<name>
							<surname>Cheeseman</surname>
							<given-names>P.</given-names>
						</name>
						<name>
							<surname>Stutz</surname>
							<given-names>J.</given-names>
						</name>
					</person-group>
					<chapter-title>Bayesian classification (autoclass): Theory and results</chapter-title>
					<source>Advances in knowledge discovery and data mining</source>
					<person-group person-group-type="editor">
						<name>
							<surname>Fayyad</surname>
							<given-names>U.M.</given-names>
						</name>
						<name>
							<surname>Piatetsky-Shapiro</surname>
							<given-names>G.</given-names>
						</name>
						<name>
							<surname>Smyth</surname>
							<given-names>P.</given-names>
						</name>
						<name>
							<surname>Uthurusamy</surname>
							<given-names>R.</given-names>
						</name>
					</person-group>
					<publisher-loc>Menlo Park</publisher-loc>
					<publisher-name>American Association for Artificial Intelligence</publisher-name>
					<year>1996</year>
					<fpage>153</fpage>
					<lpage>180</lpage>
				</element-citation>
			</ref>
			<ref id="B28">
				<label>[28]</label>
				<mixed-citation>[28]  Tsangaratos, P. and Ilia, I., Comparison of a logistic regression and Naïve Bayes classifier in landslide susceptibility assessments: The influence of models complexity and training dataset size. CATENA, 145, pp. 164-179, Oct. 2016. DOI: 10.1016/j.catena.2016.06.004.</mixed-citation>
				<element-citation publication-type="journal">
					<person-group person-group-type="author">
						<name>
							<surname>Tsangaratos</surname>
							<given-names>P.</given-names>
						</name>
						<name>
							<surname>Ilia</surname>
							<given-names>I.</given-names>
						</name>
					</person-group>
					<article-title>Comparison of a logistic regression and Naïve Bayes classifier in landslide susceptibility assessments: The influence of models complexity and training dataset size</article-title>
					<source>CATENA</source>
					<issue>145</issue>
					<fpage>164</fpage>
					<lpage>179</lpage>
					<month>10</month>
					<year>2016</year>
					<pub-id pub-id-type="doi">10.1016/j.catena.2016.06.004</pub-id>
				</element-citation>
			</ref>
			<ref id="B29">
				<label>[29]</label>
				<mixed-citation>[29]  Domingos, P. and Pazzani, M., Beyond independence: Conditions for the optimality of the simple Bayesian classifier, in: Machine Learning, 1996, pp. 105-112. </mixed-citation>
				<element-citation publication-type="book">
					<person-group person-group-type="author">
						<name>
							<surname>Domingos</surname>
							<given-names>P.</given-names>
						</name>
						<name>
							<surname>Pazzani</surname>
							<given-names>M.</given-names>
						</name>
					</person-group>
					<chapter-title>Beyond independence: Conditions for the optimality of the simple Bayesian classifier</chapter-title>
					<source>Machine Learning</source>
					<year>1996</year>
					<fpage>105</fpage>
					<lpage>112</lpage>
				</element-citation>
			</ref>
			<ref id="B30">
				<label>[30]</label>
				<mixed-citation>[30]  Villa-Medina, J.L., Boqué, R. and Ferré, J., Bagged k-nearest neighbours classification with uncertainty in the variables. Anal. Chim. Acta, 646(1-2), pp. 62-68, Jul. 2009. DOI: 10.1016/j.aca.2009.05.016.</mixed-citation>
				<element-citation publication-type="journal">
					<person-group person-group-type="author">
						<name>
							<surname>Villa-Medina</surname>
							<given-names>J.L.</given-names>
						</name>
						<name>
							<surname>Boqué</surname>
							<given-names>R.</given-names>
						</name>
						<name>
							<surname>Ferré</surname>
							<given-names>J.</given-names>
						</name>
					</person-group>
					<article-title>Bagged k-nearest neighbours classification with uncertainty in the variables</article-title>
					<source>Anal. Chim. Acta</source>
					<volume>646</volume>
					<issue>1-2</issue>
					<fpage>62</fpage>
					<lpage>68</lpage>
					<month>07</month>
					<year>2009</year>
					<pub-id pub-id-type="doi">10.1016/j.aca.2009.05.016</pub-id>
				</element-citation>
			</ref>
			<ref id="B31">
				<label>[31]</label>
				<mixed-citation>[31]  Orhan, U., Hekim, M. and Ozer, M., EEG signals classification using the K-means clustering and a multilayer perceptron neural network model. Expert Syst. Appl., 38(10), pp. 13475-13481, Sep. 2011. DOI: 10.1016/j.eswa.2011.04.149.</mixed-citation>
				<element-citation publication-type="journal">
					<person-group person-group-type="author">
						<name>
							<surname>Orhan</surname>
							<given-names>U.</given-names>
						</name>
						<name>
							<surname>Hekim</surname>
							<given-names>M.</given-names>
						</name>
						<name>
							<surname>Ozer</surname>
							<given-names>M.</given-names>
						</name>
					</person-group>
					<article-title>EEG signals classification using the K-means clustering and a multilayer perceptron neural network model.</article-title>
					<source>Expert Syst. Appl.</source>
					<volume>38</volume>
					<issue>10</issue>
					<fpage>13475</fpage>
					<lpage>13481</lpage>
					<month>09</month>
					<year>2011</year>
					<pub-id pub-id-type="doi">10.1016/j.eswa.2011.04.149</pub-id>
				</element-citation>
			</ref>
			<ref id="B32">
				<label>[32]</label>
				<mixed-citation>[32]  Kong, Y.B., Lee, E.J., Hur, M.G., Park, J.H., Park, Y.D. and Yang, S.D., Support vector machine based fault detection approach for RFT-30 cyclotron. Nucl. Instrum. Methods Phys. Res. Sect. Accel. Spectrometers Detect. Assoc. Equip., 834, pp. 143-148, Oct. 2016. DOI: 10.1016/j.nima.2016.07.054.</mixed-citation>
				<element-citation publication-type="journal">
					<person-group person-group-type="author">
						<name>
							<surname>Kong</surname>
							<given-names>Y.B.</given-names>
						</name>
						<name>
							<surname>Lee</surname>
							<given-names>E.J.</given-names>
						</name>
						<name>
							<surname>Hur</surname>
							<given-names>M.G.</given-names>
						</name>
						<name>
							<surname>Park</surname>
							<given-names>J.H.</given-names>
						</name>
						<name>
							<surname>Park</surname>
							<given-names>Y.D.</given-names>
						</name>
						<name>
							<surname>Yang</surname>
							<given-names>S.D.</given-names>
						</name>
					</person-group>
					<article-title>Support vector machine based fault detection approach for RFT-30 cyclotron.</article-title>
					<source>Nucl. Instrum. Methods Phys. Res. Sect. Accel. Spectrometers Detect. Assoc. Equip.</source>
					<issue>834</issue>
					<fpage>143</fpage>
					<lpage>148</lpage>
					<month>10</month>
					<year>2016</year>
					<pub-id pub-id-type="doi">10.1016/j.nima.2016.07.054</pub-id>
				</element-citation>
			</ref>
			<ref id="B33">
				<label>[33]</label>
				<mixed-citation>[33]  Pedregosa, F. et al., Scikit-learn: machine learning in Python. J. Mach. Learn. Res., 12, pp. 2825-2830, 2011.</mixed-citation>
				<element-citation publication-type="journal">
					<person-group person-group-type="author">
						<name>
							<surname>Pedregosa</surname>
							<given-names>F.</given-names>
						</name>
						<etal/>
					</person-group>
					<article-title>Scikit-learn: machine learning in Python.</article-title>
					<source>J. Mach. Learn. Res.</source>
					<issue>12</issue>
					<fpage>2825</fpage>
					<lpage>2830</lpage>
					<year>2011</year>
				</element-citation>
			</ref>
			<ref id="B34">
				<label>[34]</label>
				<mixed-citation>[34]  Schaul, T. et al., PyBrain. J. Mach. Learn. Res. , 11, pp. 743-746, 2010.</mixed-citation>
				<element-citation publication-type="journal">
					<person-group person-group-type="author">
						<name>
							<surname>Schaul</surname>
							<given-names>T.</given-names>
						</name>
						<etal/>
					</person-group>
					<article-title>PyBrain</article-title>
					<source>J. Mach. Learn. Res.</source>
					<issue>11</issue>
					<fpage>743</fpage>
					<lpage>746</lpage>
					<year>2010</year>
				</element-citation>
			</ref>
			<ref id="B35">
				<label>[35]</label>
				<mixed-citation>[35]  Xydeas, C.S. and Petrovic, V., Objective image fusion performance measure. Electron. Lett., 36(4), pp. 308-309, Feb. 2000. DOI: 10.1049/el:20000267.</mixed-citation>
				<element-citation publication-type="journal">
					<person-group person-group-type="author">
						<name>
							<surname>Xydeas</surname>
							<given-names>C.S.</given-names>
						</name>
						<name>
							<surname>Petrovic</surname>
							<given-names>V.</given-names>
						</name>
					</person-group>
					<article-title>Objective image fusion performance measure.</article-title>
					<source>Electron. Lett.</source>
					<volume>36</volume>
					<issue>4</issue>
					<fpage>308</fpage>
					<lpage>309</lpage>
					<month>02</month>
					<year>2000</year>
					<pub-id pub-id-type="doi">10.1049/el:20000267</pub-id>
				</element-citation>
			</ref>
			<ref id="B36">
				<label>[36]</label>
				<mixed-citation>[36]  Han, Y., Cai, Y., Cao, Y. and Xu, X., A new image fusion performance metric based on visual information fidelity. Inf. Fusion , 14(2), pp. 127-135, 2013. DOI: 10.1016/j.inffus.2011.08.002</mixed-citation>
				<element-citation publication-type="journal">
					<person-group person-group-type="author">
						<name>
							<surname>Han</surname>
							<given-names>Y.</given-names>
						</name>
						<name>
							<surname>Cai</surname>
							<given-names>Y.</given-names>
						</name>
						<name>
							<surname>Cao</surname>
							<given-names>Y.</given-names>
						</name>
						<name>
							<surname>Xu</surname>
							<given-names>X.</given-names>
						</name>
					</person-group>
					<article-title>A new image fusion performance metric based on visual information fidelity.</article-title>
					<source>Inf. Fusion</source>
					<volume>14</volume>
					<issue>2</issue>
					<fpage>127</fpage>
					<lpage>135</lpage>
					<year>2013</year>
					<pub-id pub-id-type="doi">10.1016/j.inffus.2011.08.002</pub-id>
				</element-citation>
			</ref>
			<ref id="B37">
				<label>[37]</label>
				<mixed-citation>[37]  Haghighat, M.B.A., Aghagolzadeh, A. and Seyedarabi, H., A non-reference image fusion metric based on mutual information of image features, Comput. Electr. Eng. , 37(5), pp. 744-756, Sep. 2011. DOI: 10.1016/j.compeleceng.2011.07.012.</mixed-citation>
				<element-citation publication-type="journal">
					<person-group person-group-type="author">
						<name>
							<surname>Haghighat</surname>
							<given-names>M.B.A.</given-names>
						</name>
						<name>
							<surname>Aghagolzadeh</surname>
							<given-names>A.</given-names>
						</name>
						<name>
							<surname>Seyedarabi</surname>
							<given-names>H.</given-names>
						</name>
					</person-group>
					<article-title>A non-reference image fusion metric based on mutual information of image features</article-title>
					<source>Comput. Electr. Eng.</source>
					<volume>37</volume>
					<issue>5</issue>
					<fpage>744</fpage>
					<lpage>756</lpage>
					<month>09</month>
					<year>2011</year>
					<pub-id pub-id-type="doi">10.1016/j.compeleceng.2011.07.012</pub-id>
				</element-citation>
			</ref>
			<ref id="B38">
				<label>[38]</label>
				<mixed-citation>[38]  Kvalseth, T.O., Entropy and correlation: some comments. IEEE Trans. Syst. Man Cybern., 17(3), pp. 517-519, May 1987. DOI: 10.1109/TSMC.1987.4309069.</mixed-citation>
				<element-citation publication-type="journal">
					<person-group person-group-type="author">
						<name>
							<surname>Kvalseth</surname>
							<given-names>T.O.</given-names>
						</name>
					</person-group>
					<article-title>Entropy and correlation: some comments.</article-title>
					<source>IEEE Trans. Syst. Man Cybern.</source>
					<volume>17</volume>
					<issue>3</issue>
					<fpage>517</fpage>
					<lpage>519</lpage>
					<month>05</month>
					<year>1987</year>
					<pub-id pub-id-type="doi">10.1109/TSMC.1987.4309069</pub-id>
				</element-citation>
			</ref>
			<ref id="B39">
				<label>[39]</label>
				<mixed-citation>[39]  Hossny, M., Nahavandi, S. and Creighton, D., Comments on ‘Information measure for performance of image fusion’. Electron. Lett. , 44(18), pp. 1066-1067, Aug. 2008. DOI: 10.1049/el:20081754.</mixed-citation>
				<element-citation publication-type="journal">
					<person-group person-group-type="author">
						<name>
							<surname>Hossny</surname>
							<given-names>M.</given-names>
						</name>
						<name>
							<surname>Nahavandi</surname>
							<given-names>S.</given-names>
						</name>
						<name>
							<surname>Creighton</surname>
							<given-names>D.</given-names>
						</name>
					</person-group>
					<article-title>Comments on ‘Information measure for performance of image fusion’</article-title>
					<source>Electron. Lett.</source>
					<volume>44</volume>
					<issue>18</issue>
					<fpage>1066</fpage>
					<lpage>1067</lpage>
					<month>08</month>
					<year>2008</year>
					<pub-id pub-id-type="doi">10.1049/el:20081754</pub-id>
				</element-citation>
			</ref>
			<ref id="B40">
				<label>[40]</label>
				<mixed-citation>[40]  Duda, R., Hart, P. and Stork, D., Pattern classification, 2nd ed., 2001.</mixed-citation>
				<element-citation publication-type="book">
					<person-group person-group-type="author">
						<name>
							<surname>Duda</surname>
							<given-names>R.</given-names>
						</name>
						<name>
							<surname>Hart</surname>
							<given-names>P.</given-names>
						</name>
						<name>
							<surname>Stork</surname>
							<given-names>D.</given-names>
						</name>
					</person-group>
					<source>Pattern classification</source>
					<edition>2nd</edition>
					<year>2001</year>
				</element-citation>
			</ref>
		</ref-list>
		<fn-group>
			<fn fn-type="other" id="fn1">
				<label>1</label>
				<p><bold>How to cite:</bold> Atencio-Ortiz, P., Sanchez-Torres, G. and Branch-Bedoya, J.W., Evaluating supervised learning approaches for spatial-domain multi-focus image fusion. DYNA, 84(202), pp. 137-146, September, 2017.</p>
			</fn>
		</fn-group>
	</back>
</article>