% pubman genre = article
@article{item_3524162,
  title = {{Automatic individual identification of patterned solitary species based on unlabeled video data}},
  author = {Suessle, Vanessa and Arandjelovic, Milica and Kalan, Ammie K. and Agbor, Anthony and Boesch, Christophe and Brazzola, Gregory and Deschner, Tobias and Dieguez, Paula and Granjon, Anne-C{\'e}line and K{\"u}hl, Hjalmar S. and Landsmann, Anja and Lapuente, Juan and Maldonado, Nuria and Meier, Amelia and Rockaiova, Zuzana and Wessling, Erin G. and Wittig, Roman M. and Downs, Colleen T. and Weinmann, Andreas and Hergenroether, Elke},
  language = {eng},
  issn = {1213-6972; 1213-6964},
  doi = {10.24132/JWSCG.2023.1},
  publisher = {Vaclav Skala - Union Agency},
  address = {Plzen},
  year = {2023},
  date = {2023},
  abstract = {{The manual processing and analysis of videos from camera traps is time-consuming and includes several steps, ranging from the filtering of falsely triggered footage to identifying and re-identifying individuals. In this study, we developed a pipeline to automatically analyze videos from camera traps to identify individuals without requiring manual interaction. This pipeline applies to animal species with uniquely identifiable fur patterns and solitary behavior, such as leopards (Panthera pardus). We assumed that the same individual was seen throughout one triggered video sequence. With this assumption, multiple images could be assigned to an individual for the initial database filling without pre-labeling. The pipeline was based on well-established components from computer vision and deep learning, particularly convolutional neural networks (CNNs) and scale-invariant feature transform (SIFT) features. We augmented this basis by implementing additional components to substitute otherwise required human interactions. Based on the similarity between frames from the video material, clusters were formed that represented individuals, bypassing the open set problem of the unknown total population. The pipeline was tested on a dataset of leopard videos collected by the Pan African Programme: The Cultured Chimpanzee (PanAf) and achieved a success rate of over 83{\textpercent} for correct matches between previously unknown individuals. The proposed pipeline can become a valuable tool for future conservation projects based on camera trap data, reducing the work of manual analysis for individual identification when labeled data is unavailable.}},
  journal = {{Journal of WSCG}},
  volume = {31},
  pages = {1--10},
}