@techreport{AmanoBimberGrundhoefer2010, author = {Amano, Toshiyuki and Bimber, Oliver and Grundh{\"o}fer, Anselm}, title = {Appearance Enhancement for Visually Impaired with Projector Camera Feedback}, doi = {10.25643/bauhaus-universitaet.1411}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20100106-14974}, year = {2010}, abstract = {Visually impaired is a common problem for human life in the world wide. The projector-based AR technique has ability to change appearance of real object, and it can help to improve visibility for visually impaired. We propose a new framework for the appearance enhancement with the projector camera system that employed model predictive controller. This framework enables arbitrary image processing such as photo-retouch software in the real world and it helps to improve visibility for visually impaired. In this article, we show the appearance enhancement result of Peli's method and Wolffshon's method for the low vision, Jefferson's method for color vision deficiencies. Through experiment results, the potential of our method to enhance the appearance for visually impaired was confirmed as same as appearance enhancement for the digital image and television viewing.}, subject = {Maschinelles Sehen}, language = {en} } @incollection{Bimber2006, author = {Bimber, Oliver}, title = {Projector-Based Augmentation}, series = {Emerging Technologies of Augmented Reality: Interfaces \& Design}, booktitle = {Emerging Technologies of Augmented Reality: Interfaces \& Design}, doi = {10.25643/bauhaus-universitaet.735}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20111215-7353}, year = {2006}, abstract = {Projector-based augmentation approaches hold the potential of combining the advantages of well-established spatial virtual reality and spatial augmented reality. Immersive, semi-immersive and augmented visualizations can be realized in everyday environments - without the need for special projection screens and dedicated display configurations. 
Limitations of mobile devices, such as low resolution and small field of view, focus constraints, and ergonomic issues can be overcome in many cases by the utilization of projection technology. Thus, applications that do not require mobility can benefit from efficient spatial augmentations. Examples range from edutainment in museums (such as storytelling projections onto natural stone walls in historical buildings) to architectural visualizations (such as augmentations of complex illumination simulations or modified surface materials in real building structures). This chapter describes projector-camera methods and multi-projector techniques that aim at correcting geometric aberrations, compensating local and global radiometric effects, and improving focus properties of images projected onto everyday surfaces.}, subject = {Erweiterte Realit{\"a}t}, language = {en} } @techreport{BrunsBrombachZeidleretal.2005, author = {Bruns, Erich and Brombach, Benjamin and Zeidler, Thomas and Bimber, Oliver}, title = {Enabling Mobile Phones To Support Large-Scale Museum Guidance}, doi = {10.25643/bauhaus-universitaet.677}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20111215-6777}, year = {2005}, abstract = {We present a museum guidance system called PhoneGuide that uses widespread camera equipped mobile phones for on-device object recognition in combination with pervasive tracking. It provides additional location- and object-aware multimedia content to museum visitors, and is scalable to cover a large number of museum objects.}, subject = {Objektverfolgung}, language = {en} } @unpublished{LanglotzBimber2007, author = {Langlotz, Tobias and Bimber, Oliver}, title = {Unsynchronized 4D Barcodes}, doi = {10.25643/bauhaus-universitaet.853}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20111215-8531}, year = {2007}, abstract = {We present a novel technique for optical data transfer between public displays and mobile devices based on unsynchronized 4D barcodes. 
We assume that no direct (electromagnetic or other) connection between the devices can exist. Time-multiplexed, 2D color barcodes are displayed on screens and recorded with camera equipped mobile phones. This allows to transmit information optically between both devices. Our approach maximizes the data throughput and the robustness of the barcode recognition, while no immediate synchronization exists. Although the transfer rate is much smaller than it can be achieved with electromagnetic techniques (e.g., Bluetooth or WiFi), we envision to apply such a technique wherever no direct connection is available. 4D barcodes can, for instance, be integrated into public web-pages, movie sequences or advertisement presentations, and they encode and transmit more information than possible with single 2D or 3D barcodes.}, subject = {Maschinelles Sehen}, language = {en} } @article{BrunsBimber2008, author = {Bruns, Erich and Bimber, Oliver}, title = {Phone-to-Phone Communication for Adaptive Image Classification}, doi = {10.25643/bauhaus-universitaet.1296}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20080722-13685}, year = {2008}, abstract = {In this paper, we present a novel technique for adapting local image classifiers that are applied for object recognition on mobile phones through ad-hoc network communication between the devices. By continuously accumulating and exchanging collected user feedback among devices that are located within signal range, we show that our approach improves the overall classification rate and adapts to dynamic changes quickly. 
This technique is applied in the context of PhoneGuide - a mobile phone based museum guidance framework that combines pervasive tracking and local object recognition for identifying a large number of objects in uncontrolled museum environments.}, subject = {Peer-to-Peer-Netz}, language = {en} } @phdthesis{Bruns2010, author = {Bruns, Erich}, title = {Adaptive Image Classification on Mobile Phones}, doi = {10.25643/bauhaus-universitaet.1421}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20100707-15092}, school = {Bauhaus-Universit{\"a}t Weimar}, year = {2010}, abstract = {The advent of high-performance mobile phones has opened up the opportunity to develop new context-aware applications for everyday life. In particular, applications for context-aware information retrieval in conjunction with image-based object recognition have become a focal area of recent research. In this thesis we introduce an adaptive mobile museum guidance system that allows visitors in a museum to identify exhibits by taking a picture with their mobile phone. Besides approaches to object recognition, we present different adaptation techniques that improve classification performance. After providing a comprehensive background of context-aware mobile information systems in general, we present an on-device object recognition algorithm and show how its classification performance can be improved by capturing multiple images of a single exhibit. To accomplish this, we combine the classification results of the individual pictures and consider the perspective relations among the retrieved database images. In order to identify multiple exhibits in pictures we present an approach that uses the spatial relationships among the objects in images. They make it possible to infer and validate the locations of undetected objects relative to the detected ones and additionally improve classification performance. 
To cope with environmental influences, we introduce an adaptation technique that establishes ad-hoc wireless networks among the visitors' mobile devices to exchange classification data. This ensures constant classification rates under varying illumination levels and changing object placement. Finally, in addition to localization using RF-technology, we present an adaptation technique that uses user-generated spatio-temporal pathway data for person movement prediction. Based on the history of previously visited exhibits, the algorithm determines possible future locations and incorporates these predictions into the object classification process. This increases classification performance and offers benefits comparable to traditional localization approaches but without the need for additional hardware. Through multiple field studies and laboratory experiments we demonstrate the benefits of each approach and show how they influence the overall classification rate.}, subject = {Kontextbezogenes System}, language = {en} } @techreport{BrunsBrombachBimber2007, author = {Bruns, Erich and Brombach, Benjamin and Bimber, Oliver}, title = {Mobile Phone Enabled Museum Guidance with Adaptive Classification}, doi = {10.25643/bauhaus-universitaet.940}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20111215-9406}, year = {2007}, abstract = {Although audio guides are widely established in many museums, they suffer from several drawbacks compared to state-of-the-art multimedia technologies: First, they provide only audible information to museum visitors, while other forms of media presentation, such as reading text or video could be beneficial for museum guidance tasks. Second, they are not very intuitive. Reference numbers have to be manually keyed in by the visitor before information about the exhibit is provided. These numbers are either displayed on visible tags that are located near the exhibited objects, or are printed in brochures that have to be carried. 
Third, offering mobile guidance equipment to visitors leads to acquisition and maintenance costs that have to be covered by the museum. With our project PhoneGuide we aim at solving these problems by enabling the application of conventional camera-equipped mobile phones for museum guidance purposes. The advantages are obvious: First, today's off-the-shelf mobile phones offer a rich pallet of multimedia functionalities ---ranging from audio (over speaker or head-set) and video (graphics, images, movies) to simple tactile feedback (vibration). Second, integrated cameras, improvements in processor performance and more memory space enable supporting advanced computer vision algorithms. Instead of keying in reference numbers, objects can be recognized automatically by taking non-persistent photographs of them. This is more intuitive and saves museum curators from distributing and maintaining a large number of physical (visible or invisible) tags. Together with a few sensor-equipped reference tags only, computer vision based object recognition allows for the classification of single objects; whereas overlapping signal ranges of object-distinct active tags (such as RFID) would prevent the identification of individuals that are grouped closely together. Third, since we assume that museum visitors will be able to use their own devices, the acquisition and maintenance cost for museum-owned devices decreases.}, subject = {Objektverfolgung}, language = {en} } @techreport{ExnerBrunsKurzetal.2009, author = {Exner, David and Bruns, Erich and Kurz, Daniel and Grundh{\"o}fer, Anselm and Bimber, Oliver}, title = {Fast and Reliable CAMShift Tracking}, organization = {JP AUgmented Reality, Bauhaus-Universit{\"a}t Weimar}, doi = {10.25643/bauhaus-universitaet.1410}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20091217-14962}, year = {2009}, abstract = {CAMShift is a well-established and fundamental algorithm for kernel-based visual object tracking. 
While it performs well with objects that have a simple and constant appearance, it is not robust in more complex cases. As it solely relies on back projected probabilities it can fail in cases when the object's appearance changes (e.g. due to object or camera movement, or due to lighting changes), when similarly colored objects have to be re-detected or when they cross their trajectories. We propose extensions to CAMShift that address and resolve all of these problems. They allow the accumulation of multiple histograms to model more complex object appearance and the continuous monitoring of object identities to handle ambiguous cases of partial or full occlusion. Most steps of our method are carried out on the GPU for achieving real-time tracking of multiple targets simultaneously. We explain an efficient GPU implementation of histogram generation, probability back projection, image moments computations, and histogram intersection. All of these techniques make full use of a GPU's high parallelization.}, subject = {Bildverarbeitung}, language = {en} } @article{BrombachBrunsBimber2008, author = {Brombach, Benjamin and Bruns, Erich and Bimber, Oliver}, title = {Subobject Detection through Spatial Relationships on Mobile Phones}, doi = {10.25643/bauhaus-universitaet.1353}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20081007-14296}, year = {2008}, abstract = {We present a novel image classification technique for detecting multiple objects (called subobjects) in a single image. In addition to image classifiers, we apply spatial relationships among the subobjects to verify and to predict locations of detected and undetected subobjects, respectively. By continuously refining the spatial relationships throughout the detection process, even locations of completely occluded exhibits can be determined. Finally, all detected subobjects are labeled and the user can select the object of interest for retrieving corresponding multimedia information. 
This approach is applied in the context of PhoneGuide, an adaptive museum guidance system for camera-equipped mobile phones. We show that the recognition of subobjects using spatial relationships is up to 68\% faster than related approaches without spatial relationships. Results of a field experiment in a local museum illustrate that unexperienced users reach an average recognition rate for subobjects of 85.6\% under realistic conditions.}, subject = {Objekterkennung}, language = {en} } @techreport{BrunsBimber2007, author = {Bruns, Erich and Bimber, Oliver}, title = {Adaptive Training of Video Sets for Image Recognition on Mobile Phones}, doi = {10.25643/bauhaus-universitaet.822}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20111215-8223}, year = {2007}, abstract = {We present an enhancement towards adaptive video training for PhoneGuide, a digital museum guidance system for ordinary camera-equipped mobile phones. It enables museum visitors to identify exhibits by capturing photos of them. In this article, a combined solution of object recognition and pervasive tracking is extended to a client-server-system for improving data acquisition and for supporting scale-invariant object recognition.}, subject = {Objektverfolgung}, language = {en} } @techreport{FoecklerZeidlerBimber2005, author = {F{\"o}ckler, Paul and Zeidler, Thomas and Bimber, Oliver}, title = {PhoneGuide: Museum Guidance Supported by On-Device Object Recognition on Mobile Phones}, doi = {10.25643/bauhaus-universitaet.650}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20111215-6500}, year = {2005}, abstract = {We present PhoneGuide - an enhanced museum guidance approach that uses camera-equipped mobile phones and on-device object recognition. Our main technical achievement is a simple and light-weight object recognition approach that is realized with single-layer perceptron neuronal networks. 
In contrast to related systems which perform computational intensive image processing tasks on remote servers, our intention is to carry out all computations directly on the phone. This ensures little or even no network traffic and consequently decreases cost for online times. Our laboratory experiments and field surveys have shown that photographed museum exhibits can be recognized with a probability of over 90\%. We have evaluated different feature sets to optimize the recognition rate and performance. Our experiments revealed that normalized color features are most effective for our method. Choosing such a feature set allows recognizing an object below one second on up-to-date phones. The amount of data that is required for differentiating 50 objects from multiple perspectives is less than 6KBytes.}, subject = {Neuronales Netz}, language = {en} } @inproceedings{SchirmerKleinerOsburg, author = {Schirmer, Ulrike and Kleiner, Florian and Osburg, Andrea}, title = {Objektive Oberfl{\"a}chenbewertung von (P)SCC-Sichtbeton mittels automatisierter Analyse von Bilddaten}, series = {Tagung Bauchemie der GDCH-Fachgruppe Bauchemie, 30. September - 2. Oktober 2019 in Aachen}, booktitle = {Tagung Bauchemie der GDCH-Fachgruppe Bauchemie, 30. September - 2. Oktober 2019 in Aachen}, publisher = {Gesellschaft Deutscher Chemiker}, isbn = {978-3-947197-13-2}, doi = {10.25643/bauhaus-universitaet.4510}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20211004-45104}, pages = {8}, abstract = {Sichtbeton ist aufgrund seiner Vielf{\"a}ltigkeit in der Formgebung eines der am meisten verbreiteten Gestaltungsmittel der modernen Architektur und optimal f{\"u}r neue Bauweisen sowie steigende Anforderungen an das Erscheinungsbild {\"o}ffentlicher Bauwerke geeignet. 
Die Herstellung qualitativ hochwertiger Sichtbetonoberfl{\"a}chen h{\"a}ngt im hohen Maße von den Wechselwirkungen zwischen Beton und Trennmittel, zwischen Trennmittel und Schalmaterial, sowie von der Applikationsart und -menge des Trennmittels ab. In Laborversuchen wurden diese Einfl{\"u}sse auf die Sichtbetonoberfl{\"a}chen eines polymermodifizierten selbstverdichtenden Betons (PSCC) im Vergleich zu einem herk{\"o}mmlichen selbstverdichtenden Beton (SCC) untersucht. Im Rahmen dieser Arbeiten wurde eine Methode zur Beurteilung der Sichtbetonqualit{\"a}t entwickelt, mit welcher Ausschlusskriterien, wie maximale Porosit{\"a}t und Gleichm{\"a}ßigkeit, objektiv und automatisiert bestimmt werden k{\"o}nnen. Ver{\"a}nderungen dieser Werte durch Witterungseinfl{\"u}sse ließen zudem erste R{\"u}ckschl{\"u}sse auf die Dauerhaftigkeit der Sichtbetonoberfl{\"a}chen zu.}, subject = {Sichtbeton}, language = {de} } @article{JiangRoesslerWellmannetal., author = {Jiang, Mingze and R{\"o}ßler, Christiane and Wellmann, Eva and Klaver, Jop and Kleiner, Florian and Schmatz, Joyce}, title = {Workflow for high-resolution phase segmentation of cement clinker from combined BSE image and EDX spectral data}, series = {Journal of Microscopy}, volume = {2021}, journal = {Journal of Microscopy}, publisher = {Wiley-Blackwell}, address = {Oxford}, doi = {10.1111/jmi.13072}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20211215-45449}, pages = {1--7}, abstract = {Burning of clinker is the most influencing step of cement quality during the production process. Appropriate characterisation for quality control and decision-making is therefore the critical point to maintain a stable production but also for the development of alternative cements. Scanning electron microscopy (SEM) in combination with energy dispersive X-ray spectroscopy (EDX) delivers spatially resolved phase and chemical information for cement clinker. 
This data can be used to quantify phase fractions and chemical composition of identified phases. The contribution aims to provide an overview of phase fraction quantification by semi-automatic phase segmentation using high-resolution backscattered electron (BSE) images and lower-resolved EDX element maps. Therefore, a tool for image analysis was developed that uses state-of-the-art algorithms for pixel-wise image segmentation and labelling in combination with a decision tree that allows searching for specific clinker phases. Results show that this tool can be applied to segment sub-micron scale clinker phases and to get a quantification of all phase fractions. In addition, statistical evaluation of the data is implemented within the tool to reveal whether the imaged area is representative for all clinker phases.}, subject = {Zementklinker}, language = {en} } @inproceedings{PaulRodehorst, author = {Debus, Paul and Rodehorst, Volker}, title = {Multi-Scale Flight Path Planning for UAS Building Inspection}, series = {Proceedings of the 18th International Conference on Computing in Civil and Building Engineering}, volume = {2020}, booktitle = {Proceedings of the 18th International Conference on Computing in Civil and Building Engineering}, editor = {Santos, Toledo}, publisher = {Springer}, doi = {10.25643/bauhaus-universitaet.4205}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20201009-42053}, pages = {19}, abstract = {Unmanned aircraft systems (UAS) show large potential for the construction industry. Their use in condition assessment has increased significantly, due to technological and computational progress. UAS play a crucial role in developing a digital maintenance strategy for infrastructure, saving cost and effort, while increasing safety and reliability. Part of that strategy are automated visual UAS inspections of the building's condition. 
The resulting images can automatically be analyzed to identify and localize damages to the structure that have to be monitored. Further interest in parts of a structure can arise from events like accidents or collisions. Areas of low interest exist, where low resolution monitoring is sufficient. From different requirements for resolution, different levels of detail can be derived. They require special image acquisition parameters that differ mainly in the distance between camera and structure. Areas with a higher level of detail require a smaller distance to the object, producing more images. This work proposes a multi-scale flight path planning procedure, enabling higher resolution requirements for areas of special interest, while reducing the number of required images to a minimum. Careful selection of the camera positions maintains the complete coverage of the structure, while achieving the required resolution in all areas. The result is an efficient UAS inspection, reducing effort for the maintenance of infrastructure.}, subject = {Drohne}, language = {en} }