@techreport{AmanoBimberGrundhoefer2010, author = {Amano, Toshiyuki and Bimber, Oliver and Grundh{\"o}fer, Anselm}, title = {Appearance Enhancement for Visually Impaired with Projector Camera Feedback}, doi = {10.25643/bauhaus-universitaet.1411}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20100106-14974}, year = {2010}, abstract = {Visual impairment is a common problem worldwide. Projector-based AR techniques can change the appearance of real objects and can therefore help improve visibility for the visually impaired. We propose a new framework for appearance enhancement with a projector-camera system that employs a model predictive controller. This framework enables arbitrary image processing in the real world, comparable to photo-retouching software, and helps improve visibility for the visually impaired. In this article, we show appearance enhancement results for Peli's method and Wolffshon's method for low vision, and Jefferson's method for color vision deficiencies. The experimental results confirm the potential of our method to enhance appearance for the visually impaired, matching appearance enhancement for digital images and television viewing.}, subject = {Maschinelles Sehen}, language = {en} }

@techreport{ExnerBrunsKurzetal.2009, author = {Exner, David and Bruns, Erich and Kurz, Daniel and Grundh{\"o}fer, Anselm and Bimber, Oliver}, title = {Fast and Reliable CAMShift Tracking}, organization = {JP Augmented Reality, Bauhaus-Universit{\"a}t Weimar}, doi = {10.25643/bauhaus-universitaet.1410}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20091217-14962}, year = {2009}, abstract = {CAMShift is a well-established and fundamental algorithm for kernel-based visual object tracking. While it performs well with objects that have a simple and constant appearance, it is not robust in more complex cases. Because it relies solely on back-projected probabilities, it can fail when the object's appearance changes (e.g., due to object or camera movement, or due to lighting changes), when similarly colored objects have to be re-detected, or when they cross their trajectories. We propose extensions to CAMShift that address and resolve all of these problems. They allow the accumulation of multiple histograms to model more complex object appearance and the continuous monitoring of object identities to handle ambiguous cases of partial or full occlusion. Most steps of our method are carried out on the GPU to achieve real-time tracking of multiple targets simultaneously. We explain efficient GPU implementations of histogram generation, probability back projection, image moment computation, and histogram intersection. All of these techniques make full use of the GPU's high degree of parallelism.}, subject = {Bildverarbeitung}, language = {en} }

@techreport{BimberIwai2008, author = {Bimber, Oliver and Iwai, Daisuke}, title = {Superimposing Dynamic Range}, doi = {10.25643/bauhaus-universitaet.1287}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20080422-13585}, year = {2008}, abstract = {We present a simple and cost-efficient way of extending contrast, perceived tonal resolution, and the color space of static hardcopy images beyond the capabilities of hardcopy devices or low-dynamic-range displays alone. A calibrated projector-camera system is applied for automatic registration, scanning, and superimposition of hardcopies.
We explain how high-dynamic-range content can be split for linear devices with different capabilities, how luminance quantization can be optimized with respect to the non-linear response of the human visual system as well as for the discrete nature of the applied modulation devices, and how inverse tone-mapping can be adapted in cases where only untreated hardcopies and softcopies (such as regular photographs) are available. We believe that our approach has the potential to complement hardcopy-based technologies, such as X-ray prints for filmless imaging, in domains that operate with high-quality static image content, like radiology and other medical fields, or astronomy.}, subject = {Bildverarbeitung}, language = {en} }

@article{BrunsBimber2008, author = {Bruns, Erich and Bimber, Oliver}, title = {Phone-to-Phone Communication for Adaptive Image Classification}, doi = {10.25643/bauhaus-universitaet.1296}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20080722-13685}, year = {2008}, abstract = {In this paper, we present a novel technique for adapting local image classifiers that are applied for object recognition on mobile phones through ad-hoc network communication between the devices. By continuously accumulating and exchanging collected user feedback among devices that are located within signal range, we show that our approach improves the overall classification rate and adapts quickly to dynamic changes. This technique is applied in the context of PhoneGuide - a mobile phone-based museum guidance framework that combines pervasive tracking and local object recognition for identifying a large number of objects in uncontrolled museum environments.}, subject = {Peer-to-Peer-Netz}, language = {en} }

@techreport{GrosseBimber2008, author = {Grosse, Max and Bimber, Oliver}, title = {Coded Aperture Projection}, doi = {10.25643/bauhaus-universitaet.1234}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20080227-13020}, year = {2008}, abstract = {In computer vision, optical defocus is often described as convolution with a filter kernel that corresponds to an image of the aperture being used by the imaging device. The degree of defocus correlates with the scale of the kernel. Convolving an image with the inverse aperture kernel will digitally sharpen the image and consequently compensate for optical defocus. This is referred to as deconvolution or inverse filtering. In the frequency domain, the inverse of the filter kernel is its reciprocal, and deconvolution reduces to a division. Low magnitudes in the Fourier transform of the aperture image, however, lead to intensity values in the spatial domain that exceed the displayable range. Therefore, the corresponding frequencies are not considered, which then results in visible ringing artifacts in the final projection. This is the main limitation of previous approaches, since in the frequency domain the Gaussian PSF of spherical apertures contains a large fraction of low Fourier magnitudes. Applying only small kernel scales will reduce the number of low Fourier magnitudes (and consequently the ringing artifacts) -- but will also lead only to minor focus improvements. To overcome this problem, we apply a coded aperture whose Fourier transform has fewer low magnitudes initially.
Consequently, more frequencies are retained and more image details are reconstructed.}, subject = {Association for Computing Machinery / Special Interest Group on Graphics}, language = {en} }

@article{BrombachBrunsBimber2008, author = {Brombach, Benjamin and Bruns, Erich and Bimber, Oliver}, title = {Subobject Detection through Spatial Relationships on Mobile Phones}, doi = {10.25643/bauhaus-universitaet.1353}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20081007-14296}, year = {2008}, abstract = {We present a novel image classification technique for detecting multiple objects (called subobjects) in a single image. In addition to image classifiers, we apply spatial relationships among the subobjects to verify and to predict the locations of detected and undetected subobjects, respectively. By continuously refining the spatial relationships throughout the detection process, even the locations of completely occluded exhibits can be determined. Finally, all detected subobjects are labeled and the user can select the object of interest for retrieving corresponding multimedia information. This approach is applied in the context of PhoneGuide, an adaptive museum guidance system for camera-equipped mobile phones. We show that the recognition of subobjects using spatial relationships is up to 68\% faster than related approaches without spatial relationships. Results of a field experiment in a local museum illustrate that inexperienced users reach an average subobject recognition rate of 85.6\% under realistic conditions.}, subject = {Objekterkennung}, language = {en} }

@techreport{Bimber2008, author = {Bimber, Oliver}, title = {Superimposing Dynamic Range}, doi = {10.25643/bauhaus-universitaet.1379}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20090303-14662}, year = {2008}, abstract = {Replacing uniform illumination with high-frequency illumination enhances the contrast of observed and captured images. We modulate spatially and temporally multiplexed (projected) light with reflective or transmissive matter to achieve high-dynamic-range visualizations of radiological images on printed paper or ePaper, and to boost the optical contrast of images viewed or imaged with light microscopes.}, subject = {Bildverarbeitung}, language = {en} }

@unpublished{ZollmannBimber2007, author = {Zollmann, Stefanie and Bimber, Oliver}, title = {Imperceptible Calibration for Radiometric Compensation}, doi = {10.25643/bauhaus-universitaet.809}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20111215-8094}, year = {2007}, abstract = {We present a novel multi-step technique for imperceptible geometry and radiometry calibration of projector-camera systems. Our approach can be used to display geometry- and color-corrected images on non-optimized surfaces at interactive rates while simultaneously performing a series of invisible structured light projections during runtime. It supports disjoint projector-camera configurations, fast and progressive improvements, as well as real-time correction rates for arbitrary graphical content.
The calibration is automatically triggered when mis-registrations between camera, projector, and surface are detected.}, subject = {Association for Computing Machinery / Special Interest Group on Graphics}, language = {en} }

@techreport{BrunsBimber2007, author = {Bruns, Erich and Bimber, Oliver}, title = {Adaptive Training of Video Sets for Image Recognition on Mobile Phones}, doi = {10.25643/bauhaus-universitaet.822}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20111215-8223}, year = {2007}, abstract = {We present an enhancement of adaptive video training for PhoneGuide, a digital museum guidance system for ordinary camera-equipped mobile phones. It enables museum visitors to identify exhibits by capturing photos of them. In this article, a combined solution of object recognition and pervasive tracking is extended to a client-server system for improving data acquisition and for supporting scale-invariant object recognition.}, subject = {Objektverfolgung}, language = {en} }

@incollection{Bimber2006, author = {Bimber, Oliver}, title = {Projector-Based Augmentation}, series = {Emerging Technologies of Augmented Reality: Interfaces \& Design}, booktitle = {Emerging Technologies of Augmented Reality: Interfaces \& Design}, doi = {10.25643/bauhaus-universitaet.735}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20111215-7353}, year = {2006}, abstract = {Projector-based augmentation approaches hold the potential of combining the advantages of well-established spatial virtual reality and spatial augmented reality. Immersive, semi-immersive, and augmented visualizations can be realized in everyday environments - without the need for special projection screens and dedicated display configurations. Limitations of mobile devices, such as low resolution, small field of view, focus constraints, and ergonomic issues, can in many cases be overcome by using projection technology. Thus, applications that do not require mobility can benefit from efficient spatial augmentations. Examples range from edutainment in museums (such as storytelling projections onto natural stone walls in historical buildings) to architectural visualizations (such as augmentations of complex illumination simulations or modified surface materials in real building structures). This chapter describes projector-camera methods and multi-projector techniques that aim at correcting geometric aberrations, compensating for local and global radiometric effects, and improving the focus properties of images projected onto everyday surfaces.}, subject = {Erweiterte Realit{\"a}t}, language = {en} }