@comment{ NOTE(review): institution fields below were derived from the DOI
  namespace "10.25643/bauhaus-universitaet" and the URN prefix "wim2" (Weimar)
  present in every entry -- verify against the original records.
  WetzsteinBimber2006: title appeared truncated ("...Approach to Radiometric");
  completed to "Radiometric Compensation" based on the entry's own abstract,
  and a minimal required `note` field was added for @unpublished -- verify both. }

@techreport{GrosseBimber2008,
  author      = {Grosse, Max and Bimber, Oliver},
  title       = {Coded Aperture Projection},
  institution = {Bauhaus-Universit{\"a}t Weimar},
  year        = {2008},
  doi         = {10.25643/bauhaus-universitaet.1234},
  url         = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20080227-13020},
  abstract    = {In computer vision, optical defocus is often described as convolution with a filter kernel that corresponds to an image of the aperture being used by the imaging device. The degree of defocus correlates to the scale of the kernel. Convolving an image with the inverse aperture kernel will digitally sharpen the image and consequently compensate optical defocus. This is referred to as deconvolution or inverse filtering. In frequency domain, the reciprocal of the filter kernel is its inverse, and deconvolution reduces to a division. Low magnitudes in the Fourier transform of the aperture image, however, lead to intensity values in spatial domain that exceed the displayable range. Therefore, the corresponding frequencies are not considered, which then results in visible ringing artifacts in the final projection. This is the main limitation of previous approaches, since in frequency domain the Gaussian PSF of spherical apertures does contain a large fraction of low Fourier magnitudes. Applying only small kernel scales will reduce the number of low Fourier magnitudes (and consequently the ringing artifacts) -- but will also lead only to minor focus improvements. To overcome this problem, we apply a coded aperture whose Fourier transform has less low magnitudes initially. Consequently, more frequencies are retained and more image details are reconstructed.},
  subject     = {Association for Computing Machinery / Special Interest Group on Graphics},
  language    = {en},
}

@techreport{AmanoBimberGrundhoefer2010,
  author      = {Amano, Toshiyuki and Bimber, Oliver and Grundh{\"o}fer, Anselm},
  title       = {Appearance Enhancement for Visually Impaired with Projector Camera Feedback},
  institution = {Bauhaus-Universit{\"a}t Weimar},
  year        = {2010},
  doi         = {10.25643/bauhaus-universitaet.1411},
  url         = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20100106-14974},
  abstract    = {Visually impaired is a common problem for human life in the world wide. The projector-based AR technique has ability to change appearance of real object, and it can help to improve visibility for visually impaired. We propose a new framework for the appearance enhancement with the projector camera system that employed model predictive controller. This framework enables arbitrary image processing such as photo-retouch software in the real world and it helps to improve visibility for visually impaired. In this article, we show the appearance enhancement result of Peli's method and Wolffshon's method for the low vision, Jefferson's method for color vision deficiencies. Through experiment results, the potential of our method to enhance the appearance for visually impaired was confirmed as same as appearance enhancement for the digital image and television viewing.},
  subject     = {Maschinelles Sehen},
  language    = {en},
}

@techreport{BrunsBimber2007,
  author      = {Bruns, Erich and Bimber, Oliver},
  title       = {Adaptive Training of Video Sets for Image Recognition on Mobile Phones},
  institution = {Bauhaus-Universit{\"a}t Weimar},
  year        = {2007},
  doi         = {10.25643/bauhaus-universitaet.822},
  url         = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20111215-8223},
  abstract    = {We present an enhancement towards adaptive video training for PhoneGuide, a digital museum guidance system for ordinary camera-equipped mobile phones. It enables museum visitors to identify exhibits by capturing photos of them. In this article, a combined solution of object recognition and pervasive tracking is extended to a client-server-system for improving data acquisition and for supporting scale-invariant object recognition.},
  subject     = {Objektverfolgung},
  language    = {en},
}

@unpublished{WetzsteinBimber2006,
  author      = {Wetzstein, Gordon and Bimber, Oliver},
  title       = {A Generalized Approach to Radiometric Compensation},
  year        = {2006},
  note        = {Unpublished manuscript},
  doi         = {10.25643/bauhaus-universitaet.762},
  url         = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20111215-7625},
  abstract    = {We propose a novel method that applies the light transport matrix for performing an image-based radiometric compensation which accounts for all possible types of light modulation. For practical application the matrix is decomposed into clusters of mutually influencing projector and camera pixels. The compensation is modeled as a linear system that can be solved with respect to the projector patterns. Precomputing the inverse light transport in combination with an efficient implementation on the GPU makes interactive compensation rates possible. Our generalized method unifies existing approaches that address individual problems. Based on examples, we show that it is possible to project corrected images onto complex surfaces such as an inter-reflecting statuette, glossy wallpaper, or through highly-refractive glass. Furthermore, we illustrate that a side-effect of our approach is an increase in the overall sharpness of defocused projections.},
  subject     = {Association for Computing Machinery / Special Interest Group on Graphics},
  language    = {en},
}