@comment{ Reviewed: reformatted one-field-per-line; fixed duplicate key
  GrundhoeferSeegerHaentschetal.2007 (second occurrence now ...2007a);
  added required institution/note fields. Abstracts kept verbatim. }

@incollection{Bimber2005,
  author    = {Bimber, Oliver},
  title     = {{HOLOGRAPHICS}: Combining Holograms with Interactive Computer Graphics},
  series    = {New Directions in Holography and Speckles},
  booktitle = {New Directions in Holography and Speckles},
  doi       = {10.25643/bauhaus-universitaet.736},
  url       = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20111215-7365},
  year      = {2005},
  abstract  = {Among all imaging techniques that have been invented throughout the last decades, computer graphics is one of the most successful tools today. Many areas in science, entertainment, education, and engineering would be unimaginable without the aid of 2D or 3D computer graphics. The reason for this success story might be its interactivity, which is an important property that is still not provided efficiently by competing technologies - such as holography. While optical holography and digital holography are limited to presenting a non-interactive content, electroholography or computer generated holograms (CGH) facilitate the computer-based generation and display of holograms at interactive rates [2,3,29,30]. Holographic fringes can be computed by either rendering multiple perspective images, then combining them into a stereogram [4], or simulating the optical interference and calculating the interference pattern [5]. Once computed, such a system dynamically visualizes the fringes with a holographic display. Since creating an electrohologram requires processing, transmitting, and storing a massive amount of data, today's computer technology still sets the limits for electroholography. To overcome some of these performance issues, advanced reduction and compression methods have been developed that create truly interactive electroholograms. Unfortunately, most of these holograms are relatively small, low resolution, and cover only a small color spectrum. However, recent advances in consumer graphics hardware may reveal potential acceleration possibilities that can overcome these limitations [6].
In parallel to the development of computer graphics and despite their non-interactivity, optical and digital holography have created new fields, including interferometry, copy protection, data storage, holographic optical elements, and display holograms. Especially display holography has conquered several application domains. Museum exhibits often use optical holograms because they can present 3D objects with almost no loss in visual quality. In contrast to most stereoscopic or autostereoscopic graphics displays, holographic images can provide all depth cues—perspective, binocular disparity, motion parallax, convergence, and accommodation—and theoretically can be viewed simultaneously from an unlimited number of positions. Displaying artifacts virtually removes the need to build physical replicas of the original objects. In addition, optical holograms can be used to make engineering, medical, dental, archaeological, and other recordings—for teaching, training, experimentation and documentation. Archaeologists, for example, use optical holograms to archive and investigate ancient artifacts [7,8]. Scientists can use hologram copies to perform their research without having access to the original artifacts or settling for inaccurate replicas. Optical holograms can store a massive amount of information on a thin holographic emulsion. This technology can record and reconstruct a 3D scene with almost no loss in quality. Natural color holographic silver halide emulsion with grain sizes of 8nm is today's state-of-the-art [14]. Today, computer graphics and raster displays offer a megapixel resolution and the interactive rendering of megabytes of data. Optical holograms, however, provide a terapixel resolution and are able to present an information content in the range of terabytes in real-time. Both are dimensions that will not be reached by computer graphics and conventional displays within the next years - even if Moore's law proves to hold in future.
Obviously, one has to make a decision between interactivity and quality when choosing a display technology for a particular application. While some applications require high visual realism and real-time presentation (that cannot be provided by computer graphics), others depend on user interaction (which is not possible with optical and digital holograms). Consequently, holography and computer graphics are being used as tools to solve individual research, engineering, and presentation problems within several domains. Up until today, however, these tools have been applied separately. The intention of the project which is summarized in this chapter is to combine both technologies to create a powerful tool for science, industry and education. This has been referred to as HoloGraphics. Several possibilities have been investigated that allow merging computer generated graphics and holograms [1]. The goal is to combine the advantages of conventional holograms (i.e. extremely high visual quality and realism, support for all depth queues and for multiple observers at no computational cost, space efficiency, etc.) with the advantages of today's computer graphics capabilities (i.e. interactivity, real-time rendering, simulation and animation, stereoscopic and autostereoscopic presentation, etc.). The results of these investigations are presented in this chapter.},
  subject   = {Erweiterte Realit{\"a}t},
  language  = {en}
}

@incollection{Bimber2006,
  author    = {Bimber, Oliver},
  title     = {Projector-Based Augmentation},
  series    = {Emerging Technologies of Augmented Reality: Interfaces \& Design},
  booktitle = {Emerging Technologies of Augmented Reality: Interfaces \& Design},
  doi       = {10.25643/bauhaus-universitaet.735},
  url       = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20111215-7353},
  year      = {2006},
  abstract  = {Projector-based augmentation approaches hold the potential of combining the advantages of well-establishes spatial virtual reality and spatial augmented reality.
Immersive, semi-immersive and augmented visualizations can be realized in everyday environments - without the need for special projection screens and dedicated display configurations. Limitations of mobile devices, such as low resolution and small field of view, focus constrains, and ergonomic issues can be overcome in many cases by the utilization of projection technology. Thus, applications that do not require mobility can benefit from efficient spatial augmentations. Examples range from edutainment in museums (such as storytelling projections onto natural stone walls in historical buildings) to architectural visualizations (such as augmentations of complex illumination simulations or modified surface materials in real building structures). This chapter describes projector-camera methods and multi-projector techniques that aim at correcting geometric aberrations, compensating local and global radiometric effects, and improving focus properties of images projected onto everyday surfaces.},
  subject   = {Erweiterte Realit{\"a}t},
  language  = {en}
}

@techreport{Bimber2008,
  author      = {Bimber, Oliver},
  title       = {Superimposing Dynamic Range},
  institution = {Bauhaus-Universit{\"a}t Weimar},
  doi         = {10.25643/bauhaus-universitaet.1379},
  url         = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20090303-14662},
  year        = {2008},
  abstract    = {Replacing a uniform illumination by a high-frequent illumination enhances the contrast of observed and captured images.
We modulate spatially and temporally multiplexed (projected) light with reflective or transmissive matter to achieve high dynamic range visualizations of radiological images on printed paper or ePaper, and to boost the optical contrast of images viewed or imaged with light microscopes.},
  subject     = {Bildverarbeitung},
  language    = {en}
}

@techreport{BimberIwai2008,
  author      = {Bimber, Oliver and Iwai, Daisuke},
  title       = {Superimposing Dynamic Range},
  institution = {Bauhaus-Universit{\"a}t Weimar},
  doi         = {10.25643/bauhaus-universitaet.1287},
  url         = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20080422-13585},
  year        = {2008},
  abstract    = {We present a simple and cost-efficient way of extending contrast, perceived tonal resolution, and the color space of static hardcopy images, beyond the capabilities of hardcopy devices or low-dynamic range displays alone. A calibrated projector-camera system is applied for automatic registration, scanning and superimposition of hardcopies. We explain how high-dynamic range content can be split for linear devices with different capabilities, how luminance quantization can be optimized with respect to the non-linear response of the human visual system as well as for the discrete nature of the applied modulation devices; and how inverse tone-mapping can be adapted in case only untreated hardcopies and softcopies (such as regular photographs) are available.
We believe that our approach has the potential to complement hardcopy-based technologies, such as X-ray prints for filmless imaging, in domains that operate with high quality static image content, like radiology and other medical fields, or astronomy.},
  subject     = {Bildverarbeitung},
  language    = {en}
}

@article{BimberIwai2009,
  author   = {Bimber, Oliver and Iwai, Daisuke},
  title    = {Superimposing Dynamic Range},
  series   = {Eurographics 2009},
  journal  = {Eurographics 2009},
  doi      = {10.25643/bauhaus-universitaet.1532},
  url      = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20120130-15325},
  year     = {2009},
  abstract = {Replacing a uniform illumination by a high-frequent illumination enhances the contrast of observed and captured images. We modulate spatially and temporally multiplexed (projected) light with reflective or transmissive matter to achieve high dynamic range visualizations of radiological images on printed paper or ePaper, and to boost the optical contrast of images viewed or imaged with light microscopes.},
  subject  = {CGI},
  language = {en},
  internal-note = {NOTE(review): Eurographics is a conference series, not a journal -- confirm venue and entry type (@inproceedings?)}
}

@techreport{GrosseBimber2008,
  author      = {Grosse, Max and Bimber, Oliver},
  title       = {Coded Aperture Projection},
  institution = {Bauhaus-Universit{\"a}t Weimar},
  doi         = {10.25643/bauhaus-universitaet.1234},
  url         = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20080227-13020},
  year        = {2008},
  abstract    = {In computer vision, optical defocus is often described as convolution with a filter kernel that corresponds to an image of the aperture being used by the imaging device. The degree of defocus correlates to the scale of the kernel. Convolving an image with the inverse aperture kernel will digitally sharpen the image and consequently compensate optical defocus. This is referred to as deconvolution or inverse filtering. In frequency domain, the reciprocal of the filter kernel is its inverse, and deconvolution reduces to a division. Low magnitudes in the Fourier transform of the aperture image, however, lead to intensity values in spatial domain that exceed the displayable range.
Therefore, the corresponding frequencies are not considered, which then results in visible ringing artifacts in the final projection. This is the main limitation of previous approaches, since in frequency domain the Gaussian PSF of spherical apertures does contain a large fraction of low Fourier magnitudes. Applying only small kernel scales will reduce the number of low Fourier magnitudes (and consequently the ringing artifacts) -- but will also lead only to minor focus improvements. To overcome this problem, we apply a coded aperture whose Fourier transform has less low magnitudes initially. Consequently, more frequencies are retained and more image details are reconstructed.},
  subject     = {Association for Computing Machinery / Special Interest Group on Graphics},
  language    = {en}
}

@techreport{GrundhoeferBimber2008,
  author      = {Grundh{\"o}fer, Anselm and Bimber, Oliver},
  title       = {Dynamic Bluescreens},
  institution = {Bauhaus-Universit{\"a}t Weimar},
  doi         = {10.25643/bauhaus-universitaet.1233},
  url         = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20080226-13016},
  year        = {2008},
  abstract    = {Blue screens and chroma keying technology are essential for digital video composition. Professional studios apply tracking technology to record the camera path for perspective augmentations of the original video footage. Although this technology is well established, it does not offer a great deal of flexibility. For shootings at non-studio sets, physical blue screens might have to be installed, or parts have to be recorded in a studio separately. We present a simple and flexible way of projecting corrected keying colors onto arbitrary diffuse surfaces using synchronized projectors and radiometric compensation. Thereby, the reflectance of the underlying real surface is neutralized. A temporal multiplexing between projection and flash illumination allows capturing the fully lit scene, while still being able to key the foreground objects.
In addition, we embed spatial codes into the projected key image to enable the tracking of the camera. Furthermore, the reconstruction of the scene geometry is implicitly supported.},
  subject     = {Association for Computing Machinery / Special Interest Group on Graphics},
  language    = {en}
}

@unpublished{GrundhoeferBimber2006,
  author   = {Grundh{\"o}fer, Anselm and Bimber, Oliver},
  title    = {Real-Time Adaptive Radiometric Compensation},
  doi      = {10.25643/bauhaus-universitaet.784},
  url      = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20111215-7848},
  year     = {2006},
  note     = {Unpublished manuscript},
  abstract = {Recent radiometric compensation techniques make it possible to project images onto colored and textured surfaces. This is realized with projector-camera systems by scanning the projection surface on a per-pixel basis. With the captured information, a compensation image is calculated that neutralizes geometric distortions and color blending caused by the underlying surface. As a result, the brightness and the contrast of the input image is reduced compared to a conventional projection onto a white canvas. If the input image is not manipulated in its intensities, the compensation image can contain values that are outside the dynamic range of the projector. They will lead to clipping errors and to visible artifacts on the surface. In this article, we present a novel algorithm that dynamically adjusts the content of the input images before radiometric compensation is carried out. This reduces the perceived visual artifacts while simultaneously preserving a maximum of luminance and contrast. The algorithm is implemented entirely on the GPU and is the first of its kind to run in real-time.},
  subject  = {Maschinelles Sehen},
  language = {en}
}

@article{GrundhoeferSeegerHaentschetal.2007,
  author       = {Grundh{\"o}fer, Anselm and Seeger, Manja and H{\"a}ntsch, Ferry and Bimber, Oliver},
  title        = {Coded Projection and Illumination for Television Studios},
  organization = {Bimber, Fak. M, BUW},
  doi          = {10.25643/bauhaus-universitaet.800},
  url          = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20111215-8005},
  year         = {2007},
  abstract     = {We propose the application of temporally and spatially coded projection and illumination in modern television studios. In our vision, this supports ad-hoc re-illumination, automatic keying, unconstrained presentation of moderation information, camera-tracking, and scene acquisition. In this paper we show how a new adaptive imperceptible pattern projection that considers parameters of human visual perception, linked with real-time difference keying enables an in-shot optical tracking using a novel dynamic multi-resolution marker technique},
  subject      = {Association for Computing Machinery / Special Interest Group on Graphics},
  language     = {en},
  internal-note = {NOTE(review): @article requires a journal field -- venue missing here; verify and add}
}

@techreport{GrundhoeferSeegerHaentschetal.2007a,
  author      = {Grundh{\"o}fer, Anselm and Seeger, Manja and H{\"a}ntsch, Ferry and Bimber, Oliver},
  title       = {Dynamic Adaptation of Projected Imperceptible Codes},
  institution = {Bauhaus-Universit{\"a}t Weimar},
  doi         = {10.25643/bauhaus-universitaet.816},
  url         = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20111215-8168},
  year        = {2007},
  abstract    = {In this paper we present a novel adaptive imperceptible pattern projection technique that considers parameters of human visual perception. A coded image that is invisible for human observers is temporally integrated into the projected image, but can be reconstructed by a synchronized camera. The embedded code is dynamically adjusted on the fly to guarantee its non-perceivability and to adapt it to the current camera pose. Linked with real-time flash keying, for instance, this enables in-shot optical tracking using a dynamic multi-resolution marker technique. A sample prototype is realized that demonstrates the application of our method in the context of augmentations in television studios.},
  subject     = {Association for Computing Machinery / Special Interest Group on Graphics},
  language    = {en}
}