@techreport{BrunsBimber2007,
  author   = {Bruns, Erich and Bimber, Oliver},
  title    = {Adaptive Training of Video Sets for Image Recognition on Mobile Phones},
  doi      = {10.25643/bauhaus-universitaet.822},
  url      = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20111215-8223},
  year     = {2007},
  abstract = {We present an enhancement towards adaptive video training for PhoneGuide, a digital museum guidance system for ordinary camera-equipped mobile phones. It enables museum visitors to identify exhibits by capturing photos of them. In this article, a combined solution of object recognition and pervasive tracking is extended to a client-server system for improving data acquisition and for supporting scale-invariant object recognition.},
  subject  = {Objektverfolgung},
  language = {en}
}

@techreport{BrunsBrombachBimber2007,
  author   = {Bruns, Erich and Brombach, Benjamin and Bimber, Oliver},
  title    = {Mobile Phone Enabled Museum Guidance with Adaptive Classification},
  doi      = {10.25643/bauhaus-universitaet.940},
  url      = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20111215-9406},
  year     = {2007},
  abstract = {Although audio guides are widely established in many museums, they suffer from several drawbacks compared to state-of-the-art multimedia technologies: First, they provide only audible information to museum visitors, while other forms of media presentation, such as reading text or video, could be beneficial for museum guidance tasks. Second, they are not very intuitive. Reference numbers have to be keyed in manually by the visitor before information about the exhibit is provided. These numbers are either displayed on visible tags located near the exhibited objects, or printed in brochures that have to be carried along. Third, offering mobile guidance equipment to visitors leads to acquisition and maintenance costs that have to be covered by the museum. With our project PhoneGuide we aim at solving these problems by enabling the use of conventional camera-equipped mobile phones for museum guidance purposes. The advantages are obvious: First, today's off-the-shelf mobile phones offer a rich palette of multimedia functionalities, ranging from audio (over speaker or headset) and video (graphics, images, movies) to simple tactile feedback (vibration). Second, integrated cameras, improvements in processor performance, and more memory space make it possible to support advanced computer vision algorithms. Instead of keying in reference numbers, objects can be recognized automatically by taking non-persistent photographs of them. This is more intuitive and saves museum curators from distributing and maintaining a large number of physical (visible or invisible) tags. Together with only a few sensor-equipped reference tags, computer vision based object recognition allows for the classification of single objects, whereas overlapping signal ranges of object-distinct active tags (such as RFID) would prevent the identification of individual objects that are grouped closely together. Third, since we assume that museum visitors will be able to use their own devices, the acquisition and maintenance costs for museum-owned devices decrease.},
  subject  = {Objektverfolgung},
  language = {en}
}

@techreport{BrunsBrombachZeidleretal.2005,
  author   = {Bruns, Erich and Brombach, Benjamin and Zeidler, Thomas and Bimber, Oliver},
  title    = {Enabling Mobile Phones To Support Large-Scale Museum Guidance},
  doi      = {10.25643/bauhaus-universitaet.677},
  url      = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20111215-6777},
  year     = {2005},
  abstract = {We present a museum guidance system called PhoneGuide that uses widespread camera-equipped mobile phones for on-device object recognition in combination with pervasive tracking. It provides additional location- and object-aware multimedia content to museum visitors, and is scalable to cover a large number of museum objects.},
  subject  = {Objektverfolgung},
  language = {en}
}

@techreport{ExnerBrunsKurzetal.2009,
  author       = {Exner, David and Bruns, Erich and Kurz, Daniel and Grundh{\"o}fer, Anselm and Bimber, Oliver},
  title        = {Fast and Reliable CAMShift Tracking},
  organization = {JP Augmented Reality, Bauhaus-Universit{\"a}t Weimar},
  doi          = {10.25643/bauhaus-universitaet.1410},
  url          = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20091217-14962},
  year         = {2009},
  abstract     = {CAMShift is a well-established and fundamental algorithm for kernel-based visual object tracking. While it performs well with objects that have a simple and constant appearance, it is not robust in more complex cases. Because it relies solely on back-projected probabilities, it can fail when the object's appearance changes (e.g. due to object or camera movement, or due to lighting changes), when similarly colored objects have to be re-detected, or when they cross their trajectories. We propose extensions to CAMShift that address and resolve all of these problems. They allow the accumulation of multiple histograms to model more complex object appearance and the continuous monitoring of object identities to handle ambiguous cases of partial or full occlusion. Most steps of our method are carried out on the GPU to achieve real-time tracking of multiple targets simultaneously. We explain efficient GPU implementations of histogram generation, probability back projection, image moment computation, and histogram intersection. All of these techniques make full use of a GPU's high parallelization.},
  subject      = {Bildverarbeitung},
  language     = {en}
}