@inproceedings{AbuAbedMilbradt, author = {Abu Abed, Wassim and Milbradt, Peter}, title = {UNDERSTANDING THE ASPECT OF FUZZINESS IN INTERPOLATION METHODS}, editor = {G{\"u}rlebeck, Klaus and K{\"o}nke, Carsten}, organization = {Bauhaus-Universit{\"a}t Weimar}, issn = {1611-4086}, doi = {10.25643/bauhaus-universitaet.2872}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170314-28726}, pages = {22}, abstract = {Fuzzy functions are suitable to deal with uncertainties and fuzziness in a closed form maintaining the informational content. This paper tries to understand, elaborate, and explain the problem of interpolating crisp and fuzzy data using continuous fuzzy valued functions. Two main issues are addressed here. The first covers how the fuzziness, induced by the reduction and deficit of information i.e. the discontinuity of the interpolated points, can be evaluated considering the used interpolation method and the density of the data. The second issue deals with the need to differentiate between impreciseness and hence fuzziness only in the interpolated quantity, impreciseness only in the location of the interpolated points and impreciseness in both the quantity and the location. In this paper, a brief background of the concept of fuzzy numbers and of fuzzy functions is presented. The numerical side of computing with fuzzy numbers is concisely demonstrated. The problem of fuzzy polynomial interpolation, the interpolation on meshes and mesh free fuzzy interpolation is investigated. The integration of the previously noted uncertainty into a coherent fuzzy valued function is discussed. Several sets of artificial and original measured data are used to examine the mentioned fuzzy interpolations.}, subject = {Angewandte Informatik}, language = {en} } @inproceedings{AhmedElSayedRashwanKamal, author = {Ahmed El-Sayed, Ahmed and Rashwan, R. A. and Kamal, A.}, title = {HADAMARD GAPS IN WEIGHTED LOGARITHMIC BLOCH SPACE}, editor = {G{\"u}rlebeck, Klaus and K{\"o}nke, Carsten}, organization = {Bauhaus-Universit{\"a}t Weimar}, issn = {1611-4086}, doi = {10.25643/bauhaus-universitaet.2827}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170314-28275}, pages = {20}, abstract = {We give a sufficient and a necessary condition for an analytic function "f" on the unit disk "D" with Hadamard gap to belong to a class of weighted logarithmic Bloch space as well as to the corresponding little weighted logarithmic Bloch space under some conditions posed on the defined weight function. Also, we study the relations between the class of weighted logarithmic Bloch functions and some other classes of analytic functions by the help of analytic functions in the Hadamard gap class.}, subject = {Angewandte Informatik}, language = {en} } @inproceedings{AibaMaegaitoSuzuki, author = {Aiba, Yoshihisa and Maegaito, Kentaro and Suzuki, Osamu}, title = {Iteration dynamical systems of discrete Laplacians on the plane lattice(I) (Basic properties and computer simulations of the dynamical systems)}, editor = {G{\"u}rlebeck, Klaus and K{\"o}nke, Carsten}, organization = {Bauhaus-Universit{\"a}t Weimar}, doi = {10.25643/bauhaus-universitaet.2917}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170327-29178}, pages = {3}, abstract = {In this study we introduce a concept of discrete Laplacian on the plane lattice and consider its iteration dynamical system. At first we discuss some basic properties on the dynamical system to be proved. 
Next, by means of computer simulations, we show that the following phenomena can be reproduced quite well: (1) the crystals of water, (2) the designs of carpets and embroideries, (3) the change over time in the numbers of families of extinct animals, and (4) the ecosystems of living things. Hence we may expect that evolution and self-organization can be understood by use of these dynamical systems. Here we want to stress the following fact: although several well-known chaotic dynamical systems can describe chaotic phenomena, they have difficulties in describing evolution and self-organization.}, subject = {Architektur }, language = {en} } @inproceedings{AlYasiriGuerlebeck, author = {Al-Yasiri, Zainab and G{\"u}rlebeck, Klaus}, title = {ON BOUNDARY VALUE PROBLEMS FOR P-LAPLACE AND P-DIRAC EQUATIONS}, series = {Digital Proceedings, International Conference on the Applications of Computer Science and Mathematics in Architecture and Civil Engineering : July 20 - 22 2015, Bauhaus-University Weimar}, booktitle = {Digital Proceedings, International Conference on the Applications of Computer Science and Mathematics in Architecture and Civil Engineering : July 20 - 22 2015, Bauhaus-University Weimar}, editor = {G{\"u}rlebeck, Klaus and Lahmer, Tom}, organization = {Bauhaus-Universit{\"a}t Weimar}, issn = {1611-4086}, doi = {10.25643/bauhaus-universitaet.2792}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170314-27928}, pages = {8}, abstract = {The p-Laplace equation is a nonlinear generalization of the Laplace equation. This generalization is often used as a model problem for special types of nonlinearities. The p-Laplace equation can be seen as a bridge between very general nonlinear equations and the linear Laplace equation. The aim of this paper is to solve the p-Laplace equation for 2 < p < 3 and to find strong solutions. The idea is to apply a hypercomplex integral operator and spatial function theoretic methods to transform the p-Laplace equation into the p-Dirac equation. This equation will be solved iteratively by using a fixed point theorem.}, subject = {Angewandte Informatik}, language = {en} } @inproceedings{BaitschHartmann, author = {Baitsch, Matthias and Hartmann, Dietrich}, title = {A FRAMEWORK FOR THE INTERACTIVE VISUALIZATION OF ENGINEERING MODELS}, editor = {G{\"u}rlebeck, Klaus and K{\"o}nke, Carsten}, organization = {Bauhaus-Universit{\"a}t Weimar}, doi = {10.25643/bauhaus-universitaet.2919}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170327-29194}, pages = {9}, abstract = {Interactive visualization based on 3D computer graphics nowadays is an indispensable part of any simulation software used in engineering. Nevertheless, the implementation of such visualization software components is often avoided in research projects because it is a challenging and potentially time-consuming task. In this contribution, a novel Java framework for the interactive visualization of engineering models is introduced. It supports the task of implementing engineering visualization software by providing adequate program logic as well as high-level classes for the visual representation of entities typical for engineering models. The presented framework is built on top of the open source visualization toolkit VTK. In VTK, a visualization model is established by connecting several filter objects in a so-called visualization pipeline. Although designing and implementing a good pipeline layout is demanding, VTK does not support the reuse of pipeline layouts directly. 
Our framework tailors VTK to engineering applications on two levels. On the first level, it adds new, engineering-model-specific filter classes to VTK. On the second level, ready-made pipeline layouts for certain aspects of engineering models are provided. For instance, there is a pipeline class for one-dimensional elements like trusses and beams that is capable of showing the elements along with deformations and member forces. In order to facilitate the implementation of a graphical user interface (GUI) for each pipeline class, there exists a reusable Java Swing GUI component that allows the user to configure the appearance of the visualization model. Because of the flexible structure, the framework can be easily adapted and extended to new problem domains. Currently it is used in (i) an object-oriented p-version finite element code for design optimization, (ii) an agent-based monitoring system for dam structures and (iii) the simulation of destruction processes by controlled explosives based on multibody dynamics. Application examples from all three domains illustrate that the approach presented is powerful as well as versatile.}, subject = {Architektur }, language = {en} } @inproceedings{BartelsZimmermann, author = {Bartels, Jan-Hendrik and Zimmermann, J{\"u}rgen}, title = {MINIMIZING THE TOTAL DISCOUNTED COST OF DISMANTLING A NUCLEAR POWER PLANT}, editor = {G{\"u}rlebeck, Klaus and K{\"o}nke, Carsten}, organization = {Bauhaus-Universit{\"a}t Weimar}, doi = {10.25643/bauhaus-universitaet.2920}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170327-29200}, pages = {9}, abstract = {For economic, technical or political reasons, about 100 nuclear power plants all over the world have been shut down to date. All these power stations are still awaiting their complete dismantling, which, for a single reactor, costs up to one billion euros and takes up to 15 years. In our contribution we present a resource-constrained project scheduling approach minimizing the total discounted cost of dismantling a nuclear power plant. A project of dismantling a nuclear power plant can be subdivided into a number of disassembling activities. The execution of these activities requires time and scarce resources like manpower, special equipment or storage facilities for the contaminated material arising from the dismantling. Moreover, we have to take into account several minimum and maximum time lags (temporal constraints) between the start times of the different activities. Finally, each disassembling activity can be processed in two alternative execution modes, which lead to different disbursements and determine the resource requirements of the considered activity. The optimization problem is to determine a start time and an execution mode for each activity, such that the discounted cost of the project is minimum, and neither the temporal constraints are violated nor the activities' resource requirements exceed the availability of any scarce resource at any point in time. In our contribution we introduce an appropriate multi-mode project scheduling model with minimum and maximum time lags as well as renewable and cumulative resources for the described optimization problem. Furthermore, we show that the considered optimization problem is NP-hard in the strong sense. For small problem instances, optimal solutions can be gained from a relaxation-based enumeration approach which is incorporated into a branch-and-bound algorithm. 
In order to be able to solve large problem instances, we also propose a truncated version of the devised branch-and-bound algorithm.}, subject = {Architektur }, language = {en} } @inproceedings{BauerDudekRichter, author = {Bauer, Marek and Dudek, Mariusz and Richter, Matthias}, title = {RELIABILITY OF TRAM - NETWORK SECTION}, editor = {G{\"u}rlebeck, Klaus and K{\"o}nke, Carsten}, organization = {Bauhaus-Universit{\"a}t Weimar}, issn = {1611-4086}, doi = {10.25643/bauhaus-universitaet.2828}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170314-28281}, pages = {16}, abstract = {We investigate aspects of tram-network section reliability, which forms part of a reliability model for the whole city tram network. Here, one of the main points of interest is the character of the chronological development of the disturbances (namely the differences between the scheduled and the actual departure times) on subsequent sections during tram line operation. These developments were observed in comprehensive measurements carried out in Krakow during the rebuilding of one of the main transportation nodes (Rondo Mogilskie). All these construction activities cause large disturbances in tram line operation, with effects extending to neighboring sections. In a second part, the stochastic character of the section running time will be analyzed in more detail. We take into consideration sections with only one beginning stop as well as sections with two or three beginning stops located on different streets at an intersection. The possibility of combining results from sections with two beginning stops into one set will be checked with suitable statistical tests for comparing the means of two samples. The section running time may depend on the gap between two consecutive trams and on the deviation from the schedule. This dependence will be described by a multiple regression formula. The main measurements were done in the city center of Krakow in two stages: before and after big changes in the tramway infrastructure.}, subject = {Angewandte Informatik}, language = {en} } @inproceedings{BauerKandlerWeiss, author = {Bauer, Marek and Kandler, A. and Weiß, Hendrik}, title = {MODEL OF TRAM LINE OPERATION}, editor = {G{\"u}rlebeck, Klaus and K{\"o}nke, Carsten}, organization = {Bauhaus-Universit{\"a}t Weimar}, doi = {10.25643/bauhaus-universitaet.2921}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170327-29217}, pages = {11}, abstract = {From the passenger's perspective, punctuality is one of the most important features of tram operations. Unfortunately, in most cases this requirement is only insufficiently fulfilled. In this paper we present a simulation model for tram operation with special focus on punctuality. The aim is to get a helpful tool for designing time-tables and for analyzing the effects of changing the priorities for trams at traffic lights or the kind of track separation. A realization of tram operation is assumed to be a sequence of running times between successive stops and times spent by the tram at the stops. In this paper the running time is modeled by the sum of its mean value and a zero-mean random variable. With the help of multiple regression we find that the average running time is a function depending on the length of the sections and the number of intersections. The random component is modeled by a sum of two independent zero-mean random variables. 
One of these variables describes the disturbance caused by the process of waiting at an intersection and the other the disturbance caused by the process of driving. The time spent at a stop is assumed to be a random variable, too. Its distribution is estimated from given measurements of these stop times for different tram lines in Krak{\´o}w. Finally, a special case of the introduced model is considered and numerical results are presented. This paper is associated with the CIVITAS-CARAVEL project "Clean and better transport in cities". The project has received research funding from the Community's Sixth Framework Programme. The paper reflects only the author's views and the Community is not liable for any use that may be made of the information contained therein.}, subject = {Architektur }, language = {en} } @inproceedings{BauerRichter, author = {Bauer, Marek and Richter, Matthias}, title = {STATISTICAL ANALYSIS OF TIME LOST BY TRAMS BEFORE DEPARTURE FROM STOPS}, editor = {G{\"u}rlebeck, Klaus and K{\"o}nke, Carsten}, organization = {Bauhaus-Universit{\"a}t Weimar}, doi = {10.25643/bauhaus-universitaet.2922}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170327-29226}, pages = {18}, abstract = {The ride of a tram along its line, defined by a time-table, consists of the travel times on the subsequent sections and the times spent by the tram at the stops. In the paper, statistical data collected in the city of Krakow is presented and evaluated. Under Polish conditions, the time spent by trams at stops makes up a remarkable 30 \% of the total time of tram line operation. Moreover, this time is characterized by large variability. The time spent by a tram at a stop consists of the alighting and boarding time and the time lost at the stop after alighting and boarding have ended but before departure. The alighting and boarding time itself usually depends on the random number of alighting and boarding passengers and also on the number of passengers inside the vehicle. However, the time spent at the stop after alighting and boarding have ended results from certain random events, mainly the impossibility of departing from the stop caused by the lack of priorities for public transport vehicles. The main focus of the talk lies on the description and the modelling of these effects. This paper is associated with the CIVITAS-CARAVEL project "Clean and better transport in cities". The project has received research funding from the Community's Sixth Framework Programme. The paper reflects only the author's views and the Community is not liable for any use that may be made of the information contained therein.}, subject = {Architektur }, language = {en} } @inproceedings{BauerRichterWeiss, author = {Bauer, Marek and Richter, Matthias and Weiß, Hendrik}, title = {SIMULATION MODEL OF TRAM ROUTE OPERATION}, editor = {G{\"u}rlebeck, Klaus and K{\"o}nke, Carsten}, organization = {Bauhaus-Universit{\"a}t Weimar}, issn = {1611-4086}, doi = {10.25643/bauhaus-universitaet.2829}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170314-28295}, pages = {19}, abstract = {From the passenger's perspective, punctuality is one of the most important features of tram route operation. We present a stochastic simulation model with special focus on determining important factors of influence. The statistical analysis is based on large samples (sample size is nearly 2000) accumulated from comprehensive measurements on eight tram routes in Cracow. 
For the simulation, we are not only interested in average values but also in stochastic characteristics like the variance and other properties of the distribution. A realization of tram operation is assumed to be a sequence of running times between successive stops and times spent by the tram at the stops, divided into passenger alighting and boarding times and times spent waiting for the possibility of departure. The running time depends on the kind of track separation, including the priorities at traffic lights, the length of the section and the number of intersections. For every type of section, a linear mixed regression model describes the average running time and its variance as functions of the length of the section and the number of intersections. The regression coefficients are estimated by the iteratively re-weighted least squares method. The alighting and boarding time mainly depends on the type of vehicle, the number of passengers alighting and boarding, and the occupancy of the vehicle. For the distribution of the time spent waiting for the possibility of departure, suitable distributions like the Gamma distribution and the Lognormal distribution are fitted.}, subject = {Angewandte Informatik}, language = {en} } @inproceedings{BeranDlask, author = {Beran, V{\´a}clav and Dlask, Petr}, title = {CONSTRUCTION SPEED AND CASH FLOW OPTIMISATION}, editor = {G{\"u}rlebeck, Klaus and K{\"o}nke, Carsten}, organization = {Bauhaus-Universit{\"a}t Weimar}, doi = {10.25643/bauhaus-universitaet.2926}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170327-29269}, pages = {10}, abstract = {Practical examples show that the cost flow can be improved and the total amount of money spent on construction and further use can be cut significantly. The calculation is based on spreadsheets, which are nowadays very easy to develop on most PCs. Construction works are a field where the evaluation of cash flow can and should be applied. Decisions about cash flow in construction are decisions with long-term impact and long-term memory. Mistakes from the distant past have a massive impact on situations in the present and into the far economic future of economic activities. Two approaches exist: the Just-in-Time (JIT) approach and the life cycle costs (LCC) approach. The calculation example shows the dynamic results for the production speed as opposed to a stable flow of production over the duration of the activities. More sophisticated rescheduling towards an optimal solution might bring extra profit in return. The technologies and organizational processes for industrial buildings, railway and road reconstruction, public utilities and housing developments contain assembly procedures that are very appropriate for the given purpose; complicated research, development and innovation projects are likewise well suited to these kinds of applications. Large investments and all publicly invested money may be spent more efficiently if an optimisation speed-strategy can be calculated.}, subject = {Architektur }, language = {en} } @inproceedings{BeranHromada, author = {Beran, V{\´a}clav and Hromada, E.}, title = {SOFTWARE FOR PROJECT RELIABILITY ESTIMATION AND RISK EVALUATION}, editor = {G{\"u}rlebeck, Klaus and K{\"o}nke, Carsten}, organization = {Bauhaus-Universit{\"a}t Weimar}, doi = {10.25643/bauhaus-universitaet.2925}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170327-29255}, pages = {16}, abstract = {The contribution presents a model that is able to simulate construction duration and cost for a building project. 
This model predicts a set of expected project costs and a duration schedule depending on input parameters such as production speed, scope of work, time schedule, bonding conditions and maximum and minimum deviations from the scope of work and production speed. The simulation model is able to calculate, on the basis of an input level of probability, the adequate construction cost and time duration of a project. The reciprocal view aims at finding the adequate level of probability for the construction cost and activity durations. Among the interpretive outputs of the application software is the compilation of a presumed dynamic progress chart. This progress chart represents the expected scenario of the development of a building project, mapping potential time dislocations for particular activities. The calculation of a presumed dynamic progress chart is based on an algorithm which calculates mean values as a partial result of the simulated building project. Construction cost and time models are, in many ways, useful tools in project management. Clients are able to make proper decisions about the time and cost schedules of their investments. Consequently, building contractors are able to schedule predicted project cost and duration before any decision is finalized.}, subject = {Architektur }, language = {en} } @inproceedings{BertholdMilbradt, author = {Berthold, Tim and Milbradt, Peter}, title = {ARTIFICIAL NEURONAL NETWORKS IN ENVIRONMENTAL ENGINEERING: THEORY AND APPLICATIONS}, editor = {G{\"u}rlebeck, Klaus and K{\"o}nke, Carsten}, organization = {Bauhaus-Universit{\"a}t Weimar}, issn = {1611-4086}, doi = {10.25643/bauhaus-universitaet.2830}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170314-28304}, pages = {14}, abstract = {Models in the context of engineering can be classified into process-based and data-based models. Whereas a process-based model describes the problem by an explicit formulation, a data-based model is often used where no such mapping can be found due to the high complexity of the problem. An Artificial Neuronal Network (ANN) is a data-based model which is able to "learn" a mapping from a set of training patterns. This paper deals with the application of ANN in time-dependent bathymetric models. A bathymetric model is a geometric representation of the sea bed. Typically, a bathymetry is measured and afterwards described by a finite set of measured data. Measuring at different time steps leads to a time-dependent bathymetric model. To obtain a continuous surface, the measured data has to be interpolated by some interpolation method. Unlike the explicitly given interpolation methods, the presented time-dependent bathymetric model using an ANN trains the approximated surface in space and time in an implicit way. The ANN is trained with measured topographic data, which consist of the location (x,y) and the time t. In other words, the ANN is trained to reproduce the mapping h = f(x,y,t); afterwards it is able to approximate the topographic height for a given location and date. In a further step, this model is extended to take meteorological parameters into account. 
This leads to a model of more predictive character.}, subject = {Angewandte Informatik}, language = {en} } @inproceedings{Bilchuk, author = {Bilchuk, Irina}, title = {GEOMETRIC IDENTIFICATION OF OBJECTS IN CIVIL ENGINEERING APPLICATIONS}, editor = {G{\"u}rlebeck, Klaus and K{\"o}nke, Carsten}, organization = {Bauhaus-Universit{\"a}t Weimar}, doi = {10.25643/bauhaus-universitaet.2927}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170327-29274}, pages = {21}, abstract = {Objects for civil engineering applications can be identified by their reference in memory, their alphanumeric name or their geometric location. Particularly in graphic user interfaces, it is common to identify objects geometrically by selection with the mouse. As the number of geometric objects in a graphic user interface grows, it becomes increasingly important to treat the basic operations add, search and remove for geometric objects with great efficiency. Guttman has proposed the Region-Tree (R-tree) for geometric identification in an environment which uses pages on disc as data structure. Minimal bounding rectangles are used to structure the data in such a way that neighborhood relations can be described effectively. The literature shows that the parameters which influence the efficiency of R-trees have been studied extensively, but without conclusive results. The goal of the research which is reported in this paper is to determine reliably the parameters which significantly influence the efficiency of R-trees for geometric identification in technical drawings. In order to make this investigation conclusive, it must be performed with the best available software technology. Therefore, object-oriented software for the method is developed. This implementation is tested with technical drawings containing many thousands of geometric objects. These drawings are created automatically by a stochastic generator which is incorporated into a test bed consisting of an editor and a visualizer. This test bed is used to obtain statistics for the main factors which affect the efficiency of R-trees. The investigation shows that the following main factors which affect the efficiency can be identified reliably: the number of geometric objects on the drawing, the minimum and maximum number of children of a node of the tree, and the maximum width and height of the minimal bounding rectangles of the geometric objects relative to the size of the drawing.}, subject = {Architektur }, language = {en} } @inproceedings{BrackxDeKnockDeSchepper, author = {Brackx, Fred and De Knock, B. and De Schepper, Hennie}, title = {A MULTI--DIMENSIONAL HILBERT TRANSFORM IN ANISOTROPIC CLIFFORD ANALYSIS}, editor = {G{\"u}rlebeck, Klaus and K{\"o}nke, Carsten}, organization = {Bauhaus-Universit{\"a}t Weimar}, doi = {10.25643/bauhaus-universitaet.2929}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170327-29297}, pages = {15}, abstract = {In earlier research, generalized multidimensional Hilbert transforms have been constructed in m-dimensional Euclidean space, in the framework of Clifford analysis. Clifford analysis, centred around the notion of monogenic functions, may be regarded as a direct and elegant generalization to higher dimension of the theory of the holomorphic functions in the complex plane. The considered Hilbert transforms, usually obtained as a part of the boundary value of an associated Cauchy transform in m+1 dimensions, might be characterized as isotropic, since the metric in the underlying space is the standard Euclidean one. 
In this paper we adopt the idea of a so-called anisotropic Clifford setting, which leads to the introduction of a metric dependent m-dimensional Hilbert transform, showing, at least formally, the same properties as the isotropic one. The Hilbert transform being an important tool in signal analysis, this metric dependent setting has the advantage of allowing the adjustment of the co-ordinate system to possible preferential directions in the signals to be analyzed. A striking result to be mentioned is that the associated anisotropic (m+1)-dimensional Cauchy transform is no longer uniquely determined, but may stem from a diversity of (m+1)-dimensional "mother" metrics.}, subject = {Architektur }, language = {en} } @inproceedings{BrackxDeSchepperDeSchepperetal., author = {Brackx, Fred and De Schepper, Hennie and De Schepper, Nele and Sommen, Frank}, title = {HERMITIAN CLIFFORD-HERMITE WAVELETS}, editor = {G{\"u}rlebeck, Klaus and K{\"o}nke, Carsten}, organization = {Bauhaus-Universit{\"a}t Weimar}, doi = {10.25643/bauhaus-universitaet.2931}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170327-29313}, pages = {13}, abstract = {The one-dimensional continuous wavelet transform is a successful tool for signal and image analysis, with applications in physics and engineering. Clifford analysis offers an appropriate framework for taking wavelets to higher dimension. In the usual orthogonal case Clifford analysis focusses on monogenic functions, i.e. null solutions of the rotation invariant vector valued Dirac operator ∂, defined in terms of an orthogonal basis for the quadratic space Rm underlying the construction of the Clifford algebra R0,m. An intrinsic feature of this function theory is that it encompasses all dimensions at once, as opposed to a tensorial approach with products of one-dimensional phenomena. This has allowed for a very specific construction of higher dimensional wavelets and the development of the corresponding theory, based on generalizations of classical orthogonal polynomials on the real line, such as the radial Clifford-Hermite polynomials introduced by Sommen. In this paper, we pass to the Hermitian Clifford setting, i.e. we let the same set of generators produce the complex Clifford algebra C2n (with even dimension), which we equip with a Hermitian conjugation and a Hermitian inner product. Hermitian Clifford analysis then focusses on the null solutions of two mutually conjugate Hermitian Dirac operators which are invariant under the action of the unitary group. In this setting we construct new Clifford-Hermite polynomials, starting in a natural way from a Rodrigues formula which now involves both Dirac operators mentioned. Due to the specific features of the Hermitian setting, four different types of polynomials are obtained, two types of even degree and two types of odd degree. 
These polynomials are used to introduce a new continuous wavelet transform, after thorough investigation of all necessary properties of the involved polynomials, the mother wavelet and the associated family of wavelet kernels.}, subject = {Architektur }, language = {en} } @inproceedings{BrackxDeSchepperLunaElizararrasetal., author = {Brackx, Fred and De Schepper, Hennie and Luna-Elizararras, Maria Elena and Shapiro, Michael}, title = {INTEGRAL REPRESENTATIONS IN HERMITEAN CLIFFORD ANALYSIS}, editor = {G{\"u}rlebeck, Klaus and K{\"o}nke, Carsten}, organization = {Bauhaus-Universit{\"a}t Weimar}, issn = {1611-4086}, doi = {10.25643/bauhaus-universitaet.2832}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170314-28326}, pages = {13}, abstract = {Euclidean Clifford analysis is a higher dimensional function theory offering a refinement of classical harmonic analysis. The theory is centered around the concept of monogenic functions, i.e. null solutions of a first order vector valued rotation invariant differential operator called the Dirac operator, which factorizes the Laplacian. More recently, Hermitean Clifford analysis has emerged as a new and successful branch of Clifford analysis, offering yet a refinement of the Euclidean case; it focusses on the simultaneous null solutions, called Hermitean (or h-) monogenic functions, of two Hermitean Dirac operators which are invariant under the action of the unitary group. In Euclidean Clifford analysis, the Clifford-Cauchy integral formula has proven to be a corner stone of the function theory, as is the case for the traditional Cauchy formula for holomorphic functions in the complex plane. Previously, a Hermitean Clifford-Cauchy integral formula has been established by means of a matrix approach. This formula reduces to the traditional Martinelli-Bochner formula for holomorphic functions of several complex variables when taking functions with values in an appropriate part of complex spinor space. This means that the theory of Hermitean monogenic functions should encompass also other results of several variable complex analysis as special cases. At present we will elaborate further on the obtained results and refine them, considering fundamental solutions, Borel-Pompeiu representations and the Teodorescu inversion, each of them being developed at different levels, including the global level, handling vector variables, vector differential operators and the Clifford geometric product, as well as the blade level, where variables and differential operators act by means of the dot and wedge products. A rich world of results reveals itself, indeed including well-known formulae from the theory of several complex variables.}, subject = {Angewandte Informatik}, language = {en} } @inproceedings{BrackxDeSchepperSommen, author = {Brackx, Fred and De Schepper, Nele and Sommen, Frank}, title = {Clifford-Hermite and Two-Dimensional Clifford-Gabor Filters For Early Vision}, editor = {G{\"u}rlebeck, Klaus and K{\"o}nke, Carsten}, organization = {Bauhaus-Universit{\"a}t Weimar}, doi = {10.25643/bauhaus-universitaet.2930}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170327-29303}, pages = {22}, abstract = {Image processing has been much inspired by human vision, in particular with regard to early vision. 
The latter refers to the earliest stage of visual processing responsible for the measurement of local structures such as points, lines, edges and textures in order to facilitate subsequent interpretation of these structures in higher stages (known as high level vision) of the human visual system. This low level visual computation is carried out by cells of the primary visual cortex. The receptive field profiles of these cells can be interpreted as the impulse responses of the cells, which are then considered as filters. According to the Gaussian derivative theory, the receptive field profiles of the human visual system can be approximated quite well by derivatives of Gaussians. Two mathematical models suggested for these receptive field profiles are on the one hand the Gabor model and on the other hand the Hermite model which is based on analysis filters of the Hermite transform. The Hermite filters are derivatives of Gaussians, while Gabor filters, which are defined as harmonic modulations of Gaussians, provide a good approximation to these derivatives. It is important to note that, even if the Gabor model is more widely used than the Hermite model, the latter offers some advantages like being an orthogonal basis and having better match to experimental physiological data. In our earlier research both filter models, Gabor and Hermite, have been developed in the framework of Clifford analysis. Clifford analysis offers a direct, elegant and powerful generalization to higher dimension of the theory of holomorphic functions in the complex plane. In this paper we expose the construction of the Hermite and Gabor filters, both in the classical and in the Clifford analysis framework. We also generalize the concept of complex Gaussian derivative filters to the Clifford analysis setting. Moreover, we present further properties of the Clifford-Gabor filters, such as their relationship with other types of Gabor filters and their localization in the spatial and in the frequency domain formalized by the uncertainty principle.}, subject = {Architektur }, language = {en} } @inproceedings{BultheelJansenMaesetal., author = {Bultheel, Adhemar and Jansen, M. and Maes, J. and Van Aerschot, W. and Vanraes, E.}, title = {SUBDIVIDE AND CONQUER RESOLUTION}, editor = {G{\"u}rlebeck, Klaus and K{\"o}nke, Carsten}, organization = {Bauhaus-Universit{\"a}t Weimar}, doi = {10.25643/bauhaus-universitaet.2909}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170327-29091}, pages = {47}, abstract = {This contribution will be freewheeling in the domain of signal, image and surface processing and touch briefly upon some topics that have been close to the heart of people in our research group. A lot of the research of the last 20 years in this domain that has been carried out world wide is dealing with multiresolution. Multiresolution allows to represent a function (in the broadest sense) at different levels of detail. This was not only applied in signals and images but also when solving all kinds of complex numerical problems. Since wavelets came into play in the 1980's, this idea was applied and generalized by many researchers. Therefore we use this as the central idea throughout this text. Wavelets, subdivision and hierarchical bases are the appropriate tools to obtain these multiresolution effects. We shall introduce some of the concepts in a rather informal way and show that the same concepts will work in one, two and three dimensions. 
The applications in the three cases are however quite different, and thus one wants to achieve very different goals when dealing with signals, images or surfaces. Because completeness in our treatment is impossible, we have chosen to describe two case studies after introducing some concepts in signal processing. These case studies are still the subject of current research. The first one attempts to solve a problem in image processing: how to approximate an edge in an image efficiently by subdivision. The method is based on normal offsets. The second case is the use of Powell-Sabin splines to give a smooth multiresolution representation of a surface. In this context we also illustrate the general method of construction of a spline wavelet basis using a lifting scheme.}, subject = {Architektur }, language = {en} } @inproceedings{CacaoConstalesKrausshar, author = {Cacao, Isabel and Constales, Denis and Kraußhar, Rolf S{\"o}ren}, title = {A UNIFIED APPROACH FOR THE TREATMENT OF SOME HIGHER DIMENSIONAL DIRAC TYPE EQUATIONS ON SPHERES}, editor = {G{\"u}rlebeck, Klaus and K{\"o}nke, Carsten}, organization = {Bauhaus-Universit{\"a}t Weimar}, issn = {1611-4086}, doi = {10.25643/bauhaus-universitaet.2834}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170314-28343}, pages = {8}, abstract = {Using Clifford analysis methods, we provide a unified approach to obtain explicit solutions of some partial differential equations combining the n-dimensional Dirac and Euler operators, including generalizations of the classical time-harmonic Maxwell equations. The obtained regular solutions show strong connections between hypergeometric functions and homogeneous polynomials in the kernel of the Dirac operator.}, subject = {Angewandte Informatik}, language = {en} } @inproceedings{CacaoConstalesKrausshar, author = {Cacao, Isabel and Constales, Denis and Kraußhar, Rolf S{\"o}ren}, title = {BESSEL FUNCTIONS AND HIGHER DIMENSIONAL DIRAC TYPE EQUATIONS}, editor = {G{\"u}rlebeck, Klaus and K{\"o}nke, Carsten}, organization = {Bauhaus-Universit{\"a}t Weimar}, doi = {10.25643/bauhaus-universitaet.2936}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170327-29366}, pages = {8}, abstract = {In this paper we study the structure of the solutions to higher dimensional Dirac type equations generalizing the known λ-hyperholomorphic functions, where λ is a complex parameter. The structure of the solutions to the system of partial differential equations (D- λ) f=0 show a close connection with Bessel functions of first kind with complex argument. The more general system of partial differential equations that is considered in this paper combines Dirac and Euler operators and emphasizes the role of the Bessel functions. However, contrary to the simplest case, one gets now Bessel functions of any arbitrary complex order.}, subject = {Architektur }, language = {en} } @inproceedings{CastilloPerez, author = {Castillo-P{\´e}rez, Ra{\´u}l}, title = {AN APPLICATION OF FORMAL POWER SERIES FOR THE DEVELOPMENT OF OPTICAL FILTERS}, editor = {G{\"u}rlebeck, Klaus and K{\"o}nke, Carsten}, organization = {Bauhaus-Universit{\"a}t Weimar}, issn = {1611-4086}, doi = {10.25643/bauhaus-universitaet.2835}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170314-28354}, pages = {8}, abstract = {The application of a recent method using formal power series is proposed. It is based on a new representation for solutions of Sturm-Liouville equations. 
This method is used to calculate the transmittance and reflectance coefficients of finite inhomogeneous layers with high accuracy and efficiency. By tailoring the refractive index profile defining the inhomogeneous media, it is possible to develop very important applications such as optical filters. A number of profiles were evaluated, and some of them were then selected in order to improve their characteristics via the modification of their profiles.}, subject = {Angewandte Informatik}, language = {en} } @inproceedings{CastilloPerezCedilloDiazKravchenkoetal., author = {Castillo-P{\´e}rez, Ra{\´u}l and Cedillo-D{\´i}az, A. del C. and Kravchenko, Vladislav and Oviedo-Galdeano, H.}, title = {COMPUTATION OF THE REFLECTANCE AND TRANSMITTANCE FOR AN INHOMOGENEOUS LAYERED MEDIUM WITH TURNING POINTS USING THE WKB AND SPPS METHODS}, series = {Digital Proceedings, International Conference on the Applications of Computer Science and Mathematics in Architecture and Civil Engineering : July 04 - 06 2012, Bauhaus-University Weimar}, booktitle = {Digital Proceedings, International Conference on the Applications of Computer Science and Mathematics in Architecture and Civil Engineering : July 04 - 06 2012, Bauhaus-University Weimar}, editor = {G{\"u}rlebeck, Klaus and Lahmer, Tom and Werner, Frank}, organization = {Bauhaus-Universit{\"a}t Weimar}, issn = {1611-4086}, doi = {10.25643/bauhaus-universitaet.2759}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170306-27598}, pages = {16}, abstract = {Electromagnetic wave propagation is currently present in the vast majority of situations which occur in everyday life, whether in mobile communications, DTV, satellite tracking, broadcasting, etc. Because of this, the study of increasingly complex means of propagation of electromagnetic waves has become necessary in order to optimize resources and increase the capabilities of the devices as required by the growing demand for such services. Within electromagnetic wave propagation, different parameters are considered that characterize it under various circumstances; of particular importance are the reflectance and transmittance. There are several methods for the analysis of the reflectance and transmittance, such as the method of approximation by boundary condition, the plane wave expansion method (PWE), etc., but this work focuses on the WKB and SPPS methods. The implementation of the WKB method is relatively simple but is found to be relatively efficient only when working at high frequencies. The SPPS method (Spectral Parameter Power Series), based on the theory of pseudoanalytic functions, is used to solve this problem through a new representation for solutions of Sturm-Liouville equations and has recently proven to be a powerful tool to solve different boundary value and eigenvalue problems. Moreover, it has a very suitable structure for numerical implementation, which in this case took place in the Matlab software for the evaluation of both conventional and turning point profiles. 
The comparison between the two methods allows us to obtain valuable information about their performance, which is useful for determining the validity and propriety of their application for solving problems where these parameters are calculated in real-life applications.}, subject = {Angewandte Informatik}, language = {en} } @inproceedings{ChangChang, author = {Chang, Wei-Tsang and Chang, Teng-Wen}, title = {TIME-BASED FORM TRANSFORMATION WITH FOLDING SPACE}, editor = {G{\"u}rlebeck, Klaus and K{\"o}nke, Carsten}, organization = {Bauhaus-Universit{\"a}t Weimar}, doi = {10.25643/bauhaus-universitaet.2937}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170327-29371}, pages = {10}, abstract = {Design activity could be treated computationally as state transition. In stepwise processing, in-between form-states are not easily observed. However, in this research a time-based concept is introduced and applied in order to bridge this gap. In architecture, folding is one method of form manipulation and architects also want to search for alternatives by this operation. Besides, the folding operation has to be defined and parameterized before the time factor is involved as a variable of folding. As a result, time-based transformation provides sequential form states and redirects design activity.}, subject = {Architektur }, language = {en} } @inproceedings{ChudobaScholzenHegger, author = {Chudoba, Rostislav and Scholzen, A. and Hegger, Josef}, title = {MICROPLANE MODEL WITH INITIAL AND DAMAGE-INDUCED ANISOTROPY APPLIED TO TEXTILE-REINFORCED CONCRETE}, editor = {G{\"u}rlebeck, Klaus and K{\"o}nke, Carsten}, organization = {Bauhaus-Universit{\"a}t Weimar}, issn = {1611-4086}, doi = {10.25643/bauhaus-universitaet.2836}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170314-28367}, pages = {8}, abstract = {The presented material model reproduces the anisotropic characteristics of textile-reinforced concrete in a smeared manner. This includes both the initial anisotropy introduced by the textile reinforcement and the anisotropic damage evolution reflecting fine patterns of crack bridges. The model is based on the microplane approach. The direction-dependent representation of the material structure into oriented microplanes provides a flexible way to introduce the initial anisotropy. The microplanes oriented in a yarn direction are associated with modified damage laws that reflect the tension-stiffening effect due to the multiple cracking of the matrix along the yarn.}, subject = {Angewandte Informatik}, language = {en} } @inproceedings{ConstalesKrausshar, author = {Constales, Denis and Kraußhar, Rolf S{\"o}ren}, title = {ON THE KLEIN-GORDON EQUATION ON THE 3-TORUS}, editor = {G{\"u}rlebeck, Klaus and K{\"o}nke, Carsten}, organization = {Bauhaus-Universit{\"a}t Weimar}, issn = {1611-4086}, doi = {10.25643/bauhaus-universitaet.2863}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170314-28639}, pages = {10}, abstract = {In this paper we consider the time-independent Klein-Gordon equation on some conformally flat 3-tori with given boundary data. We set up an explicit formula for the fundamental solution. We show that we can represent any solution to the homogeneous Klein-Gordon equation on the torus as a finite sum over generalized 3-fold periodic elliptic functions that are in the kernel of the Klein-Gordon operator. Furthermore we prove Cauchy and Green type integral formulas and set up a Teodorescu and Cauchy transform for the toroidal Klein-Gordon operator. 
These in turn are used to set up explicit formulas for the solution to the inhomogeneous version of the Klein-Gordon equation on the 3-torus.}, subject = {Angewandte Informatik}, language = {en} } @inproceedings{ConstalesKrausshar, author = {Constales, Denis and Kraußhar, Rolf S{\"o}ren}, title = {ON THE NAVIER-STOKES EQUATION WITH FREE CONVECTION IN STRIP DOMAINS AND 3D TRIANGULAR CHANNELS}, editor = {G{\"u}rlebeck, Klaus and K{\"o}nke, Carsten}, organization = {Bauhaus-Universit{\"a}t Weimar}, doi = {10.25643/bauhaus-universitaet.2938}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170327-29387}, pages = {12}, abstract = {The Navier-Stokes equations and related ones can be treated very elegantly with the quaternionic operator calculus developed in a series of works by K. Guerlebeck, W. Sproessig and others. This study will be extended in this paper. In order to apply the quaternionic operator calculus to solve these types of boundary value problems fully explicitly, one basically needs to evaluate two types of integral operators: the Teodorescu operator and the quaternionic Bergman projector. While the integral kernel of the Teodorescu transform is universal for all domains, the kernel function of the Bergman projector, called the Bergman kernel, depends on the geometry of the domain. With special variants of quaternionic holomorphic multiperiodic functions we obtain explicit formulas for three-dimensional parallel plate channels, rectangular block domains and regular triangular channels. The explicit knowledge of the integral kernels then makes it possible to evaluate the operator equations in order to determine the solutions of the boundary value problem explicitly.}, subject = {Architektur }, language = {en} } @inproceedings{CruzFalcaoMalonek, author = {Cruz, J. F. and Falc{\~a}o, M. Irene and Malonek, Helmuth Robert}, title = {3D-MAPPINGS AND THEIR APPROXIMATION BY SERIES OF POWERS OF A SMALL PARAMETER}, editor = {G{\"u}rlebeck, Klaus and K{\"o}nke, Carsten}, organization = {Bauhaus-Universit{\"a}t Weimar}, doi = {10.25643/bauhaus-universitaet.2940}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170327-29406}, pages = {14}, abstract = {In classical complex function theory the geometric mapping property of conformality is closely linked with complex differentiability. In contrast to the planar case, in higher dimensions the set of conformal mappings is only the set of M{\"o}bius transformations. Unfortunately, the theory of generalized holomorphic functions (for historical reasons they are called monogenic functions) developed on the basis of Clifford algebras does not cover the set of M{\"o}bius transformations in higher dimensions, since M{\"o}bius transformations are not monogenic. On the other hand, monogenic functions are hypercomplex differentiable functions, and the question arises whether, from this point of view, they can still play a special role for other types of 3D-mappings, for instance, for quasi-conformal ones. On the occasion of the 16th IKM, 3D-mapping methods based on the application of Bergman's reproducing kernel approach (BKM) were discussed. Almost all authors working before that with BKM in the Clifford setting were only concerned with the general algebraic and functional analytic background which allows the explicit determination of the kernel in special situations. The main goal of the abovementioned contribution was the numerical experiment by using Maple software specially developed for that purpose. 
Since BKM is only one of a great variety of concrete numerical methods developed for mapping problems, our goal is to present an approach to 3D-mappings that is completely different from BKM. In fact, it is an extension of ideas of L. V. Kantorovich to the 3-dimensional case by using reduced quaternions and some suitable series of powers of a small parameter. Whereas until now, in the Clifford case of BKM, the recovery of the mapping function itself and its relation to the monogenic kernel function is still an open problem, this approach avoids such difficulties and leads to an approximation by monogenic polynomials depending on that small parameter.}, subject = {Architektur }, language = {en} } @inproceedings{DeBieSommen, author = {De Bie, Hendrik and Sommen, Frank}, title = {VECTOR AND BIVECTOR FOURIER TRANSFORMS IN CLIFFORD ANALYSIS}, editor = {G{\"u}rlebeck, Klaus and K{\"o}nke, Carsten}, organization = {Bauhaus-Universit{\"a}t Weimar}, issn = {1611-4086}, doi = {10.25643/bauhaus-universitaet.2837}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170314-28371}, pages = {11}, abstract = {In the past, several types of Fourier transforms in Clifford analysis have been studied. In this paper, first an overview of these different transforms is given. Next, a new equation in a Clifford algebra is proposed, the solutions of which will act as kernels of a new class of generalized Fourier transforms. Two solutions of this equation are studied in more detail, namely a vector-valued solution and a bivector-valued solution, as well as the associated integral transforms.}, subject = {Angewandte Informatik}, language = {en} } @inproceedings{DeSchepperBrackxSommen, author = {De Schepper, Nele and Brackx, Fred and Sommen, Frank}, title = {THE FOURIER-BESSEL TRANSFORM}, editor = {G{\"u}rlebeck, Klaus and K{\"o}nke, Carsten}, organization = {Bauhaus-Universit{\"a}t Weimar}, issn = {1611-4086}, doi = {10.25643/bauhaus-universitaet.2838}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170314-28387}, pages = {18}, abstract = {In this paper we devise a new multi-dimensional integral transform within the Clifford analysis setting, the so-called Fourier-Bessel transform. It appears that in the two-dimensional case, it coincides with the Clifford-Fourier and cylindrical Fourier transforms introduced earlier. We show that this new integral transform satisfies operational formulae which are similar to those of the classical tensorial Fourier transform. Moreover, the L2-basis elements consisting of generalized Clifford-Hermite functions appear to be eigenfunctions of the Fourier-Bessel transform.}, subject = {Angewandte Informatik}, language = {en} } @inproceedings{DeaconvanRooyen, author = {Deacon, Michael-John and van Rooyen, G.C.}, title = {DISTRIBUTED COLLABORATION: ENGINEERING PRACTICE REQUIREMENTS}, editor = {G{\"u}rlebeck, Klaus and K{\"o}nke, Carsten}, organization = {Bauhaus-Universit{\"a}t Weimar}, doi = {10.25643/bauhaus-universitaet.2941}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170327-29410}, pages = {8}, abstract = {Designing a structure follows a pattern of creating a structural design concept, executing a finite element analysis and developing a design model. A project was undertaken to create computer support for executing these tasks within a collaborative environment. This study focuses on developing a software architecture that integrates the various structural design aspects into a seamless functional collaboratory that satisfies engineering practice requirements. 
The collaboratory is to support both homogeneous collaboration, i.e. between users operating on the same model, and heterogeneous collaboration, i.e. between users operating on different model types. Collaboration can take place synchronously or asynchronously, and the information exchange is done either at the granularity of objects or at the granularity of models. The objective is to determine from practicing engineers which configurations they regard as best and what features are essential for working in a collaborative environment. Based on the suggestions of these engineers, a specification of a collaboration configuration that satisfies engineering practice requirements will be developed.}, subject = {Architektur }, language = {en} } @inproceedings{DjordjevicPetkovicZivkovic, author = {Djordjevic, Djordje and Petkovic, Dusan and Zivkovic, Darko}, title = {THE APPLICATION OF INTERVAL CALCULUS TO ESTIMATION OF PLATE DEFLECTION BY SOLVING POISSON'S PARTIAL DIFFERENTIAL EQUATION}, editor = {G{\"u}rlebeck, Klaus and K{\"o}nke, Carsten}, organization = {Bauhaus-Universit{\"a}t Weimar}, issn = {1611-4086}, doi = {10.25643/bauhaus-universitaet.2839}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170314-28397}, pages = {12}, abstract = {This paper describes the application of interval calculus to the calculation of plate deflection, taking into account the inevitable and acceptable tolerance of the input data (input parameters). A simply supported reinforced concrete plate was taken as an example. The plate was loaded by uniformly distributed loads. Several parameters that influence the plate deflection are given as certain closed intervals. Accordingly, the results are obtained as intervals, so it was possible to follow the direct influence of a change of one or more input parameters on the output values (in our example, the deflection) by using one model and one computing procedure. The described procedure could be applied to any FEM calculation in order to keep calculation tolerances, ISO-tolerances, and production tolerances within close (admissible) limits. Wolfram Mathematica has been used as a tool for the interval calculation.}, subject = {Angewandte Informatik}, language = {en} } @inproceedings{DoganArditiGunaydin, author = {Dogan, Sevgi Zeynep and Arditi, D. and Gunaydin, H. Murat}, title = {COMPARISON OF ANN AND CBR MODELS FOR EARLY COST PREDICTION OF STRUCTURAL SYSTEMS}, editor = {G{\"u}rlebeck, Klaus and K{\"o}nke, Carsten}, organization = {Bauhaus-Universit{\"a}t Weimar}, doi = {10.25643/bauhaus-universitaet.2942}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170327-29421}, abstract = {Reasonably accurate cost estimation of the structural system is quite desirable at the early stages of the design process of a construction project. However, the numerous interactions among the many cost variables make the prediction difficult. Artificial neural networks (ANN) and case-based reasoning (CBR) are reported to overcome this difficulty. This paper presents a comparison of CBR and ANN augmented by genetic algorithms (GA), conducted by using spreadsheet simulations. GA was used to determine the optimum weights for the ANN and CBR models. The cost data of twenty-nine actual cases of residential building projects were used as an example application. Two different sets of cases were randomly selected from the data set for training and testing purposes. Prediction rates of 84\% in the GA/CBR study and 89\% in the GA/ANN study were obtained. 
The advantages and disadvantages of the two approaches are discussed in the light of the experiments and the findings. It appears that GA/ANN is a more suitable model for this example of cost estimation, where the prediction of numerical values is required and only a limited number of cases exist. The integration of GA into CBR and ANN in a spreadsheet format is likely to improve the prediction rates.}, subject = {Architektur }, language = {en} } @inproceedings{DudekRichter, author = {Dudek, Mariusz and Richter, Matthias}, title = {UNTERSUCHUNGEN ZUR ZUVERL{\"A}SSIGKEIT DES STRAßENBAHNNETZES IN KRAKAU}, editor = {G{\"u}rlebeck, Klaus and K{\"o}nke, Carsten}, organization = {Bauhaus-Universit{\"a}t Weimar}, doi = {10.25643/bauhaus-universitaet.2943}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170327-29432}, pages = {19}, abstract = {The concept of reliability plays a central role in the assessment of transport networks. From the point of view of users of public transport, one of the most important criteria for judging the quality of a line network is whether the destination can be reached within a given time with high certainty. In this talk, this notion of reliability is formalized mathematically. First, the usual concept of network reliability in the sense of pairwise connectivity probabilities is considered. This concept is then extended by taking a maximum admissible travel time into account. In previous work, the ring-radius structure has proven to be a well-suited model for the theoretical description of transport networks. These considerations are now extended by incorporating real transport network structures. The tram network of Krakow serves as a concrete example. In particular, it is investigated which effects a planned extension of the network will have on its reliability. This paper is related to the CIVITAS-CARAVEL project "Clean and better transport in cities". The project has received research funding from the Community's Sixth Framework Programme. The paper reflects only the author's views and the Community is not liable for any use that may be made of the information contained therein.}, subject = {Architektur }, language = {de} } @inproceedings{DzwigonHempel, author = {Dzwigon, Wieslaw and Hempel, Lorenz}, title = {ZUR SYNCHRONISATION VON LINIEN IM {\"O}PNV}, editor = {G{\"u}rlebeck, Klaus and K{\"o}nke, Carsten}, organization = {Bauhaus-Universit{\"a}t Weimar}, doi = {10.25643/bauhaus-universitaet.2944}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170327-29442}, pages = {12}, abstract = {In public transport we consider the situation in which two bus or tram lines share common stops. The aim of our investigation is to find, for both lines, a timetable that offers the passengers as much convenience as possible. The demand structure - the number of passengers using the two lines - imposes certain restrictions on the headways of the two lines. The remaining degrees of freedom are to be exploited with respect to this objective. The talk addresses the following questions: by which criteria can the "convenience" or the "quality of synchronization" be measured?
How can the individual synchronization measures be computed? How can the remaining degrees of freedom be used to achieve the best possible synchronization? The results are then applied to several examples, and solution proposals are made using the methods provided.}, subject = {Architektur }, language = {de} } @inproceedings{EbertLenzen, author = {Ebert, Carsten and Lenzen, Armin}, title = {OUTPUT-ONLY ANALYSIS FOR EXPERIMENTAL DAMAGE DETECTION OF A TIED-ARCH BRIDGE}, editor = {G{\"u}rlebeck, Klaus and K{\"o}nke, Carsten}, organization = {Bauhaus-Universit{\"a}t Weimar}, doi = {10.25643/bauhaus-universitaet.2945}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170327-29452}, pages = {13}, abstract = {In civil engineering it is very difficult and often expensive to excite constructions such as bridges and buildings with an impulse hammer or shaker. This problem can be avoided with the output-only method, a special feature of stochastic system identification. The permanently present ambient excitation (e.g. wind, traffic, waves) is sufficient to excite the structures under their operational conditions. The output-only method is able to estimate the observable part of a state-space model which contains the dynamic characteristics of the measured mechanical system. Because of the assumption that the ambient excitation is white, there is no requirement to measure the input. Another advantage of the output-only method is the possibility of obtaining highly detailed models by a special technique called the polyreference setup. To emulate the availability of a much larger set of sensors, data from varying sensor locations are collected. Several successive data sets are recorded with sensors at different locations (moving sensors) and fixed locations (reference sensors). The covariance functions of the reference sensors serve as the basis for normalizing the moving sensors. The result of the subsequent subspace-based system identification is a highly detailed black-box model that contains the weighting function, including the well-known dynamic parameters, i.e. the eigenfrequencies and mode shapes of the mechanical system. The emphasis of this lecture is the presentation of an extensive damage detection experiment. A 53-year-old prestressed concrete tied-arch bridge in H{\"u}nxe (Germany) was deconstructed in 2005. Beforehand, numerous vibration measurements were carried out. The first experiment for system modification was an additional support near the bridge bearing of one main girder. In a further experiment, one hanger of one tied arch was cut through as an induced damage. Some first outcomes of the described experiments will be presented.}, subject = {Architektur }, language = {en} } @inproceedings{EbertBernsteinCerejeirasetal., author = {Ebert, Svend and Bernstein, Swanhild and Cerejeiras, Paula and K{\"a}hler, Uwe}, title = {NONZONAL WAVELETS ON S^N}, editor = {G{\"u}rlebeck, Klaus and K{\"o}nke, Carsten}, organization = {Bauhaus-Universit{\"a}t Weimar}, issn = {1611-4086}, doi = {10.25643/bauhaus-universitaet.2840}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170314-28406}, pages = {18}, abstract = {In the present article we construct wavelets on a sphere S^n of arbitrary dimension using the approach of approximate identities. There are two equivalent approaches to wavelets. The group-theoretical approach formulates a square integrability condition for a group acting on the sphere via a unitary, irreducible representation.
The connection to the group-theoretical approach will be sketched. The concept of approximate identities uses the same constructions in the background; here we select an appropriate section of dilations and translations in the group acting on the sphere in two steps. At first we formulate dilations in terms of approximate identities, and then we introduce translations on the sphere as rotations. This leads to the construction of an orthogonal polynomial system in L²(SO(n+1)). This approach is convenient for constructing concrete wavelets, since the appropriate kernels can be constructed from the heat kernel, leading to the approximate identity of Gauss-Weierstra{\ss}. We work out conditions on functions forming a family of wavelets; subsequently we formulate how zonal wavelets can be constructed from an approximate identity and discuss the relation to the admissibility of nonzonal wavelets. Finally we give an example of a nonzonal wavelet on S^n, which we obtain from the approximate identity of Gauss-Weierstra{\ss}.}, subject = {Angewandte Informatik}, language = {en} } @inproceedings{EblingScheuermann, author = {Ebling, Julia and Scheuermann, G.}, title = {TEMPLATE MATCHING ON VECTOR FIELDS USING CLIFFORD ALGEBRA}, editor = {G{\"u}rlebeck, Klaus and K{\"o}nke, Carsten}, organization = {Bauhaus-Universit{\"a}t Weimar}, doi = {10.25643/bauhaus-universitaet.2946}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170327-29464}, pages = {25}, abstract = {Due to the amount of flow simulation and measurement data, automatic detection, classification and visualization of features is necessary for their inspection. Therefore, many automated feature detection methods have been developed in recent years. However, in most cases only one feature class is visualized afterwards, and many algorithms have problems in the presence of noise or superposition effects. In contrast, image processing and computer vision have robust methods for feature extraction and for the computation of derivatives of scalar fields. Furthermore, interpolation and other filters can be analyzed in detail. An application of these methods to vector fields would provide a solid theoretical basis for feature extraction. The authors suggest Clifford algebra as a mathematical framework for this task. Clifford algebra provides a unified notation for scalars and vectors as well as a multiplication of all basis elements. The Clifford product of two vectors provides the complete geometric information about the relative positions of these vectors. Integration of this product results in Clifford correlation and convolution, which can be used for template matching of vector fields. For the frequency analysis of vector fields and the behavior of vector-valued filters, a Clifford Fourier transform has been derived for 2D and 3D. Convolution and other theorems have been proved, and fast algorithms for the computation of the Clifford Fourier transform exist. Therefore the computation of the Clifford convolution can be accelerated by computing it in the Clifford Fourier domain.
Clifford convolution and the Clifford Fourier transform can be used for a thorough analysis and subsequent visualization of flow fields.}, subject = {Architektur }, language = {en} } @inproceedings{Eickelkamp, author = {Eickelkamp, Jens Peter}, title = {LIQUIDIT{\"A}TSPLANUNG VON BAUPROJEKTEN}, editor = {G{\"u}rlebeck, Klaus and K{\"o}nke, Carsten}, organization = {Bauhaus-Universit{\"a}t Weimar}, doi = {10.25643/bauhaus-universitaet.2948}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170327-29483}, pages = {12}, abstract = {Liquidity planning in construction companies is regarded as an essential control, monitoring and information instrument for internal and external stakeholders and serves a decision-support function. Since the individual construction projects account for a substantial share of a company's total costs, they also have a considerable influence on the liquidity and solvency of the construction company. Accordingly, it is common practice in the construction industry to prepare the liquidity plan first on a project basis and then to aggregate it at company level. The aim of this contribution is to present the relationships between work calculation, profit and loss accounting and financial accounting in the form of a deterministic planning model at the project level. In doing so, the understanding and significance of the links between the technically oriented construction process and its representation in accounting and finance are emphasized. The processes of construction execution, i.e. the completion of the bill-of-quantities items and their representation over time in a construction schedule, have to be transformed, period by period, into quantities of cost accounting (output, costs) and subsequently broken down in the financial accounts (receipts, payments) by creditors and debtors.}, subject = {Architektur }, language = {de} } @inproceedings{EiermannErnstUllmann, author = {Eiermann, Michael and Ernst, O. and Ullmann, Elisabeth}, title = {SOLUTION STRATEGIES FOR STOCHASTIC FINITE ELEMENT DISCRETIZATIONS}, editor = {G{\"u}rlebeck, Klaus and K{\"o}nke, Carsten}, organization = {Bauhaus-Universit{\"a}t Weimar}, doi = {10.25643/bauhaus-universitaet.2949}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170327-29493}, pages = {11}, abstract = {We consider efficient numerical methods for the solution of partial differential equations with stochastic coefficients or right-hand side. The discretization is performed by the stochastic finite element method (SFEM). Separation of spatial and stochastic variables in the random input data is achieved via a Karhunen-Lo{\`e}ve expansion or Wiener's polynomial chaos expansion. We discuss solution strategies for the Galerkin system that take advantage of the special structure of the system matrix.
For stochastic coefficients that are linear in a set of independent random variables, we employ Krylov subspace recycling techniques after having decoupled the large SFEM stiffness matrix.}, subject = {Architektur }, language = {en} } @inproceedings{EngelkeSchuster, author = {Engelke, Gerald and Schuster, Otmar}, title = {OPENING THE RESERVE OF ECONOMIC EFFICIENCY IN LOGISTICAL AND FACILITY MANAGEMENT SERVICES}, editor = {G{\"u}rlebeck, Klaus and K{\"o}nke, Carsten}, organization = {Bauhaus-Universit{\"a}t Weimar}, doi = {10.25643/bauhaus-universitaet.3017}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170327-30177}, pages = {8}, abstract = {In many branches of industry, companies often lose visibility of the human and technical resources of their field service. On the one hand, the people in the field service are often as free as kings; on the other hand, they do not take part in the daily communication of the central office and suffer from the lack of involvement in its decisions. The result is inefficiency. Reproaches in both directions follow. With radio systems and then mobile phones, the gap began to narrow. But the solutions are far from being productive.}, subject = {Architektur }, language = {en} } @inproceedings{Eriksson, author = {Eriksson, Sirkka-Liisa}, title = {MEAN VALUE PROPERTIES FOR THE WEINSTEIN EQUATION AND MODIFIED DIRAC OPERATORS}, series = {Digital Proceedings, International Conference on the Applications of Computer Science and Mathematics in Architecture and Civil Engineering : July 04 - 06 2012, Bauhaus-University Weimar}, booktitle = {Digital Proceedings, International Conference on the Applications of Computer Science and Mathematics in Architecture and Civil Engineering : July 04 - 06 2012, Bauhaus-University Weimar}, editor = {G{\"u}rlebeck, Klaus and Lahmer, Tom and Werner, Frank}, organization = {Bauhaus-Universit{\"a}t Weimar}, issn = {1611-4086}, doi = {10.25643/bauhaus-universitaet.2762}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170314-27621}, pages = {16}, abstract = {We study the Weinstein equation for a function u on the upper half-space R^3_+. The Weinstein equation is connected to axially symmetric potentials. We compute solutions of the Weinstein equation depending on the hyperbolic distance and on x_2. These results imply explicit mean value properties. We also compute the fundamental solution. The main tools are the hyperbolic metric and its invariance properties.}, subject = {Angewandte Informatik}, language = {en} } @inproceedings{ErikssonKettunen, author = {Eriksson, Sirkka-Liisa and Kettunen, Jarkko}, title = {HYPERMONOGENIC POLYNOMIALS}, editor = {G{\"u}rlebeck, Klaus and K{\"o}nke, Carsten}, organization = {Bauhaus-Universit{\"a}t Weimar}, doi = {10.25643/bauhaus-universitaet.2950}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170327-29509}, pages = {22}, abstract = {It is well known that the power function is not monogenic. There are basically two ways to include the power function into the set of solutions: the hypermonogenic functions or the holomorphic Cliffordian functions. L. Pernas has determined the dimension of the space of homogeneous holomorphic Cliffordian polynomials of degree m, but his approach did not provide a basis. It is known that the hypermonogenic functions are included in the space of holomorphic Cliffordian functions.
As our main result we show that we can construct a basis for the right module of homogeneous holomorphic Cliffordian polynomials of degree m using hypermonogenic polynomials and their derivatives. To that end we first recall the function spaces of monogenic, hypermonogenic and holomorphic Cliffordian functions and give the results needed in the proof of our main theorem. We list some basic polynomials and their properties for the various function spaces. In particular, we consider recursive formulas, rules of differentiation and linear independence properties of the polynomials.}, subject = {Architektur }, language = {en} } @inproceedings{ErlemannHartmann, author = {Erlemann, Kai and Hartmann, Dietrich}, title = {PARALLELIZATION OF A MICROSCOPIC TRAFFIC SIMULATION SYSTEM USING MPIJAVA}, editor = {G{\"u}rlebeck, Klaus and K{\"o}nke, Carsten}, organization = {Bauhaus-Universit{\"a}t Weimar}, doi = {10.25643/bauhaus-universitaet.2951}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170327-29516}, pages = {8}, abstract = {Traffic simulation is a valuable tool for the design and evaluation of road networks. Over the years, the level of detail to which urban and freeway traffic can be simulated has increased steadily, shifting from a merely qualitative macroscopic perspective to a very detailed microscopic view, where the behavior of individual vehicles is emulated realistically. With the improvement of behavioral models, however, the computational complexity has also steadily increased, as more and more aspects of real-life traffic have to be considered by the simulation environment. Despite the constant increase in computing power of modern personal computers, microscopic simulation remains computationally expensive, limiting the maximum network size that can be simulated on a single-processor computer in reasonable time. Parallelization can distribute the computing load from a single computer system to a cluster of several computing nodes. To this end, the existing simulation framework had to be adapted to allow for a distributed approach. As the simulation is ultimately targeted to be executed in real time, incorporating real traffic data, only a spatial partition of the simulation was considered, meaning the road network has to be partitioned into subnets of comparable complexity to ensure homogeneous load balancing. The partition process must also ensure that the division between subnets only occurs in regions where no strong interaction between the separated road segments occurs (i.e. not in the direct vicinity of junctions). In this paper, we describe a new microscopic reasoning voting strategy, and discuss to what extent the increasing computational costs of these more complex behaviors lend themselves to a parallelized approach.
We show the parallel architecture employed, the communication between computing units using MPIJava, and the benefits and pitfalls of adapting a single-computer application for use on a multi-node computing cluster.}, subject = {Architektur }, language = {en} } @inproceedings{EygelaarvanRooyen, author = {Eygelaar, Anton and van Rooyen, G.C.}, title = {ENGINEERING PROCESS MODEL SPECIFICATION AND RESOURCE LEVELING}, editor = {G{\"u}rlebeck, Klaus and K{\"o}nke, Carsten}, organization = {Bauhaus-Universit{\"a}t Weimar}, doi = {10.25643/bauhaus-universitaet.2952}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170327-29529}, pages = {18}, abstract = {The use of process models in the analysis, optimization and simulation of processes has proven to be extremely beneficial in the instances where they could be applied appropriately. However, the Architecture/Engineering/Construction (AEC) industries present unique challenges that complicate the modeling of their processes. A simple Engineering process model, based on the specification of Tasks, Datasets, Persons and Tools, and certain relations between them, has been developed, and its advantages over conventional techniques have been illustrated. Graph theory is used as the mathematical foundation, mapping Tasks, Datasets, Persons and Tools to vertices and the relations between them to edges, forming a directed graph. The acceptance of process modeling in the AEC industries depends not only on the results it can provide, but also on the ease with which these results can be attained. Specifying a complex AEC process model is a dynamic exercise that is characterized by many modifications over the process model's lifespan. This article looks at reducing specification complexity, reducing the probability of erroneous input and allowing consistent model modification. Furthermore, the problem of resource leveling is discussed. Engineering projects are often executed with limited resources, and determining the impact of such restrictions on the sequence of Tasks is important. Resource Leveling concerns itself with these restrictions caused by limited resources. This article looks at using Task shifting strategies to find a near-optimal sequence of Tasks that guarantees consistent Dataset evolution while resolving resource restrictions.}, subject = {Architektur }, language = {en} } @inproceedings{FalcaoCruzMalonek, author = {Falc{\~a}o, M. Irene and Cruz, J. F. and Malonek, Helmuth Robert}, title = {REMARKS ON THE GENERATION OF MONOGENIC FUNCTIONS}, editor = {G{\"u}rlebeck, Klaus and K{\"o}nke, Carsten}, organization = {Bauhaus-Universit{\"a}t Weimar}, doi = {10.25643/bauhaus-universitaet.2939}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170327-29390}, pages = {18}, abstract = {In this paper we consider three different methods for generating monogenic functions. The first one is related to Fueter's well-known approach to the generation of monogenic quaternion-valued functions by means of holomorphic functions, the second one is based on the solution of hypercomplex differential equations, and finally the third one is a direct series approach, based on the use of special homogeneous polynomials. We illustrate the theory by generating three different exponential functions and discuss some of their properties.
Partially supported by the R\&D unit \emph{Matem\'atica e Aplica\c{c}\~oes} (UIMA) of the University of Aveiro, through the Portuguese Foundation for Science and Technology (FCT), co-financed by the European Community fund FEDER.}, subject = {Architektur }, language = {en} } @inproceedings{Faustino, author = {Faustino, Nelson}, title = {FISCHER DECOMPOSITION FOR DIFFERENCE DIRAC OPERATORS}, editor = {G{\"u}rlebeck, Klaus and K{\"o}nke, Carsten}, organization = {Bauhaus-Universit{\"a}t Weimar}, doi = {10.25643/bauhaus-universitaet.2955}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170327-29551}, pages = {10}, abstract = {We establish the basis of a discrete function theory starting with a Fischer decomposition for difference Dirac operators. Discrete versions of homogeneous polynomials, Euler and Gamma operators are obtained. As a consequence we obtain a Fischer decomposition for the discrete Laplacian. For the sake of simplicity, in the first part we consider only Dirac operators which contain only forward or backward finite differences. Of course, these Dirac operators do not factorize the classic discrete Laplacian. Therefore, we will consider a different definition of a difference Dirac operator in the quaternionic case which does factorize the discrete Laplacian.}, subject = {Architektur }, language = {en} } @inproceedings{FerreiraVieira, author = {Ferreira, Milton dos Santos and Vieira, Nelson}, title = {EIGENFUNCTIONS AND FUNDAMENTAL SOLUTIONS FOR THE FRACTIONAL LAPLACIAN IN 3 DIMENSIONS}, series = {Digital Proceedings, International Conference on the Applications of Computer Science and Mathematics in Architecture and Civil Engineering : July 20 - 22 2015, Bauhaus-University Weimar}, booktitle = {Digital Proceedings, International Conference on the Applications of Computer Science and Mathematics in Architecture and Civil Engineering : July 20 - 22 2015, Bauhaus-University Weimar}, editor = {G{\"u}rlebeck, Klaus and Lahmer, Tom}, organization = {Bauhaus-Universit{\"a}t Weimar}, issn = {1611-4086}, doi = {10.25643/bauhaus-universitaet.2796}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170314-27968}, pages = {6}, abstract = {Recently there has been a surge of interest in PDEs involving fractional derivatives in different fields of engineering. In this extended abstract we present some of the results developed in [3]. We compute the fundamental solution for the three-parameter fractional Laplace operator Δ by transforming the eigenfunction equation into an integral equation and applying the method of separation of variables. The obtained solutions are expressed in terms of Mittag-Leffler functions. For more details we refer the interested reader to [3], where an operational approach based on the two Laplace transforms is also presented.}, subject = {Angewandte Informatik}, language = {en} } @inproceedings{FlaigApel, author = {Flaig, Thomas and Apel, Thomas}, title = {SIMULATION AND MATHEMATICAL OPTIMIZATION OF THE HYDRATION OF CONCRETE FOR AVOIDING THERMAL CRACKS}, editor = {G{\"u}rlebeck, Klaus and K{\"o}nke, Carsten}, organization = {Bauhaus-Universit{\"a}t Weimar}, issn = {1611-4086}, doi = {10.25643/bauhaus-universitaet.2842}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170314-28424}, pages = {15}, abstract = {After the mixing of concrete, the hardening starts with an exothermic chemical reaction known as hydration.
As the reaction rate depends on the temperature, the time in the description of the hydration is replaced by the maturity, which is defined as an integral over a certain function of the temperature. The temperature distribution is governed by the heat equation with a right-hand side depending on the maturity and the temperature itself. We compare the performance of different higher-order time integration schemes with automatic time step control. The simulation of the heat distribution is of importance, as the development of the mechanical properties is driven by the hydration. During this process it is possible that the tensile stresses exceed the tensile strength and cracks occur. The goal is to produce cheap concrete without cracks. Simple crack criteria use only temperature differences; more involved ones are based on thermal stresses. If the criterion predicts cracks, some changes in the input data are needed. This can be interpreted as optimization. The final goal will be to adapt model-based optimization (in contrast to simulation-based optimization) to the problem of the hydration of young concrete and the avoidance of cracks. The first step is the simulation of the hydration, on which we focus in this paper.}, subject = {Angewandte Informatik}, language = {en} } @inproceedings{Franssens, author = {Franssens, Ghislain R.}, title = {INTRODUCTION TO CLIFFORD ANALYSIS OVER PSEUDO-EUCLIDEAN SPACE}, editor = {G{\"u}rlebeck, Klaus and K{\"o}nke, Carsten}, organization = {Bauhaus-Universit{\"a}t Weimar}, issn = {1611-4086}, doi = {10.25643/bauhaus-universitaet.2843}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170314-28433}, pages = {16}, abstract = {An introduction is given to Clifford Analysis over pseudo-Euclidean space of arbitrary signature, called for short Ultrahyperbolic Clifford Analysis (UCA). UCA is regarded as a function theory of Clifford-valued functions satisfying a first-order partial differential equation involving a vector-valued differential operator, called a Dirac operator. The formulation of UCA presented here pays special attention to its geometrical setting. This permits the identification of tensors which qualify as geometrically invariant Dirac operators and allows a position to be taken on the naturalness of contravariant and covariant versions of such a theory. In addition, a formal method is described to construct the general solution of the aforementioned equation in the context of covariant UCA.}, subject = {Angewandte Informatik}, language = {en} }