@article{Koenig, author = {K{\"o}nig, Reinhard}, title = {Die Stadt der Agenten und Automaten}, series = {FORUM - Architektur \& Bauforum}, journal = {FORUM - Architektur \& Bauforum}, doi = {10.25643/bauhaus-universitaet.2608}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20160622-26083}, abstract = {PLANUNGSUNTERST{\"U}TZUNG DURCH DIE ANALYSE R{\"A}UMLICHER PROZESSE MITTELS COMPUTERSIMULATIONEN. Erst wenn man - zumindest im Prinzip - versteht, wie eine Stadt mit ihren komplexen, verwobenen Vorg{\"a}ngen im Wesentlichen funktioniert, ist eine sinnvolle Stadtplanung m{\"o}glich. Denn jede Planung bedeutet einen Eingriff in den komplexen Organismus einer Stadt. Findet dieser Eingriff ohne Wissen {\"u}ber die Funktionsweise des Organismus statt, k{\"o}nnen auch die Auswirkungen nicht abgesch{\"a}tzt werden. Dieser Beitrag stellt dar, wie urbane Prozesse mittels Computersimulationen unter Zuhilfenahme so genannter Multi-Agenten-Systeme und Zellul{\"a}rer Automaten verstanden werden k{\"o}nnen. von}, subject = {CAD}, language = {de} } @article{Koenig, author = {K{\"o}nig, Reinhard}, title = {Computers in the design phase - Ten thesis on their uselessness}, series = {Der Generalist}, journal = {Der Generalist}, doi = {10.25643/bauhaus-universitaet.2607}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20160622-26075}, abstract = {At the end of the 1960s, architects at various universities world- wide began to explore the potential of computer technology for their profession. With the decline in prices for PCs in the 1990s and the development of various computer-aided architectural design systems (CAAD), the use of such systems in architectural and planning offices grew continuously. Because today no ar- chitectural office manages without a costly CAAD system and because intensive soſtware training has become an integral part of a university education, the question arises about what influence the various computer systems have had on the design process forming the core of architectural practice. The text at hand devel- ops ten theses about why there has been no success to this day in introducing computers such that new qualitative possibilities for design result. RESTRICTEDNESS}, subject = {CAD}, language = {en} } @article{KoenigBauriedel, author = {K{\"o}nig, Reinhard and Bauriedel, Christian}, title = {Generating settlement structures: a method for urban planning and analysis supported by cellular automata}, series = {Environment and Planning B: Planning and Design}, journal = {Environment and Planning B: Planning and Design}, doi = {10.25643/bauhaus-universitaet.2605}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20160624-26054}, pages = {602 -- 624}, abstract = {Previous models for the explanation of settlement processes pay little attention to the interactions between settlement spreading and road networks. On the basis of a dielectric breakdown model in combination with cellular automata, we present a method to steer precisely the generation of settlement structures with regard to their global and local density as well as the size and number of forming clusters. The resulting structures depend on the logic of how the dependence of the settlements and the road network is implemented to the simulation model. After analysing the state of the art we begin with a discussion of the mutual dependence of roads and land development. Next, we elaborate a model that permits the precise control of permeability in the developing structure as well as the settlement density, using the fewest necessary control parameters. On the basis of different characteristic values, possible settlement structures are analysed and compared with each other. Finally, we reflect on the theoretical contribution of the model with regard to the context of urban dynamics.}, language = {en} } @article{BimberIwai2009, author = {Bimber, Oliver and Iwai, Daisuke}, title = {Superimposing Dynamic Range}, series = {Eurographics 2009}, journal = {Eurographics 2009}, doi = {10.25643/bauhaus-universitaet.1532}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20120130-15325}, year = {2009}, abstract = {Replacing a uniform illumination by a high-frequent illumination enhances the contrast of observed and captured images. We modulate spatially and temporally multiplexed (projected) light with reflective or transmissive matter to achieve high dynamic range visualizations of radiological images on printed paper or ePaper, and to boost the optical contrast of images viewed or imaged with light microscopes.}, subject = {CGI }, language = {en} } @article{Knecht, author = {Knecht, Katja}, title = {Augmented Urban Model: Ein Tangible User Interface zur Unterst{\"u}tzung von Stadtplanungsprozessen}, doi = {10.25643/bauhaus-universitaet.2674}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20160823-26740}, abstract = {Im architektonischen und st{\"a}dtebaulichen Kontext erf{\"u}llen physische und digitale Modelle aufgrund ihrer weitgehend komplement{\"a}ren Eigenschaften und Qualit{\"a}ten unterschiedliche, nicht verkn{\"u}pfte Aufgaben und Funktionen im Entwurfs- und Planungsprozess. W{\"a}hrend physische Modelle vor allem als Darstellungs- und Kommunikationsmittel aber auch als Arbeitswerkzeug genutzt werden, unterst{\"u}tzen digitale Modelle dar{\"u}ber hinaus die Evaluation eines Entwurfs durch computergest{\"u}tzte Analyse- und Simulationstechniken. Analysiert wurden im Rahmen der in diesem Arbeitspapier vorgestellten Arbeit neben dem Einsatz des Modells als analogem und digitalem Werkzeug im Entwurf die Bedeutung des Modells f{\"u}r den Arbeitsprozess sowie Vorbilder aus dem Bereich der Tangible User Interfaces mit Bezug zu Architek¬tur und St{\"a}dtebau. Aus diesen Betrachtungen heraus wurde ein Prototyp entwickelt, das Augmented Urban Model, das unter anderem auf den fr{\"u}hen Projekten und Forschungsans{\"a}tzen aus dem Gebiet der Tangible User Interfaces aufsetzt, wie dem metaDESK von Ullmer und Ishii und dem Urban Planning Tool Urp von Underkoffler und Ishii. Das Augmented Urban Model zielt darauf ab, die im aktuellen Entwurfs- und Planungsprozess fehlende Br{\"u}cke zwischen realen und digitalen Modellwelten zu schlagen und gleichzeitig eine neue tangible Benutzerschnittstelle zu schaffen, welche die Manipulation von und die Interaktion mit digitalen Daten im realen Raum erm{\"o}glicht.}, subject = {tangible user interface}, language = {de} } @article{KoehlerKoenig, author = {K{\"o}hler, Hermann and K{\"o}nig, Reinhard}, title = {Aktionsr{\"a}ume in Dresden}, doi = {10.25643/bauhaus-universitaet.2672}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20160822-26726}, abstract = {In vorliegender Studie werden die Aktionsr{\"a}ume von Befragten in Dresden {\"u}ber eine standardisierte Befragung (n=360) untersucht. Die den Aktionsr{\"a}umen zugrundeliegenden Aktivit{\"a}ten werden unterschieden in Einkaufen f{\"u}r den t{\"a}glichen Bedarf, Ausgehen (z.B. in Caf{\´e}, Kneipe, Gastst{\"a}tte), Erholung im Freien (z.B. spazieren gehen, Nutzung von Gr{\"u}nanlagen) und private Geselligkeit (z.B. Feiern, Besuch von Verwandten/Freunden). Der Aktionsradius wird unterschieden in Wohnviertel, Nachbarviertel und sonstiges weiteres Stadtgebiet. Um aus den vier betrachteten Aktivit{\"a}ten einen umfassenden Kennwert f{\"u}r den durchschnittlichen Aktionsradius eines Befragten zu bilden, wird ein Modell f{\"u}r den Kennwert eines Aktionsradius entwickelt. Die Studie kommt zu dem Ergebnis, dass das Alter der Befragten einen signifikanten - wenn auch geringen - Einfluss auf den Aktionsradius hat. Das Haushaltsnettoeinkommen hat einen mit Einschr{\"a}nkung signifikanten, ebenfalls geringen Einfluss auf allt{\"a}gliche Aktivit{\"a}ten der Befragten.}, subject = {Aktionsraumforschung}, language = {de} } @article{Koehler, author = {K{\"o}hler, Hermann}, title = {Ergebnisse der Befragung zu Wohnstandortpr{\"a}ferenzen von Lebensweltsegmenten in Dresden}, doi = {10.25643/bauhaus-universitaet.2670}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20160822-26704}, abstract = {In vorliegender Studie werden die Wohnstandortpr{\"a}ferenzen der Sinus-Milieugruppen in Dresden {\"u}ber eine standardisierte Befragung (n=318) untersucht. Es wird unterschieden zwischen handlungsleitenden Wohnstandortpr{\"a}ferenzen, die durch Anhaltspunkte auf der Handlungsebene st{\"a}rker in Betracht gezogen werden sollten, und Wohnstandortpr{\"a}ferenzen, welche eher orientierenden Charakter haben. Die Wohnstandortpr{\"a}ferenzen werden untersucht anhand der Kategorien Ausstattung/Zustand der Wohnung/des n{\"a}heren Wohnumfeldes, Versorgungsstruktur, soziales Umfeld, Baustrukturtyp, Ortsgebundenheit sowie des Aspektes des Images eines Stadtviertels. Um die Befragten den Sinus-Milieugruppen zuordnen zu k{\"o}nnen, wird ein Lebensweltsegment-Modell entwickelt, welches den Anspruch hat, die Sinus-Milieugruppen in der Tendenz abzubilden. Die Studie kommt zu dem Ergebnis, dass die Angeh{\"o}rigen der verschiedenen Lebensweltsegmente in jeder Kategorie - wenn auch z.T. auf geringerem Niveau - signifikante Unterschiede in der Bewertung einzelner Wohnstandortpr{\"a}ferenzen aufweisen.}, subject = {Milieuforschung}, language = {de} } @article{TonnTatarin, author = {Tonn, Christian and Tatarin, Ren{\´e}}, title = {Volumen Rendering in der Architektur: {\"U}berlagerung und Kombination von 3D Voxel Volumendaten mit 3D Geb{\"a}udemodellen}, doi = {10.25643/bauhaus-universitaet.2671}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20160822-26718}, abstract = {Volumerendering ist eine Darstellungstechnik, um verschiedene r{\"a}umliche Mess- und Simulationsdaten anschaulich, interaktiv grafisch darzustellen. Im folgenden Beitrag wird ein Verfahren vorgestellt, mehrere Volumendaten mit einem Architekturfl{\"a}chenmodell zu {\"u}berlagern. Diese komplexe Darstellungsberechnung findet mit hardwarebeschleunigten Shadern auf der Grafikkarte statt. Im Beitrag wird hierzu der implementierte Softwareprototyp "VolumeRendering" vorgestellt. Neben dem interaktiven Berechnungsverfahren wurde ebenso Wert auf eine nutzerfreundliche Bedienung gelegt. Das Ziel bestand darin, eine einfache Bewertung der Volumendaten durch Fachplaner zu erm{\"o}glichen. Durch die {\"U}berlagerung, z. B. verschiedener Messverfahren mit einem Fl{\"a}chenmodell, ergeben sich Synergien und neue Auswertungsm{\"o}glichkeiten. Abschließend wird anhand von Beispielen aus einem interdisziplin{\"a}ren Forschungsprojekt die Anwendung des Softwareprototyps illustriert.}, subject = {Multiple Volume Rendering}, language = {de} } @article{Kalisch, author = {Kalisch, Dominik}, title = {Wissen wer wo wohnt}, doi = {10.25643/bauhaus-universitaet.2669}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20160822-26695}, abstract = {In cities people live together in neighbourhoods. Here they can find the infrastructure they need, starting with shops for the daily purpose to the life-cycle based infrastructures like kindergartens or nursing homes. But not all neighbourhoods are identical. The infrastructure mixture varies from neighbourhood to neighbourhood, but different people have different needs which can change e.g. based on the life cycle situation or their affiliation to a specific milieu. We can assume that a person or family tries to settle in a specific neighbourhood that satisfies their needs. So, if the residents are happy with a neighbourhood, we can further assume that this neighbourhood satisfies their needs. The socio-oeconomic panel (SOEP) of the German Institute for Economy (DIW) is a survey that investigates the economic structure of the German population. Every four years one part of this survey includes questions about what infrastructures can be found in the respondents neighbourhood and the satisfaction of the respondent with their neighbourhood. Further, it is possible to add a milieu estimation for each respondent or household. This gives us the possibility to analyse the typical neighbourhoods in German cities as well as the infrastructure profiles of the different milieus. Therefore, we take the environment variables from the dataset and recode them into a binary variable - whether an infrastructure is available or not. According to Faust (2005), these sets can also be understood, as a network of actors in a neighbourhood, which share two, three or more infrastructures. Like these networks, this neighbourhood network can also be visualized as a bipartite affiliation network and therefore analysed using correspondence analysis. We will show how a neighbourhood analysis will benefit from an upstream correspondence analysis and how this could be done. We will also present and discuss the results of such an analysis.}, subject = {urban planning}, language = {de} } @article{KnechtKoenig, author = {Knecht, Katja and K{\"o}nig, Reinhard}, title = {Automatische Grundst{\"u}cksumlegung mithilfe von Unterteilungsalgorithmen und typenbasierte Generierung von Stadtstrukturen}, doi = {10.25643/bauhaus-universitaet.2673}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20160822-26730}, abstract = {Dieses Arbeitspapier beschreibt, wie ausgehend von einem vorhandenen Straßennetzwerk Bebauungsareale mithilfe von Unterteilungsalgorithmen automatisch umgelegt, d.h. in Grundst{\"u}cke unterteilt, und anschließend auf Basis verschiedener st{\"a}dtebaulicher Typen bebaut werden k{\"o}nnen. Die Unterteilung von Bebauungsarealen und die Generierung von Bebauungsstrukturen unterliegen dabei bestimmten stadtplanerischen Einschr{\"a}nkungen, Vorgaben und Parametern. Ziel ist es aus den dargestellten Untersuchungen heraus ein Vorschlagssystem f{\"u}r stadtplanerische Entw{\"u}rfe zu entwickeln, das anhand der Umsetzung eines ersten Softwareprototyps zur Generierung von Stadtstrukturen weiter diskutiert wird.}, subject = {Automatisierung}, language = {de} } @article{KoenigStandfestSchmitt, author = {K{\"o}nig, Reinhard and Standfest, Matthias and Schmitt, Gerhard}, title = {Evolutionary multi-criteria optimization for building layout planning: Exemplary application based on the PSSA framework}, series = {32nd eCAADe Conference - Volume 2}, journal = {32nd eCAADe Conference - Volume 2}, editor = {Thompson, Emine Mine}, doi = {10.25643/bauhaus-universitaet.2513}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20160121-25139}, pages = {567 -- 574}, abstract = {When working on urban planning projects there are usually multiple aspects to consider. Often these aspects are contradictory and it is not possible to choose one over the other; instead, they each need to be fulfilled as well as possible. Planners typically draw on past experience when subjectively prioritising which aspects to consider with which degree of importance for their planning concepts. This practice, although understandable, places power and authority in the hands of people who have varying degrees of expertise, which means that the best possible solution is not always found, because it is either not sought or the problem is regarded as being too complex for human capabilities. To improve this situation, the project presented here shows the potential of multi-criteria optimisation algorithms using the example of a new housing layout for an urban block. In addition it is shown, how Self-Organizing-Maps can be used to visualise multi-dimensional solution spaces in an easy analysable and comprehensible form.}, subject = {Architektur}, language = {en} } @article{KoenigKnecht, author = {K{\"o}nig, Reinhard and Knecht, Katja}, title = {Comparing two evolutionary algorithm based methods for layout generation: Dense packing versus subdivision}, series = {Artificial Intelligence for Engineering Design, Analysis and Manufacturing}, journal = {Artificial Intelligence for Engineering Design, Analysis and Manufacturing}, pages = {285 -- 299}, abstract = {We present and compare two evolutionary algorithm based methods for rectangular architectural layout generation: dense packing and subdivision algorithms.We analyze the characteristics of the two methods on the basis of three floor plan sce- narios. Our analyses include the speed with which solutions are generated, the reliability with which optimal solutions can be found, and the number of different solutions that can be found overall. In a following step, we discuss the methods with respect to their different user interaction capabilities. In addition, we show that each method has the capability to generate more complex L-shaped layouts. Finally,we conclude that neither of the methods is superior but that each of them is suitable for use in distinct application scenarios because of its different properties.}, subject = {Architektur}, language = {en} } @article{Koenig, author = {K{\"o}nig, Reinhard}, title = {Interview on Information Architecture}, series = {Swiss Architecture in the Moving Image}, journal = {Swiss Architecture in the Moving Image}, doi = {10.25643/bauhaus-universitaet.2507}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20180422-25078}, pages = {151 -- 154}, abstract = {Interview on Information Architecture}, subject = {Architektur}, language = {en} } @article{KleinKoenig, author = {Klein, Bernhard and K{\"o}nig, Reinhard}, title = {Computational Urban Planning: Using the Value Lab as Control Center}, series = {FCL Magazine, Special Issue Simulation Platform}, journal = {FCL Magazine, Special Issue Simulation Platform}, doi = {10.25643/bauhaus-universitaet.2601}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20160622-26011}, pages = {38 -- 45}, abstract = {Urban planning involves many aspects and various disciplines, demanding an asynchronous planning approach. The level of complexity rises with each aspect to be considered and makes it difficult to find universally satisfactory solutions. To improve this situation we propose a new approach, which complement traditional design methods with a computational urban plan- ning method that can fulfil formalizable design requirements automatically. Based on this approach we present a design space exploration framework for complex urban planning projects. For a better understanding of the idea of design space exploration, we introduce the concept of a digital scout which guides planners through the design space and assists them in their creative explorations. The scout can support planners during manual design by informing them about potential im- pacts or by suggesting different solutions that fulfill predefined quality requirements. The planner can change flexibly between a manually controlled and a completely automated design process. The developed system is presented using an exemplary urban planning scenario on two levels from the street layout to the placement of building volumes. Based on Self-Organizing Maps we implemented a method which makes it possible to visualize the multi-dimensional solution space in an easily analysable and comprehensible form.}, subject = {Stadtgestaltung}, language = {en} } @article{TreyerKleinKoenigetal., author = {Treyer, Lukas and Klein, Bernhard and K{\"o}nig, Reinhard and Meixner, Christine}, title = {Lightweight Urban Computation Interchange (LUCI): A System to Couple Heterogenous Simulations and Views}, series = {Spatial Information Research}, journal = {Spatial Information Research}, doi = {10.25643/bauhaus-universitaet.2603}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20160622-26037}, pages = {1 -- 12}, abstract = {In this paper we introduce LUCI, a Lightweight Urban Calculation Interchange system, designed to bring the advantages of calculation and content co-ordination system to small planning and design groups by the means of an open source middle-ware. The middle-ware focuses on problems typical to urban planning and therefore features a geo-data repository as well as a job runtime administration, to coordinate simulation models and its multiple views. The described system architecture is accompanied by two exemplary use cases, that have been used to test and further develop our concepts and implementations.}, language = {en} } @article{HijaziKoenigSchneideretal., author = {Hijazi, Ihab Hamzi and K{\"o}nig, Reinhard and Schneider, Sven and Li, Xin and Bielik, Martin and Schmitt, Gerhard and Donath, Dirk}, title = {Geostatistical Analysis for the Study of Relationships between the Emotional Responses of Urban Walkers to Urban Spaces}, series = {International Journal of E-Planning Research}, journal = {International Journal of E-Planning Research}, doi = {10.25643/bauhaus-universitaet.2602}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20160622-26025}, pages = {1 -- 19}, abstract = {The described study aims to find correlations between urban spatial configurations and human emotions. To this end, the authors measured people's emotions while they walk along a path in an urban area using an instrument that measures skin conductance and skin temperature. The corresponding locations of the test persons were measured recorded by using a GPS-tracker (n=13). The results are interpreted and categorized as measures for positive and negative emotional arousal. To evaluate the technical and methodological process. The test results offer initial evidence that certain spaces or spatial sequences do cause positive or negative emotional arousal while others are relatively neutral. To achieve the goal of the study, the outcome was used as a basis for the study of testing correlations between people's emotional responses and urban spatial configurations represented by Isovist properties of the urban form. By using their model the authors can explain negative emotional arousal for certain places, but they couldn't find a model to predict emotional responses for individual spatial configurations.}, subject = {Geografie}, language = {en} } @article{MosaviHosseiniImaniZalzaretal., author = {Mosavi, Amir and Hosseini Imani, Mahmood and Zalzar, Shaghayegh and Shamshirband, Shahaboddin}, title = {Strategic Behavior of Retailers for Risk Reduction and Profit Increment via Distributed Generators and Demand Response Programs}, series = {Energies}, volume = {2018}, journal = {Energies}, number = {11, 6}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/en11061602}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20180628-37546}, pages = {24}, abstract = {Following restructuring of power industry, electricity supply to end-use customers has undergone fundamental changes. In the restructured power system, some of the responsibilities of the vertically integrated distribution companies have been assigned to network managers and retailers. Under the new situation, retailers are in charge of providing electrical energy to electricity consumers who have already signed contract with them. Retailers usually provide the required energy at a variable price, from wholesale electricity markets, forward contracts with energy producers, or distributed energy generators, and sell it at a fixed retail price to its clients. Different strategies are implemented by retailers to reduce the potential financial losses and risks associated with the uncertain nature of wholesale spot electricity market prices and electrical load of the consumers. In this paper, the strategic behavior of retailers in implementing forward contracts, distributed energy sources, and demand-response programs with the aim of increasing their profit and reducing their risk, while keeping their retail prices as low as possible, is investigated. For this purpose, risk management problem of the retailer companies collaborating with wholesale electricity markets, is modeled through bi-level programming approach and a comprehensive framework for retail electricity pricing, considering customers' constraints, is provided in this paper. In the first level of the proposed bi-level optimization problem, the retailer maximizes its expected profit for a given risk level of profit variability, while in the second level, the customers minimize their consumption costs. The proposed programming problem is modeled as Mixed Integer programming (MIP) problem and can be efficiently solved using available commercial solvers. The simulation results on a test case approve the effectiveness of the proposed demand-response program based on dynamic pricing approach on reducing the retailer's risk and increasing its profit. In this paper, the decision-making problem of the retailers under dynamic pricing approach for demand response integration have been investigated. The retailer was supposed to rely on forward contracts, DGs, and spot electricity market to supply the required active and reactive power of its customers. To verify the effectiveness of the proposed model, four schemes for retailer's scheduling problem are considered and the resulted profit under each scheme are analyzed and compared. The simulation results on a test case indicate that providing more options for the retailer to buy the required power of its customers and increase its flexibility in buying energy from spot electricity market reduces the retailers' risk and increases its profit. From the customers' perspective also the retailers'accesstodifferentpowersupplysourcesmayleadtoareductionintheretailelectricityprices. Since the retailer would be able to decrease its electricity selling price to the customers without losing its profitability, with the aim of attracting more customers. Inthiswork,theconditionalvalueatrisk(CVaR)measureisusedforconsideringandquantifying riskinthedecision-makingproblems. Amongallthepossibleoptioninfrontoftheretailertooptimize its profit and risk, demand response programs are the most beneficial option for both retailer and its customers. The simulation results on the case study prove that implementing dynamic pricing approach on retail electricity prices to integrate demand response programs can successfully provoke customers to shift their flexible demand from peak-load hours to mid-load and low-load hours. Comparing the simulation results of the third and fourth schemes evidences the impact of DRPs and customers' load shifting on the reduction of retailer's risk, as well as the reduction of retailer's payment to contract holders, DG owners, and spot electricity market. Furthermore, the numerical results imply on the potential of reducing average retail prices up to 8\%, under demand response activation. Consequently, it provides a win-win solution for both retailer and its customers.}, subject = {Risikomanagement}, language = {en} } @article{GhazvineiDarvishiMosavietal., author = {Ghazvinei, Pezhman Taherei and Darvishi, Hossein Hassanpour and Mosavi, Amir and Yusof, Khamaruzaman bin Wan and Alizamir, Meysam and Shamshirband, Shahaboddin and Chau, Kwok-Wing}, title = {Sugarcane growth prediction based on meteorological parameters using extreme learning machine and artificial neural network}, series = {Engineering Applications of Computational Fluid Mechanics}, volume = {2018}, journal = {Engineering Applications of Computational Fluid Mechanics}, number = {12,1}, publisher = {Taylor \& Francis}, doi = {10.1080/19942060.2018.1526119}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20181017-38129}, pages = {738 -- 749}, abstract = {Management strategies for sustainable sugarcane production need to deal with the increasing complexity and variability of the whole sugar system. Moreover, they need to accommodate the multiple goals of different industry sectors and the wider community. Traditional disciplinary approaches are unable to provide integrated management solutions, and an approach based on whole systems analysis is essential to bring about beneficial change to industry and the community. The application of this approach to water management, environmental management and cane supply management is outlined, where the literature indicates that the application of extreme learning machine (ELM) has never been explored in this realm. Consequently, the leading objective of the current research was set to filling this gap by applying ELM to launch swift and accurate model for crop production data-driven. The key learning has been the need for innovation both in the technical aspects of system function underpinned by modelling of sugarcane growth. Therefore, the current study is an attempt to establish an integrate model using ELM to predict the concluding growth amount of sugarcane. Prediction results were evaluated and further compared with artificial neural network (ANN) and genetic programming models. Accuracy of the ELM model is calculated using the statistics indicators of Root Means Square Error (RMSE), Pearson Coefficient (r), and Coefficient of Determination (R2) with promising results of 0.8, 0.47, and 0.89, respectively. The results also show better generalization ability in addition to faster learning curve. Thus, proficiency of the ELM for supplementary work on advancement of prediction model for sugarcane growth was approved with promising results.}, subject = {K{\"u}nstliche Intelligenz}, language = {en} } @article{FaizollahzadehArdabiliNajafiAlizamiretal., author = {Faizollahzadeh Ardabili, Sina and Najafi, Bahman and Alizamir, Meysam and Mosavi, Amir and Shamshirband, Shahaboddin and Rabczuk, Timon}, title = {Using SVM-RSM and ELM-RSM Approaches for Optimizing the Production Process of Methyl and Ethyl Esters}, series = {Energies}, journal = {Energies}, number = {11, 2889}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/en11112889}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20181025-38170}, pages = {1 -- 20}, abstract = {The production of a desired product needs an effective use of the experimental model. The present study proposes an extreme learning machine (ELM) and a support vector machine (SVM) integrated with the response surface methodology (RSM) to solve the complexity in optimization and prediction of the ethyl ester and methyl ester production process. The novel hybrid models of ELM-RSM and ELM-SVM are further used as a case study to estimate the yield of methyl and ethyl esters through a trans-esterification process from waste cooking oil (WCO) based on American Society for Testing and Materials (ASTM) standards. The results of the prediction phase were also compared with artificial neural networks (ANNs) and adaptive neuro-fuzzy inference system (ANFIS), which were recently developed by the second author of this study. Based on the results, an ELM with a correlation coefficient of 0.9815 and 0.9863 for methyl and ethyl esters, respectively, had a high estimation capability compared with that for SVM, ANNs, and ANFIS. Accordingly, the maximum production yield was obtained in the case of using ELM-RSM of 96.86\% for ethyl ester at a temperature of 68.48 °C, a catalyst value of 1.15 wt. \%, mixing intensity of 650.07 rpm, and an alcohol to oil molar ratio (A/O) of 5.77; for methyl ester, the production yield was 98.46\% at a temperature of 67.62 °C, a catalyst value of 1.1 wt. \%, mixing intensity of 709.42 rpm, and an A/O of 6.09. Therefore, ELM-RSM increased the production yield by 3.6\% for ethyl ester and 3.1\% for methyl ester, compared with those for the experimental data.}, subject = {Biodiesel}, language = {en} } @article{BielikSchneiderKuligaetal., author = {Bielik, Martin and Schneider, Sven and Kuliga, Saskia and Griego, Danielle and Ojha, Varun and K{\"o}nig, Reinhard and Schmitt, Gerhard and Donath, Dirk}, title = {Examining Trade-Offs between Social, Psychological, and Energy Potential of Urban Form}, series = {ISPRS International Journal of Geo-Information}, volume = {2019}, journal = {ISPRS International Journal of Geo-Information}, editor = {Resch, Bernd and Szell, Michael}, doi = {10.3390/ijgi8020052}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20190408-38695}, abstract = {Urban planners are often challenged with the task of developing design solutions which must meet multiple, and often contradictory, criteria. In this paper, we investigated the trade-offs between social, psychological, and energy potential of the fundamental elements of urban form: the street network and the building massing. Since formal mehods to evaluate urban form from the psychological and social point of view are not readily available, we developed a methodological framework to quantify these criteria as the first contribution in this paper. To evaluate the psychological potential, we conducted a three-tiered empirical study starting from real world environments and then abstracting them to virtual environments. In each context, the implicit (physiological) response and explicit (subjective) response of pedestrians were measured. To quantify the social potential, we developed a street network centrality-based measure of social accessibility. For the energy potential, we created an energy model to analyze the impact of pure geometric form on the energy demand of the building stock. The second contribution of this work is a method to identify distinct clusters of urban form and, for each, explore the trade-offs between the select design criteria. We applied this method to two case studies identifying nine types of urban form and their respective potential trade-offs, which are directly applicable for the assessment of strategic decisions regarding urban form during the early planning stages.}, subject = {Planung}, language = {en} } @article{OuaerHosseiniAmaretal., author = {Ouaer, Hocine and Hosseini, Amir Hossein and Amar, Menad Nait and Ben Seghier, Mohamed El Amine and Ghriga, Mohammed Abdelfetah and Nabipour, Narjes and Andersen, P{\aa}l {\O}steb{\o} and Mosavi, Amir and Shamshirband, Shahaboddin}, title = {Rigorous Connectionist Models to Predict Carbon Dioxide Solubility in Various Ionic Liquids}, series = {Applied Sciences}, volume = {2020}, journal = {Applied Sciences}, number = {Volume 10, Issue 1, 304}, publisher = {MDPI}, doi = {https://doi.org/10.3390/app10010304}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200107-40558}, pages = {18}, abstract = {Estimating the solubility of carbon dioxide in ionic liquids, using reliable models, is of paramount importance from both environmental and economic points of view. In this regard, the current research aims at evaluating the performance of two data-driven techniques, namely multilayer perceptron (MLP) and gene expression programming (GEP), for predicting the solubility of carbon dioxide (CO2) in ionic liquids (ILs) as the function of pressure, temperature, and four thermodynamical parameters of the ionic liquid. To develop the above techniques, 744 experimental data points derived from the literature including 13 ILs were used (80\% of the points for training and 20\% for validation). Two backpropagation-based methods, namely Levenberg-Marquardt (LM) and Bayesian Regularization (BR), were applied to optimize the MLP algorithm. Various statistical and graphical assessments were applied to check the credibility of the developed techniques. The results were then compared with those calculated using Peng-Robinson (PR) or Soave-Redlich-Kwong (SRK) equations of state (EoS). The highest coefficient of determination (R2 = 0.9965) and the lowest root mean square error (RMSE = 0.0116) were recorded for the MLP-LMA model on the full dataset (with a negligible difference to the MLP-BR model). The comparison of results from this model with the vastly applied thermodynamic equation of state models revealed slightly better performance, but the EoS approaches also performed well with R2 from 0.984 up to 0.996. Lastly, the newly established correlation based on the GEP model exhibited very satisfactory results with overall values of R2 = 0.9896 and RMSE = 0.0201.}, subject = {Maschinelles Lernen}, language = {en} } @article{AhmadiBaghbanSadeghzadehetal., author = {Ahmadi, Mohammad Hossein and Baghban, Alireza and Sadeghzadeh, Milad and Zamen, Mohammad and Mosavi, Amir and Shamshirband, Shahaboddin and Kumar, Ravinder and Mohammadi-Khanaposhtani, Mohammad}, title = {Evaluation of electrical efficiency of photovoltaic thermal solar collector}, series = {Engineering Applications of Computational Fluid Mechanics}, volume = {2020}, journal = {Engineering Applications of Computational Fluid Mechanics}, number = {volume 14, issue 1}, publisher = {Taylor \& Francis}, doi = {10.1080/19942060.2020.1734094}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200304-41049}, pages = {545 -- 565}, abstract = {In this study, machine learning methods of artificial neural networks (ANNs), least squares support vector machines (LSSVM), and neuro-fuzzy are used for advancing prediction models for thermal performance of a photovoltaic-thermal solar collector (PV/T). In the proposed models, the inlet temperature, flow rate, heat, solar radiation, and the sun heat have been considered as the input variables. Data set has been extracted through experimental measurements from a novel solar collector system. Different analyses are performed to examine the credibility of the introduced models and evaluate their performances. The proposed LSSVM model outperformed the ANFIS and ANNs models. LSSVM model is reported suitable when the laboratory measurements are costly and time-consuming, or achieving such values requires sophisticated interpretations.}, subject = {Fotovoltaik}, language = {en} } @article{MosaviShamshirbandEsmaeilbeikietal., author = {Mosavi, Amir and Shamshirband, Shahaboddin and Esmaeilbeiki, Fatemeh and Zarehaghi, Davoud and Neyshabouri, Mohammadreza and Samadianfard, Saeed and Ghorbani, Mohammad Ali and Nabipour, Narjes and Chau, Kwok-Wing}, title = {Comparative analysis of hybrid models of firefly optimization algorithm with support vector machines and multilayer perceptron for predicting soil temperature at different depths}, series = {Engineering Applications of Computational Fluid Mechanics}, volume = {2020}, journal = {Engineering Applications of Computational Fluid Mechanics}, number = {Volume 14, Issue 1}, doi = {10.1080/19942060.2020.1788644}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200911-42347}, pages = {939 -- 953}, abstract = {This research aims to model soil temperature (ST) using machine learning models of multilayer perceptron (MLP) algorithm and support vector machine (SVM) in hybrid form with the Firefly optimization algorithm, i.e. MLP-FFA and SVM-FFA. In the current study, measured ST and meteorological parameters of Tabriz and Ahar weather stations in a period of 2013-2015 are used for training and testing of the studied models with one and two days as a delay. To ascertain conclusive results for validation of the proposed hybrid models, the error metrics are benchmarked in an independent testing period. Moreover, Taylor diagrams utilized for that purpose. Obtained results showed that, in a case of one day delay, except in predicting ST at 5 cm below the soil surface (ST5cm) at Tabriz station, MLP-FFA produced superior results compared with MLP, SVM, and SVM-FFA models. However, for two days delay, MLP-FFA indicated increased accuracy in predicting ST5cm and ST 20cm of Tabriz station and ST10cm of Ahar station in comparison with SVM-FFA. Additionally, for all of the prescribed models, the performance of the MLP-FFA and SVM-FFA hybrid models in the testing phase was found to be meaningfully superior to the classical MLP and SVM models.}, subject = {Bodentemperatur}, language = {en} } @article{HomaeiSoleimaniShamshirbandetal., author = {Homaei, Mohammad Hossein and Soleimani, Faezeh and Shamshirband, Shahaboddin and Mosavi, Amir and Nabipour, Narjes and Varkonyi-Koczy, Annamaria R.}, title = {An Enhanced Distributed Congestion Control Method for Classical 6LowPAN Protocols Using Fuzzy Decision System}, series = {IEEE Access}, journal = {IEEE Access}, number = {volume 8}, publisher = {IEEE}, doi = {10.1109/ACCESS.2020.2968524}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200213-40805}, pages = {20628 -- 20645}, abstract = {The classical Internet of things routing and wireless sensor networks can provide more precise monitoring of the covered area due to the higher number of utilized nodes. Because of the limitations in shared transfer media, many nodes in the network are prone to the collision in simultaneous transmissions. Medium access control protocols are usually more practical in networks with low traffic, which are not subjected to external noise from adjacent frequencies. There are preventive, detection and control solutions to congestion management in the network which are all the focus of this study. In the congestion prevention phase, the proposed method chooses the next step of the path using the Fuzzy decision-making system to distribute network traffic via optimal paths. In the congestion detection phase, a dynamic approach to queue management was designed to detect congestion in the least amount of time and prevent the collision. In the congestion control phase, the back-pressure method was used based on the quality of the queue to decrease the probability of linking in the pathway from the pre-congested node. The main goals of this study are to balance energy consumption in network nodes, reducing the rate of lost packets and increasing quality of service in routing. Simulation results proved the proposed Congestion Control Fuzzy Decision Making (CCFDM) method was more capable in improving routing parameters as compared to recent algorithms.}, subject = {Internet der dinge}, language = {en} } @article{HassannatajJoloudariHassannatajJoloudariSaadatfaretal., author = {Hassannataj Joloudari, Javad and Hassannataj Joloudari, Edris and Saadatfar, Hamid and GhasemiGol, Mohammad and Razavi, Seyyed Mohammad and Mosavi, Amir and Nabipour, Narjes and Shamshirband, Shahaboddin and Nadai, Laszlo}, title = {Coronary Artery Disease Diagnosis: Ranking the Significant Features Using a Random Trees Model}, series = {International Journal of Environmental Research and Public Health, IJERPH}, volume = {2020}, journal = {International Journal of Environmental Research and Public Health, IJERPH}, number = {Volume 17, Issue 3, 731}, publisher = {MDPI}, doi = {10.3390/ijerph17030731}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200213-40819}, pages = {24}, abstract = {Heart disease is one of the most common diseases in middle-aged citizens. Among the vast number of heart diseases, coronary artery disease (CAD) is considered as a common cardiovascular disease with a high death rate. The most popular tool for diagnosing CAD is the use of medical imaging, e.g., angiography. However, angiography is known for being costly and also associated with a number of side effects. Hence, the purpose of this study is to increase the accuracy of coronary heart disease diagnosis through selecting significant predictive features in order of their ranking. In this study, we propose an integrated method using machine learning. The machine learning methods of random trees (RTs), decision tree of C5.0, support vector machine (SVM), and decision tree of Chi-squared automatic interaction detection (CHAID) are used in this study. The proposed method shows promising results and the study confirms that the RTs model outperforms other models.}, subject = {Maschinelles Lernen}, language = {en} } @article{KargarSamadianfardParsaetal., author = {Kargar, Katayoun and Samadianfard, Saeed and Parsa, Javad and Nabipour, Narjes and Shamshirband, Shahaboddin and Mosavi, Amir and Chau, Kwok-Wing}, title = {Estimating longitudinal dispersion coefficient in natural streams using empirical models and machine learning algorithms}, series = {Engineering Applications of Computational Fluid Mechanics}, volume = {2020}, journal = {Engineering Applications of Computational Fluid Mechanics}, number = {Volume 14, No. 1}, publisher = {Taylor \& Francis}, doi = {10.1080/19942060.2020.1712260}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200128-40775}, pages = {311 -- 322}, abstract = {The longitudinal dispersion coefficient (LDC) plays an important role in modeling the transport of pollutants and sediment in natural rivers. As a result of transportation processes, the concentration of pollutants changes along the river. Various studies have been conducted to provide simple equations for estimating LDC. In this study, machine learning methods, namely support vector regression, Gaussian process regression, M5 model tree (M5P) and random forest, and multiple linear regression were examined in predicting the LDC in natural streams. Data sets from 60 rivers around the world with different hydraulic and geometric features were gathered to develop models for LDC estimation. Statistical criteria, including correlation coefficient (CC), root mean squared error (RMSE) and mean absolute error (MAE), were used to scrutinize the models. The LDC values estimated by these models were compared with the corresponding results of common empirical models. The Taylor chart was used to evaluate the models and the results showed that among the machine learning models, M5P had superior performance, with CC of 0.823, RMSE of 454.9 and MAE of 380.9. The model of Sahay and Dutta, with CC of 0.795, RMSE of 460.7 and MAE of 306.1, gave more precise results than the other empirical models. The main advantage of M5P models is their ability to provide practical formulae. In conclusion, the results proved that the developed M5P model with simple formulations was superior to other machine learning models and empirical models; therefore, it can be used as a proper tool for estimating the LDC in rivers.}, subject = {Maschinelles Lernen}, language = {en} } @article{DehghaniSalehiMosavietal., author = {Dehghani, Majid and Salehi, Somayeh and Mosavi, Amir and Nabipour, Narjes and Shamshirband, Shahaboddin and Ghamisi, Pedram}, title = {Spatial Analysis of Seasonal Precipitation over Iran: Co-Variation with Climate Indices}, series = {ISPRS, International Journal of Geo-Information}, volume = {2020}, journal = {ISPRS, International Journal of Geo-Information}, number = {Volume 9, Issue 2, 73}, publisher = {MDPI}, doi = {10.3390/ijgi9020073}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200128-40740}, pages = {23}, abstract = {Temporary changes in precipitation may lead to sustained and severe drought or massive floods in different parts of the world. Knowing the variation in precipitation can effectively help the water resources decision-makers in water resources management. Large-scale circulation drivers have a considerable impact on precipitation in different parts of the world. In this research, the impact of El Ni{\~n}o-Southern Oscillation (ENSO), Pacific Decadal Oscillation (PDO), and North Atlantic Oscillation (NAO) on seasonal precipitation over Iran was investigated. For this purpose, 103 synoptic stations with at least 30 years of data were utilized. The Spearman correlation coefficient between the indices in the previous 12 months with seasonal precipitation was calculated, and the meaningful correlations were extracted. Then, the month in which each of these indices has the highest correlation with seasonal precipitation was determined. Finally, the overall amount of increase or decrease in seasonal precipitation due to each of these indices was calculated. Results indicate the Southern Oscillation Index (SOI), NAO, and PDO have the most impact on seasonal precipitation, respectively. Additionally, these indices have the highest impact on the precipitation in winter, autumn, spring, and summer, respectively. SOI has a diverse impact on winter precipitation compared to the PDO and NAO, while in the other seasons, each index has its special impact on seasonal precipitation. Generally, all indices in different phases may decrease the seasonal precipitation up to 100\%. However, the seasonal precipitation may increase more than 100\% in different seasons due to the impact of these indices. The results of this study can be used effectively in water resources management and especially in dam operation.}, subject = {Maschinelles Lernen}, language = {en} } @article{SaqlaiGhaniKhanetal., author = {Saqlai, Syed Muhammad and Ghani, Anwar and Khan, Imran and Ahmed Khan Ghayyur, Shahbaz and Shamshirband, Shahaboddin and Nabipour, Narjes and Shokri, Manouchehr}, title = {Image Analysis Using Human Body Geometry and Size Proportion Science for Action Classification}, series = {Applied Sciences}, volume = {2020}, journal = {Applied Sciences}, number = {volume 10, issue 16, article 5453}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/app10165453}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200904-42322}, pages = {24}, abstract = {Gestures are one of the basic modes of human communication and are usually used to represent different actions. Automatic recognition of these actions forms the basis for solving more complex problems like human behavior analysis, video surveillance, event detection, and sign language recognition, etc. Action recognition from images is a challenging task as the key information like temporal data, object trajectory, and optical flow are not available in still images. While measuring the size of different regions of the human body i.e., step size, arms span, length of the arm, forearm, and hand, etc., provides valuable clues for identification of the human actions. In this article, a framework for classification of the human actions is presented where humans are detected and localized through faster region-convolutional neural networks followed by morphological image processing techniques. Furthermore, geometric features from human blob are extracted and incorporated into the classification rules for the six human actions i.e., standing, walking, single-hand side wave, single-hand top wave, both hands side wave, and both hands top wave. The performance of the proposed technique has been evaluated using precision, recall, omission error, and commission error. The proposed technique has been comparatively analyzed in terms of overall accuracy with existing approaches showing that it performs well in contrast to its counterparts.}, subject = {Bildanalyse}, language = {en} } @article{HarirchianLahmerKumarietal., author = {Harirchian, Ehsan and Lahmer, Tom and Kumari, Vandana and Jadhav, Kirti}, title = {Application of Support Vector Machine Modeling for the Rapid Seismic Hazard Safety Evaluation of Existing Buildings}, series = {Energies}, volume = {2020}, journal = {Energies}, number = {volume 13, issue 13, 3340}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/en13133340}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200707-41915}, pages = {15}, abstract = {The economic losses from earthquakes tend to hit the national economy considerably; therefore, models that are capable of estimating the vulnerability and losses of future earthquakes are highly consequential for emergency planners with the purpose of risk mitigation. This demands a mass prioritization filtering of structures to identify vulnerable buildings for retrofitting purposes. The application of advanced structural analysis on each building to study the earthquake response is impractical due to complex calculations, long computational time, and exorbitant cost. This exhibits the need for a fast, reliable, and rapid method, commonly known as Rapid Visual Screening (RVS). The method serves as a preliminary screening platform, using an optimum number of seismic parameters of the structure and predefined output damage states. In this study, the efficacy of the Machine Learning (ML) application in damage prediction through a Support Vector Machine (SVM) model as the damage classification technique has been investigated. The developed model was trained and examined based on damage data from the 1999 D{\"u}zce Earthquake in Turkey, where the building's data consists of 22 performance modifiers that have been implemented with supervised machine learning.}, subject = {Erdbeben}, language = {en} } @article{KavrakovKareemMorgenthal, author = {Kavrakov, Igor and Kareem, Ahsan and Morgenthal, Guido}, title = {Comparison Metrics for Time-histories: Application to Bridge Aerodynamics}, doi = {10.25643/bauhaus-universitaet.4186}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200625-41863}, pages = {28}, abstract = {Wind effects can be critical for the design of lifelines such as long-span bridges. The existence of a significant number of aerodynamic force models, used to assess the performance of bridges, poses an important question regarding their comparison and validation. This study utilizes a unified set of metrics for a quantitative comparison of time-histories in bridge aerodynamics with a host of characteristics. Accordingly, nine comparison metrics are included to quantify the discrepancies in local and global signal features such as phase, time-varying frequency and magnitude content, probability density, nonstationarity and nonlinearity. Among these, seven metrics available in the literature are introduced after recasting them for time-histories associated with bridge aerodynamics. Two additional metrics are established to overcome the shortcomings of the existing metrics. The performance of the comparison metrics is first assessed using generic signals with prescribed signal features. Subsequently, the metrics are applied to a practical example from bridge aerodynamics to quantify the discrepancies in the aerodynamic forces and response based on numerical and semi-analytical aerodynamic models. In this context, it is demonstrated how a discussion based on the set of comparison metrics presented here can aid a model evaluation by offering deeper insight. The outcome of the study is intended to provide a framework for quantitative comparison and validation of aerodynamic models based on the underlying physics of fluid-structure interaction. Immediate further applications are expected for the comparison of time-histories that are simulated by data-driven approaches.}, subject = {Ingenieurwissenschaften}, language = {en} } @article{ShabaniSamadianfardSattarietal., author = {Shabani, Sevda and Samadianfard, Saeed and Sattari, Mohammad Taghi and Mosavi, Amir and Shamshirband, Shahaboddin and Kmet, Tibor and V{\´a}rkonyi-K{\´o}czy, Annam{\´a}ria R.}, title = {Modeling Pan Evaporation Using Gaussian Process Regression K-Nearest Neighbors Random Forest and Support Vector Machines; Comparative Analysis}, series = {Atmosphere}, volume = {2020}, journal = {Atmosphere}, number = {Volume 11, Issue 1, 66}, doi = {10.3390/atmos11010066}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200110-40561}, pages = {17}, abstract = {Evaporation is a very important process; it is one of the most critical factors in agricultural, hydrological, and meteorological studies. Due to the interactions of multiple climatic factors, evaporation is considered as a complex and nonlinear phenomenon to model. Thus, machine learning methods have gained popularity in this realm. In the present study, four machine learning methods of Gaussian Process Regression (GPR), K-Nearest Neighbors (KNN), Random Forest (RF) and Support Vector Regression (SVR) were used to predict the pan evaporation (PE). Meteorological data including PE, temperature (T), relative humidity (RH), wind speed (W), and sunny hours (S) collected from 2011 through 2017. The accuracy of the studied methods was determined using the statistical indices of Root Mean Squared Error (RMSE), correlation coefficient (R) and Mean Absolute Error (MAE). Furthermore, the Taylor charts utilized for evaluating the accuracy of the mentioned models. The results of this study showed that at Gonbad-e Kavus, Gorgan and Bandar Torkman stations, GPR with RMSE of 1.521 mm/day, 1.244 mm/day, and 1.254 mm/day, KNN with RMSE of 1.991 mm/day, 1.775 mm/day, and 1.577 mm/day, RF with RMSE of 1.614 mm/day, 1.337 mm/day, and 1.316 mm/day, and SVR with RMSE of 1.55 mm/day, 1.262 mm/day, and 1.275 mm/day had more appropriate performances in estimating PE values. It was found that GPR for Gonbad-e Kavus Station with input parameters of T, W and S and GPR for Gorgan and Bandar Torkmen stations with input parameters of T, RH, W and S had the most accurate predictions and were proposed for precise estimation of PE. The findings of the current study indicated that the PE values may be accurately estimated with few easily measured meteorological parameters.}, subject = {Maschinelles Lernen}, language = {en} } @article{AbbaspourGilandehMolaeeSabzietal., author = {Abbaspour-Gilandeh, Yousef and Molaee, Amir and Sabzi, Sajad and Nabipour, Narjes and Shamshirband, Shahaboddin and Mosavi, Amir}, title = {A Combined Method of Image Processing and Artificial Neural Network for the Identification of 13 Iranian Rice Cultivars}, series = {agronomy}, volume = {2020}, journal = {agronomy}, number = {Volume 10, Issue 1, 117}, publisher = {MDPI}, doi = {10.3390/agronomy10010117}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200123-40695}, pages = {21}, abstract = {Due to the importance of identifying crop cultivars, the advancement of accurate assessment of cultivars is considered essential. The existing methods for identifying rice cultivars are mainly time-consuming, costly, and destructive. Therefore, the development of novel methods is highly beneficial. The aim of the present research is to classify common rice cultivars in Iran based on color, morphologic, and texture properties using artificial intelligence (AI) methods. In doing so, digital images of 13 rice cultivars in Iran in three forms of paddy, brown, and white are analyzed through pre-processing and segmentation of using MATLAB. Ninety-two specificities, including 60 color, 14 morphologic, and 18 texture properties, were identified for each rice cultivar. In the next step, the normal distribution of data was evaluated, and the possibility of observing a significant difference between all specificities of cultivars was studied using variance analysis. In addition, the least significant difference (LSD) test was performed to obtain a more accurate comparison between cultivars. To reduce data dimensions and focus on the most effective components, principal component analysis (PCA) was employed. Accordingly, the accuracy of rice cultivar separations was calculated for paddy, brown rice, and white rice using discriminant analysis (DA), which was 89.2\%, 87.7\%, and 83.1\%, respectively. To identify and classify the desired cultivars, a multilayered perceptron neural network was implemented based on the most effective components. The results showed 100\% accuracy of the network in identifying and classifying all mentioned rice cultivars. Hence, it is concluded that the integrated method of image processing and pattern recognition methods, such as statistical classification and artificial neural networks, can be used for identifying and classification of rice cultivars.}, subject = {Maschinelles Lernen}, language = {en} } @article{FaroughiKarimimoshaverArametal., author = {Faroughi, Maryam and Karimimoshaver, Mehrdad and Aram, Farshid and Solgi, Ebrahim and Mosavi, Amir and Nabipour, Narjes and Chau, Kwok-Wing}, title = {Computational modeling of land surface temperature using remote sensing data to investigate the spatial arrangement of buildings and energy consumption relationship}, series = {Engineering Applications of Computational Fluid Mechanics}, volume = {2020}, journal = {Engineering Applications of Computational Fluid Mechanics}, number = {Volume 14, No. 1}, publisher = {Taylor \& Francis}, doi = {https://doi.org/10.1080/19942060.2019.1707711}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200110-40585}, pages = {254 -- 270}, abstract = {The effect of urban form on energy consumption has been the subject of various studies around the world. Having examined the effect of buildings on energy consumption, these studies indicate that the physical form of a city has a notable impact on the amount of energy consumed in its spaces. The present study identified the variables that affected energy consumption in residential buildings and analyzed their effects on energy consumption in four neighborhoods in Tehran: Apadana, Bimeh, Ekbatan-phase I, and Ekbatan-phase II. After extracting the variables, their effects are estimated with statistical methods, and the results are compared with the land surface temperature (LST) remote sensing data derived from Landsat 8 satellite images taken in the winter of 2019. The results showed that physical variables, such as the size of buildings, population density, vegetation cover, texture concentration, and surface color, have the greatest impacts on energy usage. For the Apadana neighborhood, the factors with the most potent effect on energy consumption were found to be the size of buildings and the population density. However, for other neighborhoods, in addition to these two factors, a third factor was also recognized to have a significant effect on energy consumption. This third factor for the Bimeh, Ekbatan-I, and Ekbatan-II neighborhoods was the type of buildings, texture concentration, and orientation of buildings, respectively.}, subject = {Fernerkung}, language = {en} } @article{NabipourMosaviBaghbanetal., author = {Nabipour, Narjes and Mosavi, Amir and Baghban, Alireza and Shamshirband, Shahaboddin and Felde, Imre}, title = {Extreme Learning Machine-Based Model for Solubility Estimation of Hydrocarbon Gases in Electrolyte Solutions}, series = {Processes}, volume = {2020}, journal = {Processes}, number = {Volume 8, Issue 1, 92}, publisher = {MDPI}, doi = {10.3390/pr8010092}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200113-40624}, pages = {12}, abstract = {Calculating hydrocarbon components solubility of natural gases is known as one of the important issues for operational works in petroleum and chemical engineering. In this work, a novel solubility estimation tool has been proposed for hydrocarbon gases—including methane, ethane, propane, and butane—in aqueous electrolyte solutions based on extreme learning machine (ELM) algorithm. Comparing the ELM outputs with a comprehensive real databank which has 1175 solubility points yielded R-squared values of 0.985 and 0.987 for training and testing phases respectively. Furthermore, the visual comparison of estimated and actual hydrocarbon solubility led to confirm the ability of proposed solubility model. Additionally, sensitivity analysis has been employed on the input variables of model to identify their impacts on hydrocarbon solubility. Such a comprehensive and reliable study can help engineers and scientists to successfully determine the important thermodynamic properties, which are key factors in optimizing and designing different industrial units such as refineries and petrochemical plants.}, subject = {Maschinelles Lernen}, language = {en} } @article{MeiabadiMoradiKaramimoghadametal., author = {Meiabadi, Mohammad Saleh and Moradi, Mahmoud and Karamimoghadam, Mojtaba and Ardabili, Sina and Bodaghi, Mahdi and Shokri, Manouchehr and Mosavi, Amir Hosein}, title = {Modeling the Producibility of 3D Printing in Polylactic Acid Using Artificial Neural Networks and Fused Filament Fabrication}, series = {polymers}, volume = {2021}, journal = {polymers}, number = {Volume 13, issue 19, article 3219}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/polym13193219}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20220110-45518}, pages = {1 -- 21}, abstract = {Polylactic acid (PLA) is a highly applicable material that is used in 3D printers due to some significant features such as its deformation property and affordable cost. For improvement of the end-use quality, it is of significant importance to enhance the quality of fused filament fabrication (FFF)-printed objects in PLA. The purpose of this investigation was to boost toughness and to reduce the production cost of the FFF-printed tensile test samples with the desired part thickness. To remove the need for numerous and idle printing samples, the response surface method (RSM) was used. Statistical analysis was performed to deal with this concern by considering extruder temperature (ET), infill percentage (IP), and layer thickness (LT) as controlled factors. The artificial intelligence method of artificial neural network (ANN) and ANN-genetic algorithm (ANN-GA) were further developed to estimate the toughness, part thickness, and production-cost-dependent variables. Results were evaluated by correlation coefficient and RMSE values. According to the modeling results, ANN-GA as a hybrid machine learning (ML) technique could enhance the accuracy of modeling by about 7.5, 11.5, and 4.5\% for toughness, part thickness, and production cost, respectively, in comparison with those for the single ANN method. On the other hand, the optimization results confirm that the optimized specimen is cost-effective and able to comparatively undergo deformation, which enables the usability of printed PLA objects.}, subject = {3D-Druck}, language = {en} } @article{HahlbrockBraunHeideletal., author = {Hahlbrock, David and Braun, Michael and Heidel, Robin and Lemmen, Patrik and Boumann, Roland and Bruckmann, Tobias and Schramm, Dieter and Helm, Volker and Willmann, Jan}, title = {Cable Robotic 3D-printing: additive manufacturing on the construction site}, series = {Construction Robotics}, volume = {2022}, journal = {Construction Robotics}, publisher = {Springer International Publishing}, address = {Cham}, doi = {10.1007/s41693-022-00082-3}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20230124-48791}, pages = {1 -- 14}, abstract = {This paper outlines an important step in characterizing a novel field of robotic construction research where a cable-driven parallel robot is used to extrude cementitious material in three-dimensional space, and thus offering a comprehensive new approach to computational design and construction, and to robotic fabrication at larger scales. Developed by the Faculty of Art and Design at Bauhaus-University Weimar (Germany), the faculty of Architecture at the University of Applied Sciences Dortmund (Germany) and the Chair of Mechatronics at the University of Duisburg-Essen (Germany), this approach offers unique advantages over existing additive manufacturing methods: the system is easily transportable and scalable, it does not require additional formwork or scaffolding, and it offers digital integration and informational oversight across the entire design and building process. This paper considers 1) key research components of cable robotic 3D-printing (such as computational design, material exploration, and robotic control), and 2) the integration of these parameters into a unified design and building process. The demonstration of the approach at full-scale is of particular concern.}, subject = {Robotik}, language = {en} } @article{Stadler, author = {Stadler, Max}, title = {Gr{\"u}nderzeit. Hightech und Alternativen der Wissenschaft in West-Berlin}, series = {NTM Zeitschrift f{\"u}r Geschichte der Wissenschaften, Technik und Medizin}, volume = {2022}, journal = {NTM Zeitschrift f{\"u}r Geschichte der Wissenschaften, Technik und Medizin}, number = {30 (2022)}, publisher = {Basel}, address = {Birkh{\"a}user}, doi = {10.1007/s00048-022-00352-9}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20230124-48800}, pages = {599 -- 632}, abstract = {Zu den diversen Unternehmungen sozialbewegter „Gegenwissenschaft", die um 1980 auf der Bildfl{\"a}che der BRD erschienen, z{\"a}hlte der 1982 gegr{\"u}ndete Berliner Wissenschaftsladen e. V., kurz WILAB - eine Art „alternatives" Spin-off der Technischen Universit{\"a}t Berlin. Der vorliegende Beitrag situiert die Ausgr{\"u}ndung des „Ladens" im Kontext zeitgen{\"o}ssischer Fortschritte der (regionalen) Forschungs- und Technologiepolitik. Gezeigt wird, wie der deindustrialisierenden Inselstadt, qua „innovationspolitischer" Gegensteuerung, dabei sogar eine gewisse Vorreiterrolle zukam: {\"u}ber die Stadtgrenzen hinaus sichtbare Neuerungen wie die Gr{\"u}ndermesse BIG TECH oder das 1983 er{\"o}ffnete Berliner Innovations- und Gr{\"u}nderzentrum (BIG), der erste „Incubator" [sic] der BRD, etwa gingen auf das Konto der 1977/78 lancierten Technologie-Transferstelle der TU Berlin, TU-transfer. Anders gesagt: tendenziell bekam man es hier nun mit Verh{\"a}ltnissen zu tun, die immer weniger mit den Tr{\"a}umen einer „kritischen", nicht-fremdbestimmten (Gegen‑)Wissenschaft kompatibel waren. Latent kontr{\"a}r zur historiographischen Prominenz des wissenschaftskritischen Zeitgeists fristeten „alternativen" Zielsetzungen verpflichtete Unternehmungen wie „WILAB" ein relativ marginalisiertes Nischendasein. Dennoch wirft das am WILAB verfolgte, so gesehen wenig aussichtsreiche Anliegen, eine andere, n{\"a}mlich „humanere" Informationstechnologie in die Wege zu leiten, ein instruktives Licht auf die Aufbr{\"u}che „unternehmerischer" Wissenschaft in der BRD um 1980.}, subject = {Berlin }, language = {de} } @article{SoebkeLueck, author = {S{\"o}bke, Heinrich and L{\"u}ck, Andrea}, title = {Framing Algorithm-Driven Development of Sets of Objectives Using Elementary Interactions}, series = {Applied System Innovation}, volume = {2022}, journal = {Applied System Innovation}, number = {Volume 5, issue 3, article 49}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/asi5030049}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20220713-46624}, pages = {1 -- 20}, abstract = {Multi-criteria decision analysis (MCDA) is an established methodology to support the decision-making of multi-objective problems. For conducting an MCDA, in most cases, a set of objectives (SOO) is required, which consists of a hierarchical structure comprised of objectives, criteria, and indicators. The development of an SOO is usually based on moderated development processes requiring high organizational and cognitive effort from all stakeholders involved. This article proposes elementary interactions as a key paradigm of an algorithm-driven development process for an SOO that requires little moderation efforts. Elementary interactions are self-contained information requests that may be answered with little cognitive effort. The pairwise comparison of elements in the well-known analytical hierarchical process (AHP) is an example of an elementary interaction. Each elementary interaction in the development process presented contributes to the stepwise development of an SOO. Based on the hypothesis that an SOO may be developed exclusively using elementary interactions (EIs), a concept for a multi-user platform is proposed. Essential components of the platform are a Model Aggregator, an Elementary Interaction Stream Generator, a Participant Manager, and a Discussion Forum. While the latter component serves the professional exchange of the participants, the first three components are intended to be automatable by algorithms. The platform concept proposed has been evaluated partly in an explorative validation study demonstrating the general functionality of the algorithms outlined. In summary, the platform concept suggested demonstrates the potential to ease SOO development processes as the platform concept does not restrict the application domain; it is intended to work with little administration moderation efforts, and it supports the further development of an existing SOO in the event of changes in external conditions. The algorithm-driven development of SOOs proposed in this article may ease the development of MCDA applications and, thus, may have a positive effect on the spread of MCDA applications.}, subject = {Multikriteria-Entscheidung}, language = {en} } @article{SchwenkeSoebkeKraft, author = {Schwenke, Nicolas and S{\"o}bke, Heinrich and Kraft, Eckhard}, title = {Potentials and Challenges of Chatbot-Supported Thesis Writing: An Autoethnography}, series = {Trends in Higher Education}, volume = {2023}, journal = {Trends in Higher Education}, number = {Volume 2, issue 4}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/higheredu2040037}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20231207-65016}, pages = {611 -- 635}, abstract = {The release of the large language model-based chatbot ChatGPT 3.5 in November 2022 has brought considerable attention to the subject of artificial intelligence, not only to the public. From the perspective of higher education, ChatGPT challenges various learning and assessment formats as it significantly reduces the effectiveness of their learning and assessment functionalities. In particular, ChatGPT might be applied to formats that require learners to generate text, such as bachelor theses or student research papers. Accordingly, the research question arises to what extent writing of bachelor theses is still a valid learning and assessment format. Correspondingly, in this exploratory study, the first author was asked to write his bachelor's thesis exploiting ChatGPT. For tracing the impact of ChatGPT methodically, an autoethnographic approach was used. First, all considerations on the potential use of ChatGPT were documented in logs, and second, all ChatGPT chats were logged. Both logs and chat histories were analyzed and are presented along with the recommendations for students regarding the use of ChatGPT suggested by a common framework. In conclusion, ChatGPT is beneficial for thesis writing during various activities, such as brainstorming, structuring, and text revision. However, there are limitations that arise, e.g., in referencing. Thus, ChatGPT requires continuous validation of the outcomes generated and thus fosters learning. Currently, ChatGPT is valued as a beneficial tool in thesis writing. However, writing a conclusive thesis still requires the learner's meaningful engagement. Accordingly, writing a thesis is still a valid learning and assessment format. With further releases of ChatGPT, an increase in capabilities is to be expected, and the research question needs to be reevaluated from time to time.}, subject = {Chatbot}, language = {en} }