@inproceedings{hohenecker-etal-2020-systematic,
title = "Systematic Comparison of Neural Architectures and Training Approaches for Open Information Extraction",
author = "Hohenecker, Patrick and
Mtumbuka, Frank and
Kocijan, Vid and
Lukasiewicz, Thomas",
editor = "Webber, Bonnie and
Cohn, Trevor and
He, Yulan and
Liu, Yang",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2020.emnlp-main.690",
doi = "10.18653/v1/2020.emnlp-main.690",
pages = "8554--8565",
abstract = "The goal of open information extraction (OIE) is to extract facts from natural language text, and to represent them as structured triples of the form {\textless}subject,predicate, object{\textgreater}. For example, given the sentence {``}Beethoven composed the Ode to Joy.{''}, we are expected to extract the triple {\textless}Beethoven, composed, Ode to Joy{\textgreater}. In this work, we systematically compare different neural network architectures and training approaches, and improve the performance of the currently best models on the OIE16 benchmark (Stanovsky and Dagan, 2016) by 0.421 F1 score and 0.420 AUC-PR, respectively, in our experiments (i.e., by more than 200{\%} in both cases). Furthermore, we show that appropriate problem and loss formulations often affect the performance more than the network architecture.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="hohenecker-etal-2020-systematic">
<titleInfo>
<title>Systematic Comparison of Neural Architectures and Training Approaches for Open Information Extraction</title>
</titleInfo>
<name type="personal">
<namePart type="given">Patrick</namePart>
<namePart type="family">Hohenecker</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Frank</namePart>
<namePart type="family">Mtumbuka</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Vid</namePart>
<namePart type="family">Kocijan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Thomas</namePart>
<namePart type="family">Lukasiewicz</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2020-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Bonnie</namePart>
<namePart type="family">Webber</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Trevor</namePart>
<namePart type="family">Cohn</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yulan</namePart>
<namePart type="family">He</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yang</namePart>
<namePart type="family">Liu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>The goal of open information extraction (OIE) is to extract facts from natural language text, and to represent them as structured triples of the form &lt;subject, predicate, object&gt;. For example, given the sentence “Beethoven composed the Ode to Joy.”, we are expected to extract the triple &lt;Beethoven, composed, Ode to Joy&gt;. In this work, we systematically compare different neural network architectures and training approaches, and improve the performance of the currently best models on the OIE16 benchmark (Stanovsky and Dagan, 2016) by 0.421 F1 score and 0.420 AUC-PR, respectively, in our experiments (i.e., by more than 200% in both cases). Furthermore, we show that appropriate problem and loss formulations often affect the performance more than the network architecture.</abstract>
<identifier type="citekey">hohenecker-etal-2020-systematic</identifier>
<identifier type="doi">10.18653/v1/2020.emnlp-main.690</identifier>
<location>
<url>https://aclanthology.org/2020.emnlp-main.690</url>
</location>
<part>
<date>2020-11</date>
<extent unit="page">
<start>8554</start>
<end>8565</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Systematic Comparison of Neural Architectures and Training Approaches for Open Information Extraction
%A Hohenecker, Patrick
%A Mtumbuka, Frank
%A Kocijan, Vid
%A Lukasiewicz, Thomas
%Y Webber, Bonnie
%Y Cohn, Trevor
%Y He, Yulan
%Y Liu, Yang
%S Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)
%D 2020
%8 November
%I Association for Computational Linguistics
%C Online
%F hohenecker-etal-2020-systematic
%X The goal of open information extraction (OIE) is to extract facts from natural language text, and to represent them as structured triples of the form <subject, predicate, object>. For example, given the sentence “Beethoven composed the Ode to Joy.”, we are expected to extract the triple <Beethoven, composed, Ode to Joy>. In this work, we systematically compare different neural network architectures and training approaches, and improve the performance of the currently best models on the OIE16 benchmark (Stanovsky and Dagan, 2016) by 0.421 F1 score and 0.420 AUC-PR, respectively, in our experiments (i.e., by more than 200% in both cases). Furthermore, we show that appropriate problem and loss formulations often affect the performance more than the network architecture.
%R 10.18653/v1/2020.emnlp-main.690
%U https://aclanthology.org/2020.emnlp-main.690
%U https://doi.org/10.18653/v1/2020.emnlp-main.690
%P 8554-8565
Markdown (Informal)
[Systematic Comparison of Neural Architectures and Training Approaches for Open Information Extraction](https://aclanthology.org/2020.emnlp-main.690) (Hohenecker et al., EMNLP 2020)
ACL
Patrick Hohenecker, Frank Mtumbuka, Vid Kocijan, and Thomas Lukasiewicz. 2020. Systematic Comparison of Neural Architectures and Training Approaches for Open Information Extraction. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 8554–8565, Online. Association for Computational Linguistics.
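For readers unfamiliar with the task, the abstract's running example can be made concrete as a data structure. Below is a minimal Python sketch of the ⟨subject, predicate, object⟩ triple format the paper describes; the `Triple` class and its field names are illustrative choices, not an API from the paper or its code release.

```python
from typing import NamedTuple

class Triple(NamedTuple):
    """A structured OIE fact of the form <subject, predicate, object>."""
    subject: str
    predicate: str
    object: str

# The abstract's example sentence: "Beethoven composed the Ode to Joy."
fact = Triple(subject="Beethoven", predicate="composed", object="Ode to Joy")
print(fact)  # Triple(subject='Beethoven', predicate='composed', object='Ode to Joy')
```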