data
dict |
---|
{
"proceeding": {
"id": "12OmNwGIcBY",
"title": "2017 IEEE International Conference on Data Mining Workshops (ICDMW)",
"acronym": "icdmw",
"groupId": "1001620",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvFHfGd",
"doi": "10.1109/ICDMW.2017.154",
"title": "Combining Active Learning and Semi-Supervised Learning by Using Selective Label Spreading",
"normalizedTitle": "Combining Active Learning and Semi-Supervised Learning by Using Selective Label Spreading",
"abstract": "In the literature, a number of methods have been proposed for semi-supervised learning. Recently, graph-based methods of semi-supervised learning have become popular because of their capability of handling large amounts of unlabeled data. However, the existing graph based semi-supervised learning algorithms do not optimize the process of selecting better labeled data. We have developed a new selective semi-supervised learning algorithm, called selective label spreading (SLS) by integrating the active learning model into the label spreading framework. SLS optimizes the process of selecting better labeled data in order to improve classification performance. We applied SLS to the well-known hand-written digits recognition data set and demonstrated that SLS can improve the classification performance. The selective label spreading scheme requires a much smaller number of queries to achieve high accuracy compared with random query selection.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In the literature, a number of methods have been proposed for semi-supervised learning. Recently, graph-based methods of semi-supervised learning have become popular because of their capability of handling large amounts of unlabeled data. However, the existing graph based semi-supervised learning algorithms do not optimize the process of selecting better labeled data. We have developed a new selective semi-supervised learning algorithm, called selective label spreading (SLS) by integrating the active learning model into the label spreading framework. SLS optimizes the process of selecting better labeled data in order to improve classification performance. We applied SLS to the well-known hand-written digits recognition data set and demonstrated that SLS can improve the classification performance. The selective label spreading scheme requires a much smaller number of queries to achieve high accuracy compared with random query selection.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In the literature, a number of methods have been proposed for semi-supervised learning. Recently, graph-based methods of semi-supervised learning have become popular because of their capability of handling large amounts of unlabeled data. However, the existing graph based semi-supervised learning algorithms do not optimize the process of selecting better labeled data. We have developed a new selective semi-supervised learning algorithm, called selective label spreading (SLS) by integrating the active learning model into the label spreading framework. SLS optimizes the process of selecting better labeled data in order to improve classification performance. We applied SLS to the well-known hand-written digits recognition data set and demonstrated that SLS can improve the classification performance. The selective label spreading scheme requires a much smaller number of queries to achieve high accuracy compared with random query selection.",
"fno": "3800a850",
"keywords": [
"Learning Artificial Intelligence",
"Pattern Classification",
"SLS",
"Selective Label Spreading Scheme",
"Selective Semisupervised Learning Algorithm",
"Active Learning Model",
"Labeled Data",
"Classification Performance",
"Semisupervised Learning",
"Training",
"Training Data",
"Biomedical Imaging",
"Uncertainty",
"Predictive Models",
"Labeling",
"Selection",
"Label Distribution",
"Graph",
"Classification"
],
"authors": [
{
"affiliation": null,
"fullName": "Xu Chen",
"givenName": "Xu",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Tao Wang",
"givenName": "Tao",
"surname": "Wang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icdmw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-11-01T00:00:00",
"pubType": "proceedings",
"pages": "850-857",
"year": "2017",
"issn": "2375-9259",
"isbn": "978-1-5386-3800-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "3800a842",
"articleId": "12OmNrMZpzq",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "3800a858",
"articleId": "12OmNwErpXy",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/bracis/2015/0016/0/0016a049",
"title": "Semi-supervised Multi-label k-Nearest Neighbors Classification Algorithms",
"doi": null,
"abstractUrl": "/proceedings-article/bracis/2015/0016a049/12OmNBqMDEK",
"parentPublication": {
"id": "proceedings/bracis/2015/0016/0",
"title": "2015 Brazilian Conference on Intelligent Systems (BRACIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdmw/2016/5910/0/07836804",
"title": "A Semi-Supervised Ensemble Approach for Multi-label Learning",
"doi": null,
"abstractUrl": "/proceedings-article/icdmw/2016/07836804/12OmNrJAefD",
"parentPublication": {
"id": "proceedings/icdmw/2016/5910/0",
"title": "2016 IEEE 16th International Conference on Data Mining Workshops (ICDMW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmla/2016/6167/0/07838223",
"title": "Semi-Supervised Learning with Bidirectional Adaptive Pairwise Encoding",
"doi": null,
"abstractUrl": "/proceedings-article/icmla/2016/07838223/12OmNxFaLCC",
"parentPublication": {
"id": "proceedings/icmla/2016/6167/0",
"title": "2016 15th IEEE International Conference on Machine Learning and Applications (ICMLA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2018/9264/0/926400a392",
"title": "Semi-Supervised Learning with Interactive Label Propagation Guided by Feature Space Projections",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2018/926400a392/17D45WaTkgw",
"parentPublication": {
"id": "proceedings/sibgrapi/2018/9264/0",
"title": "2018 31st SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2017/2715/0/08257908",
"title": "Robust multi-label semi-supervised classification",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2017/08257908/17D45Wuc32u",
"parentPublication": {
"id": "proceedings/big-data/2017/2715/0",
"title": "2017 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2022/0915/0/091500c061",
"title": "HierMatch: Leveraging Label Hierarchies for Improving Semi-Supervised Learning",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2022/091500c061/1B13aSvRocM",
"parentPublication": {
"id": "proceedings/wacv/2022/0915/0",
"title": "2022 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icceai/2022/6803/0/680300a794",
"title": "Semi-supervised Medical Image Classification Combining Metric Pseudo-Label and Classification Pseudo-Label",
"doi": null,
"abstractUrl": "/proceedings-article/icceai/2022/680300a794/1FUUuSzCHzq",
"parentPublication": {
"id": "proceedings/icceai/2022/6803/0",
"title": "2022 International Conference on Computer Engineering and Artificial Intelligence (ICCEAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800e452",
"title": "Semi-Supervised Learning for Few-Shot Image-to-Image Translation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800e452/1m3okMEuJbi",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09412747",
"title": "Towards Robust Learning with Different Label Noise Distributions",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09412747/1tmiyKL1FzW",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2021/4899/0/489900c697",
"title": "Contrastive Learning Improves Model Robustness Under Label Noise",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2021/489900c697/1yXsFPEbHGw",
"parentPublication": {
"id": "proceedings/cvprw/2021/4899/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "18j8Ecq0jn2",
"title": "2019 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"acronym": "wacv",
"groupId": "1000040",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "18j8NGRjKve",
"doi": "10.1109/WACV.2019.00190",
"title": "IDD: A Dataset for Exploring Problems of Autonomous Navigation in Unconstrained Environments",
"normalizedTitle": "IDD: A Dataset for Exploring Problems of Autonomous Navigation in Unconstrained Environments",
"abstract": "While several datasets for autonomous navigation have become available in recent years, they have tended to focus on structured driving environments. This usually corresponds to well-delineated infrastructure such as lanes, a small number of well-defined categories for traffic participants, low variation in object or background appearance and strong adherence to traffic rules. We propose DS, a novel dataset for road scene understanding in unstructured environments where the above assumptions are largely not satisfied. It consists of 10,004 images, finely annotated with 34 classes collected from 182 drive sequences on Indian roads. The label set is expanded in comparison to popular benchmarks such as Cityscapes, to account for new classes. It also reflects label distributions of road scenes significantly different from existing datasets, with most classes displaying greater within-class diversity. Consistent with real driving behaviors, it also identifies new classes such as drivable areas besides the road. We propose a new four-level label hierarchy, which allows varying degrees of complexity and opens up possibilities for new training methods. Our empirical study provides an in-depth analysis of the label characteristics. State-of-the-art methods for semantic segmentation achieve much lower accuracies on our dataset, demonstrating its distinction compared to Cityscapes. Finally, we propose that our dataset is an ideal opportunity for new problems such as domain adaptation, few-shot learning and behavior prediction in road scenes.",
"abstracts": [
{
"abstractType": "Regular",
"content": "While several datasets for autonomous navigation have become available in recent years, they have tended to focus on structured driving environments. This usually corresponds to well-delineated infrastructure such as lanes, a small number of well-defined categories for traffic participants, low variation in object or background appearance and strong adherence to traffic rules. We propose DS, a novel dataset for road scene understanding in unstructured environments where the above assumptions are largely not satisfied. It consists of 10,004 images, finely annotated with 34 classes collected from 182 drive sequences on Indian roads. The label set is expanded in comparison to popular benchmarks such as Cityscapes, to account for new classes. It also reflects label distributions of road scenes significantly different from existing datasets, with most classes displaying greater within-class diversity. Consistent with real driving behaviors, it also identifies new classes such as drivable areas besides the road. We propose a new four-level label hierarchy, which allows varying degrees of complexity and opens up possibilities for new training methods. Our empirical study provides an in-depth analysis of the label characteristics. State-of-the-art methods for semantic segmentation achieve much lower accuracies on our dataset, demonstrating its distinction compared to Cityscapes. Finally, we propose that our dataset is an ideal opportunity for new problems such as domain adaptation, few-shot learning and behavior prediction in road scenes.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "While several datasets for autonomous navigation have become available in recent years, they have tended to focus on structured driving environments. This usually corresponds to well-delineated infrastructure such as lanes, a small number of well-defined categories for traffic participants, low variation in object or background appearance and strong adherence to traffic rules. We propose DS, a novel dataset for road scene understanding in unstructured environments where the above assumptions are largely not satisfied. It consists of 10,004 images, finely annotated with 34 classes collected from 182 drive sequences on Indian roads. The label set is expanded in comparison to popular benchmarks such as Cityscapes, to account for new classes. It also reflects label distributions of road scenes significantly different from existing datasets, with most classes displaying greater within-class diversity. Consistent with real driving behaviors, it also identifies new classes such as drivable areas besides the road. We propose a new four-level label hierarchy, which allows varying degrees of complexity and opens up possibilities for new training methods. Our empirical study provides an in-depth analysis of the label characteristics. State-of-the-art methods for semantic segmentation achieve much lower accuracies on our dataset, demonstrating its distinction compared to Cityscapes. Finally, we propose that our dataset is an ideal opportunity for new problems such as domain adaptation, few-shot learning and behavior prediction in road scenes.",
"fno": "197500b743",
"keywords": [
"Image Segmentation",
"Learning Artificial Intelligence",
"Mobile Robots",
"Path Planning",
"Road Traffic",
"Robot Vision",
"Traffic Engineering Computing",
"Autonomous Navigation",
"Unconstrained Environments",
"Structured Driving Environments",
"Traffic Participants",
"Traffic Rules",
"Unstructured Environments",
"Indian Roads",
"Road Scenes",
"Driving Behaviors",
"Four Level Label Hierarchy",
"Cityscapes",
"Few Shot Learning",
"Behavior Prediction",
"Semantic Segmentation",
"Roads",
"Autonomous Robots",
"Semantics",
"Automobiles",
"Urban Areas",
"Animals",
"Motorcycles"
],
"authors": [
{
"affiliation": null,
"fullName": "Girish Varma",
"givenName": "Girish",
"surname": "Varma",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Anbumani Subramanian",
"givenName": "Anbumani",
"surname": "Subramanian",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Anoop Namboodiri",
"givenName": "Anoop",
"surname": "Namboodiri",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Manmohan Chandraker",
"givenName": "Manmohan",
"surname": "Chandraker",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "C.V. Jawahar",
"givenName": "C.V.",
"surname": "Jawahar",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "wacv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-01-01T00:00:00",
"pubType": "proceedings",
"pages": "1743-1751",
"year": "2019",
"issn": "1550-5790",
"isbn": "978-1-7281-1975-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "197500b734",
"articleId": "18j8MdG6BYk",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "197500b752",
"articleId": "18j8MqOcb1m",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2016/8851/0/8851d213",
"title": "The Cityscapes Dataset for Semantic Urban Scene Understanding",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2016/8851d213/12OmNAtaS0N",
"parentPublication": {
"id": "proceedings/cvpr/2016/8851/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2017/1032/0/1032f000",
"title": "The Mapillary Vistas Dataset for Semantic Understanding of Street Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032f000/12OmNrJAdN1",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fit/2015/9666/0/9666a178",
"title": "Augmenting Autonomous Vehicular Communication Using the Appreciation Emotion: A Mamdani Fuzzy Inference System Model",
"doi": null,
"abstractUrl": "/proceedings-article/fit/2015/9666a178/12OmNrJRPjl",
"parentPublication": {
"id": "proceedings/fit/2015/9666/0",
"title": "2015 13th International Conference on Frontiers of Information Technology (FIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2023/01/09712346",
"title": "ROAD: The Road Event Awareness Dataset for Autonomous Driving",
"doi": null,
"abstractUrl": "/journal/tp/2023/01/09712346/1AZL0P4dL1e",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2022/8739/0/8.739E307",
"title": "Detecting, Tracking and Counting Motorcycle Rider Traffic Violations on Unconstrained Roads",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2022/8.739E307/1G57l0uwq88",
"parentPublication": {
"id": "proceedings/cvprw/2022/8739/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600t9001",
"title": "Animal Kingdom: A Large and Diverse Dataset for Animal Behavior Understanding",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600t9001/1H0LAZjYlTG",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2023/9346/0/934600e471",
"title": "IDD-3D: Indian Driving Dataset for 3D Unstructured Road Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2023/934600e471/1KxUljqhQdi",
"parentPublication": {
"id": "proceedings/wacv/2023/9346/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2019/2506/0/250600b221",
"title": "RailSem19: A Dataset for Semantic Rail Scene Understanding",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2019/250600b221/1iTvhBXKHJe",
"parentPublication": {
"id": "proceedings/cvprw/2019/2506/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800l1618",
"title": "nuScenes: A Multimodal Dataset for Autonomous Driving",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800l1618/1m3nGHQO3HW",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800m2563",
"title": "Severity-Aware Semantic Segmentation With Reinforced Wasserstein Training",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800m2563/1m3olQaZMME",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1AjSWMljRoA",
"title": "2021 36th IEEE/ACM International Conference on Automated Software Engineering (ASE)",
"acronym": "ase",
"groupId": "1000064",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1AjTfGOSCwU",
"doi": "10.1109/ASE51524.2021.9678638",
"title": "Unsupervised Labeling and Extraction of Phrase-based Concepts in Vulnerability Descriptions",
"normalizedTitle": "Unsupervised Labeling and Extraction of Phrase-based Concepts in Vulnerability Descriptions",
"abstract": "People usually describe the key characteristics of software vulnerabilities in natural language mixed with domain-specific names and concepts. This textual nature poses a significant challenge for the automatic analysis of vulnerabilities. Automatic extraction of key vulnerability aspects is highly desirable but demands significant effort to manually label data for model training. In this paper, we propose an unsupervised approach to label and extract important vulnerability concepts in textural vulnerability descriptions (TVDs). We focus on three types of phrase-based vulnerability concepts (root cause, attack vector, and impact) as they are much more difficult to label and extract than name- or number-based entities (i.e., vendor, product, and version). Our approach is based on a key observation that the same-type of phrases, no matter how they differ in sentence structures and phrase expressions, usually share syntactically similar paths in the sentence parsing trees. Therefore, we propose two path representations (absolute paths and relative paths) and use an auto-encoder to encode such syntactic similarities. To address the discrete nature of our paths, we enhance traditional Variational Auto-encoder (VAE) with Gumble-Max trick for categorical data distribution, and thus creates a Categorical VAE (CaVAE). In the latent space of absolute and relative paths, we further use FIt-TSNE and clustering techniques to generate clusters of the same-type of concepts. Our evaluation confirms the effectiveness of our CaVAE for encoding path representations and the accuracy of vulnerability concepts in the resulting clusters. In a concept classification task, our unsupervisedly labeled vulnerability concepts outperform the two manually labeled datasets from previous work.",
"abstracts": [
{
"abstractType": "Regular",
"content": "People usually describe the key characteristics of software vulnerabilities in natural language mixed with domain-specific names and concepts. This textual nature poses a significant challenge for the automatic analysis of vulnerabilities. Automatic extraction of key vulnerability aspects is highly desirable but demands significant effort to manually label data for model training. In this paper, we propose an unsupervised approach to label and extract important vulnerability concepts in textural vulnerability descriptions (TVDs). We focus on three types of phrase-based vulnerability concepts (root cause, attack vector, and impact) as they are much more difficult to label and extract than name- or number-based entities (i.e., vendor, product, and version). Our approach is based on a key observation that the same-type of phrases, no matter how they differ in sentence structures and phrase expressions, usually share syntactically similar paths in the sentence parsing trees. Therefore, we propose two path representations (absolute paths and relative paths) and use an auto-encoder to encode such syntactic similarities. To address the discrete nature of our paths, we enhance traditional Variational Auto-encoder (VAE) with Gumble-Max trick for categorical data distribution, and thus creates a Categorical VAE (CaVAE). In the latent space of absolute and relative paths, we further use FIt-TSNE and clustering techniques to generate clusters of the same-type of concepts. Our evaluation confirms the effectiveness of our CaVAE for encoding path representations and the accuracy of vulnerability concepts in the resulting clusters. In a concept classification task, our unsupervisedly labeled vulnerability concepts outperform the two manually labeled datasets from previous work.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "People usually describe the key characteristics of software vulnerabilities in natural language mixed with domain-specific names and concepts. This textual nature poses a significant challenge for the automatic analysis of vulnerabilities. Automatic extraction of key vulnerability aspects is highly desirable but demands significant effort to manually label data for model training. In this paper, we propose an unsupervised approach to label and extract important vulnerability concepts in textural vulnerability descriptions (TVDs). We focus on three types of phrase-based vulnerability concepts (root cause, attack vector, and impact) as they are much more difficult to label and extract than name- or number-based entities (i.e., vendor, product, and version). Our approach is based on a key observation that the same-type of phrases, no matter how they differ in sentence structures and phrase expressions, usually share syntactically similar paths in the sentence parsing trees. Therefore, we propose two path representations (absolute paths and relative paths) and use an auto-encoder to encode such syntactic similarities. To address the discrete nature of our paths, we enhance traditional Variational Auto-encoder (VAE) with Gumble-Max trick for categorical data distribution, and thus creates a Categorical VAE (CaVAE). In the latent space of absolute and relative paths, we further use FIt-TSNE and clustering techniques to generate clusters of the same-type of concepts. Our evaluation confirms the effectiveness of our CaVAE for encoding path representations and the accuracy of vulnerability concepts in the resulting clusters. In a concept classification task, our unsupervisedly labeled vulnerability concepts outperform the two manually labeled datasets from previous work.",
"fno": "033700a943",
"keywords": [
"Training",
"Natural Languages",
"Training Data",
"Machine Learning",
"Syntactics",
"Software",
"Labeling",
"Textual Vulnerability Descriptions",
"Vulnerability Concepts",
"Unsupervised Representation Learning",
"Concept Labeling And Extraction"
],
"authors": [
{
"affiliation": "College of Intelligence and Computing Tianjin University,Tianjin,China",
"fullName": "Sofonias Yitagesu",
"givenName": "Sofonias",
"surname": "Yitagesu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Research School of Computer Science Australian National University, Data61 CSIRO,Australia",
"fullName": "Zhenchang Xing",
"givenName": "Zhenchang",
"surname": "Xing",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "College of Intelligence and Computing Tianjin University,Tianjin,China",
"fullName": "Xiaowang Zhang",
"givenName": "Xiaowang",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "College of Intelligence and Computing Tianjin University,Tianjin,China",
"fullName": "Zhiyong Feng",
"givenName": "Zhiyong",
"surname": "Feng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "College of Intelligence and Computing Tianjin University,Tianjin,China",
"fullName": "Xiaohong Li",
"givenName": "Xiaohong",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "College of Intelligence and Computing Tianjin University,Tianjin,China",
"fullName": "Linyi Han",
"givenName": "Linyi",
"surname": "Han",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ase",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-11-01T00:00:00",
"pubType": "proceedings",
"pages": "943-954",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-0337-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "033700a930",
"articleId": "1AjT4JzUKFa",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "033700a955",
"articleId": "1AjT1v40WRy",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icdm/2016/5473/0/07837858",
"title": "Learning Hierarchically Decomposable Concepts with Active Over-Labeling",
"doi": null,
"abstractUrl": "/proceedings-article/icdm/2016/07837858/12OmNCdk2PG",
"parentPublication": {
"id": "proceedings/icdm/2016/5473/0",
"title": "2016 IEEE 16th International Conference on Data Mining (ICDM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdmw/2013/3142/0/3143b161",
"title": "Rapidly Labeling and Tracking Dynamically Evolving Concepts in Data Streams",
"doi": null,
"abstractUrl": "/proceedings-article/icdmw/2013/3143b161/12OmNqBKUcA",
"parentPublication": {
"id": "proceedings/icdmw/2013/3142/0",
"title": "2013 IEEE 13th International Conference on Data Mining Workshops (ICDMW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2013/2840/0/2840a905",
"title": "Video Event Understanding Using Natural Language Descriptions",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2013/2840a905/12OmNvwkumz",
"parentPublication": {
"id": "proceedings/iccv/2013/2840/0",
"title": "2013 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cit/2009/3836/1/3836a318",
"title": "A New Labeling Scheme without Re-labeling Using Circular Concepts for Dynamic XML Data",
"doi": null,
"abstractUrl": "/proceedings-article/cit/2009/3836a318/12OmNyqzLVV",
"parentPublication": {
"id": "proceedings/cit/2009/3836/1",
"title": "2009 Ninth IEEE International Conference on Computer and Information Technology. CIT 2009",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/spire/2001/1192/0/00989766",
"title": "Semantic labeling - unveiling the main components of meaning of free-text",
"doi": null,
"abstractUrl": "/proceedings-article/spire/2001/00989766/12OmNzkuKFY",
"parentPublication": {
"id": "proceedings/spire/2001/1192/0",
"title": "String Processing and Information Retrieval, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdm/2018/9159/0/08594936",
"title": "Estimating Latent Relative Labeling Importances for Multi-label Learning",
"doi": null,
"abstractUrl": "/proceedings-article/icdm/2018/08594936/17D45WrVgdl",
"parentPublication": {
"id": "proceedings/icdm/2018/9159/0",
"title": "2018 IEEE International Conference on Data Mining (ICDM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iri/2022/6603/0/660300a164",
"title": "Automatic labeling of the elements of a vulnerability report CVE with NLP",
"doi": null,
"abstractUrl": "/proceedings-article/iri/2022/660300a164/1GvdRLeLQJ2",
"parentPublication": {
"id": "proceedings/iri/2022/6603/0",
"title": "2022 IEEE 23rd International Conference on Information Reuse and Integration for Data Science (IRI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/issrew/2019/5138/0/513800a270",
"title": "The Challenges of Labeling Vulnerability-Contributing Commits",
"doi": null,
"abstractUrl": "/proceedings-article/issrew/2019/513800a270/1hrL2gmwyS4",
"parentPublication": {
"id": "proceedings/issrew/2019/5138/0",
"title": "2019 IEEE International Symposium on Software Reliability Engineering Workshops (ISSREW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2020/1331/0/09102716",
"title": "Phrase-Level Global-Local Hybrid Model For Sentence Embedding",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2020/09102716/1kwr8mEV5eg",
"parentPublication": {
"id": "proceedings/icme/2020/1331/0",
"title": "2020 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/msr/2021/8710/0/871000a029",
"title": "Automatic Part-of-Speech Tagging for Security Vulnerability Descriptions",
"doi": null,
"abstractUrl": "/proceedings-article/msr/2021/871000a029/1tB7j0Nw4DK",
"parentPublication": {
"id": "proceedings/msr/2021/8710/0/",
"title": "2021 IEEE/ACM 18th International Conference on Mining Software Repositories (MSR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNqIhFPn",
"title": "2014 IEEE Applied Imagery Pattern Recognition Workshop (AIPR)",
"acronym": "aipr",
"groupId": "1000046",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNA14Aip",
"doi": "10.1109/AIPR.2014.7041912",
"title": "High dynamic range (HDR) video processing for the exploitation of high bit-depth sensors in human-monitored surveillance",
"normalizedTitle": "High dynamic range (HDR) video processing for the exploitation of high bit-depth sensors in human-monitored surveillance",
"abstract": "High bit-depth video data is becoming more common in imaging and remote sensing because higher bit-depth cameras are becoming more affordable. Displays often represent images in lower bit-depths, and human vision is not able to completely exploit this additional information in its native form. These problems are addressed with High Dynamic Range (HDR) tone mapping, which nonlinearly maps lightness levels from a high bit-depth image into a lower bit-depth representation in a way that attempts to retain and accentuate the maximum amount of useful information therein. We have adapted the well-known Contrast Limited Adaptive Histogram Equalization (CLAHE) algorithm into the application of HDR video tone mapping by using time-adaptive local histogram transformations. In addition to lightness contrast, we use the transformations in the L*a*b* color space to amplify color contrast in the video stream. The transformed HDR video data maintains important details in local contrast while maintaining relative lightness levels locally through time. Our results show that time-adapted HDR tone mapping methods can be used in real-time video processing to store and display HDR data in low bit-depth formats with less loss of useful information compared to simple truncation.",
"abstracts": [
{
"abstractType": "Regular",
"content": "High bit-depth video data is becoming more common in imaging and remote sensing because higher bit-depth cameras are becoming more affordable. Displays often represent images in lower bit-depths, and human vision is not able to completely exploit this additional information in its native form. These problems are addressed with High Dynamic Range (HDR) tone mapping, which nonlinearly maps lightness levels from a high bit-depth image into a lower bit-depth representation in a way that attempts to retain and accentuate the maximum amount of useful information therein. We have adapted the well-known Contrast Limited Adaptive Histogram Equalization (CLAHE) algorithm into the application of HDR video tone mapping by using time-adaptive local histogram transformations. In addition to lightness contrast, we use the transformations in the L*a*b* color space to amplify color contrast in the video stream. The transformed HDR video data maintains important details in local contrast while maintaining relative lightness levels locally through time. Our results show that time-adapted HDR tone mapping methods can be used in real-time video processing to store and display HDR data in low bit-depth formats with less loss of useful information compared to simple truncation.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "High bit-depth video data is becoming more common in imaging and remote sensing because higher bit-depth cameras are becoming more affordable. Displays often represent images in lower bit-depths, and human vision is not able to completely exploit this additional information in its native form. These problems are addressed with High Dynamic Range (HDR) tone mapping, which nonlinearly maps lightness levels from a high bit-depth image into a lower bit-depth representation in a way that attempts to retain and accentuate the maximum amount of useful information therein. We have adapted the well-known Contrast Limited Adaptive Histogram Equalization (CLAHE) algorithm into the application of HDR video tone mapping by using time-adaptive local histogram transformations. In addition to lightness contrast, we use the transformations in the L*a*b* color space to amplify color contrast in the video stream. The transformed HDR video data maintains important details in local contrast while maintaining relative lightness levels locally through time. Our results show that time-adapted HDR tone mapping methods can be used in real-time video processing to store and display HDR data in low bit-depth formats with less loss of useful information compared to simple truncation.",
"fno": "07041912",
"keywords": [
"Adaptive Equalisers",
"Image Colour Analysis",
"Image Representation",
"Transforms",
"Video Signal Processing",
"Video Surveillance",
"High Bit Depth Video Data",
"Human Vision",
"High Dynamic Range Tone Mapping",
"High Bit Depth Image",
"Real Time Video Processing",
"Time Adapted HDR Tone Mapping Methods",
"Relative Lightness Levels",
"Local Contrast",
"Video Stream",
"Color Contrast",
"Time Adaptive Local Histogram Transformations",
"HDR Video Tone Mapping",
"CLAHE Algorithm",
"Contrast Limited Adaptive Histogram Equalization Algorithm",
"Lower Bit Depth Representation",
"Streaming Media",
"Image Color Analysis",
"Histograms",
"Interpolation",
"Dynamic Range",
"Imaging",
"Surveillance",
"HDR",
"10 Bit Video",
"Bit Depth",
"CLAHE",
"EWMA",
"Remote Sensing",
"Surveillance"
],
"authors": [
{
"affiliation": "Imaging Systems and Processing Department, The Applied Research Laboratory at the Pennsylvania State University, University Park, PA",
"fullName": "Donald J. Natale",
"givenName": "Donald J.",
"surname": "Natale",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Imaging Systems and Processing Department, The Applied Research Laboratory at the Pennsylvania State University, University Park, PA",
"fullName": "Matthew S. Baran",
"givenName": "Matthew S.",
"surname": "Baran",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Imaging Systems and Processing Department, The Applied Research Laboratory at the Pennsylvania State University, University Park, PA",
"fullName": "Richard L. Tutwiler",
"givenName": "Richard L.",
"surname": "Tutwiler",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "aipr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-10-01T00:00:00",
"pubType": "proceedings",
"pages": "1-6",
"year": "2014",
"issn": "1550-5219",
"isbn": "978-1-4799-5921-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07041911",
"articleId": "12OmNzTH15s",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07041913",
"articleId": "12OmNwBjP5r",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cw/2008/3381/0/3381a256",
"title": "Real-Time Tone Mapping for High-Resolution HDR Images",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2008/3381a256/12OmNAlNiC2",
"parentPublication": {
"id": "proceedings/cw/2008/3381/0",
"title": "2008 International Conference on Cyberworlds",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2015/6759/0/07301364",
"title": "A real-time high dynamic range HD video camera",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2015/07301364/12OmNrYlmMt",
"parentPublication": {
"id": "proceedings/cvprw/2015/6759/0",
"title": "2015 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2014/4717/0/06890713",
"title": "HDR2014 - A high dynamic range image quality database",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2014/06890713/12OmNwDSdsg",
"parentPublication": {
"id": "proceedings/icmew/2014/4717/0",
"title": "2014 IEEE International Conference on Multimedia and Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2008/2570/0/04607639",
"title": "Image characteristic oriented tone mapping for high dynamic range images",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2008/04607639/12OmNxWcHab",
"parentPublication": {
"id": "proceedings/icme/2008/2570/0",
"title": "2008 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/visapp/2014/8133/1/07294799",
"title": "Tone mapping for single-shot HDR imaging",
"doi": null,
"abstractUrl": "/proceedings-article/visapp/2014/07294799/12OmNyL0Tml",
"parentPublication": {
"id": "proceedings/visapp/2014/8133/1",
"title": "2014 International Conference on Computer Vision Theory and Applications (VISAPP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isspit/2017/4662/0/08388632",
"title": "Revisited histogram equalization as HDR images tone mapping operators",
"doi": null,
"abstractUrl": "/proceedings-article/isspit/2017/08388632/12OmNz2kqpY",
"parentPublication": {
"id": "proceedings/isspit/2017/4662/0",
"title": "2017 IEEE International Symposium on Signal Processing and Information Technology (ISSPIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2018/4195/0/08551531",
"title": "Premium Hdr: The Impact of a Singleword on the Quality of Experience of Hdr Video",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2018/08551531/17D45VtKivb",
"parentPublication": {
"id": "proceedings/icmew/2018/4195/0",
"title": "2018 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000b798",
"title": "Image Correction via Deep Reciprocating HDR Transformation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000b798/17D45XERmmf",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacvw/2022/5824/0/582400a729",
"title": "Joint Multi-Scale Tone Mapping and Denoising for HDR Image Enhancement",
"doi": null,
"abstractUrl": "/proceedings-article/wacvw/2022/582400a729/1B12qQ9zLfG",
"parentPublication": {
"id": "proceedings/wacvw/2022/5824/0",
"title": "2022 IEEE/CVF Winter Conference on Applications of Computer Vision Workshops (WACVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icis/2019/0801/0/08940155",
"title": "HDR Video Production Process Based on Optical Principle",
"doi": null,
"abstractUrl": "/proceedings-article/icis/2019/08940155/1gjROPTQtxu",
"parentPublication": {
"id": "proceedings/icis/2019/0801/0",
"title": "2019 IEEE/ACIS 18th International Conference on Computer and Information Science (ICIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": null,
"article": {
"id": "12OmNCctfaE",
"doi": "10.1109/CSIE.2009.627",
"title": "Color Vision Based High Dynamic Range Images Rendering",
"normalizedTitle": "Color Vision Based High Dynamic Range Images Rendering",
"abstract": "The algorithms, mapping the high dynamic range images to the low dynamic range images, have been described in many literatures. Especially, tone mapping operators are the typical algorithms which are designed to produce visibility and the overall impression of brightness, contrast and color of HDR images onto relative LDR displays and printers. But the results of these algorithms did not match these of the psychophysical experiments based on Human Visual System. The mismatches were specially presented on color, so Kuang al. et proposed a display rendering algorithm based on color appearance models to improve the color rendering. In our testing experiments of the algorithm of Kuang al. et., it found that the colors of the rendering images shifted to the blue and color leakage also happened. In this paper, we proposed a HDR rendering algorithm based on color vision of the human to resolve the issues of the algorithm of Kuang al. et.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The algorithms, mapping the high dynamic range images to the low dynamic range images, have been described in many literatures. Especially, tone mapping operators are the typical algorithms which are designed to produce visibility and the overall impression of brightness, contrast and color of HDR images onto relative LDR displays and printers. But the results of these algorithms did not match these of the psychophysical experiments based on Human Visual System. The mismatches were specially presented on color, so Kuang al. et proposed a display rendering algorithm based on color appearance models to improve the color rendering. In our testing experiments of the algorithm of Kuang al. et., it found that the colors of the rendering images shifted to the blue and color leakage also happened. In this paper, we proposed a HDR rendering algorithm based on color vision of the human to resolve the issues of the algorithm of Kuang al. et.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The algorithms, mapping the high dynamic range images to the low dynamic range images, have been described in many literatures. Especially, tone mapping operators are the typical algorithms which are designed to produce visibility and the overall impression of brightness, contrast and color of HDR images onto relative LDR displays and printers. But the results of these algorithms did not match these of the psychophysical experiments based on Human Visual System. The mismatches were specially presented on color, so Kuang al. et proposed a display rendering algorithm based on color appearance models to improve the color rendering. In our testing experiments of the algorithm of Kuang al. et., it found that the colors of the rendering images shifted to the blue and color leakage also happened. In this paper, we proposed a HDR rendering algorithm based on color vision of the human to resolve the issues of the algorithm of Kuang al. et.",
"fno": "3507f583",
"keywords": [
"High Dynamic Range",
"Rendering",
"Color Vision Color Leakage"
],
"authors": [
{
"affiliation": null,
"fullName": "Wan Xiaoxia",
"givenName": "Wan",
"surname": "Xiaoxia",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Xie Dehong",
"givenName": "Xie",
"surname": "Dehong",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "csie",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2009-03-01T00:00:00",
"pubType": "proceedings",
"pages": "583-587",
"year": "2009",
"issn": null,
"isbn": "978-0-7695-3507-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "3507f578",
"articleId": "12OmNzdoN77",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "3507f588",
"articleId": "12OmNvDI3X9",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccvw/2011/0063/0/06130326",
"title": "Color correction using rotation matrix for HDR rendering in iCAM06",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2011/06130326/12OmNCcKQK0",
"parentPublication": {
"id": "proceedings/iccvw/2011/0063/0",
"title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icis/2016/0806/0/07550918",
"title": "A mosaic style rendering method based on fuzzy color modeling",
"doi": null,
"abstractUrl": "/proceedings-article/icis/2016/07550918/12OmNrAMF1Y",
"parentPublication": {
"id": "proceedings/icis/2016/0806/0",
"title": "2016 IEEE/ACIS 15th International Conference on Computer and Information Science (ICIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2010/7846/0/05571171",
"title": "Molecular Rendering with Medieval and Renaissance Color Theory",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2010/05571171/12OmNxj23hk",
"parentPublication": {
"id": "proceedings/iv/2010/7846/0",
"title": "2010 14th International Conference Information Visualisation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2010/4109/0/4109c676",
"title": "Noise-Insensitive Contrast Enhancement for Rendering High-Dynamic-Range Images",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2010/4109c676/12OmNyNQSO8",
"parentPublication": {
"id": "proceedings/icpr/2010/4109/0",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2004/2177/0/21770269",
"title": "Two-Channel Technique for High Dynamic Range Image Visualization",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2004/21770269/12OmNyo1nMX",
"parentPublication": {
"id": "proceedings/iv/2004/2177/0",
"title": "Proceedings. Eighth International Conference on Information Visualisation, 2004. IV 2004.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iih-msp/2008/3278/0/3278b305",
"title": "A New Probabilistic Visual Secret Sharing Scheme for Color Images",
"doi": null,
"abstractUrl": "/proceedings-article/iih-msp/2008/3278b305/12OmNyxXlnK",
"parentPublication": {
"id": "proceedings/iih-msp/2008/3278/0",
"title": "2008 Fourth International Conference on Intelligent Information Hiding and Multimedia Signal Processing (IIH-MSP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2007/02/v0235",
"title": "Image-Based Color Ink Diffusion Rendering",
"doi": null,
"abstractUrl": "/journal/tg/2007/02/v0235/13rRUxASuSC",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aiccsa/2018/9120/0/08612851",
"title": "Color Based HDR Image Retrieval using HSV Histogram and Color Moments",
"doi": null,
"abstractUrl": "/proceedings-article/aiccsa/2018/08612851/17D45WGGoLh",
"parentPublication": {
"id": "proceedings/aiccsa/2018/9120/0",
"title": "2018 IEEE/ACS 15th International Conference on Computer Systems and Applications (AICCSA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09887904",
"title": "Fast and Accurate Illumination Estimation Using LDR Panoramic Images for Realistic Rendering",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09887904/1GBRnHyZ1bW",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/5555/01/10036136",
"title": "Hybrid High Dynamic Range Imaging fusing Neuromorphic and Conventional Images",
"doi": null,
"abstractUrl": "/journal/tp/5555/01/10036136/1KsSqwx1dhC",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNynsbwy",
"title": "IEEE International Conference on Computational Photography (ICCP)",
"acronym": "iccp",
"groupId": "1800125",
"volume": "0",
"displayVolume": "0",
"year": "2009",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNCuDzub",
"doi": "10.1109/ICCPHOT.2009.5559003",
"title": "Artifact-free High Dynamic Range imaging",
"normalizedTitle": "Artifact-free High Dynamic Range imaging",
"abstract": "The contrast in real world scenes is often beyond what consumer cameras can capture. For these situations, High Dynamic Range (HDR) images can be generated by taking multiple exposures of the same scene. When fusing information from different images, however, the slightest change in the scene can generate artifacts which dramatically limit the potential of this solution. We present a technique capable of dealing with a large amount of movement in the scene: we find, in all the available exposures, patches consistent with a reference image previously selected from the stack. We generate the HDR image by averaging the radiance estimates of all such regions and we compensate for camera calibration errors by removing potential seams. We show that our method works even in cases when many moving objects cover large regions of the scene.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The contrast in real world scenes is often beyond what consumer cameras can capture. For these situations, High Dynamic Range (HDR) images can be generated by taking multiple exposures of the same scene. When fusing information from different images, however, the slightest change in the scene can generate artifacts which dramatically limit the potential of this solution. We present a technique capable of dealing with a large amount of movement in the scene: we find, in all the available exposures, patches consistent with a reference image previously selected from the stack. We generate the HDR image by averaging the radiance estimates of all such regions and we compensate for camera calibration errors by removing potential seams. We show that our method works even in cases when many moving objects cover large regions of the scene.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The contrast in real world scenes is often beyond what consumer cameras can capture. For these situations, High Dynamic Range (HDR) images can be generated by taking multiple exposures of the same scene. When fusing information from different images, however, the slightest change in the scene can generate artifacts which dramatically limit the potential of this solution. We present a technique capable of dealing with a large amount of movement in the scene: we find, in all the available exposures, patches consistent with a reference image previously selected from the stack. We generate the HDR image by averaging the radiance estimates of all such regions and we compensate for camera calibration errors by removing potential seams. We show that our method works even in cases when many moving objects cover large regions of the scene.",
"fno": "05559003",
"keywords": [
"Image Colour Analysis",
"Image Motion Analysis",
"Image Recognition",
"Dynamic Range Imaging",
"Consumer Cameras",
"High Dynamic Range",
"HDR",
"Image Reference",
"Camera Calibration Errors",
"Pixel",
"Cameras",
"Dynamic Range",
"Image Color Analysis",
"Estimation",
"Noise"
],
"authors": [
{
"affiliation": "University of California, Santa Cruz, USA",
"fullName": "Orazio Gallo",
"givenName": "Orazio",
"surname": "Gallo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Nokia Research Center, Palo Alto, USA",
"fullName": "Natasha Gelfandz",
"givenName": "Natasha",
"surname": "Gelfandz",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Nokia Research Center, Palo Alto, USA",
"fullName": "Wei-Chao Chen",
"givenName": null,
"surname": "Wei-Chao Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Nokia Research Center, Palo Alto, USA",
"fullName": "Marius Tico",
"givenName": "Marius",
"surname": "Tico",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Nokia Research Center, Palo Alto, USA",
"fullName": "Kari Pulli",
"givenName": "Kari",
"surname": "Pulli",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccp",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2009-04-01T00:00:00",
"pubType": "proceedings",
"pages": "1-7",
"year": "2009",
"issn": null,
"isbn": "978-1-4244-4534-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "05559006",
"articleId": "12OmNyXMQ9U",
"__typename": "AdjacentArticleType"
},
"next": null,
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2004/2158/2/01315160",
"title": "Probability models for high dynamic range imaging",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2004/01315160/12OmNAo45P9",
"parentPublication": {
"id": "proceedings/cvpr/2004/2158/2",
"title": "Proceedings of the 2004 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2004. CVPR 2004.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccp/2014/5188/0/06831807",
"title": "Single shot high dynamic range imaging using piecewise linear estimators",
"doi": null,
"abstractUrl": "/proceedings-article/iccp/2014/06831807/12OmNB1eJxy",
"parentPublication": {
"id": "proceedings/iccp/2014/5188/0",
"title": "2014 IEEE International Conference on Computational Photography (ICCP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2014/5209/0/5209a720",
"title": "Super-high Dynamic Range Imaging",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/5209a720/12OmNCm7BGS",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2015/6759/0/07301364",
"title": "A real-time high dynamic range HD video camera",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2015/07301364/12OmNrYlmMt",
"parentPublication": {
"id": "proceedings/cvprw/2015/6759/0",
"title": "2015 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isspit/2006/9753/0/04042299",
"title": "Minimal Capture Sets for Multi-Exposure Enhanced-Dynamic-Range Imaging",
"doi": null,
"abstractUrl": "/proceedings-article/isspit/2006/04042299/12OmNyuyaax",
"parentPublication": {
"id": "proceedings/isspit/2006/9753/0",
"title": "2006 IEEE International Symposium on Signal Processing and Information Technology",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2015/06/06915885",
"title": "Robust High Dynamic Range Imaging by Rank Minimization",
"doi": null,
"abstractUrl": "/journal/tp/2015/06/06915885/13rRUxlgxXH",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2019/1975/0/197500a041",
"title": "Multi-Scale Dense Networks for Deep High Dynamic Range Imaging",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2019/197500a041/18j8GO2zHcA",
"parentPublication": {
"id": "proceedings/wacv/2019/1975/0",
"title": "2019 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2022/8739/0/873900a823",
"title": "A Lightweight Network for High Dynamic Range Imaging",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2022/873900a823/1G56wO1KMbS",
"parentPublication": {
"id": "proceedings/cvprw/2022/8739/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ec/2021/04/08930095",
"title": "Subjective Evaluation of High Dynamic Range Imaging for Face Matching",
"doi": null,
"abstractUrl": "/journal/ec/2021/04/08930095/1fCCON8Dwwo",
"parentPublication": {
"id": "trans/ec",
"title": "IEEE Transactions on Emerging Topics in Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/12/09594668",
"title": "Deep Learning for HDR Imaging: State-of-the-Art and Future Trends",
"doi": null,
"abstractUrl": "/journal/tp/2022/12/09594668/1y5Z4IEE9Uc",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNyO8tMM",
"title": "2016 Data Compression Conference (DCC)",
"acronym": "dcc",
"groupId": "1000177",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxcMSkC",
"doi": "10.1109/DCC.2016.78",
"title": "High Dynamic Range Video Coding with Backward Compatibility",
"normalizedTitle": "High Dynamic Range Video Coding with Backward Compatibility",
"abstract": "This paper presents a method for efficient compression of high dynamic range (HDR) and wide color gamut (WCG) video data. The proposed solution consists of two major elements: a conventional video codec (e.g., HEVC) and pre-and post-processing steps applied prior to encoding and after decoding process, respectively. The proposed HDR/WCG video coding system can be configured to provide two configurations: (1) a non-backward compatible bitstream with improved HDR video quality and (2) a SDR backward compatible bitstream with balanced visual quality between the reconstructed signal by the SDR and the HDR receivers. The simulations conducted under the MPEG Common Test Conditions for HDR demonstrate that the compression efficiency of the proposed solution outperforms the anchor solution on objective metrics. Additionally, subjective evaluations conducted under MPEG revealed improved visual quality for the proposed method.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper presents a method for efficient compression of high dynamic range (HDR) and wide color gamut (WCG) video data. The proposed solution consists of two major elements: a conventional video codec (e.g., HEVC) and pre-and post-processing steps applied prior to encoding and after decoding process, respectively. The proposed HDR/WCG video coding system can be configured to provide two configurations: (1) a non-backward compatible bitstream with improved HDR video quality and (2) a SDR backward compatible bitstream with balanced visual quality between the reconstructed signal by the SDR and the HDR receivers. The simulations conducted under the MPEG Common Test Conditions for HDR demonstrate that the compression efficiency of the proposed solution outperforms the anchor solution on objective metrics. Additionally, subjective evaluations conducted under MPEG revealed improved visual quality for the proposed method.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper presents a method for efficient compression of high dynamic range (HDR) and wide color gamut (WCG) video data. The proposed solution consists of two major elements: a conventional video codec (e.g., HEVC) and pre-and post-processing steps applied prior to encoding and after decoding process, respectively. The proposed HDR/WCG video coding system can be configured to provide two configurations: (1) a non-backward compatible bitstream with improved HDR video quality and (2) a SDR backward compatible bitstream with balanced visual quality between the reconstructed signal by the SDR and the HDR receivers. The simulations conducted under the MPEG Common Test Conditions for HDR demonstrate that the compression efficiency of the proposed solution outperforms the anchor solution on objective metrics. Additionally, subjective evaluations conducted under MPEG revealed improved visual quality for the proposed method.",
"fno": "07786173",
"keywords": [
"Image Reconstruction",
"Video Codecs",
"Video Coding",
"High Dynamic Range Video Coding",
"Backward Compatibility",
"HDR Video Data",
"Wide Color Gamut Video Data",
"WCG Video Data",
"Video Codec",
"HEVC",
"Video Preprocessing",
"Video Postprocessing",
"Encoding Process",
"Decoding Process",
"HDR WCG Video Coding System",
"Non Backward Compatible Bitstream",
"Improved HDR Video Quality",
"SDR Backward Compatible Bitstream",
"Visual Quality",
"Signal Reconstruction",
"SDR Receivers",
"HDR Receivers",
"MPEG Common Test Conditions",
"Objective Metrics",
"Subjective Evaluations",
"Improved Visual Quality",
"Image Color Analysis",
"Transfer Functions",
"Dynamic Range",
"Streaming Media",
"Transform Coding",
"Decoding",
"Video Coding"
],
"authors": [
{
"affiliation": null,
"fullName": "Dmytro Rusanovskyy",
"givenName": "Dmytro",
"surname": "Rusanovskyy",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Done Bugdayci Sansli",
"givenName": "Done Bugdayci",
"surname": "Sansli",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Adarsh Ramasubramonian",
"givenName": "Adarsh",
"surname": "Ramasubramonian",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Sungwon Lee",
"givenName": "Sungwon",
"surname": "Lee",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Joel Sole",
"givenName": "Joel",
"surname": "Sole",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Marta Karczewicz",
"givenName": "Marta",
"surname": "Karczewicz",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "dcc",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-03-01T00:00:00",
"pubType": "proceedings",
"pages": "289-298",
"year": "2016",
"issn": "1068-0314",
"isbn": "978-1-5090-1853-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07786172",
"articleId": "12OmNqBbHFo",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07786174",
"articleId": "12OmNwwd2Pg",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cmpsac/1994/6705/0/00342760",
"title": "Video coding for HDTV systems",
"doi": null,
"abstractUrl": "/proceedings-article/cmpsac/1994/00342760/12OmNB836JI",
"parentPublication": {
"id": "proceedings/cmpsac/1994/6705/0",
"title": "Proceedings Eighteenth Annual International Computer Software and Applications Conference (COMPSAC 94)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2017/2937/0/2937a350",
"title": "Adaptive Quantization-Based HDR Video Coding with HEVC Main 10 Profile",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2017/2937a350/12OmNrFkeVu",
"parentPublication": {
"id": "proceedings/ism/2017/2937/0",
"title": "2017 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dcc/2016/1853/0/07786256",
"title": "Low-Complexity, Backward-Compatible Coding of High Dynamic Range Images and Video",
"doi": null,
"abstractUrl": "/proceedings-article/dcc/2016/07786256/12OmNvAiSzG",
"parentPublication": {
"id": "proceedings/dcc/2016/1853/0",
"title": "2016 Data Compression Conference (DCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2014/4717/0/06890713",
"title": "HDR2014 - A high dynamic range image quality database",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2014/06890713/12OmNwDSdsg",
"parentPublication": {
"id": "proceedings/icmew/2014/4717/0",
"title": "2014 IEEE International Conference on Multimedia and Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2008/2570/0/04607621",
"title": "Reconfigurable bit-stream parser",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2008/04607621/12OmNwtn3nO",
"parentPublication": {
"id": "proceedings/icme/2008/2570/0",
"title": "2008 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dcc/2016/1853/0/07786174",
"title": "Optimal Bitrate Allocation for High Dynamic Range and Wide Color Gamut Services Deployment Using SHVC",
"doi": null,
"abstractUrl": "/proceedings-article/dcc/2016/07786174/12OmNwwd2Pg",
"parentPublication": {
"id": "proceedings/dcc/2016/1853/0",
"title": "2016 Data Compression Conference (DCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dcc/2016/1853/0/07786175",
"title": "Backward Compatible HDR Video Compression System",
"doi": null,
"abstractUrl": "/proceedings-article/dcc/2016/07786175/12OmNx5pj21",
"parentPublication": {
"id": "proceedings/dcc/2016/1853/0",
"title": "2016 Data Compression Conference (DCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dcc/2016/1853/0/07786176",
"title": "Luma Adjustment for High Dynamic Range Video",
"doi": null,
"abstractUrl": "/proceedings-article/dcc/2016/07786176/12OmNzd7bqa",
"parentPublication": {
"id": "proceedings/dcc/2016/1853/0",
"title": "2016 Data Compression Conference (DCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mipr/2019/1198/0/119800a409",
"title": "Reverse Tone Mapping of High Dynamic Range Video Using Gaussian Process Regression",
"doi": null,
"abstractUrl": "/proceedings-article/mipr/2019/119800a409/19wB6BeF7Ne",
"parentPublication": {
"id": "proceedings/mipr/2019/1198/0",
"title": "2019 IEEE Conference on Multimedia Information Processing and Retrieval (MIPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2022/9062/0/09956659",
"title": "DeepHS-HDRVideo: Deep High Speed High Dynamic Range Video Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2022/09956659/1IHpfcAcNUc",
"parentPublication": {
"id": "proceedings/icpr/2022/9062/0",
"title": "2022 26th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNxuXcvH",
"title": "2016 IEEE/ACIS 15th International Conference on Computer and Information Science (ICIS)",
"acronym": "icis",
"groupId": "1001200",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxw5Bpw",
"doi": "10.1109/ICIS.2016.7550796",
"title": "High dynamic range image composition using a linear interpolation approach",
"normalizedTitle": "High dynamic range image composition using a linear interpolation approach",
"abstract": "Linear interpolation is a simple yet effective technique for image composition which works well for low dynamic range (LDR) images with a fixed range of pixel values. However, it cannot provide good performance for high dynamic range (HDR) images because a high luminance image usually dominates the composition result. This paper proposes a novel algorithm for HDR composition using the linear interpolation. Our scheme decomposes HDR images to be composed into three layers where a linear interpolation can be applied on each layer individually that has been normalized. The algorithm contains three steps including the image decomposition, the image feature composition, and finally the HDR map estimation and image re-rendering. Experimental results show that the proposed approach can produce informative HDR composition images balancing the influence caused by the low or high luminance and preserving the contrast, colors, and salience. The comparison demonstrates that our scheme outperforms the current state-of-the-art methods.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Linear interpolation is a simple yet effective technique for image composition which works well for low dynamic range (LDR) images with a fixed range of pixel values. However, it cannot provide good performance for high dynamic range (HDR) images because a high luminance image usually dominates the composition result. This paper proposes a novel algorithm for HDR composition using the linear interpolation. Our scheme decomposes HDR images to be composed into three layers where a linear interpolation can be applied on each layer individually that has been normalized. The algorithm contains three steps including the image decomposition, the image feature composition, and finally the HDR map estimation and image re-rendering. Experimental results show that the proposed approach can produce informative HDR composition images balancing the influence caused by the low or high luminance and preserving the contrast, colors, and salience. The comparison demonstrates that our scheme outperforms the current state-of-the-art methods.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Linear interpolation is a simple yet effective technique for image composition which works well for low dynamic range (LDR) images with a fixed range of pixel values. However, it cannot provide good performance for high dynamic range (HDR) images because a high luminance image usually dominates the composition result. This paper proposes a novel algorithm for HDR composition using the linear interpolation. Our scheme decomposes HDR images to be composed into three layers where a linear interpolation can be applied on each layer individually that has been normalized. The algorithm contains three steps including the image decomposition, the image feature composition, and finally the HDR map estimation and image re-rendering. Experimental results show that the proposed approach can produce informative HDR composition images balancing the influence caused by the low or high luminance and preserving the contrast, colors, and salience. The comparison demonstrates that our scheme outperforms the current state-of-the-art methods.",
"fno": "07550796",
"keywords": [
"Brightness",
"Colour",
"Feature Extraction",
"Interpolation",
"Rendering Computer Graphics",
"High Dynamic Range Imaging",
"Image Composition",
"Linear Interpolation Approach",
"Low Dynamic Range Imaging",
"LDR",
"High Luminance Image",
"HDR Image Decomposition",
"Image Feature Composition",
"Map Estimation",
"Image Re Rendering",
"Image Color Analysis",
"Interpolation",
"Dynamic Range",
"Low Pass Filters",
"Fading Channels",
"Information Filters",
"High Dynamic Range Images",
"Exposure",
"Composition",
"Linear Interpolation",
"Tone Mapping",
"Decomposition"
],
"authors": [
{
"affiliation": "National Center for High-Performance Computing, NARLabs, Hsinchu, Taiwan, National Chung Hsing University, Taichung, Taiwan",
"fullName": "Yun-Te Lin",
"givenName": "Yun-Te",
"surname": "Lin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "National Chung Hsing University, Taichung, Taiwan",
"fullName": "Ming-Long Huang",
"givenName": "Ming-Long",
"surname": "Huang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "National Chung Hsing University, Taichung, Taiwan",
"fullName": "Chung-Ming Wang",
"givenName": "Chung-Ming",
"surname": "Wang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icis",
"isOpenAccess": true,
"showRecommendedArticles": true,
"showBuyMe": false,
"hasPdf": true,
"pubDate": "2016-06-01T00:00:00",
"pubType": "proceedings",
"pages": "1-6",
"year": "2016",
"issn": null,
"isbn": "978-1-5090-0806-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07550795",
"articleId": "12OmNyjLoRO",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07550797",
"articleId": "12OmNBr4eq3",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccp/2014/5188/0/06831807",
"title": "Single shot high dynamic range imaging using piecewise linear estimators",
"doi": null,
"abstractUrl": "/proceedings-article/iccp/2014/06831807/12OmNB1eJxy",
"parentPublication": {
"id": "proceedings/iccp/2014/5188/0",
"title": "2014 IEEE International Conference on Computational Photography (ICCP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2014/5209/0/5209a720",
"title": "Super-high Dynamic Range Imaging",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/5209a720/12OmNCm7BGS",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ainaw/2008/3096/0/3096b029",
"title": "Image Compression Suitable for High Dynamic Range Image Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/ainaw/2008/3096b029/12OmNvlPkAt",
"parentPublication": {
"id": "proceedings/ainaw/2008/3096/0",
"title": "2008 22nd International Conference on Advanced Information Networking and Applications (AINA 2008)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icat/2015/8146/0/07340511",
"title": "Evaluation of noise suppression and luminance reconstruction in high dynamic range image deghosting methods",
"doi": null,
"abstractUrl": "/proceedings-article/icat/2015/07340511/12OmNwbcJ5e",
"parentPublication": {
"id": "proceedings/icat/2015/8146/0",
"title": "2015 XXV International Conference on Information, Communication and Automation Technologies (ICAT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2008/2570/0/04607639",
"title": "Image characteristic oriented tone mapping for high dynamic range images",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2008/04607639/12OmNxWcHab",
"parentPublication": {
"id": "proceedings/icme/2008/2570/0",
"title": "2008 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2010/4109/0/4109c676",
"title": "Noise-Insensitive Contrast Enhancement for Rendering High-Dynamic-Range Images",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2010/4109c676/12OmNyNQSO8",
"parentPublication": {
"id": "proceedings/icpr/2010/4109/0",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dcc/2016/1853/0/07786176",
"title": "Luma Adjustment for High Dynamic Range Video",
"doi": null,
"abstractUrl": "/proceedings-article/dcc/2016/07786176/12OmNzd7bqa",
"parentPublication": {
"id": "proceedings/dcc/2016/1853/0",
"title": "2016 Data Compression Conference (DCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2005/06/mcg2005060057",
"title": "High-Dynamic-Range Still-Image Encoding in JPEG 2000",
"doi": null,
"abstractUrl": "/magazine/cg/2005/06/mcg2005060057/13rRUwgQpwS",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2022/8739/0/873900b182",
"title": "Bidirectional Motion Estimation with Cyclic Cost Volume for High Dynamic Range Imaging",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2022/873900b182/1G56AFpQBri",
"parentPublication": {
"id": "proceedings/cvprw/2022/8739/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2022/9062/0/09956659",
"title": "DeepHS-HDRVideo: Deep High Speed High Dynamic Range Video Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2022/09956659/1IHpfcAcNUc",
"parentPublication": {
"id": "proceedings/icpr/2022/9062/0",
"title": "2022 26th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNzy7uOF",
"title": "Proceedings. Eighth International Conference on Information Visualisation, 2004. IV 2004.",
"acronym": "iv",
"groupId": "1000370",
"volume": "0",
"displayVolume": "0",
"year": "2004",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyo1nMX",
"doi": "10.1109/IV.2004.1320156",
"title": "Two-Channel Technique for High Dynamic Range Image Visualization",
"normalizedTitle": "Two-Channel Technique for High Dynamic Range Image Visualization",
"abstract": "Advances in digital imaging technologies make increasingly available high quality images characterized by High Dynamic Range (HDR) of color components. Usual display devices are unable to visualize the information content of HDR images, especially in presence of light. For this reason special compression techniques for image rendering on conventional displays have been developed. In this work a new technique based on simultaneous compression of low frequency radiance variations and detail enhancement is presented. In this work a method working in the transform domain of an edge oriented wavelet transform is presented. Low-frequency components and high resolution wavelet coefficients are separately manipulated before image reconstruction in order to reduce the overall dynamic range. The effectiveness of the technique is shown by means of significant examples.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Advances in digital imaging technologies make increasingly available high quality images characterized by High Dynamic Range (HDR) of color components. Usual display devices are unable to visualize the information content of HDR images, especially in presence of light. For this reason special compression techniques for image rendering on conventional displays have been developed. In this work a new technique based on simultaneous compression of low frequency radiance variations and detail enhancement is presented. In this work a method working in the transform domain of an edge oriented wavelet transform is presented. Low-frequency components and high resolution wavelet coefficients are separately manipulated before image reconstruction in order to reduce the overall dynamic range. The effectiveness of the technique is shown by means of significant examples.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Advances in digital imaging technologies make increasingly available high quality images characterized by High Dynamic Range (HDR) of color components. Usual display devices are unable to visualize the information content of HDR images, especially in presence of light. For this reason special compression techniques for image rendering on conventional displays have been developed. In this work a new technique based on simultaneous compression of low frequency radiance variations and detail enhancement is presented. In this work a method working in the transform domain of an edge oriented wavelet transform is presented. Low-frequency components and high resolution wavelet coefficients are separately manipulated before image reconstruction in order to reduce the overall dynamic range. The effectiveness of the technique is shown by means of significant examples.",
"fno": "21770269",
"keywords": [
"High Dynamic Range Images",
"Range Compression",
"Complex Edge Wavelet"
],
"authors": [
{
"affiliation": "Fondazione \"Ugo Bordoni\", Roma, Italy",
"fullName": "L. Capodiferro",
"givenName": "L.",
"surname": "Capodiferro",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Rome \"La Sapienza\", Roma, Italy",
"fullName": "E. D. Di Claudio",
"givenName": "E. D. Di",
"surname": "Claudio",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Rome \"La Sapienza\", Roma, Italy",
"fullName": "F. Iacolucci",
"givenName": "F.",
"surname": "Iacolucci",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Rome \"La Sapienza\", Roma, Italy",
"fullName": "A. Laurenti",
"givenName": "A.",
"surname": "Laurenti",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Rome \"La Sapienza\", Roma, Italy",
"fullName": "G. Jacovitti",
"givenName": "G.",
"surname": "Jacovitti",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2004-07-01T00:00:00",
"pubType": "proceedings",
"pages": "269-273",
"year": "2004",
"issn": "1093-9547",
"isbn": "0-7695-2177-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "21770265",
"articleId": "12OmNz2kqpu",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "21770277",
"articleId": "12OmNx4Q6L3",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cenics/2010/4089/0/4089a040",
"title": "A New Approach to Compression of Medical Ultrasound Images Using Wavelet Transform",
"doi": null,
"abstractUrl": "/proceedings-article/cenics/2010/4089a040/12OmNARAn7e",
"parentPublication": {
"id": "proceedings/cenics/2010/4089/0",
"title": "Advances in Circuits, Electronics and Micro-electronics, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciic/2010/4152/0/4152a039",
"title": "Multilingual Lossy Text Compression Using Wavelet Transform",
"doi": null,
"abstractUrl": "/proceedings-article/iciic/2010/4152a039/12OmNAgoV60",
"parentPublication": {
"id": "proceedings/iciic/2010/4152/0",
"title": "Integrated Intelligent Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icinis/2010/4249/0/4249a104",
"title": "Research on Image Compression Algorithm Based on SPHIT",
"doi": null,
"abstractUrl": "/proceedings-article/icinis/2010/4249a104/12OmNAmVH7G",
"parentPublication": {
"id": "proceedings/icinis/2010/4249/0",
"title": "Intelligent Networks and Intelligent Systems, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acct/2013/4941/0/06524275",
"title": "Analysis of Multispectral Image Using Discrete Wavelet Transform",
"doi": null,
"abstractUrl": "/proceedings-article/acct/2013/06524275/12OmNCcbE9D",
"parentPublication": {
"id": "proceedings/acct/2013/4941/0",
"title": "2013 Third International Conference on Advanced Computing & Communication Technologies (ACCT 2013)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/is3c/2012/4655/0/4655a393",
"title": "Enhancement Speech Compression Technique Using Modern Wavelet Transforms",
"doi": null,
"abstractUrl": "/proceedings-article/is3c/2012/4655a393/12OmNrYlmPl",
"parentPublication": {
"id": "proceedings/is3c/2012/4655/0",
"title": "Computer, Consumer and Control, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ainaw/2008/3096/0/3096b029",
"title": "Image Compression Suitable for High Dynamic Range Image Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/ainaw/2008/3096b029/12OmNvlPkAt",
"parentPublication": {
"id": "proceedings/ainaw/2008/3096/0",
"title": "2008 22nd International Conference on Advanced Information Networking and Applications (AINA 2008)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icctd/2009/3892/2/3892b395",
"title": "An Efficient Image Compression Technique Using Peak Transform",
"doi": null,
"abstractUrl": "/proceedings-article/icctd/2009/3892b395/12OmNxWLTjA",
"parentPublication": {
"id": "proceedings/icctd/2009/3892/2",
"title": "Computer Technology and Development, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icip/1994/6952/1/00413276",
"title": "A two-dimensional translation invariant wavelet representation and its applications",
"doi": null,
"abstractUrl": "/proceedings-article/icip/1994/00413276/12OmNzUPpec",
"parentPublication": {
"id": "proceedings/icip/1994/6952/3",
"title": "Proceedings of 1st International Conference on Image Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2005/06/mcg2005060057",
"title": "High-Dynamic-Range Still-Image Encoding in JPEG 2000",
"doi": null,
"abstractUrl": "/magazine/cg/2005/06/mcg2005060057/13rRUwgQpwS",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2010/01/ttg2010010057",
"title": "High-Dynamic-Range Texture Compression for Rendering Systems of Different Capacities",
"doi": null,
"abstractUrl": "/journal/tg/2010/01/ttg2010010057/13rRUwhHcQP",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1G55WEFExd6",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"acronym": "cvprw",
"groupId": "1001809",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1G56FK3UGPe",
"doi": "10.1109/CVPRW56347.2022.00070",
"title": "Multi-Bracket High Dynamic Range Imaging with Event Cameras",
"normalizedTitle": "Multi-Bracket High Dynamic Range Imaging with Event Cameras",
"abstract": "Modern high dynamic range (HDR) imaging pipelines align and fuse multiple low dynamic range (LDR) images captured at different exposure times. While these methods work well in static scenes, dynamic scenes remain a challenge since the LDR images still suffer from saturation and noise. In such scenarios, event cameras would be a valid complement, thanks to their higher temporal resolution and dynamic range. In this paper, we propose the first multi-bracket HDR pipeline combining a standard camera with an event camera. Our results show better overall robustness when using events, with improvements in PSNR by up to 5dB on synthetic data and up to 0.7dB on real-world data. We also introduce a new dataset containing bracketed LDR images with aligned events and HDR ground truth.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Modern high dynamic range (HDR) imaging pipelines align and fuse multiple low dynamic range (LDR) images captured at different exposure times. While these methods work well in static scenes, dynamic scenes remain a challenge since the LDR images still suffer from saturation and noise. In such scenarios, event cameras would be a valid complement, thanks to their higher temporal resolution and dynamic range. In this paper, we propose the first multi-bracket HDR pipeline combining a standard camera with an event camera. Our results show better overall robustness when using events, with improvements in PSNR by up to 5dB on synthetic data and up to 0.7dB on real-world data. We also introduce a new dataset containing bracketed LDR images with aligned events and HDR ground truth.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Modern high dynamic range (HDR) imaging pipelines align and fuse multiple low dynamic range (LDR) images captured at different exposure times. While these methods work well in static scenes, dynamic scenes remain a challenge since the LDR images still suffer from saturation and noise. In such scenarios, event cameras would be a valid complement, thanks to their higher temporal resolution and dynamic range. In this paper, we propose the first multi-bracket HDR pipeline combining a standard camera with an event camera. Our results show better overall robustness when using events, with improvements in PSNR by up to 5dB on synthetic data and up to 0.7dB on real-world data. We also introduce a new dataset containing bracketed LDR images with aligned events and HDR ground truth.",
"fno": "873900a546",
"keywords": [
"Cameras",
"Image Fusion",
"Image Motion Analysis",
"Image Reconstruction",
"Image Resolution",
"Image Sensors",
"Video Signal Processing",
"Multibracket High Dynamic Range Imaging",
"Event Camera",
"Modern High Dynamic Range Imaging Pipelines",
"Static Scenes",
"Dynamic Scenes",
"Temporal Resolution",
"Multibracket HDR Pipeline",
"Standard Camera",
"Bracketed LDR Images",
"Multiple Low Dynamic Range Image Fusion",
"Image Resolution",
"Fuses",
"Conferences",
"Pipelines",
"Dynamic Range",
"Cameras",
"Robustness"
],
"authors": [
{
"affiliation": "Univ. of Zurich,Dept. of Informatics",
"fullName": "Nico Messikommer",
"givenName": "Nico",
"surname": "Messikommer",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Huawei Technologies,Zurich Research Center",
"fullName": "Stamatios Georgoulis",
"givenName": "Stamatios",
"surname": "Georgoulis",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Univ. of Zurich,Dept. of Informatics",
"fullName": "Daniel Gehrig",
"givenName": "Daniel",
"surname": "Gehrig",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Huawei Technologies,Zurich Research Center",
"fullName": "Stepan Tulyakov",
"givenName": "Stepan",
"surname": "Tulyakov",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Huawei Technologies,Zurich Research Center",
"fullName": "Julius Erbach",
"givenName": "Julius",
"surname": "Erbach",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Huawei Technologies,Zurich Research Center",
"fullName": "Alfredo Bochicchio",
"givenName": "Alfredo",
"surname": "Bochicchio",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Huawei Technologies,Zurich Research Center",
"fullName": "Yuanyou Li",
"givenName": "Yuanyou",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Univ. of Zurich,Dept. of Informatics",
"fullName": "Davide Scaramuzza",
"givenName": "Davide",
"surname": "Scaramuzza",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvprw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-06-01T00:00:00",
"pubType": "proceedings",
"pages": "546-556",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-8739-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1G56FFH7mJG",
"name": "pcvprw202287390-09857341s1-mm_873900a546.zip",
"size": "10.4 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pcvprw202287390-09857341s1-mm_873900a546.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "873900a536",
"articleId": "1G57gdwHb5m",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "873900a557",
"articleId": "1G573IG23Uk",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icpr/2014/5209/0/5209a720",
"title": "Super-high Dynamic Range Imaging",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/5209a720/12OmNCm7BGS",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2015/0379/0/0379a289",
"title": "Evaluation of Feature Detection in HDR Based Imaging Under Changes in Illumination Conditions",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2015/0379a289/12OmNrFBPVE",
"parentPublication": {
"id": "proceedings/ism/2015/0379/0",
"title": "2015 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2015/06/06915885",
"title": "Robust High Dynamic Range Imaging by Rank Minimization",
"doi": null,
"abstractUrl": "/journal/tp/2015/06/06915885/13rRUxlgxXH",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2022/0915/0/091500a041",
"title": "Single-Photon Camera Guided Extreme Dynamic Range Imaging",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2022/091500a041/1B13zsIYHrG",
"parentPublication": {
"id": "proceedings/wacv/2022/0915/0",
"title": "2022 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2022/8739/0/873900b182",
"title": "Bidirectional Motion Estimation with Cyclic Cost Volume for High Dynamic Range Imaging",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2022/873900b182/1G56AFpQBri",
"parentPublication": {
"id": "proceedings/cvprw/2022/8739/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2022/8739/0/873900b031",
"title": "Gamma-enhanced Spatial Attention Network for Efficient High Dynamic Range Imaging",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2022/873900b031/1G56nGzWShG",
"parentPublication": {
"id": "proceedings/cvprw/2022/8739/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ec/2021/04/08930095",
"title": "Subjective Evaluation of High Dynamic Range Imaging for Face Matching",
"doi": null,
"abstractUrl": "/journal/ec/2021/04/08930095/1fCCON8Dwwo",
"parentPublication": {
"id": "trans/ec",
"title": "IEEE Transactions on Emerging Topics in Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2021/4899/0/489900a463",
"title": "ADNet: Attention-guided Deformable Convolutional Network for High Dynamic Range Imaging",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2021/489900a463/1yJYejaleb6",
"parentPublication": {
"id": "proceedings/cvprw/2021/4899/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2021/4899/0/489900a550",
"title": "A Two-stage Deep Network for High Dynamic Range Image Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2021/489900a550/1yJYflzAa2I",
"parentPublication": {
"id": "proceedings/cvprw/2021/4899/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2021/4899/0/489900a691",
"title": "NTIRE 2021 Challenge on High Dynamic Range Imaging: Dataset, Methods and Results",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2021/489900a691/1yJYojK19i8",
"parentPublication": {
"id": "proceedings/cvprw/2021/4899/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1G55WEFExd6",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"acronym": "cvprw",
"groupId": "1001809",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1G56nGzWShG",
"doi": "10.1109/CVPRW56347.2022.00116",
"title": "Gamma-enhanced Spatial Attention Network for Efficient High Dynamic Range Imaging",
"normalizedTitle": "Gamma-enhanced Spatial Attention Network for Efficient High Dynamic Range Imaging",
"abstract": "High dynamic range(HDR) imaging is the task of re-covering HDR image from one or multiple input Low Dynamic Range (LDR) images. In this paper, we present Gamma-enhanced Spatial Attention Network(GSANet), a novel framework for reconstructing HDR images. This problem comprises two intractable challenges of how to tackle overexposed and underexposed regions and how to overcome the paradox of performance and complexity trade-off. To address the former, after applying gamma correction on the LDR images, we adopt a spatial attention module to adaptively select the most appropriate regions of various exposure low dynamic range images for fusion. For the latter one, we propose an efficient channel attention module, which only involves a handful of parameters while bringing clear performance gain. Experimental results show that the proposed method achieves better visual quality on the HDR dataset. The code will be available at: https://github.com/fancyicookie/GSANet",
"abstracts": [
{
"abstractType": "Regular",
"content": "High dynamic range(HDR) imaging is the task of re-covering HDR image from one or multiple input Low Dynamic Range (LDR) images. In this paper, we present Gamma-enhanced Spatial Attention Network(GSANet), a novel framework for reconstructing HDR images. This problem comprises two intractable challenges of how to tackle overexposed and underexposed regions and how to overcome the paradox of performance and complexity trade-off. To address the former, after applying gamma correction on the LDR images, we adopt a spatial attention module to adaptively select the most appropriate regions of various exposure low dynamic range images for fusion. For the latter one, we propose an efficient channel attention module, which only involves a handful of parameters while bringing clear performance gain. Experimental results show that the proposed method achieves better visual quality on the HDR dataset. The code will be available at: https://github.com/fancyicookie/GSANet",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "High dynamic range(HDR) imaging is the task of re-covering HDR image from one or multiple input Low Dynamic Range (LDR) images. In this paper, we present Gamma-enhanced Spatial Attention Network(GSANet), a novel framework for reconstructing HDR images. This problem comprises two intractable challenges of how to tackle overexposed and underexposed regions and how to overcome the paradox of performance and complexity trade-off. To address the former, after applying gamma correction on the LDR images, we adopt a spatial attention module to adaptively select the most appropriate regions of various exposure low dynamic range images for fusion. For the latter one, we propose an efficient channel attention module, which only involves a handful of parameters while bringing clear performance gain. Experimental results show that the proposed method achieves better visual quality on the HDR dataset. The code will be available at: https://github.com/fancyicookie/GSANet",
"fno": "873900b031",
"keywords": [
"Image Enhancement",
"Image Fusion",
"Image Reconstruction",
"High Dynamic Range Imaging",
"Gamma Enhanced Spatial Attention Network",
"Multiple Input Low Dynamic Range Images",
"Dynamic Range",
"HDR Dataset",
"Channel Attention Module",
"Exposure Low Dynamic Range Images",
"Spatial Attention Module",
"Gamma Correction",
"Complexity Trade",
"HDR Image Reconstruction",
"Visualization",
"Codes",
"Conferences",
"Pipelines",
"Imaging",
"Dynamic Range",
"Performance Gain"
],
"authors": [
{
"affiliation": "State Key Laboratory of Media Convergence and Communication(CUC),Beijing,China,100024",
"fullName": "Fangya Li",
"givenName": "Fangya",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "NRTA,Academy of Broadcasting Sciencience,Beijing,China,100866",
"fullName": "Ruipeng Gang",
"givenName": "Ruipeng",
"surname": "Gang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Chinese Academy of Science,Institute of Automation,Beijing,China,100190",
"fullName": "Chenghua Li",
"givenName": "Chenghua",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "State Key Laboratory of Media Convergence and Communication(CUC),Beijing,China,100024",
"fullName": "Jinjing Li",
"givenName": "Jinjing",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "NRTA,Academy of Broadcasting Sciencience,Beijing,China,100866",
"fullName": "Sai Ma",
"givenName": "Sai",
"surname": "Ma",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "NRTA,Academy of Broadcasting Sciencience,Beijing,China,100866",
"fullName": "Chenming Liu",
"givenName": "Chenming",
"surname": "Liu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "State Key Laboratory of Media Convergence and Communication(CUC),Beijing,China,100024",
"fullName": "Yizhen Cao",
"givenName": "Yizhen",
"surname": "Cao",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvprw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-06-01T00:00:00",
"pubType": "proceedings",
"pages": "1031-1039",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-8739-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "873900b023",
"articleId": "1G56C3LjkC4",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "873900b040",
"articleId": "1G55ZIfZPwI",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icmew/2014/4717/0/06890713",
"title": "HDR2014 - A high dynamic range image quality database",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2014/06890713/12OmNwDSdsg",
"parentPublication": {
"id": "proceedings/icmew/2014/4717/0",
"title": "2014 IEEE International Conference on Multimedia and Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2015/06/06915885",
"title": "Robust High Dynamic Range Imaging by Rank Minimization",
"doi": null,
"abstractUrl": "/journal/tp/2015/06/06915885/13rRUxlgxXH",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2019/1975/0/197500a041",
"title": "Multi-Scale Dense Networks for Deep High Dynamic Range Imaging",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2019/197500a041/18j8GO2zHcA",
"parentPublication": {
"id": "proceedings/wacv/2019/1975/0",
"title": "2019 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2022/0915/0/091500a061",
"title": "High Dynamic Range Imaging of Dynamic Scenes with Saturation Compensation but without Explicit Motion Compensation",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2022/091500a061/1B134htBm6I",
"parentPublication": {
"id": "proceedings/wacv/2022/0915/0",
"title": "2022 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2022/8739/0/873900b182",
"title": "Bidirectional Motion Estimation with Cyclic Cost Volume for High Dynamic Range Imaging",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2022/873900b182/1G56AFpQBri",
"parentPublication": {
"id": "proceedings/cvprw/2022/8739/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2022/8739/0/873900a546",
"title": "Multi-Bracket High Dynamic Range Imaging with Event Cameras",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2022/873900a546/1G56FK3UGPe",
"parentPublication": {
"id": "proceedings/cvprw/2022/8739/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ec/2021/04/08930095",
"title": "Subjective Evaluation of High Dynamic Range Imaging for Face Matching",
"doi": null,
"abstractUrl": "/journal/ec/2021/04/08930095/1fCCON8Dwwo",
"parentPublication": {
"id": "trans/ec",
"title": "IEEE Transactions on Emerging Topics in Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2021/4899/0/489900a463",
"title": "ADNet: Attention-guided Deformable Convolutional Network for High Dynamic Range Imaging",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2021/489900a463/1yJYejaleb6",
"parentPublication": {
"id": "proceedings/cvprw/2021/4899/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2021/4899/0/489900a550",
"title": "A Two-stage Deep Network for High Dynamic Range Image Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2021/489900a550/1yJYflzAa2I",
"parentPublication": {
"id": "proceedings/cvprw/2021/4899/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2021/4899/0/489900a691",
"title": "NTIRE 2021 Challenge on High Dynamic Range Imaging: Dataset, Methods and Results",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2021/489900a691/1yJYojK19i8",
"parentPublication": {
"id": "proceedings/cvprw/2021/4899/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1yeHGyRsuys",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1yeJuGu5Xvq",
"doi": "10.1109/CVPR46437.2021.00762",
"title": "Neural Auto-Exposure for High-Dynamic Range Object Detection",
"normalizedTitle": "Neural Auto-Exposure for High-Dynamic Range Object Detection",
"abstract": "Real-world scenes have a dynamic range of up to 280 dB that todays imaging sensors cannot directly capture. Existing live vision pipelines tackle this fundamental challenge by relying on high dynamic range (HDR) sensors that try to recover HDR images from multiple captures with different exposures. While HDR sensors substantially increase the dynamic range, they are not without disadvantages, including severe artifacts for dynamic scenes, reduced fill-factor, lower resolution, and high sensor cost. At the same time, traditional auto-exposure methods for low-dynamic range sensors have advanced as proprietary methods relying on image statistics separated from downstream vision algorithms. In this work, we revisit auto-exposure control as an alternative to HDR sensors. We propose a neural net-work for exposure selection that is trained jointly, end-to-end with an object detector and an image signal processing (ISP) pipeline. To this end, we use an HDR dataset for automotive object detection and an HDR training procedure. We validate that the proposed neural auto-exposure control, which is tailored to object detection, outperforms conventional auto-exposure methods by more than 6 points in mean average precision (mAP).",
"abstracts": [
{
"abstractType": "Regular",
"content": "Real-world scenes have a dynamic range of up to 280 dB that todays imaging sensors cannot directly capture. Existing live vision pipelines tackle this fundamental challenge by relying on high dynamic range (HDR) sensors that try to recover HDR images from multiple captures with different exposures. While HDR sensors substantially increase the dynamic range, they are not without disadvantages, including severe artifacts for dynamic scenes, reduced fill-factor, lower resolution, and high sensor cost. At the same time, traditional auto-exposure methods for low-dynamic range sensors have advanced as proprietary methods relying on image statistics separated from downstream vision algorithms. In this work, we revisit auto-exposure control as an alternative to HDR sensors. We propose a neural net-work for exposure selection that is trained jointly, end-to-end with an object detector and an image signal processing (ISP) pipeline. To this end, we use an HDR dataset for automotive object detection and an HDR training procedure. We validate that the proposed neural auto-exposure control, which is tailored to object detection, outperforms conventional auto-exposure methods by more than 6 points in mean average precision (mAP).",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Real-world scenes have a dynamic range of up to 280 dB that todays imaging sensors cannot directly capture. Existing live vision pipelines tackle this fundamental challenge by relying on high dynamic range (HDR) sensors that try to recover HDR images from multiple captures with different exposures. While HDR sensors substantially increase the dynamic range, they are not without disadvantages, including severe artifacts for dynamic scenes, reduced fill-factor, lower resolution, and high sensor cost. At the same time, traditional auto-exposure methods for low-dynamic range sensors have advanced as proprietary methods relying on image statistics separated from downstream vision algorithms. In this work, we revisit auto-exposure control as an alternative to HDR sensors. We propose a neural net-work for exposure selection that is trained jointly, end-to-end with an object detector and an image signal processing (ISP) pipeline. To this end, we use an HDR dataset for automotive object detection and an HDR training procedure. We validate that the proposed neural auto-exposure control, which is tailored to object detection, outperforms conventional auto-exposure methods by more than 6 points in mean average precision (mAP).",
"fno": "450900h706",
"keywords": [
"Image Resolution",
"Image Sensors",
"Learning Artificial Intelligence",
"Neural Nets",
"Object Detection",
"Statistics",
"High Dynamic Range Object Detection",
"Live Vision Pipelines",
"High Dynamic Range Sensors",
"HDR Image Sensors",
"Reduced Fill Factor",
"Low Dynamic Range Sensors",
"Image Statistics",
"Downstream Vision Algorithms",
"Neural Network",
"Image Signal Processing Pipeline",
"HDR Dataset",
"Automotive Object Detection",
"HDR Training Procedure",
"Neural Autoexposure Control Methods",
"ISP Pipeline",
"M AP",
"Mean Average Precision",
"Image Sensors",
"Training",
"Computer Vision",
"Pipelines",
"Object Detection",
"Computer Architecture",
"Dynamic Range"
],
"authors": [
{
"affiliation": "Algolux",
"fullName": "Emmanuel Onzon",
"givenName": "Emmanuel",
"surname": "Onzon",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Algolux",
"fullName": "Fahim Mannan",
"givenName": "Fahim",
"surname": "Mannan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Princeton University,Algolux",
"fullName": "Felix Heide",
"givenName": "Felix",
"surname": "Heide",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-06-01T00:00:00",
"pubType": "proceedings",
"pages": "7706-7716",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-4509-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1yeJuz37ogE",
"name": "pcvpr202145090-09578074s1-mm_450900h706.zip",
"size": "15.6 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pcvpr202145090-09578074s1-mm_450900h706.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "450900h696",
"articleId": "1yeIDvyC9NK",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "450900h717",
"articleId": "1yeL8iHj6iQ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ism/2016/4571/0/4571a119",
"title": "Feedback Control System for Exposure Optimization in High-Dynamic-Range Multimedia Sensing",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2016/4571a119/12OmNA0MZab",
"parentPublication": {
"id": "proceedings/ism/2016/4571/0",
"title": "2016 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2016/4571/0/4571a397",
"title": "Extrapolative Lightspace Method for HDR Video Exposure Selection",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2016/4571a397/12OmNB8TUgg",
"parentPublication": {
"id": "proceedings/ism/2016/4571/0",
"title": "2016 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pg/2007/3009/0/30090382",
"title": "Exposure Fusion",
"doi": null,
"abstractUrl": "/proceedings-article/pg/2007/30090382/12OmNx4gUxY",
"parentPublication": {
"id": "proceedings/pg/2007/3009/0",
"title": "Computer Graphics and Applications, Pacific Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isspit/2006/9753/0/04042299",
"title": "Minimal Capture Sets for Multi-Exposure Enhanced-Dynamic-Range Imaging",
"doi": null,
"abstractUrl": "/proceedings-article/isspit/2006/04042299/12OmNyuyaax",
"parentPublication": {
"id": "proceedings/isspit/2006/9753/0",
"title": "2006 IEEE International Symposium on Signal Processing and Information Technology",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2022/0915/0/091500a061",
"title": "High Dynamic Range Imaging of Dynamic Scenes with Saturation Compensation but without Explicit Motion Compensation",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2022/091500a061/1B134htBm6I",
"parentPublication": {
"id": "proceedings/wacv/2022/0915/0",
"title": "2022 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2022/0915/0/091500a041",
"title": "Single-Photon Camera Guided Extreme Dynamic Range Imaging",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2022/091500a041/1B13zsIYHrG",
"parentPublication": {
"id": "proceedings/wacv/2022/0915/0",
"title": "2022 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2022/8739/0/873900a546",
"title": "Multi-Bracket High Dynamic Range Imaging with Event Cameras",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2022/873900a546/1G56FK3UGPe",
"parentPublication": {
"id": "proceedings/cvprw/2022/8739/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2022/8739/0/873900b031",
"title": "Gamma-enhanced Spatial Attention Network for Efficient High Dynamic Range Imaging",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2022/873900b031/1G56nGzWShG",
"parentPublication": {
"id": "proceedings/cvprw/2022/8739/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/crv/2020/9891/0/09108676",
"title": "Gradient-Based Auto-Exposure Control Applied to a Self-Driving Car",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2020/09108676/1kpIEpwgpSE",
"parentPublication": {
"id": "proceedings/crv/2020/9891/0",
"title": "2020 17th Conference on Computer and Robot Vision (CRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900g293",
"title": "End-to-end High Dynamic Range Camera Pipeline Optimization",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900g293/1yeK6nSzK1y",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1yeHGyRsuys",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1yeK6nSzK1y",
"doi": "10.1109/CVPR46437.2021.00623",
"title": "End-to-end High Dynamic Range Camera Pipeline Optimization",
"normalizedTitle": "End-to-end High Dynamic Range Camera Pipeline Optimization",
"abstract": "The real world is a 280 dB High Dynamic Range (HDR) world which imaging sensors cannot record in a single shot. HDR cameras acquire multiple measurements with different exposures, gains and photodiodes, from which an Image Signal Processor (ISP) reconstructs an HDR image. Dynamic scene HDR image recovery is an open challenge because of motion and because stitched captures have different noise characteristics, resulting in artifacts that ISPs must resolve in real time at double-digit megapixel resolutions. Traditionally, ISP settings used by downstream vision modules are chosen by domain experts; such frozen camera designs are then used for training data acquisition and supervised learning of downstream vision modules. We depart from this paradigm and formulate HDR ISP hyperparameter search as an end-to-end optimization problem, proposing a mixed 0<sup>th</sup> and 1<sup>st</sup>-order block coordinate descent optimizer that jointly learns sensor, ISP and detector network weights using RAW image data augmented with emulated SNR transition region artifacts. We assess the proposed method for human vision and image understanding. For automotive object detection, the method improves mAP and mAR by 33% over expert-tuning and 22% over state-of-the-art optimization methods, outperforming expert-tuned HDR imaging and vision pipelines in all HDR laboratory rig and field experiments.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The real world is a 280 dB High Dynamic Range (HDR) world which imaging sensors cannot record in a single shot. HDR cameras acquire multiple measurements with different exposures, gains and photodiodes, from which an Image Signal Processor (ISP) reconstructs an HDR image. Dynamic scene HDR image recovery is an open challenge because of motion and because stitched captures have different noise characteristics, resulting in artifacts that ISPs must resolve in real time at double-digit megapixel resolutions. Traditionally, ISP settings used by downstream vision modules are chosen by domain experts; such frozen camera designs are then used for training data acquisition and supervised learning of downstream vision modules. We depart from this paradigm and formulate HDR ISP hyperparameter search as an end-to-end optimization problem, proposing a mixed 0<sup>th</sup> and 1<sup>st</sup>-order block coordinate descent optimizer that jointly learns sensor, ISP and detector network weights using RAW image data augmented with emulated SNR transition region artifacts. We assess the proposed method for human vision and image understanding. For automotive object detection, the method improves mAP and mAR by 33% over expert-tuning and 22% over state-of-the-art optimization methods, outperforming expert-tuned HDR imaging and vision pipelines in all HDR laboratory rig and field experiments.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The real world is a 280 dB High Dynamic Range (HDR) world which imaging sensors cannot record in a single shot. HDR cameras acquire multiple measurements with different exposures, gains and photodiodes, from which an Image Signal Processor (ISP) reconstructs an HDR image. Dynamic scene HDR image recovery is an open challenge because of motion and because stitched captures have different noise characteristics, resulting in artifacts that ISPs must resolve in real time at double-digit megapixel resolutions. Traditionally, ISP settings used by downstream vision modules are chosen by domain experts; such frozen camera designs are then used for training data acquisition and supervised learning of downstream vision modules. We depart from this paradigm and formulate HDR ISP hyperparameter search as an end-to-end optimization problem, proposing a mixed 0th and 1st-order block coordinate descent optimizer that jointly learns sensor, ISP and detector network weights using RAW image data augmented with emulated SNR transition region artifacts. We assess the proposed method for human vision and image understanding. For automotive object detection, the method improves mAP and mAR by 33% over expert-tuning and 22% over state-of-the-art optimization methods, outperforming expert-tuned HDR imaging and vision pipelines in all HDR laboratory rig and field experiments.",
"fno": "450900g293",
"keywords": [
"Cameras",
"Computerised Instrumentation",
"Data Acquisition",
"Image Motion Analysis",
"Image Reconstruction",
"Image Resolution",
"Image Sensors",
"Optimisation",
"Photodetectors",
"Photodiodes",
"Pipelines",
"Supervised Learning",
"Photodiodes",
"Image Signal Processor",
"Double Digit Megapixel Resolutions",
"Downstream Vision Modules",
"Frozen Camera Designs",
"Training Data Acquisition",
"Supervised Learning",
"End To End Optimization Problem",
"Descent Optimizer",
"RAW Image Data",
"Emulated SNR Transition Region Artifacts",
"Optimization Methods",
"Vision Pipelines",
"HDR Cameras",
"End To End High Dynamic Range Camera Pipeline Optimization",
"Dynamic Scene HDR Image Recovery",
"HDR ISP Settings",
"Data Acquisition",
"Gain 280 0 D B",
"Image Sensors",
"Image Resolution",
"Pipelines",
"Training Data",
"Optimization Methods",
"Object Detection",
"Dynamic Range"
],
"authors": [
{
"affiliation": "Algolux",
"fullName": "Nicolas Robidoux",
"givenName": "Nicolas",
"surname": "Robidoux",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Algolux",
"fullName": "Dong-eun Seo",
"givenName": "Dong-eun",
"surname": "Seo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Algolux",
"fullName": "Federico Ariza",
"givenName": "Federico",
"surname": "Ariza",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Algolux",
"fullName": "Luis E. García Capel",
"givenName": "Luis E.",
"surname": "García Capel",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Algolux",
"fullName": "Avinash Sharma",
"givenName": "Avinash",
"surname": "Sharma",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Algolux",
"fullName": "Felix Heide",
"givenName": "Felix",
"surname": "Heide",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-06-01T00:00:00",
"pubType": "proceedings",
"pages": "6293-6303",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-4509-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1yeK6gRIgKI",
"name": "pcvpr202145090-09577748s1-mm_450900g293.zip",
"size": "14.5 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pcvpr202145090-09577748s1-mm_450900g293.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "450900g283",
"articleId": "1yeLROFTUfC",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "450900g304",
"articleId": "1yeKM7CH3by",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icpr/2014/5209/0/5209a720",
"title": "Super-high Dynamic Range Imaging",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/5209a720/12OmNCm7BGS",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isspit/2017/4662/0/08388688",
"title": "Image tone mapping approach using essentially non-oscillatory bi-quadratic interpolations combined with a weighting coefficients strategy",
"doi": null,
"abstractUrl": "/proceedings-article/isspit/2017/08388688/12OmNrIrPqg",
"parentPublication": {
"id": "proceedings/isspit/2017/4662/0",
"title": "2017 IEEE International Symposium on Signal Processing and Information Technology (ISSPIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2005/06/mcg2005060057",
"title": "High-Dynamic-Range Still-Image Encoding in JPEG 2000",
"doi": null,
"abstractUrl": "/magazine/cg/2005/06/mcg2005060057/13rRUwgQpwS",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacvw/2022/5824/0/582400a729",
"title": "Joint Multi-Scale Tone Mapping and Denoising for HDR Image Enhancement",
"doi": null,
"abstractUrl": "/proceedings-article/wacvw/2022/582400a729/1B12qQ9zLfG",
"parentPublication": {
"id": "proceedings/wacvw/2022/5824/0",
"title": "2022 IEEE/CVF Winter Conference on Applications of Computer Vision Workshops (WACVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2022/0915/0/091500a041",
"title": "Single-Photon Camera Guided Extreme Dynamic Range Imaging",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2022/091500a041/1B13zsIYHrG",
"parentPublication": {
"id": "proceedings/wacv/2022/0915/0",
"title": "2022 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/2.812E233",
"title": "ReconfigISP: Reconfigurable Camera Image Processing Pipeline",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/2.812E233/1BmFUv5QgLe",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2022/8739/0/873900a546",
"title": "Multi-Bracket High Dynamic Range Imaging with Event Cameras",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2022/873900a546/1G56FK3UGPe",
"parentPublication": {
"id": "proceedings/cvprw/2022/8739/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2022/8739/0/873900b031",
"title": "Gamma-enhanced Spatial Attention Network for Efficient High Dynamic Range Imaging",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2022/873900b031/1G56nGzWShG",
"parentPublication": {
"id": "proceedings/cvprw/2022/8739/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800b648",
"title": "Single-Image HDR Reconstruction by Learning to Reverse the Camera Pipeline",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800b648/1m3os7RYAk8",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900h706",
"title": "Neural Auto-Exposure for High-Dynamic Range Object Detection",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900h706/1yeJuGu5Xvq",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNzUPpz7",
"title": "2017 21st International Conference on Control Systems and Computer Science (CSCS)",
"acronym": "cscs",
"groupId": "1802635",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNARRYpY",
"doi": "10.1109/CSCS.2017.38",
"title": "Multimodal Interface for Ambient Assisted Living",
"normalizedTitle": "Multimodal Interface for Ambient Assisted Living",
"abstract": "Multimodal interfaces seamlessly integrate two or more user inputs in a coordinated manner to enhance user interaction with the system. This work proposes a multimodal interface that makes use of hand tracking and speech interactions for Ambient Intelligent and Ambient Assisted Living environments. The system is composed of two main modules: Spoken Language Interaction module and Hand Tracking Interaction module. Spoken Language Interaction use speech recognition services to input the user utterance to a natural language understanding module from which the dialog manager will match the context and output the corresponding answer to the user. Hand Tracking Interactions is performed using a Microsoft Kinect device to detect the 3D coordinate of the hand which is stabilized and transformed to be used as a cursor on the screen. Tests have been run on the speech recognition and natural language understanding framework and reached an accuracy of 83.7% in offering the user a correct answer from the first try. The accuracy results of the Dialog manager in offering the user a correct output was 91.6% computed on a data set of 3900 sentences.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Multimodal interfaces seamlessly integrate two or more user inputs in a coordinated manner to enhance user interaction with the system. This work proposes a multimodal interface that makes use of hand tracking and speech interactions for Ambient Intelligent and Ambient Assisted Living environments. The system is composed of two main modules: Spoken Language Interaction module and Hand Tracking Interaction module. Spoken Language Interaction use speech recognition services to input the user utterance to a natural language understanding module from which the dialog manager will match the context and output the corresponding answer to the user. Hand Tracking Interactions is performed using a Microsoft Kinect device to detect the 3D coordinate of the hand which is stabilized and transformed to be used as a cursor on the screen. Tests have been run on the speech recognition and natural language understanding framework and reached an accuracy of 83.7% in offering the user a correct answer from the first try. The accuracy results of the Dialog manager in offering the user a correct output was 91.6% computed on a data set of 3900 sentences.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Multimodal interfaces seamlessly integrate two or more user inputs in a coordinated manner to enhance user interaction with the system. This work proposes a multimodal interface that makes use of hand tracking and speech interactions for Ambient Intelligent and Ambient Assisted Living environments. The system is composed of two main modules: Spoken Language Interaction module and Hand Tracking Interaction module. Spoken Language Interaction use speech recognition services to input the user utterance to a natural language understanding module from which the dialog manager will match the context and output the corresponding answer to the user. Hand Tracking Interactions is performed using a Microsoft Kinect device to detect the 3D coordinate of the hand which is stabilized and transformed to be used as a cursor on the screen. Tests have been run on the speech recognition and natural language understanding framework and reached an accuracy of 83.7% in offering the user a correct answer from the first try. The accuracy results of the Dialog manager in offering the user a correct output was 91.6% computed on a data set of 3900 sentences.",
"fno": "07968566",
"keywords": [
"Assisted Living",
"Natural Language Interfaces",
"Natural Language Processing",
"Speech Recognition",
"Multimodal Interfaces",
"User Interaction",
"Speech Interactions",
"Ambient Intelligent",
"Ambient Assisted Living Environments",
"Spoken Language Interaction Module",
"Hand Tracking Interaction Module",
"Speech Recognition Services",
"User Utterance",
"Natural Language Understanding Module",
"Hand Tracking Interactions",
"Microsoft Kinect Device",
"Natural Language Understanding Framework",
"Dialog Manager",
"Speech Recognition",
"Speech",
"Ambient Assisted Living",
"Natural Languages",
"Senior Citizens",
"Statistics",
"Multimodal Interface",
"Voice Interactions",
"Hand Tracking"
],
"authors": [
{
"affiliation": null,
"fullName": "Alexandru Florin Gavril",
"givenName": "Alexandru Florin",
"surname": "Gavril",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Mihai Trascau",
"givenName": "Mihai",
"surname": "Trascau",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Irina Mocanu",
"givenName": "Irina",
"surname": "Mocanu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cscs",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-05-01T00:00:00",
"pubType": "proceedings",
"pages": "223-230",
"year": "2017",
"issn": "2379-0482",
"isbn": "978-1-5386-1839-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07968565",
"articleId": "12OmNxiKs2n",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07968567",
"articleId": "12OmNyGKUme",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ie/2011/4452/0/4452a310",
"title": "Multi-agent Interactions for Ambient Assisted Living",
"doi": null,
"abstractUrl": "/proceedings-article/ie/2011/4452a310/12OmNB836Lu",
"parentPublication": {
"id": "proceedings/ie/2011/4452/0",
"title": "Intelligent Environments, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aiccsa/2017/3581/0/3581b443",
"title": "An Enhanced Conceptual Model for Using Ambient Assisted Living to Provide a Home Proactive Monitoring System for Elderly Saudi Arabians",
"doi": null,
"abstractUrl": "/proceedings-article/aiccsa/2017/3581b443/12OmNBOCWt6",
"parentPublication": {
"id": "proceedings/aiccsa/2017/3581/0",
"title": "2017 IEEE/ACS 14th International Conference on Computer Systems and Applications (AICCSA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/broadcom/2008/3453/0/3453a110",
"title": "An Engineering Toolbox to Build Situation Aware Ambient Assisted Living Systems",
"doi": null,
"abstractUrl": "/proceedings-article/broadcom/2008/3453a110/12OmNwtn3q8",
"parentPublication": {
"id": "proceedings/broadcom/2008/3453/0",
"title": "Broadband Communications, Information Technology & Biomedical Applications, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pdp/2008/3089/0/3089a556",
"title": "Towards Building Virtual Community for Ambient Assisted Living",
"doi": null,
"abstractUrl": "/proceedings-article/pdp/2008/3089a556/12OmNxGALbx",
"parentPublication": {
"id": "proceedings/pdp/2008/3089/0",
"title": "2008 16th Euromicro Conference on Parallel, Distributed and Network-based Processing - PDP '08",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/itng/2009/3596/0/3596b201",
"title": "Promises and Challenges of Ambient Assisted Living Systems",
"doi": null,
"abstractUrl": "/proceedings-article/itng/2009/3596b201/12OmNy50g2h",
"parentPublication": {
"id": "proceedings/itng/2009/3596/0",
"title": "Information Technology: New Generations, Third International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/waina/2009/3639/0/3639a531",
"title": "A Service Oriented Platform for Health Services and Ambient Assisted Living",
"doi": null,
"abstractUrl": "/proceedings-article/waina/2009/3639a531/12OmNzVoBOx",
"parentPublication": {
"id": "proceedings/waina/2009/3639/0",
"title": "2009 International Conference on Advanced Information Networking and Applications Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/uic-atc-scalcom-cbdcom-iop-smartworld/2016/2771/0/07816845",
"title": "Event-Driven and District-Related Home Care: Networked Technical Building Equipment (TBE) as a Basis for Ambient Assisted Living",
"doi": null,
"abstractUrl": "/proceedings-article/uic-atc-scalcom-cbdcom-iop-smartworld/2016/07816845/12OmNzcxZdG",
"parentPublication": {
"id": "proceedings/uic-atc-scalcom-cbdcom-iop-smartworld/2016/2771/0",
"title": "2016 Intl IEEE Conferences on Ubiquitous Intelligence & Computing, Advanced and Trusted Computing, Scalable Computing and Communications, Cloud and Big Data Computing, Internet of People, and Smart World Congress (UIC/ATC/ScalCom/CBDCom/IoP/SmartWorld)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/ex/2015/04/mex2015040016",
"title": "Using RFID to Detect Interactions in Ambient Assisted Living Environments",
"doi": null,
"abstractUrl": "/magazine/ex/2015/04/mex2015040016/13rRUNvgyUS",
"parentPublication": {
"id": "mags/ex",
"title": "IEEE Intelligent Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/ex/2015/04/mex2015040002",
"title": "Ambient Assisted Living [Guest editors' introduction]",
"doi": null,
"abstractUrl": "/magazine/ex/2015/04/mex2015040002/13rRUwgQpvT",
"parentPublication": {
"id": "mags/ex",
"title": "IEEE Intelligent Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iscc/2018/6950/0/08538563",
"title": "A Failure Detector for Ambient Assisted Living",
"doi": null,
"abstractUrl": "/proceedings-article/iscc/2018/08538563/17D45WcjjPR",
"parentPublication": {
"id": "proceedings/iscc/2018/6950/0",
"title": "2018 IEEE Symposium on Computers and Communications (ISCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNywfKxW",
"title": "Haptic Interfaces for Virtual Environment and Teleoperator Systems, International Symposium on",
"acronym": "haptics",
"groupId": "1000312",
"volume": "0",
"displayVolume": "0",
"year": "2003",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzDehah",
"doi": "10.1109/HAPTIC.2003.1191261",
"title": "Relative Performance Using Haptic and/or Touch-Produced Auditory Cues in a Remote Absolute Texture Identification Task",
"normalizedTitle": "Relative Performance Using Haptic and/or Touch-Produced Auditory Cues in a Remote Absolute Texture Identification Task",
"abstract": "The current study assessed the relative effectiveness with which unimodal tactile, unimodal touch-produced auditory, and bimodal tactile + auditory cues contribute to the performance of an absolute texture identification task via remote touch. The study contributes to our fundamental understanding of the unimodal perception and intersensory integration of multimodal surface texture cues generated during surface exploration with rigid probes. The results also have significant implications for the design of unimodal and multisensory displays for use with teleoperation and virtual environment systems, as it addresses which modality(ies) may be used to most effectively present sensory information about remotely explored surface textures.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The current study assessed the relative effectiveness with which unimodal tactile, unimodal touch-produced auditory, and bimodal tactile + auditory cues contribute to the performance of an absolute texture identification task via remote touch. The study contributes to our fundamental understanding of the unimodal perception and intersensory integration of multimodal surface texture cues generated during surface exploration with rigid probes. The results also have significant implications for the design of unimodal and multisensory displays for use with teleoperation and virtual environment systems, as it addresses which modality(ies) may be used to most effectively present sensory information about remotely explored surface textures.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The current study assessed the relative effectiveness with which unimodal tactile, unimodal touch-produced auditory, and bimodal tactile + auditory cues contribute to the performance of an absolute texture identification task via remote touch. The study contributes to our fundamental understanding of the unimodal perception and intersensory integration of multimodal surface texture cues generated during surface exploration with rigid probes. The results also have significant implications for the design of unimodal and multisensory displays for use with teleoperation and virtual environment systems, as it addresses which modality(ies) may be used to most effectively present sensory information about remotely explored surface textures.",
"fno": "18900151",
"keywords": [],
"authors": [
{
"affiliation": "Queen's University",
"fullName": "S. J. Lederman",
"givenName": "S. J.",
"surname": "Lederman",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Queen's University",
"fullName": "Andrea Martin",
"givenName": "Andrea",
"surname": "Martin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Queen's University",
"fullName": "Christine Tong",
"givenName": "Christine",
"surname": "Tong",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Carnegie Mellon University",
"fullName": "Roberta L. Klatzky",
"givenName": "Roberta L.",
"surname": "Klatzky",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "haptics",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2003-03-01T00:00:00",
"pubType": "proceedings",
"pages": "151",
"year": "2003",
"issn": null,
"isbn": "0-7695-1890-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "18900140",
"articleId": "12OmNAXglVY",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "18900159",
"articleId": "12OmNzw8iZz",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cisis/2011/4373/0/4373a053",
"title": "A Touch Screen Interface Design with Tactile Feedback",
"doi": null,
"abstractUrl": "/proceedings-article/cisis/2011/4373a053/12OmNAo45J4",
"parentPublication": {
"id": "proceedings/cisis/2011/4373/0",
"title": "2011 International Conference on Complex, Intelligent, and Software Intensive Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/haptics/2002/1489/0/14890097",
"title": "Integrating Multimodal Information about Surface Texture via a Probe: Relative Contributions of Haptic and Touch-Produced Sound Sources",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2002/14890097/12OmNBBzohw",
"parentPublication": {
"id": "proceedings/haptics/2002/1489/0",
"title": "Haptic Interfaces for Virtual Environment and Teleoperator Systems, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2008/2174/0/04760961",
"title": "Relative advantage of touch over vision in the exploration of texture",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04760961/12OmNBZYTsA",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/whc/2009/3858/0/04810857",
"title": "Effects of sounds on tactile roughness depend on the congruency between modalities",
"doi": null,
"abstractUrl": "/proceedings-article/whc/2009/04810857/12OmNwIHoBH",
"parentPublication": {
"id": "proceedings/whc/2009/3858/0",
"title": "World Haptics Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/haptics/2010/6821/0/05444650",
"title": "A finger attachment to generate tactile feedback and make 3D gesture detectable by touch panel sensor",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2010/05444650/12OmNxveNHK",
"parentPublication": {
"id": "proceedings/haptics/2010/6821/0",
"title": "2010 IEEE Haptics Symposium (Formerly known as Symposium on Haptic Interfaces for Virtual Environment and Teleoperator Systems)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2015/1727/0/07223412",
"title": "A modified tactile brush algorithm for complex touch gestures",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223412/12OmNyS6RDd",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2012/01/tth2012010085",
"title": "Virtual Active Touch: Perception of Virtual Gratings Wavelength through Pointing-Stick Interface",
"doi": null,
"abstractUrl": "/journal/th/2012/01/tth2012010085/13rRUILtJr4",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2016/02/07493742",
"title": "Biology to Technology in Active Touch Sensing – Introduction to the Special Section",
"doi": null,
"abstractUrl": "/journal/th/2016/02/07493742/13rRUwInuWz",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2004/01/mcg2004010036",
"title": "SmartTouch: Electric Skin to Touch the Untouchable",
"doi": null,
"abstractUrl": "/magazine/cg/2004/01/mcg2004010036/13rRUxASu3d",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a782",
"title": "Virtual Touch Modulates Perception of Pleasant Touch",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a782/1CJd7sjt0go",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1G55WEFExd6",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"acronym": "cvprw",
"groupId": "1001809",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1G561ezEc9O",
"doi": "10.1109/CVPRW56347.2022.00504",
"title": "Improving Multimodal Speech Recognition by Data Augmentation and Speech Representations",
"normalizedTitle": "Improving Multimodal Speech Recognition by Data Augmentation and Speech Representations",
"abstract": "Multimodal speech recognition aims to improve the performance of automatic speech recognition (ASR) systems by leveraging additional visual information that is usually associated to the audio input. While previous approaches make crucial use of strong visual representations, e.g. by finetuning pretrained image recognition networks, significantly less attention has been paid to its counterpart: the speech component. In this work, we investigate ways of improving the base speech recognition system by following similar techniques to the ones used for the visual encoder, namely, transferring representations and data augmentation. First, we show that starting from a pretrained ASR significantly improves the state-of-the-art performance; remarkably, even when building upon a strong unimodal system, we still find gains by including the visual modality. Second, we employ speech data augmentation techniques to encourage the multimodal system to attend to the visual stimuli. This technique replaces previously used word masking and comes with the benefits of being conceptually simpler and yielding consistent improvements in the multimodal setting. We provide empirical results on three multimodal datasets, including the newly introduced Localized Narratives.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Multimodal speech recognition aims to improve the performance of automatic speech recognition (ASR) systems by leveraging additional visual information that is usually associated to the audio input. While previous approaches make crucial use of strong visual representations, e.g. by finetuning pretrained image recognition networks, significantly less attention has been paid to its counterpart: the speech component. In this work, we investigate ways of improving the base speech recognition system by following similar techniques to the ones used for the visual encoder, namely, transferring representations and data augmentation. First, we show that starting from a pretrained ASR significantly improves the state-of-the-art performance; remarkably, even when building upon a strong unimodal system, we still find gains by including the visual modality. Second, we employ speech data augmentation techniques to encourage the multimodal system to attend to the visual stimuli. This technique replaces previously used word masking and comes with the benefits of being conceptually simpler and yielding consistent improvements in the multimodal setting. We provide empirical results on three multimodal datasets, including the newly introduced Localized Narratives.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Multimodal speech recognition aims to improve the performance of automatic speech recognition (ASR) systems by leveraging additional visual information that is usually associated to the audio input. While previous approaches make crucial use of strong visual representations, e.g. by finetuning pretrained image recognition networks, significantly less attention has been paid to its counterpart: the speech component. In this work, we investigate ways of improving the base speech recognition system by following similar techniques to the ones used for the visual encoder, namely, transferring representations and data augmentation. First, we show that starting from a pretrained ASR significantly improves the state-of-the-art performance; remarkably, even when building upon a strong unimodal system, we still find gains by including the visual modality. Second, we employ speech data augmentation techniques to encourage the multimodal system to attend to the visual stimuli. This technique replaces previously used word masking and comes with the benefits of being conceptually simpler and yielding consistent improvements in the multimodal setting. We provide empirical results on three multimodal datasets, including the newly introduced Localized Narratives.",
"fno": "873900e578",
"keywords": [
"Image Recognition",
"Speech Recognition",
"Speech Representations",
"Multimodal Speech Recognition Aims",
"Automatic Speech Recognition Systems",
"Audio Input",
"Image Recognition Networks",
"Speech Component",
"Base Speech Recognition System",
"Similar Techniques",
"Visual Encoder",
"Transferring Representations",
"Pretrained ASR",
"Strong Unimodal System",
"Visual Modality",
"Speech Data Augmentation Techniques",
"Multimodal System",
"Visual Stimuli",
"Consistent Improvements",
"Multimodal Setting",
"Multimodal Datasets",
"Training",
"Couplings",
"Visualization",
"Image Recognition",
"Keyword Search",
"Speech Recognition",
"Machine Learning"
],
"authors": [
{
"affiliation": "University Politehnica of Bucharest,Speech and Dialogue Research Laboratory,Romania",
"fullName": "Dan Oneaţă",
"givenName": "Dan",
"surname": "Oneaţă",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University Politehnica of Bucharest,Speech and Dialogue Research Laboratory,Romania",
"fullName": "Horia Cucu",
"givenName": "Horia",
"surname": "Cucu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvprw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-06-01T00:00:00",
"pubType": "proceedings",
"pages": "4578-4587",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-8739-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "873900e566",
"articleId": "1G56Ne3PbrO",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "873900e588",
"articleId": "1G56ddEn8D6",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icpp/2011/4510/0/4510a305",
"title": "CSR: A Cloud-Assisted Speech Recognition Service for Personal Mobile Device",
"doi": null,
"abstractUrl": "/proceedings-article/icpp/2011/4510a305/12OmNBtUdMo",
"parentPublication": {
"id": "proceedings/icpp/2011/4510/0",
"title": "2011 International Conference on Parallel Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/2004/8484/5/01327250",
"title": "Improved face and feature finding for audio-visual speech recognition in visually challenging environments",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/2004/01327250/12OmNC1GujE",
"parentPublication": {
"id": "proceedings/icassp/2004/8484/5",
"title": "2004 IEEE International Conference on Acoustics, Speech, and Signal Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2016/8851/0/8851d574",
"title": "Temporal Multimodal Learning in Audiovisual Speech Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2016/8851d574/12OmNqGiu9C",
"parentPublication": {
"id": "proceedings/cvpr/2016/8851/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2008/2174/0/04761927",
"title": "A phone-viseme dynamic Bayesian network for audio-visual automatic speech recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04761927/12OmNwbLVlK",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icii/2001/7010/3/00983084",
"title": "An efficient robust automatic speech recognition system based on the combination of speech enhancement and log-add HMM adaptation",
"doi": null,
"abstractUrl": "/proceedings-article/icii/2001/00983084/12OmNypIYAq",
"parentPublication": {
"id": "proceedings/icii/2001/7010/3",
"title": "2001 International Conferences on Info-tech and Info-net. Proceedings",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iset/2017/3031/0/08005425",
"title": "Improving English Pronunciation Via Automatic Speech Recognition Technology",
"doi": null,
"abstractUrl": "/proceedings-article/iset/2017/08005425/12OmNzVGcRw",
"parentPublication": {
"id": "proceedings/iset/2017/3031/0",
"title": "2017 International Symposium on Educational Technology (ISET)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acct/2013/4941/0/06524273",
"title": "Automatic Speech Reading by Oral Motion Tracking for User Authentication System",
"doi": null,
"abstractUrl": "/proceedings-article/acct/2013/06524273/12OmNzxgHqu",
"parentPublication": {
"id": "proceedings/acct/2013/4941/0",
"title": "2013 Third International Conference on Advanced Computing & Communication Technologies (ACCT 2013)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icnlp/2022/9544/0/954400a341",
"title": "DataShift: A Cross-Modal Data Augmentation Method for Speech Recognition and Machine Translation",
"doi": null,
"abstractUrl": "/proceedings-article/icnlp/2022/954400a341/1GNtrRYWjO8",
"parentPublication": {
"id": "proceedings/icnlp/2022/9544/0",
"title": "2022 4th International Conference on Natural Language Processing (ICNLP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/06/08977320",
"title": "Touch? Speech? or Touch and Speech? Investigating Multimodal Interaction for Visual Network Exploration and Analysis",
"doi": null,
"abstractUrl": "/journal/tg/2020/06/08977320/1h2AIkwYg4E",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800o4421",
"title": "Discriminative Multi-Modality Speech Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800o4421/1m3osCqH3yM",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1GhVTNddUvm",
"title": "2022 IEEE 35th International Symposium on Computer-Based Medical Systems (CBMS)",
"acronym": "cmbs",
"groupId": "1000153",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1GhW8bBO4iQ",
"doi": "10.1109/CBMS55023.2022.00042",
"title": "Leveraging Clinical BERT in Multimodal Mortality Prediction Models for COVID-19",
"normalizedTitle": "Leveraging Clinical BERT in Multimodal Mortality Prediction Models for COVID-19",
"abstract": "Clinical prediction models are often based solely on the use of structured data in electronic health records, e.g. vital parameters and laboratory results, effectively ignoring potentially valuable information recorded in other modalities, such as free-text clinical notes. Here, we report on the development of a multimodal model that combines structured and unstructured data. In particular, we study how best to make use of a clinical language model in a multimodal setup for predicting 30-day all-cause mortality upon hospital admission in patients with COVID-19. We evaluate three strategies for incorporating a domain-specific clinical BERT model in multimodal prediction systems: (i) without fine-tuning, (ii) with unimodal fine-tuning, and (iii) with multimodal fine-tuning. The best-performing model leverages multimodal fine-tuning, in which the clinical BERT model is updated based also on the structured data. This multimodal mortality prediction model is shown to outperform unimodal models that are based on using either only structured data or only unstructured data. The experimental results indicate that clinical prediction models can be improved by including data in other modalities and that multimodal fine-tuning of a clinical language model is an effective strategy for incorporating information from clinical notes in multimodal prediction systems.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Clinical prediction models are often based solely on the use of structured data in electronic health records, e.g. vital parameters and laboratory results, effectively ignoring potentially valuable information recorded in other modalities, such as free-text clinical notes. Here, we report on the development of a multimodal model that combines structured and unstructured data. In particular, we study how best to make use of a clinical language model in a multimodal setup for predicting 30-day all-cause mortality upon hospital admission in patients with COVID-19. We evaluate three strategies for incorporating a domain-specific clinical BERT model in multimodal prediction systems: (i) without fine-tuning, (ii) with unimodal fine-tuning, and (iii) with multimodal fine-tuning. The best-performing model leverages multimodal fine-tuning, in which the clinical BERT model is updated based also on the structured data. This multimodal mortality prediction model is shown to outperform unimodal models that are based on using either only structured data or only unstructured data. The experimental results indicate that clinical prediction models can be improved by including data in other modalities and that multimodal fine-tuning of a clinical language model is an effective strategy for incorporating information from clinical notes in multimodal prediction systems.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Clinical prediction models are often based solely on the use of structured data in electronic health records, e.g. vital parameters and laboratory results, effectively ignoring potentially valuable information recorded in other modalities, such as free-text clinical notes. Here, we report on the development of a multimodal model that combines structured and unstructured data. In particular, we study how best to make use of a clinical language model in a multimodal setup for predicting 30-day all-cause mortality upon hospital admission in patients with COVID-19. We evaluate three strategies for incorporating a domain-specific clinical BERT model in multimodal prediction systems: (i) without fine-tuning, (ii) with unimodal fine-tuning, and (iii) with multimodal fine-tuning. The best-performing model leverages multimodal fine-tuning, in which the clinical BERT model is updated based also on the structured data. This multimodal mortality prediction model is shown to outperform unimodal models that are based on using either only structured data or only unstructured data. The experimental results indicate that clinical prediction models can be improved by including data in other modalities and that multimodal fine-tuning of a clinical language model is an effective strategy for incorporating information from clinical notes in multimodal prediction systems.",
"fno": "677000a199",
"keywords": [
"Diseases",
"Electronic Health Records",
"Health Care",
"Hospitals",
"Medical Computing",
"Natural Language Processing",
"Neural Nets",
"Text Analysis",
"Multimodal Mortality Prediction Model",
"COVID 19",
"Clinical Prediction Models",
"Structured Data",
"Free Text Clinical Notes",
"Multimodal Model",
"Unstructured Data",
"Clinical Language Model",
"Domain Specific Clinical BERT Model",
"Unimodal Fine Tuning",
"Hospital Admission",
"Electronic Health Records",
"COVID 19",
"Hospitals",
"Computational Modeling",
"Bit Error Rate",
"Natural Languages",
"Predictive Models",
"Data Models",
"Natural Language Processing",
"Machine Learning",
"Language Models",
"Clinical BERT",
"Multimodal Learning",
"Electronic Health Records",
"Mortality Prediction",
"COVID 19"
],
"authors": [
{
"affiliation": "Stockholm University,Department of Computer and Systems Sciences (DSV),Kista,Sweden",
"fullName": "Yash Pawar",
"givenName": "Yash",
"surname": "Pawar",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Stockholm University,Department of Computer and Systems Sciences (DSV),Kista,Sweden",
"fullName": "Aron Henriksson",
"givenName": "Aron",
"surname": "Henriksson",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Solna (MedS), Karolinska Institutet,Division of Infectious Diseases,Department of Medicine,Stockholm,Sweden",
"fullName": "Pontus Hedberg",
"givenName": "Pontus",
"surname": "Hedberg",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Solna (MedS), Karolinska Institutet,Division of Infectious Diseases,Department of Medicine,Stockholm,Sweden",
"fullName": "Pontus Naucler",
"givenName": "Pontus",
"surname": "Naucler",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cmbs",
"isOpenAccess": true,
"showRecommendedArticles": true,
"showBuyMe": false,
"hasPdf": true,
"pubDate": "2022-07-01T00:00:00",
"pubType": "proceedings",
"pages": "199-204",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-6770-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "677000a193",
"articleId": "1GhVX5TqGFG",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "677000a205",
"articleId": "1GhW1ZbPaBG",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/big-data/2021/3902/0/09671302",
"title": "Multimodality Data Fusion for COVID-19 Diagnosis",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2021/09671302/1A8gQm5mBPO",
"parentPublication": {
"id": "proceedings/big-data/2021/3902/0",
"title": "2021 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hpcc-dss-smartcity-dependsys/2021/9457/0/945700b245",
"title": "Enhance COVID-19 Mortality Prediction with Human Mobility Trend and Medical Information",
"doi": null,
"abstractUrl": "/proceedings-article/hpcc-dss-smartcity-dependsys/2021/945700b245/1DNE5m7hn9e",
"parentPublication": {
"id": "proceedings/hpcc-dss-smartcity-dependsys/2021/9457/0",
"title": "2021 IEEE 23rd Int Conf on High Performance Computing & Communications; 7th Int Conf on Data Science & Systems; 19th Int Conf on Smart City; 7th Int Conf on Dependability in Sensor, Cloud & Big Data Systems & Application (HPCC/DSS/SmartCity/DependSys)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ichi/2022/6845/0/684500a192",
"title": "EventScore: An Automated Real-time Early Warning Score for Clinical Events",
"doi": null,
"abstractUrl": "/proceedings-article/ichi/2022/684500a192/1GvdEerfGGQ",
"parentPublication": {
"id": "proceedings/ichi/2022/6845/0",
"title": "2022 IEEE 10th International Conference on Healthcare Informatics (ICHI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ichi/2022/6845/0/684500a502",
"title": "Decompensation Prediction for Hospitalized COVID-19 Patients",
"doi": null,
"abstractUrl": "/proceedings-article/ichi/2022/684500a502/1GvdKyKB2De",
"parentPublication": {
"id": "proceedings/ichi/2022/6845/0",
"title": "2022 IEEE 10th International Conference on Healthcare Informatics (ICHI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/bd/2021/01/09311826",
"title": "Relational Learning Improves Prediction of Mortality in COVID-19 in the Intensive Care Unit",
"doi": null,
"abstractUrl": "/journal/bd/2021/01/09311826/1q0BvvMczXG",
"parentPublication": {
"id": "trans/bd",
"title": "IEEE Transactions on Big Data",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2020/6251/0/09378295",
"title": "Preventing litigation with a predictive model of COVID-19 ICUs occupancy",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2020/09378295/1s64EZ5Ij5K",
"parentPublication": {
"id": "proceedings/big-data/2020/6251/0",
"title": "2020 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ichi/2021/0132/0/013200a505",
"title": "Clinical Trial Information Extraction with BERT",
"doi": null,
"abstractUrl": "/proceedings-article/ichi/2021/013200a505/1xIORDDnlPW",
"parentPublication": {
"id": "proceedings/ichi/2021/0132/0",
"title": "2021 IEEE 9th International Conference on Healthcare Informatics (ICHI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ichi/2021/0132/0/013200a258",
"title": "Topic-to-Topic Modeling for COVID-19 Mortality",
"doi": null,
"abstractUrl": "/proceedings-article/ichi/2021/013200a258/1xIOSvxstjy",
"parentPublication": {
"id": "proceedings/ichi/2021/0132/0",
"title": "2021 IEEE 9th International Conference on Healthcare Informatics (ICHI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdh/2021/1685/0/168500a197",
"title": "COVID-19 Mortality Prediction Using Machine Learning Techniques",
"doi": null,
"abstractUrl": "/proceedings-article/icdh/2021/168500a197/1ymJeRuIdwI",
"parentPublication": {
"id": "proceedings/icdh/2021/1685/0",
"title": "2021 IEEE International Conference on Digital Health (ICDH)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iscc/2021/2744/0/09631477",
"title": "Machine Learning Techniques for Extracting Relevant Features from Clinical Data for COVID-19 Mortality Prediction",
"doi": null,
"abstractUrl": "/proceedings-article/iscc/2021/09631477/1zmvIjQw3vi",
"parentPublication": {
"id": "proceedings/iscc/2021/2744/0",
"title": "2021 IEEE Symposium on Computers and Communications (ISCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1jPbbHBGDHq",
"title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"acronym": "wacv",
"groupId": "1000040",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1jPbxi0Vk40",
"doi": "10.1109/WACV45572.2020.9093414",
"title": "Exploring Hate Speech Detection in Multimodal Publications",
"normalizedTitle": "Exploring Hate Speech Detection in Multimodal Publications",
"abstract": "In this work we target the problem of hate speech detection in multimodal publications formed by a text and an image. We gather and annotate a large scale dataset from Twitter, MMHS150K, and propose different models that jointly analyze textual and visual information for hate speech detection, comparing them with unimodal detection. We provide quantitative and qualitative results and analyze the challenges of the proposed task. We find that, even though images are useful for the hate speech detection task, current multimodal models cannot outperform models analyzing only text. We discuss why and open the field and the dataset for further research.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this work we target the problem of hate speech detection in multimodal publications formed by a text and an image. We gather and annotate a large scale dataset from Twitter, MMHS150K, and propose different models that jointly analyze textual and visual information for hate speech detection, comparing them with unimodal detection. We provide quantitative and qualitative results and analyze the challenges of the proposed task. We find that, even though images are useful for the hate speech detection task, current multimodal models cannot outperform models analyzing only text. We discuss why and open the field and the dataset for further research.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this work we target the problem of hate speech detection in multimodal publications formed by a text and an image. We gather and annotate a large scale dataset from Twitter, MMHS150K, and propose different models that jointly analyze textual and visual information for hate speech detection, comparing them with unimodal detection. We provide quantitative and qualitative results and analyze the challenges of the proposed task. We find that, even though images are useful for the hate speech detection task, current multimodal models cannot outperform models analyzing only text. We discuss why and open the field and the dataset for further research.",
"fno": "09093414",
"keywords": [
"Social Networking Online",
"Speech Recognition",
"Text Analysis",
"Current Multimodal Models",
"Multimodal Publications",
"Unimodal Detection",
"Hate Speech Detection Task",
"Twitter",
"MMHS 150 K",
"Visualization",
"Task Analysis",
"Voice Activity Detection",
"Twitter",
"Feature Extraction",
"Kernel"
],
"authors": [
{
"affiliation": "Unitat de Tecnologies Audiovisuals,Eurecat, Centre Tecnològic de Catalunya,Barcelona,Spain",
"fullName": "Raul Gomez",
"givenName": "Raul",
"surname": "Gomez",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Unitat de Tecnologies Audiovisuals,Eurecat, Centre Tecnològic de Catalunya,Barcelona,Spain",
"fullName": "Jaume Gibert",
"givenName": "Jaume",
"surname": "Gibert",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Universitat Autònoma de Barcelona,Computer Vision Center,Barcelona,Spain",
"fullName": "Lluis Gomez",
"givenName": "Lluis",
"surname": "Gomez",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Universitat Autònoma de Barcelona,Computer Vision Center,Barcelona,Spain",
"fullName": "Dimosthenis Karatzas",
"givenName": "Dimosthenis",
"surname": "Karatzas",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "wacv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-03-01T00:00:00",
"pubType": "proceedings",
"pages": "1459-1467",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-6553-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09093512",
"articleId": "1jPbcSZyXDy",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09093565",
"articleId": "1jPbr1ZPI88",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/bracis/2018/8023/0/802300a061",
"title": "Hate Speech Classification in Social Media Using Emotional Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/bracis/2018/802300a061/17D45Vw15uD",
"parentPublication": {
"id": "proceedings/bracis/2018/8023/0",
"title": "2018 7th Brazilian Conference on Intelligent Systems (BRACIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2021/3902/0/09671955",
"title": "Multi-modal Hate Speech Detection using Machine Learning",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2021/09671955/1A8h6j1PMBO",
"parentPublication": {
"id": "proceedings/big-data/2021/3902/0",
"title": "2021 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aciiw/2019/3891/0/08925079",
"title": "Leveraging Hate Speech Detection to Investigate Immigration-related Phenomena in Italy",
"doi": null,
"abstractUrl": "/proceedings-article/aciiw/2019/08925079/1fHFbA4BG24",
"parentPublication": {
"id": "proceedings/aciiw/2019/3891/0",
"title": "2019 8th International Conference on Affective Computing and Intelligent Interaction Workshops and Demos (ACIIW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdmw/2019/4896/0/489600a551",
"title": "MC-BERT4HATE: Hate Speech Detection using Multi-channel BERT for Different Languages and Translations",
"doi": null,
"abstractUrl": "/proceedings-article/icdmw/2019/489600a551/1gAwSYUaAhy",
"parentPublication": {
"id": "proceedings/icdmw/2019/4896/0",
"title": "2019 International Conference on Data Mining Workshops (ICDMW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icsc/2020/6332/0/633200a199",
"title": "Evaluating Semantic Feature Representations to Efficiently Detect Hate Intent on Social Media",
"doi": null,
"abstractUrl": "/proceedings-article/icsc/2020/633200a199/1iffB07OjkY",
"parentPublication": {
"id": "proceedings/icsc/2020/6332/0",
"title": "2020 IEEE 14th International Conference on Semantic Computing (ICSC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/ic/2021/01/09254135",
"title": "HateClassify: A Service Framework for Hate Speech Identification on Social Media",
"doi": null,
"abstractUrl": "/magazine/ic/2021/01/09254135/1oDXC9BOvok",
"parentPublication": {
"id": "mags/ic",
"title": "IEEE Internet Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/ic/2021/02/09238420",
"title": "Towards Hate Speech Detection at Large via Deep Generative Modeling",
"doi": null,
"abstractUrl": "/magazine/ic/2021/02/09238420/1oa0YeZjeq4",
"parentPublication": {
"id": "mags/ic",
"title": "IEEE Internet Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ictai/2020/9228/0/922800a526",
"title": "Deep Learning Ensembles for Hate Speech Detection",
"doi": null,
"abstractUrl": "/proceedings-article/ictai/2020/922800a526/1pP3zXcMGC4",
"parentPublication": {
"id": "proceedings/ictai/2020/9228/0",
"title": "2020 IEEE 32nd International Conference on Tools with Artificial Intelligence (ICTAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bigcomp/2021/8924/0/892400a346",
"title": "QUARC: Quaternion Multi-Modal Fusion Architecture For Hate Speech Classification",
"doi": null,
"abstractUrl": "/proceedings-article/bigcomp/2021/892400a346/1rRcdggDS8M",
"parentPublication": {
"id": "proceedings/bigcomp/2021/8924/0",
"title": "2021 IEEE International Conference on Big Data and Smart Computing (BigComp)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/asonam/2020/1056/0/09381473",
"title": "Graph-Based Methods to Detect Hate Speech Diffusion on Twitter",
"doi": null,
"abstractUrl": "/proceedings-article/asonam/2020/09381473/1semwUSbS4E",
"parentPublication": {
"id": "proceedings/asonam/2020/1056/0",
"title": "2020 IEEE/ACM International Conference on Advances in Social Networks Analysis and Mining (ASONAM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1m3n9N02qgE",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1m3ojQrj4iY",
"doi": "10.1109/CVPR42600.2020.01330",
"title": "MMTM: Multimodal Transfer Module for CNN Fusion",
"normalizedTitle": "MMTM: Multimodal Transfer Module for CNN Fusion",
"abstract": "In late fusion, each modality is processed in a separate unimodal Convolutional Neural Network (CNN) stream and the scores of each modality are fused at the end. Due to its simplicity, late fusion is still the predominant approach in many state-of-the-art multimodal applications. In this paper, we present a simple neural network module for leveraging the knowledge from multiple modalities in convolutional neural networks. The proposed unit, named Multimodal Transfer Module (MMTM), can be added at different levels of the feature hierarchy, enabling slow modality fusion. Using squeeze and excitation operations, MMTM utilizes the knowledge of multiple modalities to recalibrate the channel-wise features in each CNN stream. Unlike other intermediate fusion methods, the proposed module could be used for feature modality fusion in convolution layers with different spatial dimensions. Another advantage of the proposed method is that it could be added among unimodal branches with minimum changes in the their network architectures, allowing each branch to be initialized with existing pretrained weights. Experimental results show that our framework improves the recognition accuracy of well-known multimodal networks. We demonstrate state-of-the-art or competitive performance on four datasets that span the task domains of dynamic hand gesture recognition, speech enhancement, and action recognition with RGB and body joints.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In late fusion, each modality is processed in a separate unimodal Convolutional Neural Network (CNN) stream and the scores of each modality are fused at the end. Due to its simplicity, late fusion is still the predominant approach in many state-of-the-art multimodal applications. In this paper, we present a simple neural network module for leveraging the knowledge from multiple modalities in convolutional neural networks. The proposed unit, named Multimodal Transfer Module (MMTM), can be added at different levels of the feature hierarchy, enabling slow modality fusion. Using squeeze and excitation operations, MMTM utilizes the knowledge of multiple modalities to recalibrate the channel-wise features in each CNN stream. Unlike other intermediate fusion methods, the proposed module could be used for feature modality fusion in convolution layers with different spatial dimensions. Another advantage of the proposed method is that it could be added among unimodal branches with minimum changes in the their network architectures, allowing each branch to be initialized with existing pretrained weights. Experimental results show that our framework improves the recognition accuracy of well-known multimodal networks. We demonstrate state-of-the-art or competitive performance on four datasets that span the task domains of dynamic hand gesture recognition, speech enhancement, and action recognition with RGB and body joints.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In late fusion, each modality is processed in a separate unimodal Convolutional Neural Network (CNN) stream and the scores of each modality are fused at the end. Due to its simplicity, late fusion is still the predominant approach in many state-of-the-art multimodal applications. In this paper, we present a simple neural network module for leveraging the knowledge from multiple modalities in convolutional neural networks. The proposed unit, named Multimodal Transfer Module (MMTM), can be added at different levels of the feature hierarchy, enabling slow modality fusion. Using squeeze and excitation operations, MMTM utilizes the knowledge of multiple modalities to recalibrate the channel-wise features in each CNN stream. Unlike other intermediate fusion methods, the proposed module could be used for feature modality fusion in convolution layers with different spatial dimensions. Another advantage of the proposed method is that it could be added among unimodal branches with minimum changes in the their network architectures, allowing each branch to be initialized with existing pretrained weights. Experimental results show that our framework improves the recognition accuracy of well-known multimodal networks. We demonstrate state-of-the-art or competitive performance on four datasets that span the task domains of dynamic hand gesture recognition, speech enhancement, and action recognition with RGB and body joints.",
"fno": "716800n3286",
"keywords": [
"Convolutional Neural Nets",
"Feature Extraction",
"Learning Artificial Intelligence",
"Neural Net Architecture",
"Sensor Fusion",
"Sensor Data",
"Data Level Fusion",
"Machine Learning",
"Squeeze And Excitation Operations",
"Channel Wise Features",
"Unimodal Convolutional Neural Network",
"Multimodal Transfer Module",
"Network Architectures",
"CNN Fusion",
"MMTM",
"Gesture Recognition",
"Speech Enhancement",
"Three Dimensional Displays",
"Task Analysis",
"Speech Recognition",
"Visualization",
"Neural Networks"
],
"authors": [
{
"affiliation": "Microsoft",
"fullName": "Hamid Reza Vaezi Joze",
"givenName": "Hamid Reza",
"surname": "Vaezi Joze",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Georgia Tech",
"fullName": "Amirreza Shaban",
"givenName": "Amirreza",
"surname": "Shaban",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "CU Boulder",
"fullName": "Michael L. Iuzzolino",
"givenName": "Michael L.",
"surname": "Iuzzolino",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Microsoft",
"fullName": "Kazuhito Koishida",
"givenName": "Kazuhito",
"surname": "Koishida",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-06-01T00:00:00",
"pubType": "proceedings",
"pages": "13286-13296",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-7168-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "716800n3275",
"articleId": "1m3o1L6nI5i",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "716800n3297",
"articleId": "1m3ntdqTjji",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icapr/2009/3520/0/04782772",
"title": "Qualitative Weight Assignment for Multimodal Biometric Fusion",
"doi": null,
"abstractUrl": "/proceedings-article/icapr/2009/04782772/12OmNCh0Pbl",
"parentPublication": {
"id": "proceedings/icapr/2009/3520/0",
"title": "Advances in Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acii/2009/4800/0/05349552",
"title": "PAD-based multimodal affective fusion",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2009/05349552/12OmNrAv3Jr",
"parentPublication": {
"id": "proceedings/acii/2009/4800/0",
"title": "2009 3rd International Conference on Affective Computing and Intelligent Interaction and Workshops (ACII 2009)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2018/3788/0/08545061",
"title": "Multi-Level Feature Abstraction from Convolutional Neural Networks for Multimodal Biometric Identification",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2018/08545061/17D45XfSEVa",
"parentPublication": {
"id": "proceedings/icpr/2018/3788/0",
"title": "2018 24th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2021/0126/0/09669634",
"title": "MAIN: Multimodal Attention-based Fusion Networks for Diagnosis Prediction",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2021/09669634/1A9WmdkM7u0",
"parentPublication": {
"id": "proceedings/bibm/2021/0126/0",
"title": "2021 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/5555/01/09783089",
"title": "Learning to Learn Better Unimodal Representations via Adaptive Multimodal Meta-Learning",
"doi": null,
"abstractUrl": "/journal/ta/5555/01/09783089/1DIwPgX2QjS",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600u0675",
"title": "Multimodal Dynamics: Dynamical Fusion for Trustworthy Multimodal Classification",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600u0675/1H0NoYlOJHy",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2022/9062/0/09956114",
"title": "IMCN: Identifying Modal Contribution Network for Multimodal Sentiment Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2022/09956114/1IHoIacv1uM",
"parentPublication": {
"id": "proceedings/icpr/2022/9062/0",
"title": "2022 26th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2022/9062/0/09955646",
"title": "Sensor Fusion and Multimodal Learning for Robotic Grasp Verification Using Neural Networks",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2022/09955646/1IHpshKFLX2",
"parentPublication": {
"id": "proceedings/icpr/2022/9062/0",
"title": "2022 26th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2021/3864/0/09428321",
"title": "Dense Fusion Network with Multimodal Residual for Sentiment Classification",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2021/09428321/1uilJpPtNdK",
"parentPublication": {
"id": "proceedings/icme/2021/3864/0",
"title": "2021 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/avss/2021/3396/0/09663737",
"title": "D4FLY Multimodal Biometric Database: multimodal fusion evaluation envisaging on-the-move biometric-based border control",
"doi": null,
"abstractUrl": "/proceedings-article/avss/2021/09663737/1zUZ7caeuLS",
"parentPublication": {
"id": "proceedings/avss/2021/3396/0",
"title": "2021 17th IEEE International Conference on Advanced Video and Signal Based Surveillance (AVSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
End of preview. Expand
in Dataset Viewer.
- Downloads last month
- 34