data (dict)
{
"issue": {
"id": "1Jv6pC6iiPe",
"title": "Feb.",
"year": "2023",
"issueNum": "02",
"idPrefix": "tg",
"pubType": "journal",
"volume": "29",
"label": "Feb.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1Jv6wZRSJq0",
"doi": "10.1109/TVCG.2022.3219303",
"abstract": "Presents the editorial for this issue of the publication",
"abstracts": [
{
"abstractType": "Regular",
"content": "Presents the editorial for this issue of the publication",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Presents the editorial for this issue of the publication",
"title": "Farewell and New EIC Introduction",
"normalizedTitle": "Farewell and New EIC Introduction",
"fno": "10003073",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [],
"authors": [
{
"givenName": "Klaus",
"surname": "Mueller",
"fullName": "Klaus Mueller",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "02",
"pubDate": "2023-02-01 00:00:00",
"pubType": "trans",
"pages": "1299-1300",
"year": "2023",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tm/2008/04/ttm2008040385",
"title": "Editorial: EIC Farewell and New EIC Introduction",
"doi": null,
"abstractUrl": "/journal/tm/2008/04/ttm2008040385/13rRUwIF69R",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/td/2009/12/ttd2009121713",
"title": "Editorial: EIC Farewell and New EIC Introduction",
"doi": null,
"abstractUrl": "/journal/td/2009/12/ttd2009121713/13rRUx0gezw",
"parentPublication": {
"id": "trans/td",
"title": "IEEE Transactions on Parallel & Distributed Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/lt/2013/01/tlt2013010001",
"title": "EiC team farewell and new EiC team introduction",
"doi": null,
"abstractUrl": "/journal/lt/2013/01/tlt2013010001/13rRUxAATd5",
"parentPublication": {
"id": "trans/lt",
"title": "IEEE Transactions on Learning Technologies",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/01/06966881",
"title": "EIC Farewell and New EIC Introduction",
"doi": null,
"abstractUrl": "/journal/tg/2015/01/06966881/13rRUxBa565",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/lt/2012/02/tlt2012020098",
"title": "EIC Editorial and Introduction of New Associate Editors",
"doi": null,
"abstractUrl": "/journal/lt/2012/02/tlt2012020098/13rRUxBrGcA",
"parentPublication": {
"id": "trans/lt",
"title": "IEEE Transactions on Learning Technologies",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tm/2015/05/07065388",
"title": "EIC Editorial",
"doi": null,
"abstractUrl": "/journal/tm/2015/05/07065388/13rRUyYjK5I",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2016/10/07563940",
"title": "EIC Editorial",
"doi": null,
"abstractUrl": "/journal/tk/2016/10/07563940/13rRUynHujx",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/td/2022/09/09696236",
"title": "EiC Editorial – Advancing Reproducibility in Parallel and Distributed Systems Research",
"doi": null,
"abstractUrl": "/journal/td/2022/09/09696236/1Ax61k0Lqq4",
"parentPublication": {
"id": "trans/td",
"title": "IEEE Transactions on Parallel & Distributed Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/cc/2020/01/09027028",
"title": "Farewell Editorial",
"doi": null,
"abstractUrl": "/journal/cc/2020/01/09027028/1hYGlWc7xNm",
"parentPublication": {
"id": "trans/cc",
"title": "IEEE Transactions on Cloud Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tb/2021/06/09642424",
"title": "EIC Farewell and New EIC Introduction",
"doi": null,
"abstractUrl": "/journal/tb/2021/06/09642424/1zarErdh7XO",
"parentPublication": {
"id": "trans/tb",
"title": "IEEE/ACM Transactions on Computational Biology and Bioinformatics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": null,
"next": {
"fno": "09537699",
"articleId": "1wTiueApSAU",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
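The record above shows the shape shared by every `data` entry: an `issue` object, an `article` object carrying `authors`, `abstracts`, and publication metadata, plus `recommendedArticles` and `adjacentArticles`. As a non-authoritative sketch only, the snippet below reads those fields with Python's standard `json` module; the one-record-per-line `records.jsonl` file name is a hypothetical stand-in for however these rows are actually stored.

```python
import json

# Minimal sketch, assuming each "data" record is stored as one JSON object per
# line in a hypothetical records.jsonl file. All field names used here appear
# verbatim in the record shown above.
with open("records.jsonl") as f:
    for line in f:
        record = json.loads(line)
        issue = record["issue"]
        article = record["article"]

        # Basic bibliographic fields.
        print(article["doi"], "-", article["title"], f'(pp. {article["pages"]})')
        print("issue:", issue["idPrefix"], f'vol. {issue["volume"]}',
              f'no. {issue["issueNum"]}', issue["year"])

        # Authors: affiliation may be null in the source data.
        for author in article["authors"]:
            print("  author:", author["fullName"], "-", author.get("affiliation"))

        # Related items are nested objects with their own "__typename" markers.
        for rec in record["recommendedArticles"]:
            print("  related:", rec["title"], rec["abstractUrl"])
```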
{
"issue": {
"id": "1zarv24nAkg",
"title": "Nov.-Dec.",
"year": "2021",
"issueNum": "06",
"idPrefix": "tb",
"pubType": "journal",
"volume": "18",
"label": "Nov.-Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1zarErdh7XO",
"doi": "10.1109/TCBB.2021.3125125",
"abstract": null,
"abstracts": [],
"normalizedAbstract": null,
"title": "EIC Farewell and New EIC Introduction",
"normalizedTitle": "EIC Farewell and New EIC Introduction",
"fno": "09642424",
"hasPdf": true,
"idPrefix": "tb",
"keywords": [],
"authors": [
{
"givenName": "Aidong",
"surname": "Zhang",
"fullName": "Aidong Zhang",
"affiliation": "School of Computational Science and Engineering, Georgia Institute of Technology, Atlanta, GA, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "06",
"pubDate": "2021-11-01 00:00:00",
"pubType": "trans",
"pages": "2057-2057",
"year": "2021",
"issn": "1545-5963",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/td/2006/01/01549809",
"title": "Editorial: EIC Farewell and New EIC Introduction",
"doi": null,
"abstractUrl": "/journal/td/2006/01/01549809/13rRUILLkuX",
"parentPublication": {
"id": "trans/td",
"title": "IEEE Transactions on Parallel & Distributed Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/03/ttg2011030261",
"title": "Editorial: EIC Farewell and New EIC Introduction",
"doi": null,
"abstractUrl": "/journal/tg/2011/03/ttg2011030261/13rRUILLkvk",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2007/01/v0001",
"title": "Editorial: EIC Farewell and New EIC Introduction",
"doi": null,
"abstractUrl": "/journal/tg/2007/01/v0001/13rRUIM2VGW",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tm/2008/04/ttm2008040385",
"title": "Editorial: EIC Farewell and New EIC Introduction",
"doi": null,
"abstractUrl": "/journal/tm/2008/04/ttm2008040385/13rRUwIF69R",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/td/2009/12/ttd2009121713",
"title": "Editorial: EIC Farewell and New EIC Introduction",
"doi": null,
"abstractUrl": "/journal/td/2009/12/ttd2009121713/13rRUx0gezw",
"parentPublication": {
"id": "trans/td",
"title": "IEEE Transactions on Parallel & Distributed Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/lt/2013/01/tlt2013010001",
"title": "EiC team farewell and new EiC team introduction",
"doi": null,
"abstractUrl": "/journal/lt/2013/01/tlt2013010001/13rRUxAATd5",
"parentPublication": {
"id": "trans/lt",
"title": "IEEE Transactions on Learning Technologies",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/01/06966881",
"title": "EIC Farewell and New EIC Introduction",
"doi": null,
"abstractUrl": "/journal/tg/2015/01/06966881/13rRUxBa565",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/td/2013/12/ttd2013122322",
"title": "Editor's Note: EIC Farewell and New EIC Introduction",
"doi": null,
"abstractUrl": "/journal/td/2013/12/ttd2013122322/13rRUxZ0o18",
"parentPublication": {
"id": "trans/td",
"title": "IEEE Transactions on Parallel & Distributed Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/03/08629339",
"title": "Farewell and New EIC Introduction",
"doi": null,
"abstractUrl": "/journal/tg/2019/03/08629339/17D45We0UD7",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/02/10003073",
"title": "Farewell and New EIC Introduction",
"doi": null,
"abstractUrl": "/journal/tg/2023/02/10003073/1Jv6wZRSJq0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": null,
"next": {
"fno": "09642422",
"articleId": "1zarvQplbtC",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
{
"issue": {
"id": "12OmNx4yvun",
"title": "March",
"year": "2015",
"issueNum": "03",
"idPrefix": "tg",
"pubType": "journal",
"volume": "21",
"label": "March",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUIJuxpC",
"doi": "10.1109/TVCG.2014.2360406",
"abstract": "This paper addresses a challenging single-view modeling and animation problem with cartoon images. Our goal is to model the hairs in a given cartoon image with consistent layering and occlusion, so that we can produce various visual effects from just a single image. We propose a novel 2.5D modeling approach to deal with this problem. Given an input image, we first segment the hairs of the cartoon character into regions of hair strands. Then, we apply our novel layering metric, which is derived from the Gestalt psychology, to automatically optimize the depth ordering among the hair strands. After that, we employ our hair completion method to fill the occluded part of each hair strand, and create a 2.5D model of the cartoon hair. By using this model, we can produce various visual effects, e.g., we develop a simplified fluid simulation model to produce wind blowing animations with the 2.5D hairs. To further demonstrate the applicability and versatility of our method, we compare our results with real cartoon hair animations, and also apply our model to produce a wide variety of hair manipulation effects, including hair editing and hair braiding.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper addresses a challenging single-view modeling and animation problem with cartoon images. Our goal is to model the hairs in a given cartoon image with consistent layering and occlusion, so that we can produce various visual effects from just a single image. We propose a novel 2.5D modeling approach to deal with this problem. Given an input image, we first segment the hairs of the cartoon character into regions of hair strands. Then, we apply our novel layering metric, which is derived from the Gestalt psychology, to automatically optimize the depth ordering among the hair strands. After that, we employ our hair completion method to fill the occluded part of each hair strand, and create a 2.5D model of the cartoon hair. By using this model, we can produce various visual effects, e.g., we develop a simplified fluid simulation model to produce wind blowing animations with the 2.5D hairs. To further demonstrate the applicability and versatility of our method, we compare our results with real cartoon hair animations, and also apply our model to produce a wide variety of hair manipulation effects, including hair editing and hair braiding.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper addresses a challenging single-view modeling and animation problem with cartoon images. Our goal is to model the hairs in a given cartoon image with consistent layering and occlusion, so that we can produce various visual effects from just a single image. We propose a novel 2.5D modeling approach to deal with this problem. Given an input image, we first segment the hairs of the cartoon character into regions of hair strands. Then, we apply our novel layering metric, which is derived from the Gestalt psychology, to automatically optimize the depth ordering among the hair strands. After that, we employ our hair completion method to fill the occluded part of each hair strand, and create a 2.5D model of the cartoon hair. By using this model, we can produce various visual effects, e.g., we develop a simplified fluid simulation model to produce wind blowing animations with the 2.5D hairs. To further demonstrate the applicability and versatility of our method, we compare our results with real cartoon hair animations, and also apply our model to produce a wide variety of hair manipulation effects, including hair editing and hair braiding.",
"title": "2.5D Cartoon Hair Modeling and Manipulation",
"normalizedTitle": "2.5D Cartoon Hair Modeling and Manipulation",
"fno": "06910280",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Hair",
"Junctions",
"Animation",
"Computational Modeling",
"Measurement",
"Image Edge Detection",
"Solid Modeling",
"Single View Modeling",
"Cartoon",
"Still Image Animation",
"2 5 D Modeling",
"Layering",
"Single View Modeling",
"Cartoon",
"Still Image Animation",
"2 5 D Modeling",
"Layering"
],
"authors": [
{
"givenName": "Chih-Kuo",
"surname": "Yeh",
"fullName": "Chih-Kuo Yeh",
"affiliation": "Department of Computer Science and Information Engineering, National Cheng-Kung University, Taiwan, Republic of China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Pradeep Kumar",
"surname": "Jayaraman",
"fullName": "Pradeep Kumar Jayaraman",
"affiliation": "School of Computer Engineering, Nanyang Technological University, Singapore",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xiaopei",
"surname": "Liu",
"fullName": "Xiaopei Liu",
"affiliation": "School of Computer Engineering, Nanyang Technological University, Singapore",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Chi-Wing",
"surname": "Fu",
"fullName": "Chi-Wing Fu",
"affiliation": "School of Computer Engineering, Nanyang Technological University, Singapore",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Tong-Yee",
"surname": "Lee",
"fullName": "Tong-Yee Lee",
"affiliation": "Department of Computer Science and Information Engineering, National Cheng-Kung University, Taiwan, Republic of China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "03",
"pubDate": "2015-03-01 00:00:00",
"pubType": "trans",
"pages": "304-314",
"year": "2015",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iv/2001/1195/0/11950186",
"title": "A Design Tool for the Hierarchical Hair Model",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2001/11950186/12OmNA2cYzp",
"parentPublication": {
"id": "proceedings/iv/2001/1195/0",
"title": "Proceedings Fifth International Conference on Information Visualisation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgi/2004/2171/0/21710060",
"title": "Modelling and Animating Cartoon Hair with NURBS Surfaces",
"doi": null,
"abstractUrl": "/proceedings-article/cgi/2004/21710060/12OmNBSjJ3G",
"parentPublication": {
"id": "proceedings/cgi/2004/2171/0",
"title": "Proceedings. Computer Graphics International",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2016/3568/0/3568a001",
"title": "Puppeteering 2.5D Models",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2016/3568a001/12OmNBpEeMN",
"parentPublication": {
"id": "proceedings/sibgrapi/2016/3568/0",
"title": "2016 29th SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ca/1999/0167/0/01670058",
"title": "Visible Volume Buffer for Efficient Hair Expression and Shadow Generation",
"doi": null,
"abstractUrl": "/proceedings-article/ca/1999/01670058/12OmNCbkQC8",
"parentPublication": {
"id": "proceedings/ca/1999/0167/0",
"title": "Computer Animation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2015/7673/0/7673a185",
"title": "Real-Time 2.5D Facial Cartoon Animation Based on Pose and Expression Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2015/7673a185/12OmNvSKNRM",
"parentPublication": {
"id": "proceedings/icvrv/2015/7673/0",
"title": "2015 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sbgames/2009/3963/0/3963a185",
"title": "Procedural Hair Generation",
"doi": null,
"abstractUrl": "/proceedings-article/sbgames/2009/3963a185/12OmNzgwmRo",
"parentPublication": {
"id": "proceedings/sbgames/2009/3963/0",
"title": "2009 VIII Brazilian Symposium on Games and Digital Entertainment",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2001/03/mcg2001030036",
"title": "V-HairStudio: An Interactive Tool for Hair Design",
"doi": null,
"abstractUrl": "/magazine/cg/2001/03/mcg2001030036/13rRUwInvDe",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/07/07448467",
"title": "Adaptive Skinning for Interactive Hair-Solid Simulation",
"doi": null,
"abstractUrl": "/journal/tg/2017/07/07448467/13rRUygBw7e",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/03/08301570",
"title": "A Skinned Tetrahedral Mesh for Hair Animation and Hair-Water Interaction",
"doi": null,
"abstractUrl": "/journal/tg/2019/03/08301570/17D45W9KVHk",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/04/09220808",
"title": "Real-Time Hair Simulation With Neural Interpolation",
"doi": null,
"abstractUrl": "/journal/tg/2022/04/09220808/1nRLElyFvfG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": null,
"next": {
"fno": "06919322",
"articleId": "13rRUxOdD8j",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXWRLw",
"name": "ttg201503-06910280s1.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg201503-06910280s1.zip",
"extension": "zip",
"size": "80.8 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
{
"issue": {
"id": "12OmNwE9OmB",
"title": "May/June",
"year": "2001",
"issueNum": "03",
"idPrefix": "cg",
"pubType": "magazine",
"volume": "21",
"label": "May/June",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwInvDe",
"doi": "10.1109/38.920625",
"abstract": "Modeling, designing and rendering human hair is a challenging problem in computer graphics. The difficulty comes mainly from the amount of hair to be modeled, and the fine details of the individual hairs. In this article, the authors introduce an interactive hair designing system based on a cluster hair model. The generalized cylinder is used to specify the envelope shape of a hair cluster, so that the design and manipulation of hair can be globally and efficiently performed. The detail of the hair is then modeled by a volume density model in which a randomly generated density map on the hair base is projected into and deformed along the generalized cylinder. The system provides a set of functions for creating and manipulating hair clusters on a head model.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Modeling, designing and rendering human hair is a challenging problem in computer graphics. The difficulty comes mainly from the amount of hair to be modeled, and the fine details of the individual hairs. In this article, the authors introduce an interactive hair designing system based on a cluster hair model. The generalized cylinder is used to specify the envelope shape of a hair cluster, so that the design and manipulation of hair can be globally and efficiently performed. The detail of the hair is then modeled by a volume density model in which a randomly generated density map on the hair base is projected into and deformed along the generalized cylinder. The system provides a set of functions for creating and manipulating hair clusters on a head model.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Modeling, designing and rendering human hair is a challenging problem in computer graphics. The difficulty comes mainly from the amount of hair to be modeled, and the fine details of the individual hairs. In this article, the authors introduce an interactive hair designing system based on a cluster hair model. The generalized cylinder is used to specify the envelope shape of a hair cluster, so that the design and manipulation of hair can be globally and efficiently performed. The detail of the hair is then modeled by a volume density model in which a randomly generated density map on the hair base is projected into and deformed along the generalized cylinder. The system provides a set of functions for creating and manipulating hair clusters on a head model.",
"title": "V-HairStudio: An Interactive Tool for Hair Design",
"normalizedTitle": "V-HairStudio: An Interactive Tool for Hair Design",
"fno": "mcg2001030036",
"hasPdf": true,
"idPrefix": "cg",
"keywords": [
"Hair Modeling And Rendering",
"Hair Style Design",
"Interactive Design System"
],
"authors": [
{
"givenName": "Zhan",
"surname": "Xu",
"fullName": "Zhan Xu",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xue Dong",
"surname": "Yang",
"fullName": "Xue Dong Yang",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": false,
"isOpenAccess": false,
"issueNum": "03",
"pubDate": "2001-05-01 00:00:00",
"pubType": "mags",
"pages": "36-43",
"year": "2001",
"issn": "0272-1716",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "mcg2001030024",
"articleId": "13rRUEgs2Or",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "mcg2001030044",
"articleId": "13rRUILLkxT",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
{
"issue": {
"id": "12OmNz5apxc",
"title": "July",
"year": "2017",
"issueNum": "07",
"idPrefix": "tg",
"pubType": "journal",
"volume": "23",
"label": "July",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUygBw7e",
"doi": "10.1109/TVCG.2016.2551242",
"abstract": "Reduced hair models have proven successful for interactively simulating a full head of hair strands, building upon a fundamental assumption that only a small set of guide hairs are needed for explicit simulation, and the rest of the hair move coherently and thus can be interpolated using guide hairs. Unfortunately, hair-solid interactions is a pathological case for traditional reduced hair models, as the motion coherence between hair strands can be arbitrarily broken by interacting with solids. In this paper, we propose an adaptive hair skinning method for interactive hair simulation with hair-solid collisions. We precompute many eligible sets of guide hairs and the corresponding interpolation relationships that are represented using a compact strand-based hair skinning model. At runtime, we simulate only guide hairs; for interpolating every other hair, we adaptively choose its guide hairs, taking into account motion coherence and potential hair-solid collisions. Further, we introduce a two-way collision correction algorithm to allow sparsely sampled guide hairs to resolve collisions with solids that can have small geometric features. Our method enables interactive simulation of more than 150 K hair strands interacting with complex solid objects, using 400 guide hairs. We demonstrate the efficiency and robustness of the method with various hairstyles and user-controlled arbitrary hair-solid interactions.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Reduced hair models have proven successful for interactively simulating a full head of hair strands, building upon a fundamental assumption that only a small set of guide hairs are needed for explicit simulation, and the rest of the hair move coherently and thus can be interpolated using guide hairs. Unfortunately, hair-solid interactions is a pathological case for traditional reduced hair models, as the motion coherence between hair strands can be arbitrarily broken by interacting with solids. In this paper, we propose an adaptive hair skinning method for interactive hair simulation with hair-solid collisions. We precompute many eligible sets of guide hairs and the corresponding interpolation relationships that are represented using a compact strand-based hair skinning model. At runtime, we simulate only guide hairs; for interpolating every other hair, we adaptively choose its guide hairs, taking into account motion coherence and potential hair-solid collisions. Further, we introduce a two-way collision correction algorithm to allow sparsely sampled guide hairs to resolve collisions with solids that can have small geometric features. Our method enables interactive simulation of more than 150 K hair strands interacting with complex solid objects, using 400 guide hairs. We demonstrate the efficiency and robustness of the method with various hairstyles and user-controlled arbitrary hair-solid interactions.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Reduced hair models have proven successful for interactively simulating a full head of hair strands, building upon a fundamental assumption that only a small set of guide hairs are needed for explicit simulation, and the rest of the hair move coherently and thus can be interpolated using guide hairs. Unfortunately, hair-solid interactions is a pathological case for traditional reduced hair models, as the motion coherence between hair strands can be arbitrarily broken by interacting with solids. In this paper, we propose an adaptive hair skinning method for interactive hair simulation with hair-solid collisions. We precompute many eligible sets of guide hairs and the corresponding interpolation relationships that are represented using a compact strand-based hair skinning model. At runtime, we simulate only guide hairs; for interpolating every other hair, we adaptively choose its guide hairs, taking into account motion coherence and potential hair-solid collisions. Further, we introduce a two-way collision correction algorithm to allow sparsely sampled guide hairs to resolve collisions with solids that can have small geometric features. Our method enables interactive simulation of more than 150 K hair strands interacting with complex solid objects, using 400 guide hairs. We demonstrate the efficiency and robustness of the method with various hairstyles and user-controlled arbitrary hair-solid interactions.",
"title": "Adaptive Skinning for Interactive Hair-Solid Simulation",
"normalizedTitle": "Adaptive Skinning for Interactive Hair-Solid Simulation",
"fno": "07448467",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Hair",
"Adaptation Models",
"Computational Modeling",
"Solids",
"Runtime",
"Interpolation",
"Animation",
"Hair Simulation",
"Interactive Method",
"Reduced Model",
"Adaptivity",
"Collision Correction"
],
"authors": [
{
"givenName": "Menglei",
"surname": "Chai",
"fullName": "Menglei Chai",
"affiliation": "State Key Laboratory of CAD&CG, Zhejiang University, Hangzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Changxi",
"surname": "Zheng",
"fullName": "Changxi Zheng",
"affiliation": "Department of Computer Science, Columbia University, 616 Schapiro (CEPSR), New York, NY",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Kun",
"surname": "Zhou",
"fullName": "Kun Zhou",
"affiliation": "State Key Laboratory of CAD&CG, Zhejiang University, Hangzhou, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "07",
"pubDate": "2017-07-01 00:00:00",
"pubType": "trans",
"pages": "1725-1738",
"year": "2017",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2013/4989/0/4989a265",
"title": "Wide-Baseline Hair Capture Using Strand-Based Refinement",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2013/4989a265/12OmNA0MYZM",
"parentPublication": {
"id": "proceedings/cvpr/2013/4989/0",
"title": "2013 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2001/1195/0/11950186",
"title": "A Design Tool for the Hierarchical Hair Model",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2001/11950186/12OmNA2cYzp",
"parentPublication": {
"id": "proceedings/iv/2001/1195/0",
"title": "Proceedings Fifth International Conference on Information Visualisation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ca/1999/0167/0/01670058",
"title": "Visible Volume Buffer for Efficient Hair Expression and Shadow Generation",
"doi": null,
"abstractUrl": "/proceedings-article/ca/1999/01670058/12OmNCbkQC8",
"parentPublication": {
"id": "proceedings/ca/1999/0167/0",
"title": "Computer Animation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/simultech/2014/060/0/07095029",
"title": "2D hair strands generation based on template matching",
"doi": null,
"abstractUrl": "/proceedings-article/simultech/2014/07095029/12OmNx5GU7n",
"parentPublication": {
"id": "proceedings/simultech/2014/060/0",
"title": "2014 International Conference on Simulation and Modeling Methodologies, Technologies and Applications (SIMULTECH)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sbgames/2009/3963/0/3963a185",
"title": "Procedural Hair Generation",
"doi": null,
"abstractUrl": "/proceedings-article/sbgames/2009/3963a185/12OmNzgwmRo",
"parentPublication": {
"id": "proceedings/sbgames/2009/3963/0",
"title": "2009 VIII Brazilian Symposium on Games and Digital Entertainment",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/03/06910280",
"title": "2.5D Cartoon Hair Modeling and Manipulation",
"doi": null,
"abstractUrl": "/journal/tg/2015/03/06910280/13rRUIJuxpC",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2001/03/mcg2001030036",
"title": "V-HairStudio: An Interactive Tool for Hair Design",
"doi": null,
"abstractUrl": "/magazine/cg/2001/03/mcg2001030036/13rRUwInvDe",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2006/02/v0131",
"title": "Real-Time Animation of Complex Hairstyles",
"doi": null,
"abstractUrl": "/journal/tg/2006/02/v0131/13rRUxZzAhw",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/03/08301570",
"title": "A Skinned Tetrahedral Mesh for Hair Animation and Hair-Water Interaction",
"doi": null,
"abstractUrl": "/journal/tg/2019/03/08301570/17D45W9KVHk",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/04/09220808",
"title": "Real-Time Hair Simulation With Neural Interpolation",
"doi": null,
"abstractUrl": "/journal/tg/2022/04/09220808/1nRLElyFvfG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": null,
"next": {
"fno": "07473883",
"articleId": "13rRUxly8T1",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXWRXS",
"name": "ttg201707-07448467s1.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg201707-07448467s1.zip",
"extension": "zip",
"size": "49.3 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
{
"issue": {
"id": "1BhzoX5mYSY",
"title": "April",
"year": "2022",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "28",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1nRLElyFvfG",
"doi": "10.1109/TVCG.2020.3029823",
"abstract": "Traditionally, reduced hair simulation methods are either restricted to heuristic approximations or bound to specific hairstyles. We introduce the first CNN-integrated framework for simulating various hairstyles. The approach produces visually realistic hairs with an interactive speed. To address the technical challenges, our hair simulation pipeline is designed as a two-stage process. First, we present a fully-convolutional neural interpolator as the backbone generator to compute dynamic weights for guide hair interpolation. Then, we adopt a second generator to produce fine-scale displacements to enhance the hair details. We train the neural interpolator with a dedicated loss function and the displacement generator with an adversarial discriminator. Experimental results demonstrate that our method is effective, efficient, and superior to the state-of-the-art on a wide variety of hairstyles. We further propose a performance-driven digital avatar system and an interactive hairstyle editing tool to illustrate the practical applications.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Traditionally, reduced hair simulation methods are either restricted to heuristic approximations or bound to specific hairstyles. We introduce the first CNN-integrated framework for simulating various hairstyles. The approach produces visually realistic hairs with an interactive speed. To address the technical challenges, our hair simulation pipeline is designed as a two-stage process. First, we present a fully-convolutional neural interpolator as the backbone generator to compute dynamic weights for guide hair interpolation. Then, we adopt a second generator to produce fine-scale displacements to enhance the hair details. We train the neural interpolator with a dedicated loss function and the displacement generator with an adversarial discriminator. Experimental results demonstrate that our method is effective, efficient, and superior to the state-of-the-art on a wide variety of hairstyles. We further propose a performance-driven digital avatar system and an interactive hairstyle editing tool to illustrate the practical applications.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Traditionally, reduced hair simulation methods are either restricted to heuristic approximations or bound to specific hairstyles. We introduce the first CNN-integrated framework for simulating various hairstyles. The approach produces visually realistic hairs with an interactive speed. To address the technical challenges, our hair simulation pipeline is designed as a two-stage process. First, we present a fully-convolutional neural interpolator as the backbone generator to compute dynamic weights for guide hair interpolation. Then, we adopt a second generator to produce fine-scale displacements to enhance the hair details. We train the neural interpolator with a dedicated loss function and the displacement generator with an adversarial discriminator. Experimental results demonstrate that our method is effective, efficient, and superior to the state-of-the-art on a wide variety of hairstyles. We further propose a performance-driven digital avatar system and an interactive hairstyle editing tool to illustrate the practical applications.",
"title": "Real-Time Hair Simulation With Neural Interpolation",
"normalizedTitle": "Real-Time Hair Simulation With Neural Interpolation",
"fno": "09220808",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Avatars",
"Computer Animation",
"Convolutional Neural Nets",
"Interpolation",
"Realistic Images",
"Neural Interpolation",
"CNN Integrated Framework",
"Fully Convolutional Neural Interpolator",
"Backbone Generator",
"Dynamic Weights",
"Guide Hair Interpolation",
"Fine Scale Displacements",
"Dedicated Loss Function",
"Displacement Generator",
"Real Time Hair Simulation",
"Hairstyle Simulation",
"Visually Realistic Hair",
"Adversarial Discriminator",
"Performance Driven Digital Avatar System",
"Interactive Hairstyle Editing Tool",
"Hair Detail Enhancement",
"Hair",
"Computational Modeling",
"Interpolation",
"Data Models",
"Generators",
"Shape",
"Neural Networks",
"Real Time Hair Simulation",
"Neural Interpolator",
"Generative Models",
"Computer Animation",
"CNN",
"GAN"
],
"authors": [
{
"givenName": "Qing",
"surname": "Lyu",
"fullName": "Qing Lyu",
"affiliation": "State Key Lab of CAD&CG, Zhejiang University, Hangzhou, Zhejiang, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Menglei",
"surname": "Chai",
"fullName": "Menglei Chai",
"affiliation": "Snap Research, Venice, CA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xiang",
"surname": "Chen",
"fullName": "Xiang Chen",
"affiliation": "State Key Lab of CAD&CG, Zhejiang University, Hangzhou, Zhejiang, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Kun",
"surname": "Zhou",
"fullName": "Kun Zhou",
"affiliation": "State Key Lab of CAD&CG, Zhejiang University, Hangzhou, Zhejiang, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "2022-04-01 00:00:00",
"pubType": "trans",
"pages": "1894-1905",
"year": "2022",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/casa/2003/1934/0/19340041",
"title": "Modeling Hair Using Level-of-Detail Representations",
"doi": null,
"abstractUrl": "/proceedings-article/casa/2003/19340041/12OmNA1DMn9",
"parentPublication": {
"id": "proceedings/casa/2003/1934/0",
"title": "Computer Animation and Social Agents, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2001/1195/0/11950186",
"title": "A Design Tool for the Hierarchical Hair Model",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2001/11950186/12OmNA2cYzp",
"parentPublication": {
"id": "proceedings/iv/2001/1195/0",
"title": "Proceedings Fifth International Conference on Information Visualisation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgi/2004/2171/0/21710248",
"title": "Real-Time Rendering of Human Hair Using Programmable Graphics Hardware",
"doi": null,
"abstractUrl": "/proceedings-article/cgi/2004/21710248/12OmNzdoMwf",
"parentPublication": {
"id": "proceedings/cgi/2004/2171/0",
"title": "Proceedings. Computer Graphics International",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/03/06910280",
"title": "2.5D Cartoon Hair Modeling and Manipulation",
"doi": null,
"abstractUrl": "/journal/tg/2015/03/06910280/13rRUIJuxpC",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2006/02/v0131",
"title": "Real-Time Animation of Complex Hairstyles",
"doi": null,
"abstractUrl": "/journal/tg/2006/02/v0131/13rRUxZzAhw",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/07/07448467",
"title": "Adaptive Skinning for Interactive Hair-Solid Simulation",
"doi": null,
"abstractUrl": "/journal/tg/2017/07/07448467/13rRUygBw7e",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2017/2636/0/263600a336",
"title": "Spatial-Temporal Editing for Dynamic Hair Data",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2017/263600a336/1ap5zrlhVio",
"parentPublication": {
"id": "proceedings/icvrv/2017/2636/0",
"title": "2017 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/07/08964443",
"title": "DeepSketchHair: Deep Sketch-Based 3D Hair Modeling",
"doi": null,
"abstractUrl": "/journal/tg/2021/07/08964443/1gLZSnCp3Ko",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/nicoint/2020/8771/0/09122356",
"title": "Viewpoint Selection for Sketch-based Hairstyle Modeling",
"doi": null,
"abstractUrl": "/proceedings-article/nicoint/2020/09122356/1kRSfSP7OpO",
"parentPublication": {
"id": "proceedings/nicoint/2020/8771/0",
"title": "2020 Nicograph International (NicoInt)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2019/4752/0/09212824",
"title": "Automatic Hair Modeling from One Image",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2019/09212824/1nHRUrDMgE0",
"parentPublication": {
"id": "proceedings/icvrv/2019/4752/0",
"title": "2019 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09200517",
"articleId": "1ndVuuNfI64",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09217964",
"articleId": "1nL7s9kgWRy",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1BhzsaokGly",
"name": "ttg202204-09220808s1-tvcg-3029823-mm.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg202204-09220808s1-tvcg-3029823-mm.zip",
"extension": "zip",
"size": "28.5 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
{
"issue": {
"id": "12OmNCbCrUN",
"title": "Dec.",
"year": "2013",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwvT9gt",
"doi": "10.1109/TVCG.2013.131",
"abstract": "We describe a framework to explore and visualize the movement of cloud systems. Using techniques from computational topology and computer vision, our framework allows the user to study this movement at various scales in space and time. Such movements could have large temporal and spatial scales such as the Madden Julian Oscillation (MJO), which has a spatial scale ranging from 1000 km to 10000 km and time of oscillation of around 40 days. Embedded within these larger scale oscillations are a hierarchy of cloud clusters which could have smaller spatial and temporal scales such as the Nakazawa cloud clusters. These smaller cloud clusters, while being part of the equatorial MJO, sometimes move at speeds different from the larger scale and in a direction opposite to that of the MJO envelope. Hitherto, one could only speculate about such movements by selectively analysing data and a priori knowledge of such systems. Our framework automatically delineates such cloud clusters and does not depend on the prior experience of the user to define cloud clusters. Analysis using our framework also shows that most tropical systems such as cyclones also contain multi-scale interactions between clouds and cloud systems. We show the effectiveness of our framework to track organized cloud system during one such rainfall event which happened at Mumbai, India in July 2005 and for cyclone Aila which occurred in Bay of Bengal during May 2009.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We describe a framework to explore and visualize the movement of cloud systems. Using techniques from computational topology and computer vision, our framework allows the user to study this movement at various scales in space and time. Such movements could have large temporal and spatial scales such as the Madden Julian Oscillation (MJO), which has a spatial scale ranging from 1000 km to 10000 km and time of oscillation of around 40 days. Embedded within these larger scale oscillations are a hierarchy of cloud clusters which could have smaller spatial and temporal scales such as the Nakazawa cloud clusters. These smaller cloud clusters, while being part of the equatorial MJO, sometimes move at speeds different from the larger scale and in a direction opposite to that of the MJO envelope. Hitherto, one could only speculate about such movements by selectively analysing data and a priori knowledge of such systems. Our framework automatically delineates such cloud clusters and does not depend on the prior experience of the user to define cloud clusters. Analysis using our framework also shows that most tropical systems such as cyclones also contain multi-scale interactions between clouds and cloud systems. We show the effectiveness of our framework to track organized cloud system during one such rainfall event which happened at Mumbai, India in July 2005 and for cyclone Aila which occurred in Bay of Bengal during May 2009.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We describe a framework to explore and visualize the movement of cloud systems. Using techniques from computational topology and computer vision, our framework allows the user to study this movement at various scales in space and time. Such movements could have large temporal and spatial scales such as the Madden Julian Oscillation (MJO), which has a spatial scale ranging from 1000 km to 10000 km and time of oscillation of around 40 days. Embedded within these larger scale oscillations are a hierarchy of cloud clusters which could have smaller spatial and temporal scales such as the Nakazawa cloud clusters. These smaller cloud clusters, while being part of the equatorial MJO, sometimes move at speeds different from the larger scale and in a direction opposite to that of the MJO envelope. Hitherto, one could only speculate about such movements by selectively analysing data and a priori knowledge of such systems. Our framework automatically delineates such cloud clusters and does not depend on the prior experience of the user to define cloud clusters. Analysis using our framework also shows that most tropical systems such as cyclones also contain multi-scale interactions between clouds and cloud systems. We show the effectiveness of our framework to track organized cloud system during one such rainfall event which happened at Mumbai, India in July 2005 and for cyclone Aila which occurred in Bay of Bengal during May 2009.",
"title": "An Exploration Framework to Identify and Track Movement of Cloud Systems",
"normalizedTitle": "An Exploration Framework to Identify and Track Movement of Cloud Systems",
"fno": "ttg2013122896",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Clouds",
"Tracking",
"Brightness Temperature",
"Meteorology",
"Optical Imaging",
"Data Visualization",
"Level Set",
"Split Tree",
"Clouds",
"Tracking",
"Brightness Temperature",
"Meteorology",
"Optical Imaging",
"Data Visualization",
"Level Set",
"Weather And Climate Simulations",
"Cloud Clusters",
"Tracking",
"Computational Topology"
],
"authors": [
{
"givenName": "Harish",
"surname": "Doraiswamy",
"fullName": "Harish Doraiswamy",
"affiliation": "Dept. of Comput. Sci. & Eng., Polytech. Inst. of New York Univ., New York, NY, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Vijay",
"surname": "Natarajan",
"fullName": "Vijay Natarajan",
"affiliation": "Dept. of Comput. Sci. & Autom., Indian Inst. of Sci., Bangalore, India",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ravi S.",
"surname": "Nanjundiah",
"fullName": "Ravi S. Nanjundiah",
"affiliation": "Centre for Atmos. & Oceanic Sci., Indian Inst. of Sci., Bangalore, India",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "12",
"pubDate": "2013-12-01 00:00:00",
"pubType": "trans",
"pages": "2896-2905",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/compsacw/2014/3578/0/3578a301",
"title": "m-cloud -- Distributed Statistical Computation Using Multiple Cloud Computers",
"doi": null,
"abstractUrl": "/proceedings-article/compsacw/2014/3578a301/12OmNBDgZ0x",
"parentPublication": {
"id": "proceedings/compsacw/2014/3578/0",
"title": "2014 IEEE 38th International Computer Software and Applications Conference Workshops (COMPSACW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mdm/2012/4713/0/4713a276",
"title": "Social and Sensor Data Fusion in the Cloud",
"doi": null,
"abstractUrl": "/proceedings-article/mdm/2012/4713a276/12OmNxFaLFq",
"parentPublication": {
"id": "proceedings/mdm/2012/4713/0",
"title": "2012 IEEE 13th International Conference on Mobile Data Management",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/eait/2014/4272/0/4272a279",
"title": "Temperature Induced Mean Based Cloud Motion Prediction Model for Multiple Cloud Clusters in Satellite Infrared Images",
"doi": null,
"abstractUrl": "/proceedings-article/eait/2014/4272a279/12OmNxFsmIe",
"parentPublication": {
"id": "proceedings/eait/2014/4272/0",
"title": "2014 Fourth International Conference of Emerging Applications of Information Technology (EAIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cloud/2014/5063/0/5063a721",
"title": "The Use of Distributed Processing and Cloud Computing in Agricultural Decision-Making Support Systems",
"doi": null,
"abstractUrl": "/proceedings-article/cloud/2014/5063a721/12OmNxXUhSQ",
"parentPublication": {
"id": "proceedings/cloud/2014/5063/0",
"title": "2014 IEEE 7th International Conference on Cloud Computing (CLOUD)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ccgrid/2010/4039/0/4039a312",
"title": "Representing Eager Evaluation in a Demand Driven Model of Streams on Cloud Infrastructure",
"doi": null,
"abstractUrl": "/proceedings-article/ccgrid/2010/4039a312/12OmNy2rS6w",
"parentPublication": {
"id": "proceedings/ccgrid/2010/4039/0",
"title": "Cluster Computing and the Grid, IEEE International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/services/2016/2616/0/2616a097",
"title": "The IEEE Services Track on Software Engineering for/in the Cloud",
"doi": null,
"abstractUrl": "/proceedings-article/services/2016/2616a097/12OmNywfKAd",
"parentPublication": {
"id": "proceedings/services/2016/2616/0",
"title": "2016 IEEE World Congress on Services (SERVICES)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mobilecloud/2015/8977/0/8977a135",
"title": "Cloud-Based Programmable Sensor Data Provision",
"doi": null,
"abstractUrl": "/proceedings-article/mobilecloud/2015/8977a135/12OmNzWfp3F",
"parentPublication": {
"id": "proceedings/mobilecloud/2015/8977/0",
"title": "2015 3rd IEEE International Conference on Mobile Cloud Computing, Services, and Engineering (MobileCloud)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/kam/2011/1788/0/06137683",
"title": "The Research on Fesibility of Fog Detection Using HJ Dat",
"doi": null,
"abstractUrl": "/proceedings-article/kam/2011/06137683/12OmNzZWbz2",
"parentPublication": {
"id": "proceedings/kam/2011/1788/0",
"title": "2011 Fourth International Symposium on Knowledge Acquisition and Modeling",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fit/2014/7505/0/7505a208",
"title": "secCloudSim: Secure Cloud Simulator",
"doi": null,
"abstractUrl": "/proceedings-article/fit/2014/7505a208/12OmNzxPTKD",
"parentPublication": {
"id": "proceedings/fit/2014/7505/0",
"title": "2014 12th International Conference on Frontiers of Information Technology (FIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ic2e/2014/3766/0/3766a343",
"title": "Cloud QoS Scaling by Fuzzy Logic",
"doi": null,
"abstractUrl": "/proceedings-article/ic2e/2014/3766a343/12OmNzxgHpL",
"parentPublication": {
"id": "proceedings/ic2e/2014/3766/0",
"title": "2014 IEEE International Conference on Cloud Engineering (IC2E)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013122886",
"articleId": "13rRUxD9gXI",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013122906",
"articleId": "13rRUNvgyWn",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
{
"issue": {
"id": "12OmNyOq4VG",
"title": "Sept.-Oct.",
"year": "2013",
"issueNum": "05",
"idPrefix": "cs",
"pubType": "magazine",
"volume": "15",
"label": "Sept.-Oct.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUx0geji",
"doi": "10.1109/MCSE.2012.90",
"abstract": "One of the current challenges in tropical cyclone (TC) research is how to improve our understanding of TC interannual variability and the impact of climate change on TCs. Recent advances in global modeling, visualization, and supercomputing technologies at NASA show potential for such studies. In this article, the authors discuss recent scalability improvement to the multiscale modeling framework (MMF) that makes it feasible to perform long-term TC-resolving simulations. The MMF consists of the finite-volume general circulation model (fvGCM), supplemented by a copy of the Goddard cumulus ensemble model (GCE) at each of the fvGCM grid points, giving 13,104 GCE copies. The original fvGCM implementation has a 1D data decomposition; the revised MMF implementation retains the 1D decomposition for most of the code, but uses a 2D decomposition for the massive copies of GCEs. Because the vast majority of computation time in the MMF is spent computing the GCEs, this approach can achieve excellent speedup without incurring the cost of modifying the entire code. Intelligent process mapping allows differing numbers of processes to be assigned to each domain for load balancing. The revised parallel implementation shows highly promising scalability, obtaining a nearly 80-fold speedup by increasing the number of cores from 30 to 3,335.",
"abstracts": [
{
"abstractType": "Regular",
"content": "One of the current challenges in tropical cyclone (TC) research is how to improve our understanding of TC interannual variability and the impact of climate change on TCs. Recent advances in global modeling, visualization, and supercomputing technologies at NASA show potential for such studies. In this article, the authors discuss recent scalability improvement to the multiscale modeling framework (MMF) that makes it feasible to perform long-term TC-resolving simulations. The MMF consists of the finite-volume general circulation model (fvGCM), supplemented by a copy of the Goddard cumulus ensemble model (GCE) at each of the fvGCM grid points, giving 13,104 GCE copies. The original fvGCM implementation has a 1D data decomposition; the revised MMF implementation retains the 1D decomposition for most of the code, but uses a 2D decomposition for the massive copies of GCEs. Because the vast majority of computation time in the MMF is spent computing the GCEs, this approach can achieve excellent speedup without incurring the cost of modifying the entire code. Intelligent process mapping allows differing numbers of processes to be assigned to each domain for load balancing. The revised parallel implementation shows highly promising scalability, obtaining a nearly 80-fold speedup by increasing the number of cores from 30 to 3,335.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "One of the current challenges in tropical cyclone (TC) research is how to improve our understanding of TC interannual variability and the impact of climate change on TCs. Recent advances in global modeling, visualization, and supercomputing technologies at NASA show potential for such studies. In this article, the authors discuss recent scalability improvement to the multiscale modeling framework (MMF) that makes it feasible to perform long-term TC-resolving simulations. The MMF consists of the finite-volume general circulation model (fvGCM), supplemented by a copy of the Goddard cumulus ensemble model (GCE) at each of the fvGCM grid points, giving 13,104 GCE copies. The original fvGCM implementation has a 1D data decomposition; the revised MMF implementation retains the 1D decomposition for most of the code, but uses a 2D decomposition for the massive copies of GCEs. Because the vast majority of computation time in the MMF is spent computing the GCEs, this approach can achieve excellent speedup without incurring the cost of modifying the entire code. Intelligent process mapping allows differing numbers of processes to be assigned to each domain for load balancing. The revised parallel implementation shows highly promising scalability, obtaining a nearly 80-fold speedup by increasing the number of cores from 30 to 3,335.",
"title": "Improving NASA's Multiscale Modeling Framework for Tropical Cyclone Climate Study",
"normalizedTitle": "Improving NASA's Multiscale Modeling Framework for Tropical Cyclone Climate Study",
"fno": "mcs2013050056",
"hasPdf": true,
"idPrefix": "cs",
"keywords": [
"Atmospheric Movements",
"Data Handling",
"Finite Volume Methods",
"Geophysics Computing",
"Learning Artificial Intelligence",
"Meteorology",
"Parallel Processing",
"Resource Allocation",
"Scalability",
"Revised Parallel Implementation",
"Load Balancing",
"Intelligent Process Mapping",
"2 D Data Decomposition",
"1 D Data Decomposition",
"GCE",
"Goddard Cumulus Ensemble Model",
"Fv GCM",
"Finite Volume General Circulation Model",
"Long Term TC Resolving Simulations",
"MMF",
"Supercomputing Technology",
"Visualization Technology",
"Global Modeling Technology",
"Climate Change",
"NASA Multiscale Modeling Framework",
"National Aeronautics And Space Administration",
"Tropical Cyclone Climate Study",
"TC Research",
"TC Interannual Variability",
"Atmospheric Modeling",
"Clouds",
"Computational Modeling",
"NASA",
"Meteorology",
"Hurricanes",
"Tropical Cyclones",
"NASA",
"Distributed Programming",
"Hurricane Modeling",
"Climate Modeling",
"Software",
"Software Engineering",
"Scientific Computing"
],
"authors": [
{
"givenName": "Bo-Wen",
"surname": "Shen",
"fullName": "Bo-Wen Shen",
"affiliation": "University of Maryland, College Park, and NASA Goddard Space Flight Center",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Bron",
"surname": "Nelson",
"fullName": "Bron Nelson",
"affiliation": "NASA Ames Research Center",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Samson",
"surname": "Cheung",
"fullName": "Samson Cheung",
"affiliation": "NASA Ames Research Center",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Wei-Kuo",
"surname": "Tao",
"fullName": "Wei-Kuo Tao",
"affiliation": "NASA Goddard Space Flight Center",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "05",
"pubDate": "2013-09-01 00:00:00",
"pubType": "mags",
"pages": "56-67",
"year": "2013",
"issn": "1521-9615",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/apwc-on-cse/2014/1955/0/07053845",
"title": "An expert system to assess the landfall propensity of a tropical cyclone in Australia",
"doi": null,
"abstractUrl": "/proceedings-article/apwc-on-cse/2014/07053845/12OmNvpew9D",
"parentPublication": {
"id": "proceedings/apwc-on-cse/2014/1955/0",
"title": "2014 Asia-Pacific World Congress on Computer Science and Engineering (APWC on CSE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdma/2013/5016/0/5016a641",
"title": "Grey Correlation Analysis of Tropical Cyclone Landing Time",
"doi": null,
"abstractUrl": "/proceedings-article/icdma/2013/5016a641/12OmNwBT1ml",
"parentPublication": {
"id": "proceedings/icdma/2013/5016/0",
"title": "2013 Fourth International Conference on Digital Manufacturing & Automation (ICDMA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hpcmp-ugc/2010/986/0/06018005",
"title": "Tropical Cyclone Track and Intensity Predictability",
"doi": null,
"abstractUrl": "/proceedings-article/hpcmp-ugc/2010/06018005/12OmNxEjY4p",
"parentPublication": {
"id": "proceedings/hpcmp-ugc/2010/986/0",
"title": "2010 DoD High Performance Computing Modernization Program Users Group Conference (HPCMP-UGC 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccms/2010/3941/4/3941d120",
"title": "Effect of Buffer Zone Size on the Simulation of Tropical Cyclone Track in Regional Climate Model RegCM3",
"doi": null,
"abstractUrl": "/proceedings-article/iccms/2010/3941d120/12OmNywOWOr",
"parentPublication": {
"id": "proceedings/iccms/2010/3941/4",
"title": "Computer Modeling and Simulation, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icii/2001/7010/3/00983116",
"title": "Tropical cyclone center location with digital image process",
"doi": null,
"abstractUrl": "/proceedings-article/icii/2001/00983116/12OmNzSyCfW",
"parentPublication": {
"id": "proceedings/icii/2001/7010/3",
"title": "2001 International Conferences on Info-tech and Info-net. Proceedings",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cso/2012/1365/0/06274832",
"title": "A Rapid Loss Index for Tropical Cyclone Disasters in China",
"doi": null,
"abstractUrl": "/proceedings-article/cso/2012/06274832/12OmNzayN0a",
"parentPublication": {
"id": "proceedings/cso/2012/1365/0",
"title": "2012 Fifth International Joint Conference on Computational Sciences and Optimization (CSO)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cs/2011/01/mcs2011010031",
"title": "Diagnosing Tropical Cyclone Sensitivity",
"doi": null,
"abstractUrl": "/magazine/cs/2011/01/mcs2011010031/13rRUB7a1jD",
"parentPublication": {
"id": "mags/cs",
"title": "Computing in Science & Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cs/2013/02/mcs2013020047",
"title": "Advanced Visualizations of Scale Interactions of Tropical Cyclone Formation and Tropical Waves",
"doi": null,
"abstractUrl": "/magazine/cs/2013/02/mcs2013020047/13rRUy3xYez",
"parentPublication": {
"id": "mags/cs",
"title": "Computing in Science & Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/01/08440837",
"title": "Visualizing Uncertain Tropical Cyclone Predictions using Representative Samples from Ensembles of Forecast Tracks",
"doi": null,
"abstractUrl": "/journal/tg/2019/01/08440837/17D45XeKgnt",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ithings-greencom-cpscom-smartdata/2019/2980/0/298000a575",
"title": "Tropical Cyclone Maximum Wind Estimation from Infrared Satellite Data with Integrated Convolutional Neural Networks",
"doi": null,
"abstractUrl": "/proceedings-article/ithings-greencom-cpscom-smartdata/2019/298000a575/1ehBzeBtn32",
"parentPublication": {
"id": "proceedings/ithings-greencom-cpscom-smartdata/2019/2980/0",
"title": "2019 International Conference on Internet of Things (iThings) and IEEE Green Computing and Communications (GreenCom) and IEEE Cyber, Physical and Social Computing (CPSCom) and IEEE Smart Data (SmartData)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "mcs2013050042",
"articleId": "13rRUILLkzl",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "mcs2013050068",
"articleId": "13rRUILtJuQ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
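The MMF record above hinges on a hybrid decomposition: the fvGCM keeps its 1-D decomposition while the 13,104 embedded GCE copies are spread over a 2-D decomposition, with process mapping giving each domain a share of ranks proportional to its cost. The sketch below (Python, with an assumed 144 x 91 column grid and an invented 1:9 cost ratio; not the authors' code) only illustrates that bookkeeping.

```python
# Minimal sketch (not the authors' code): distributing ~13,104 GCE copies
# laid out on a 2-D grid of fvGCM columns across worker ranks, plus a simple
# proportional process-mapping rule for load balancing. Grid dimensions and
# the cost model are illustrative assumptions.

def split_evenly(n_items: int, n_parts: int) -> list[range]:
    """Split n_items indices into n_parts contiguous, near-equal ranges."""
    base, extra = divmod(n_items, n_parts)
    ranges, start = [], 0
    for p in range(n_parts):
        size = base + (1 if p < extra else 0)
        ranges.append(range(start, start + size))
        start += size
    return ranges

def decompose_2d(nx: int, ny: int, px: int, py: int):
    """2-D block decomposition: each (i, j) rank owns a tile of grid columns."""
    x_blocks, y_blocks = split_evenly(nx, px), split_evenly(ny, py)
    return {(i, j): (x_blocks[i], y_blocks[j])
            for i in range(px) for j in range(py)}

def ranks_per_domain(costs: dict[str, float], total_ranks: int) -> dict[str, int]:
    """Assign each domain a rank count proportional to its estimated cost."""
    total = sum(costs.values())
    return {d: max(1, round(total_ranks * c / total)) for d, c in costs.items()}

if __name__ == "__main__":
    # Hypothetical sizes: a 144 x 91 grid of fvGCM columns (~13,104 GCEs).
    tiles = decompose_2d(nx=144, ny=91, px=16, py=8)
    print(len(tiles), "tiles; tile (0, 0) covers", tiles[(0, 0)])
    # The GCE copies dominate the cost, so they receive most of the ranks.
    print(ranks_per_domain({"fvGCM": 1.0, "GCE_copies": 9.0}, total_ranks=3335))
```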
{
"issue": {
"id": "12OmNwFid7w",
"title": "Jan.",
"year": "2019",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "25",
"label": "Jan.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "17D45XeKgnt",
"doi": "10.1109/TVCG.2018.2865193",
"abstract": "A common approach to sampling the space of a prediction is the generation of an ensemble of potential outcomes, where the ensemble's distribution reveals the statistical structure of the prediction space. For example, the US National Hurricane Center generates multiple day predictions for a storm's path, size, and wind speed, and then uses a Monte Carlo approach to sample this prediction into a large ensemble of potential storm outcomes. Various forms of summary visualizations are generated from such an ensemble, often using spatial spread to indicate its statistical characteristics. However, studies have shown that changes in the size of such summary glyphs, representing changes in the uncertainty of the prediction, are frequently confounded with other attributes of the phenomenon, such as its size or strength. In addition, simulation ensembles typically encode multivariate information, which can be difficult or confusing to include in a summary display. This problem can be overcome by directly displaying the ensemble as a set of annotated trajectories, however this solution will not be effective if ensembles are densely overdrawn or structurally disorganized. We propose to overcome these difficulties by selectively sampling the original ensemble, constructing a smaller representative and spatially well organized ensemble. This can be drawn directly as a set of paths that implicitly reveals the underlying spatial uncertainty distribution of the prediction. Since this approach does not use a visual channel to encode uncertainty, additional information can more easily be encoded in the display without leading to visual confusion. To demonstrate our argument, we describe the development of a visualization for ensembles of tropical cyclone forecast tracks, explaining how their spatial and temporal predictions, as well as other crucial storm characteristics such as size and intensity, can be clearly revealed. We verify the effectiveness of this visualization approach through a cognitive study exploring how storm damage estimates are affected by the density of tracks drawn, and by the presence or absence of annotating information on storm size and intensity.",
"abstracts": [
{
"abstractType": "Regular",
"content": "A common approach to sampling the space of a prediction is the generation of an ensemble of potential outcomes, where the ensemble's distribution reveals the statistical structure of the prediction space. For example, the US National Hurricane Center generates multiple day predictions for a storm's path, size, and wind speed, and then uses a Monte Carlo approach to sample this prediction into a large ensemble of potential storm outcomes. Various forms of summary visualizations are generated from such an ensemble, often using spatial spread to indicate its statistical characteristics. However, studies have shown that changes in the size of such summary glyphs, representing changes in the uncertainty of the prediction, are frequently confounded with other attributes of the phenomenon, such as its size or strength. In addition, simulation ensembles typically encode multivariate information, which can be difficult or confusing to include in a summary display. This problem can be overcome by directly displaying the ensemble as a set of annotated trajectories, however this solution will not be effective if ensembles are densely overdrawn or structurally disorganized. We propose to overcome these difficulties by selectively sampling the original ensemble, constructing a smaller representative and spatially well organized ensemble. This can be drawn directly as a set of paths that implicitly reveals the underlying spatial uncertainty distribution of the prediction. Since this approach does not use a visual channel to encode uncertainty, additional information can more easily be encoded in the display without leading to visual confusion. To demonstrate our argument, we describe the development of a visualization for ensembles of tropical cyclone forecast tracks, explaining how their spatial and temporal predictions, as well as other crucial storm characteristics such as size and intensity, can be clearly revealed. We verify the effectiveness of this visualization approach through a cognitive study exploring how storm damage estimates are affected by the density of tracks drawn, and by the presence or absence of annotating information on storm size and intensity.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A common approach to sampling the space of a prediction is the generation of an ensemble of potential outcomes, where the ensemble's distribution reveals the statistical structure of the prediction space. For example, the US National Hurricane Center generates multiple day predictions for a storm's path, size, and wind speed, and then uses a Monte Carlo approach to sample this prediction into a large ensemble of potential storm outcomes. Various forms of summary visualizations are generated from such an ensemble, often using spatial spread to indicate its statistical characteristics. However, studies have shown that changes in the size of such summary glyphs, representing changes in the uncertainty of the prediction, are frequently confounded with other attributes of the phenomenon, such as its size or strength. In addition, simulation ensembles typically encode multivariate information, which can be difficult or confusing to include in a summary display. This problem can be overcome by directly displaying the ensemble as a set of annotated trajectories, however this solution will not be effective if ensembles are densely overdrawn or structurally disorganized. We propose to overcome these difficulties by selectively sampling the original ensemble, constructing a smaller representative and spatially well organized ensemble. This can be drawn directly as a set of paths that implicitly reveals the underlying spatial uncertainty distribution of the prediction. Since this approach does not use a visual channel to encode uncertainty, additional information can more easily be encoded in the display without leading to visual confusion. To demonstrate our argument, we describe the development of a visualization for ensembles of tropical cyclone forecast tracks, explaining how their spatial and temporal predictions, as well as other crucial storm characteristics such as size and intensity, can be clearly revealed. We verify the effectiveness of this visualization approach through a cognitive study exploring how storm damage estimates are affected by the density of tracks drawn, and by the presence or absence of annotating information on storm size and intensity.",
"title": "Visualizing Uncertain Tropical Cyclone Predictions using Representative Samples from Ensembles of Forecast Tracks",
"normalizedTitle": "Visualizing Uncertain Tropical Cyclone Predictions using Representative Samples from Ensembles of Forecast Tracks",
"fno": "08440837",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualisation",
"Monte Carlo Methods",
"Storms",
"Weather Forecasting",
"Wind",
"Multiple Day Predictions",
"Tropical Cyclone Predictions",
"Forecast Tracks",
"US National Hurricane Center",
"Storm",
"Wind Speed",
"Monte Carlo Approach",
"Statistical Characteristics",
"Spatial Uncertainty Distribution",
"Storms",
"Uncertainty",
"Visualization",
"Hurricanes",
"Tropical Cyclones",
"Prediction Algorithms",
"Predictive Models",
"Uncertainty Visualization",
"Hurricane Forecasts",
"Ensemble Visualization",
"Ensemble Sampling",
"Implicit Uncertainty"
],
"authors": [
{
"givenName": "Le",
"surname": "Liu",
"fullName": "Le Liu",
"affiliation": "Magic Weaver Inc., Santa Clara, CA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Lace",
"surname": "Padilla",
"fullName": "Lace Padilla",
"affiliation": "Department of Psychology, Northwestern University, Evanston, IL",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Sarah H.",
"surname": "Creem-Regehr",
"fullName": "Sarah H. Creem-Regehr",
"affiliation": "Department of Psychology, University of Utah, Salt Lake City, UT",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Donald H.",
"surname": "House",
"fullName": "Donald H. House",
"affiliation": "School of Computing, Clemson University, Clemson, SC",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2019-01-01 00:00:00",
"pubType": "trans",
"pages": "882-891",
"year": "2019",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icdma/2013/5016/0/5016a641",
"title": "Grey Correlation Analysis of Tropical Cyclone Landing Time",
"doi": null,
"abstractUrl": "/proceedings-article/icdma/2013/5016a641/12OmNwBT1ml",
"parentPublication": {
"id": "proceedings/icdma/2013/5016/0",
"title": "2013 Fourth International Conference on Digital Manufacturing & Automation (ICDMA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hpcmp-ugc/2010/986/0/06018005",
"title": "Tropical Cyclone Track and Intensity Predictability",
"doi": null,
"abstractUrl": "/proceedings-article/hpcmp-ugc/2010/06018005/12OmNxEjY4p",
"parentPublication": {
"id": "proceedings/hpcmp-ugc/2010/986/0",
"title": "2010 DoD High Performance Computing Modernization Program Users Group Conference (HPCMP-UGC 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icii/2001/7010/3/00983116",
"title": "Tropical cyclone center location with digital image process",
"doi": null,
"abstractUrl": "/proceedings-article/icii/2001/00983116/12OmNzSyCfW",
"parentPublication": {
"id": "proceedings/icii/2001/7010/3",
"title": "2001 International Conferences on Info-tech and Info-net. Proceedings",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cso/2012/1365/0/06274832",
"title": "A Rapid Loss Index for Tropical Cyclone Disasters in China",
"doi": null,
"abstractUrl": "/proceedings-article/cso/2012/06274832/12OmNzayN0a",
"parentPublication": {
"id": "proceedings/cso/2012/1365/0",
"title": "2012 Fifth International Joint Conference on Computational Sciences and Optimization (CSO)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cs/2011/01/mcs2011010031",
"title": "Diagnosing Tropical Cyclone Sensitivity",
"doi": null,
"abstractUrl": "/magazine/cs/2011/01/mcs2011010031/13rRUB7a1jD",
"parentPublication": {
"id": "mags/cs",
"title": "Computing in Science & Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/09/07563342",
"title": "Uncertainty Visualization by Representative Sampling from Prediction Ensembles",
"doi": null,
"abstractUrl": "/journal/tg/2017/09/07563342/13rRUIM2VH4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cs/2013/05/mcs2013050056",
"title": "Improving NASA's Multiscale Modeling Framework for Tropical Cyclone Climate Study",
"doi": null,
"abstractUrl": "/magazine/cs/2013/05/mcs2013050056/13rRUx0geji",
"parentPublication": {
"id": "mags/cs",
"title": "Computing in Science & Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cs/2013/02/mcs2013020047",
"title": "Advanced Visualizations of Scale Interactions of Tropical Cyclone Formation and Tropical Waves",
"doi": null,
"abstractUrl": "/magazine/cs/2013/02/mcs2013020047/13rRUy3xYez",
"parentPublication": {
"id": "mags/cs",
"title": "Computing in Science & Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cisai/2021/0692/0/069200a135",
"title": "Identification Method of Disaster-Bearing Body in Coastal Cities under Emergency Logistics Characteristics",
"doi": null,
"abstractUrl": "/proceedings-article/cisai/2021/069200a135/1BmO0jZJcEo",
"parentPublication": {
"id": "proceedings/cisai/2021/0692/0",
"title": "2021 International Conference on Computer Information Science and Artificial Intelligence (CISAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ithings-greencom-cpscom-smartdata/2019/2980/0/298000a575",
"title": "Tropical Cyclone Maximum Wind Estimation from Infrared Satellite Data with Integrated Convolutional Neural Networks",
"doi": null,
"abstractUrl": "/proceedings-article/ithings-greencom-cpscom-smartdata/2019/298000a575/1ehBzeBtn32",
"parentPublication": {
"id": "proceedings/ithings-greencom-cpscom-smartdata/2019/2980/0",
"title": "2019 International Conference on Internet of Things (iThings) and IEEE Green Computing and Communications (GreenCom) and IEEE Cyber, Physical and Social Computing (CPSCom) and IEEE Smart Data (SmartData)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "08440849",
"articleId": "17D45XH89qk",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08440816",
"articleId": "17D45Xh13so",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
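The abstract above rests on replacing summary glyphs with a small, spatially well-organized subset of the Monte Carlo track ensemble that can be drawn directly as annotated paths. As a rough illustration only, the following sketch picks representatives with greedy farthest-point sampling over a mean-pointwise track distance; the synthetic ensemble, the distance, and the selection rule are my assumptions, not the paper's method.

```python
# Minimal sketch (an illustration, not the paper's algorithm): select a small,
# spread-out subset of forecast tracks from a large Monte Carlo ensemble.
import numpy as np

def synthetic_tracks(n_tracks=500, n_steps=24, seed=0):
    """Random-walk 'storm tracks': array of shape (n_tracks, n_steps, 2)."""
    rng = np.random.default_rng(seed)
    steps = rng.normal(loc=(0.3, 0.1), scale=0.15, size=(n_tracks, n_steps, 2))
    return np.cumsum(steps, axis=1)

def track_distance(a, b):
    """Mean pointwise Euclidean distance between two equal-length tracks."""
    return float(np.mean(np.linalg.norm(a - b, axis=-1)))

def representative_subset(tracks, k=30):
    """Greedy farthest-point sampling: start near the ensemble-mean track,
    then repeatedly add the track farthest from everything chosen so far."""
    mean_track = tracks.mean(axis=0)
    chosen = [int(np.argmin([track_distance(t, mean_track) for t in tracks]))]
    d_to_chosen = np.array([track_distance(t, tracks[chosen[0]]) for t in tracks])
    while len(chosen) < k:
        nxt = int(np.argmax(d_to_chosen))
        chosen.append(nxt)
        d_new = np.array([track_distance(t, tracks[nxt]) for t in tracks])
        d_to_chosen = np.minimum(d_to_chosen, d_new)
    return chosen

if __name__ == "__main__":
    ens = synthetic_tracks()
    idx = representative_subset(ens, k=30)
    print(f"selected {len(idx)} of {len(ens)} tracks, e.g. indices {idx[:5]}")
```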
{
"issue": {
"id": "12OmNCaLEju",
"title": "Jan.",
"year": "2018",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "24",
"label": "Jan.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwbaqLz",
"doi": "10.1109/TVCG.2017.2744138",
"abstract": "We investigate priming and anchoring effects on perceptual tasks in visualization. Priming or anchoring effects depict the phenomena that a stimulus might influence subsequent human judgments on a perceptual level, or on a cognitive level by providing a frame of reference. Using visual class separability in scatterplots as an example task, we performed a set of five studies to investigate the potential existence of priming and anchoring effects. Our findings show that—under certain circumstances—such effects indeed exist. In other words, humans judge class separability of the same scatterplot differently depending on the scatterplot(s) they have seen before. These findings inform future work on better understanding and more accurately modeling human perception of visual patterns.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We investigate priming and anchoring effects on perceptual tasks in visualization. Priming or anchoring effects depict the phenomena that a stimulus might influence subsequent human judgments on a perceptual level, or on a cognitive level by providing a frame of reference. Using visual class separability in scatterplots as an example task, we performed a set of five studies to investigate the potential existence of priming and anchoring effects. Our findings show that—under certain circumstances—such effects indeed exist. In other words, humans judge class separability of the same scatterplot differently depending on the scatterplot(s) they have seen before. These findings inform future work on better understanding and more accurately modeling human perception of visual patterns.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We investigate priming and anchoring effects on perceptual tasks in visualization. Priming or anchoring effects depict the phenomena that a stimulus might influence subsequent human judgments on a perceptual level, or on a cognitive level by providing a frame of reference. Using visual class separability in scatterplots as an example task, we performed a set of five studies to investigate the potential existence of priming and anchoring effects. Our findings show that—under certain circumstances—such effects indeed exist. In other words, humans judge class separability of the same scatterplot differently depending on the scatterplot(s) they have seen before. These findings inform future work on better understanding and more accurately modeling human perception of visual patterns.",
"title": "Priming and Anchoring Effects in Visualization",
"normalizedTitle": "Priming and Anchoring Effects in Visualization",
"fno": "08022891",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualization",
"Visualization",
"Visual Perception",
"Correlation",
"Cognition",
"Data Models",
"Uncertainty",
"Perception",
"Anchoring",
"Bias",
"Scatterplots",
"Visualization",
"M Turk Study"
],
"authors": [
{
"givenName": "André Calero",
"surname": "Valdez",
"fullName": "André Calero Valdez",
"affiliation": "RWTH Aachen University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Martina",
"surname": "Ziefle",
"fullName": "Martina Ziefle",
"affiliation": "RWTH Aachen University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Michael",
"surname": "Sedlmair",
"fullName": "Michael Sedlmair",
"affiliation": "University of Vienna",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2018-01-01 00:00:00",
"pubType": "trans",
"pages": "584-594",
"year": "2018",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/hicss/2013/4892/0/4892a215",
"title": "Creative Virtual Environments: Effect of Supraliminal Priming on Team Brainstorming",
"doi": null,
"abstractUrl": "/proceedings-article/hicss/2013/4892a215/12OmNANkooi",
"parentPublication": {
"id": "proceedings/hicss/2013/4892/0",
"title": "2013 46th Hawaii International Conference on System Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hicss/2015/7367/0/7367d394",
"title": "The Priming Effects of Relevant and Irrelevant Advertising in Online Auctions",
"doi": null,
"abstractUrl": "/proceedings-article/hicss/2015/7367d394/12OmNClQ0uA",
"parentPublication": {
"id": "proceedings/hicss/2015/7367/0",
"title": "2015 48th Hawaii International Conference on System Sciences (HICSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mmit/2010/4008/2/4008b123",
"title": "The Implicit Aggression in Adolescents: The Priming Effect of Internet Violent Stimulus",
"doi": null,
"abstractUrl": "/proceedings-article/mmit/2010/4008b123/12OmNs0kyEP",
"parentPublication": {
"id": "proceedings/mmit/2010/4008/2",
"title": "MultiMedia and Information Technology, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/ic/2012/05/mic2012050013",
"title": "Priming for Better Performance in Microtask Crowdsourcing Environments",
"doi": null,
"abstractUrl": "/magazine/ic/2012/05/mic2012050013/13rRUILLkA1",
"parentPublication": {
"id": "mags/ic",
"title": "IEEE Internet Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/01/07539348",
"title": "The Attraction Effect in Information Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2017/01/07539348/13rRUwj7cpe",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2018/6100/0/610000c092",
"title": "Priming Neural Networks",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2018/610000c092/17D45WB0qcX",
"parentPublication": {
"id": "proceedings/cvprw/2018/6100/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vast/2017/3163/0/08585665",
"title": "The Anchoring Effect in Decision-Making with Visual Analytics",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2017/08585665/17D45WZZ7CL",
"parentPublication": {
"id": "proceedings/vast/2017/3163/0",
"title": "2017 IEEE Conference on Visual Analytics Science and Technology (VAST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08805431",
"title": "Common Fate for Animated Transitions in Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08805431/1cG4F76usA8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2019/4752/0/09212866",
"title": "Audio-Tactile Priming to Guide Information Recall in Edutainment",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2019/09212866/1nHRVpg6Ms8",
"parentPublication": {
"id": "proceedings/icvrv/2019/4752/0",
"title": "2019 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2021/05/09495208",
"title": "Visual Clustering Factors in Scatterplots",
"doi": null,
"abstractUrl": "/magazine/cg/2021/05/09495208/1vyjCkbMBvW",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "08019826",
"articleId": "13rRUwIF6la",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08017584",
"articleId": "13rRUyueghe",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNBCZnUs",
"title": "March",
"year": "2020",
"issueNum": "03",
"idPrefix": "tg",
"pubType": "journal",
"volume": "26",
"label": "March",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "14jQfPkRijD",
"doi": "10.1109/TVCG.2018.2875702",
"abstract": "Similarity measuring methods are widely adopted in a broad range of visualization applications. In this work, we address the challenge of representing human perception in the visual analysis of scatterplots by introducing a novel deep-learning-based approach, ScatterNet, captures perception-driven similarities of such plots. The approach exploits deep neural networks to extract semantic features of scatterplot images for similarity calculation. We create a large labeled dataset consisting of similar and dissimilar images of scatterplots to train the deep neural network. We conduct a set of evaluations including performance experiments and a user study to demonstrate the effectiveness and efficiency of our approach. The evaluations confirm that the learned features capture the human perception of scatterplot similarity effectively. We describe two scenarios to show how ScatterNet can be applied in visual analysis applications.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Similarity measuring methods are widely adopted in a broad range of visualization applications. In this work, we address the challenge of representing human perception in the visual analysis of scatterplots by introducing a novel deep-learning-based approach, ScatterNet, captures perception-driven similarities of such plots. The approach exploits deep neural networks to extract semantic features of scatterplot images for similarity calculation. We create a large labeled dataset consisting of similar and dissimilar images of scatterplots to train the deep neural network. We conduct a set of evaluations including performance experiments and a user study to demonstrate the effectiveness and efficiency of our approach. The evaluations confirm that the learned features capture the human perception of scatterplot similarity effectively. We describe two scenarios to show how ScatterNet can be applied in visual analysis applications.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Similarity measuring methods are widely adopted in a broad range of visualization applications. In this work, we address the challenge of representing human perception in the visual analysis of scatterplots by introducing a novel deep-learning-based approach, ScatterNet, captures perception-driven similarities of such plots. The approach exploits deep neural networks to extract semantic features of scatterplot images for similarity calculation. We create a large labeled dataset consisting of similar and dissimilar images of scatterplots to train the deep neural network. We conduct a set of evaluations including performance experiments and a user study to demonstrate the effectiveness and efficiency of our approach. The evaluations confirm that the learned features capture the human perception of scatterplot similarity effectively. We describe two scenarios to show how ScatterNet can be applied in visual analysis applications.",
"title": "ScatterNet: A Deep Subjective Similarity Model for Visual Analysis of Scatterplots",
"normalizedTitle": "ScatterNet: A Deep Subjective Similarity Model for Visual Analysis of Scatterplots",
"fno": "08490694",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualisation",
"Learning Artificial Intelligence",
"Neural Nets",
"Deep Subjective Similarity Model",
"Similarity Measuring Methods",
"Visualization Applications",
"Human Perception",
"Deep Learning Based Approach",
"Scatter Net",
"Perception Driven Similarities",
"Deep Neural Network",
"Semantic Features",
"Scatterplot Images",
"Labeled Dataset",
"Similar Images",
"Dissimilar Images",
"Scatterplot Similarity",
"Visual Analysis Applications",
"Visualization",
"Feature Extraction",
"Measurement",
"Neural Networks",
"Personal Area Networks",
"Visual Perception",
"Computational Modeling",
"Scatterplot",
"Similarity Measuring",
"Deep Learning",
"Visualization",
"Visual Exploration"
],
"authors": [
{
"givenName": "Yuxin",
"surname": "Ma",
"fullName": "Yuxin Ma",
"affiliation": "State Key Lab of CAD&CG, Zhejiang University, Hangzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Anthony K. H.",
"surname": "Tung",
"fullName": "Anthony K. H. Tung",
"affiliation": "National University of Singapore, Singapore",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Wei",
"surname": "Wang",
"fullName": "Wei Wang",
"affiliation": "National University of Singapore, Singapore",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xiang",
"surname": "Gao",
"fullName": "Xiang Gao",
"affiliation": "Hangzhou Normal University, Yuhang, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Zhigeng",
"surname": "Pan",
"fullName": "Zhigeng Pan",
"affiliation": "Hangzhou Normal University, Yuhang, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Wei",
"surname": "Chen",
"fullName": "Wei Chen",
"affiliation": "State Key Lab of CAD&CG, Zhejiang University, Hangzhou, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "03",
"pubDate": "2020-03-01 00:00:00",
"pubType": "trans",
"pages": "1562-1576",
"year": "2020",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iscc/2009/4672/0/05202299",
"title": "BlueHRT: Hybrid Ring Tree Scatternet Formation in Bluetooth Networks",
"doi": null,
"abstractUrl": "/proceedings-article/iscc/2009/05202299/12OmNwDACmA",
"parentPublication": {
"id": "proceedings/iscc/2009/4672/0",
"title": "2009 IEEE Symposium on Computers and Communications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pcc/2006/0198/0/01629405",
"title": "Maintaining an energy-efficient Bluetooth scatternet",
"doi": null,
"abstractUrl": "/proceedings-article/pcc/2006/01629405/12OmNwcl7Lt",
"parentPublication": {
"id": "proceedings/pcc/2006/0198/0",
"title": "2006 IEEE International Performance Computing and Communications Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2012/1247/0/06180882",
"title": "Shape perception in 3-D scatterplots using constant visual angle glyphs",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2012/06180882/12OmNxEjXNR",
"parentPublication": {
"id": "proceedings/vr/2012/1247/0",
"title": "Virtual Reality Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2017/1034/0/1034b140",
"title": "Efficient Convolutional Network Learning Using Parametric Log Based Dual-Tree Wavelet ScatterNet",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2017/1034b140/12OmNyvoXjj",
"parentPublication": {
"id": "proceedings/iccvw/2017/1034/0",
"title": "2017 IEEE International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/09/08047300",
"title": "Cluster-Based Visual Abstraction for Multivariate Scatterplots",
"doi": null,
"abstractUrl": "/journal/tg/2018/09/08047300/13rRUILLkvy",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/01/08022891",
"title": "Priming and Anchoring Effects in Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2018/01/08022891/13rRUwbaqLz",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09834145",
"title": "Visual Cue Effects on a Classification Accuracy Estimation Task in Immersive Scatterplots",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09834145/1FapOsLgEik",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis/2019/4941/0/08933670",
"title": "Disentangled Representation of Data Distributions in Scatterplots",
"doi": null,
"abstractUrl": "/proceedings-article/vis/2019/08933670/1fTgGJvQB9e",
"parentPublication": {
"id": "proceedings/vis/2019/4941/0",
"title": "2019 IEEE Visualization Conference (VIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09222295",
"title": "Modeling the Influence of Visual Density on Cluster Perception in Scatterplots Using Topology",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09222295/1nTqtC45a12",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2021/05/09495208",
"title": "Visual Clustering Factors in Scatterplots",
"doi": null,
"abstractUrl": "/magazine/cg/2021/05/09495208/1vyjCkbMBvW",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "08468065",
"articleId": "13HFz2XZAUp",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08494817",
"articleId": "14s8M4gkNi0",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1i57w9LYwp2",
"name": "ttg202003-08490694s1.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg202003-08490694s1.pdf",
"extension": "pdf",
"size": "254 kB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
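ScatterNet, as described above, learns an image embedding whose distances track perceived scatterplot similarity. The toy model below is only a stand-in to make the idea concrete: a small siamese-style CNN (PyTorch) that embeds 64 x 64 rasterized plots and scores pairs by cosine similarity; the architecture, input size, and scoring are assumptions, not the published network.

```python
# Minimal sketch (assumption-laden stand-in, not the ScatterNet model):
# a tiny siamese CNN that embeds rasterized scatterplot images and scores
# similarity by the cosine of the embeddings.
import torch
import torch.nn as nn
import torch.nn.functional as F

class TinyScatterEncoder(nn.Module):
    def __init__(self, embed_dim: int = 32):
        super().__init__()
        self.features = nn.Sequential(
            nn.Conv2d(1, 16, kernel_size=3, padding=1), nn.ReLU(),
            nn.MaxPool2d(2),                             # 64 -> 32
            nn.Conv2d(16, 32, kernel_size=3, padding=1), nn.ReLU(),
            nn.MaxPool2d(2),                             # 32 -> 16
        )
        self.head = nn.Linear(32 * 16 * 16, embed_dim)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        z = self.features(x).flatten(start_dim=1)
        return F.normalize(self.head(z), dim=1)          # unit-length embedding

def similarity(encoder: nn.Module, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
    """Cosine similarity in [-1, 1] between two batches of plot images."""
    return (encoder(a) * encoder(b)).sum(dim=1)

if __name__ == "__main__":
    enc = TinyScatterEncoder()
    imgs_a = torch.rand(4, 1, 64, 64)   # stand-ins for rendered scatterplots
    imgs_b = torch.rand(4, 1, 64, 64)
    print(similarity(enc, imgs_a, imgs_b))
```

In practice such an encoder would be trained on labeled similar/dissimilar plot pairs (e.g., with a contrastive loss); the untrained forward pass here only shows the data flow.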
{
"issue": {
"id": "12OmNvsDHDY",
"title": "Jan.",
"year": "2020",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "26",
"label": "Jan.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1cr2ZlCC2xG",
"doi": "10.1109/TVCG.2019.2934208",
"abstract": "Scatterplots are frequently scaled to fit display areas in multi-view and multi-device data analysis environments. A common method used for scaling is to enlarge or shrink the entire scatterplot together with the inside points synchronously and proportionally. This process is called geometric scaling. However, geometric scaling of scatterplots may cause a perceptual bias, that is, the perceived and physical values of visual features may be dissociated with respect to geometric scaling. For example, if a scatterplot is projected from a laptop to a large projector screen, then observers may feel that the scatterplot shown on the projector has fewer points than that viewed on the laptop. This paper presents an evaluation study on the perceptual bias of visual features in scatterplots caused by geometric scaling. The study focuses on three fundamental visual features (i.e., numerosity, correlation, and cluster separation) and three hypotheses that are formulated on the basis of our experience. We carefully design three controlled experiments by using well-prepared synthetic data and recruit participants to complete the experiments on the basis of their subjective experience. With a detailed analysis of the experimental results, we obtain a set of instructive findings. First, geometric scaling causes a bias that has a linear relationship with the scale ratio. Second, no significant difference exists between the biases measured from normally and uniformly distributed scatterplots. Third, changing the point radius can correct the bias to a certain extent. These findings can be used to inspire the design decisions of scatterplots in various scenarios.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Scatterplots are frequently scaled to fit display areas in multi-view and multi-device data analysis environments. A common method used for scaling is to enlarge or shrink the entire scatterplot together with the inside points synchronously and proportionally. This process is called geometric scaling. However, geometric scaling of scatterplots may cause a perceptual bias, that is, the perceived and physical values of visual features may be dissociated with respect to geometric scaling. For example, if a scatterplot is projected from a laptop to a large projector screen, then observers may feel that the scatterplot shown on the projector has fewer points than that viewed on the laptop. This paper presents an evaluation study on the perceptual bias of visual features in scatterplots caused by geometric scaling. The study focuses on three fundamental visual features (i.e., numerosity, correlation, and cluster separation) and three hypotheses that are formulated on the basis of our experience. We carefully design three controlled experiments by using well-prepared synthetic data and recruit participants to complete the experiments on the basis of their subjective experience. With a detailed analysis of the experimental results, we obtain a set of instructive findings. First, geometric scaling causes a bias that has a linear relationship with the scale ratio. Second, no significant difference exists between the biases measured from normally and uniformly distributed scatterplots. Third, changing the point radius can correct the bias to a certain extent. These findings can be used to inspire the design decisions of scatterplots in various scenarios.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Scatterplots are frequently scaled to fit display areas in multi-view and multi-device data analysis environments. A common method used for scaling is to enlarge or shrink the entire scatterplot together with the inside points synchronously and proportionally. This process is called geometric scaling. However, geometric scaling of scatterplots may cause a perceptual bias, that is, the perceived and physical values of visual features may be dissociated with respect to geometric scaling. For example, if a scatterplot is projected from a laptop to a large projector screen, then observers may feel that the scatterplot shown on the projector has fewer points than that viewed on the laptop. This paper presents an evaluation study on the perceptual bias of visual features in scatterplots caused by geometric scaling. The study focuses on three fundamental visual features (i.e., numerosity, correlation, and cluster separation) and three hypotheses that are formulated on the basis of our experience. We carefully design three controlled experiments by using well-prepared synthetic data and recruit participants to complete the experiments on the basis of their subjective experience. With a detailed analysis of the experimental results, we obtain a set of instructive findings. First, geometric scaling causes a bias that has a linear relationship with the scale ratio. Second, no significant difference exists between the biases measured from normally and uniformly distributed scatterplots. Third, changing the point radius can correct the bias to a certain extent. These findings can be used to inspire the design decisions of scatterplots in various scenarios.",
"title": "Evaluating Perceptual Bias During Geometric Scaling of Scatterplots",
"normalizedTitle": "Evaluating Perceptual Bias During Geometric Scaling of Scatterplots",
"fno": "08794768",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Computational Geometry",
"Data Analysis",
"Data Visualisation",
"Visual Perception",
"Multiview Data Analysis Environments",
"Scale Ratio",
"Visual Features",
"Scatterplot",
"Multidevice Data Analysis Environments",
"Geometric Scaling",
"Perceptual Bias",
"Visualization",
"Correlation",
"Data Analysis",
"Encoding",
"Measurement",
"Portable Computers",
"Mobile Handsets",
"Evaluation",
"Scatterplot",
"Geometric Scaling",
"Bias",
"Perceptual Consistency"
],
"authors": [
{
"givenName": "Yating",
"surname": "Wei",
"fullName": "Yating Wei",
"affiliation": "The State Key Lab of CAD & CG, Zhejiang University, Hangzhou, Zhejiang, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Honghui",
"surname": "Mei",
"fullName": "Honghui Mei",
"affiliation": "The State Key Lab of CAD & CG, Zhejiang University, Hangzhou, Zhejiang, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ying",
"surname": "Zhao",
"fullName": "Ying Zhao",
"affiliation": "School of Computer Science and Engineering, Central South University, Changsha, Hunan, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Shuyue",
"surname": "Zhou",
"fullName": "Shuyue Zhou",
"affiliation": "The State Key Lab of CAD & CG, Zhejiang University, Hangzhou, Zhejiang, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Bingru",
"surname": "Lin",
"fullName": "Bingru Lin",
"affiliation": "The State Key Lab of CAD & CG, Zhejiang University, Hangzhou, Zhejiang, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Haojing",
"surname": "Jiang",
"fullName": "Haojing Jiang",
"affiliation": "School of Computer Science and Engineering, Central South University, Changsha, Hunan, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Wei",
"surname": "Chen",
"fullName": "Wei Chen",
"affiliation": "The State Key Lab of CAD & CG, Zhejiang University, Hangzhou, Zhejiang, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2020-01-01 00:00:00",
"pubType": "trans",
"pages": "321-331",
"year": "2020",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2009/3992/0/05206722",
"title": "On bias correction for geometric parameter estimation in computer vision",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2009/05206722/12OmNwvVrCj",
"parentPublication": {
"id": "proceedings/cvpr/2009/3992/0",
"title": "2009 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/09/08047300",
"title": "Cluster-Based Visual Abstraction for Multivariate Scatterplots",
"doi": null,
"abstractUrl": "/journal/tg/2018/09/08047300/13rRUILLkvy",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/06/07864468",
"title": "Towards Perceptual Optimization of the Visual Design of Scatterplots",
"doi": null,
"abstractUrl": "/journal/tg/2017/06/07864468/13rRUILtJzC",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2006/05/v0829",
"title": "User Interaction with Scatterplots on Small Screens - A Comparative Evaluation of Geometric-Semantic Zoom and Fisheye Distortion",
"doi": null,
"abstractUrl": "/journal/tg/2006/05/v0829/13rRUxcKzVd",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/01/08017602",
"title": "Scatterplots: Tasks, Data, and Designs",
"doi": null,
"abstractUrl": "/journal/tg/2018/01/08017602/13rRUy3gn7C",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2019/2838/0/283800a062",
"title": "A Technique for Selection and Drawing of Scatterplots for Multi-Dimensional Data Visualization",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2019/283800a062/1cMF8TTAeAw",
"parentPublication": {
"id": "proceedings/iv/2019/2838/0",
"title": "2019 23rd International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icse/2019/0869/0/086900a700",
"title": "Investigating the Effects of Gender Bias on GitHub",
"doi": null,
"abstractUrl": "/proceedings-article/icse/2019/086900a700/1cMFvs0gd6o",
"parentPublication": {
"id": "proceedings/icse/2019/0869/0",
"title": "2019 IEEE/ACM 41st International Conference on Software Engineering (ICSE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis/2019/4941/0/08933670",
"title": "Disentangled Representation of Data Distributions in Scatterplots",
"doi": null,
"abstractUrl": "/proceedings-article/vis/2019/08933670/1fTgGJvQB9e",
"parentPublication": {
"id": "proceedings/vis/2019/4941/0",
"title": "2019 IEEE Visualization Conference (VIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09222295",
"title": "Modeling the Influence of Visual Density on Cluster Perception in Scatterplots Using Topology",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09222295/1nTqtC45a12",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09556578",
"title": "The Weighted Average Illusion: Biases in Perceived Mean Position in Scatterplots",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09556578/1xlvYaEQTNC",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "08836120",
"articleId": "1dia2KVa7g4",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08805430",
"articleId": "1cG4Ahb0KnC",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1i4p6q9rtp6",
"name": "ttg202001-08794768s1.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg202001-08794768s1.mp4",
"extension": "mp4",
"size": "35.3 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
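The scaling study above reports that the perceptual bias grows roughly linearly with the scale ratio and that adjusting the point radius can partially compensate. The snippet below sketches what such a compensation rule could look like; the slope and correction gain are hypothetical placeholders, not the paper's fitted values.

```python
# Minimal sketch (hypothetical coefficients, not the paper's fitted model):
# if the perceived value of a visual feature drifts roughly linearly with the
# scale ratio, a scatterplot renderer can partially offset the drift by
# adjusting the point radius along with the geometric scale.

def perceived_bias(scale_ratio: float, slope: float = 0.08) -> float:
    """Assumed linear bias model: zero bias at ratio 1, growing with the ratio."""
    return slope * (scale_ratio - 1.0)

def corrected_radius(base_radius: float, scale_ratio: float,
                     correction_gain: float = 0.5) -> float:
    """Scale the radius geometrically, then nudge it against the bias.
    correction_gain is a hypothetical knob for how much to compensate."""
    geometric = base_radius * scale_ratio
    return geometric * (1.0 + correction_gain * perceived_bias(scale_ratio))

if __name__ == "__main__":
    for ratio in (0.5, 1.0, 2.0, 4.0):
        print(f"scale x{ratio}: radius {corrected_radius(2.0, ratio):.2f}px")
```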
{
"issue": {
"id": "1DQPlKUprk4",
"title": "April-June",
"year": "2022",
"issueNum": "02",
"idPrefix": "ta",
"pubType": "journal",
"volume": "13",
"label": "April-June",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1igRZVW87EQ",
"doi": "10.1109/TAFFC.2020.2981440",
"abstract": "Electroencephalogram (EEG) has been widely used for the detection of anxiety because of its ability to reflect the functional activities of the brain. However, EEG alone may not provide precision in the detection of anxiety because other emotional disorders usually trigger the same changes in brain function. To discover effective diagnostic indicators and to achieve more precise anxiety detection, we integrate eye movement information into EEG and divide the features into groups according to their respective characteristics. Then, we use group sparse canonical correlation analysis (GSCCA) to investigate group structure information among EEG and eye movement features and obtain an effective fusion representation of EEG and eye movement to achieve more precise detection of anxiety mood. The experimental results from 45 anxious subjects and 47 normal controls from the Healthy Brain Network (HBN) dataset showed that GSCCA could be effectively used to explore the correlation between EEG features within different scalp regions and eye movement features from several aspects. Visual behaviors, including saccades and fixation, are more linearly related to the power spectrum of EEG on the scalp area corresponding to the visual region of the brain. The ultimate fusion representation achieved an optimal classification accuracy of 82.70 percent with the support vector machine (SVM) classifier on the gamma band of EEG.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Electroencephalogram (EEG) has been widely used for the detection of anxiety because of its ability to reflect the functional activities of the brain. However, EEG alone may not provide precision in the detection of anxiety because other emotional disorders usually trigger the same changes in brain function. To discover effective diagnostic indicators and to achieve more precise anxiety detection, we integrate eye movement information into EEG and divide the features into groups according to their respective characteristics. Then, we use group sparse canonical correlation analysis (GSCCA) to investigate group structure information among EEG and eye movement features and obtain an effective fusion representation of EEG and eye movement to achieve more precise detection of anxiety mood. The experimental results from 45 anxious subjects and 47 normal controls from the Healthy Brain Network (HBN) dataset showed that GSCCA could be effectively used to explore the correlation between EEG features within different scalp regions and eye movement features from several aspects. Visual behaviors, including saccades and fixation, are more linearly related to the power spectrum of EEG on the scalp area corresponding to the visual region of the brain. The ultimate fusion representation achieved an optimal classification accuracy of 82.70 percent with the support vector machine (SVM) classifier on the gamma band of EEG.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Electroencephalogram (EEG) has been widely used for the detection of anxiety because of its ability to reflect the functional activities of the brain. However, EEG alone may not provide precision in the detection of anxiety because other emotional disorders usually trigger the same changes in brain function. To discover effective diagnostic indicators and to achieve more precise anxiety detection, we integrate eye movement information into EEG and divide the features into groups according to their respective characteristics. Then, we use group sparse canonical correlation analysis (GSCCA) to investigate group structure information among EEG and eye movement features and obtain an effective fusion representation of EEG and eye movement to achieve more precise detection of anxiety mood. The experimental results from 45 anxious subjects and 47 normal controls from the Healthy Brain Network (HBN) dataset showed that GSCCA could be effectively used to explore the correlation between EEG features within different scalp regions and eye movement features from several aspects. Visual behaviors, including saccades and fixation, are more linearly related to the power spectrum of EEG on the scalp area corresponding to the visual region of the brain. The ultimate fusion representation achieved an optimal classification accuracy of 82.70 percent with the support vector machine (SVM) classifier on the gamma band of EEG.",
"title": "Fusing of Electroencephalogram and Eye Movement With Group Sparse Canonical Correlation Analysis for Anxiety Detection",
"normalizedTitle": "Fusing of Electroencephalogram and Eye Movement With Group Sparse Canonical Correlation Analysis for Anxiety Detection",
"fno": "09039622",
"hasPdf": true,
"idPrefix": "ta",
"keywords": [
"Biomechanics",
"Correlation Methods",
"Electroencephalography",
"Eye",
"Feature Extraction",
"Medical Disorders",
"Medical Signal Processing",
"Neurophysiology",
"Signal Classification",
"Support Vector Machines",
"Group Sparse Canonical Correlation Analysis",
"Eye Movement",
"Healthy Brain Network Dataset",
"Electroencephalogram",
"Brain Function",
"Anxiety Mood Detection",
"Emotional Disorders",
"GSCCA",
"Saccades",
"Scalp",
"EEG Fusion Representation",
"Signal Classification",
"Support Vector Machine Classifier",
"SVM Classifier",
"Electroencephalography",
"Feature Extraction",
"Correlation",
"Scalp",
"Visualization",
"Support Vector Machines",
"Psychology",
"Emotional Disorder",
"Anxiety",
"GSCCA",
"Electroencephalogram",
"Eye Movement"
],
"authors": [
{
"givenName": "Xiaowei",
"surname": "Zhang",
"fullName": "Xiaowei Zhang",
"affiliation": "Gansu Provincial Key Laboratory of Wearable Computing School of Information Science and Engineering, Lanzhou University, Lanzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jing",
"surname": "Pan",
"fullName": "Jing Pan",
"affiliation": "Gansu Provincial Key Laboratory of Wearable Computing School of Information Science and Engineering, Lanzhou University, Lanzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jian",
"surname": "Shen",
"fullName": "Jian Shen",
"affiliation": "Gansu Provincial Key Laboratory of Wearable Computing School of Information Science and Engineering, Lanzhou University, Lanzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Zia",
"surname": "ud Din",
"fullName": "Zia ud Din",
"affiliation": "Gansu Provincial Key Laboratory of Wearable Computing School of Information Science and Engineering, Lanzhou University, Lanzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Junlei",
"surname": "Li",
"fullName": "Junlei Li",
"affiliation": "Gansu Provincial Key Laboratory of Wearable Computing School of Information Science and Engineering, Lanzhou University, Lanzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Dawei",
"surname": "Lu",
"fullName": "Dawei Lu",
"affiliation": "Gansu Provincial Key Laboratory of Wearable Computing School of Information Science and Engineering, Lanzhou University, Lanzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Manxi",
"surname": "Wu",
"fullName": "Manxi Wu",
"affiliation": "Gansu Provincial Key Laboratory of Wearable Computing School of Information Science and Engineering, Lanzhou University, Lanzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Bin",
"surname": "Hu",
"fullName": "Bin Hu",
"affiliation": "Gansu Provincial Key Laboratory of Wearable Computing, School of Information Science and Engineering, Lanzhou University, Lanzhou, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "02",
"pubDate": "2022-04-01 00:00:00",
"pubType": "trans",
"pages": "958-971",
"year": "2022",
"issn": "1949-3045",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/acii/2013/5048/0/5048a399",
"title": "Mouse Trajectories and State Anxiety: Feature Selection with Random Forest",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2013/5048a399/12OmNBO3K1C",
"parentPublication": {
"id": "proceedings/acii/2013/5048/0",
"title": "2013 Humaine Association Conference on Affective Computing and Intelligent Interaction (ACII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acssc/1991/2470/0/00186558",
"title": "Electroencephalogram pattern recognition using fuzzy logic",
"doi": null,
"abstractUrl": "/proceedings-article/acssc/1991/00186558/12OmNyNQSDQ",
"parentPublication": {
"id": "proceedings/acssc/1991/2470/0",
"title": "Conference Record of the Twenty-Fifth Asilomar Conference on Signals, Systems & Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbms/2017/1710/0/1710a409",
"title": "An Automatic EEG Based System for the Recognition of Math Anxiety",
"doi": null,
"abstractUrl": "/proceedings-article/cbms/2017/1710a409/12OmNym2c8j",
"parentPublication": {
"id": "proceedings/cbms/2017/1710/0",
"title": "2017 IEEE 30th International Symposium on Computer-Based Medical Systems (CBMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2017/3050/0/08217946",
"title": "A novel depression detection method based on pervasive EEG and EEG splitting criterion",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2017/08217946/12OmNyoiZ50",
"parentPublication": {
"id": "proceedings/bibm/2017/3050/0",
"title": "2017 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2017/3050/0/08217905",
"title": "mEEG: A system for electroencephalogram data management and analysis",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2017/08217905/12OmNzVGcEA",
"parentPublication": {
"id": "proceedings/bibm/2017/3050/0",
"title": "2017 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibe/2018/6217/0/247100a177",
"title": "Investigating Electrode Sites for Intention Detection During Robot Based Hand Movement Using EEG-BCI System",
"doi": null,
"abstractUrl": "/proceedings-article/bibe/2018/247100a177/17D45XuDNHz",
"parentPublication": {
"id": "proceedings/bibe/2018/6217/0",
"title": "2018 IEEE 18th International Conference on Bioinformatics and Bioengineering (BIBE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2022/01/08807347",
"title": "The Recognition of Multiple Anxiety Levels Based on Electroencephalograph",
"doi": null,
"abstractUrl": "/journal/ta/2022/01/08807347/1cG5Tm3XaNy",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/emip/2019/2243/0/224300a026",
"title": "Synchronized Analysis of Eye Movement and EEG during Program Comprehension",
"doi": null,
"abstractUrl": "/proceedings-article/emip/2019/224300a026/1dlvMfXpd5e",
"parentPublication": {
"id": "proceedings/emip/2019/2243/0",
"title": "2019 IEEE/ACM 6th International Workshop on Eye Movements in Programming (EMIP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2022/03/09200685",
"title": "Enhancement of Movement Intention Detection Using EEG Signals Responsive to Emotional Music Stimulus",
"doi": null,
"abstractUrl": "/journal/ta/2022/03/09200685/1ndVcP6texO",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2020/6215/0/09313104",
"title": "Anxiety Detection with Nonlinear Group Correlation Fusion of Electroencephalogram and Eye Movement",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2020/09313104/1qmfPNp0Vmo",
"parentPublication": {
"id": "proceedings/bibm/2020/6215/0",
"title": "2020 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09037266",
"articleId": "1ifd4ywuBO0",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09043472",
"articleId": "1ilQHOfMtUc",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
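The record above describes a fusion-then-classify pipeline: group sparse canonical correlation analysis (GSCCA) combines EEG and eye-movement features, and an SVM classifies the fused representation. As a rough illustration only, the sketch below substitutes plain CCA from scikit-learn for the paper's GSCCA and runs on synthetic data; the feature counts, component number, and variable names are assumptions, not values from the article.

```python
# Minimal sketch, NOT the paper's GSCCA: ordinary CCA stands in for the group
# sparse variant to show the fusion-then-classify idea on synthetic data.
import numpy as np
from sklearn.cross_decomposition import CCA
from sklearn.svm import SVC
from sklearn.model_selection import cross_val_score

rng = np.random.default_rng(0)
n_subjects = 92                 # 45 anxious + 47 controls, as in the abstract
n_eeg, n_eye = 60, 12           # assumed feature dimensions

labels = np.array([1] * 45 + [0] * 47)
eeg = rng.normal(size=(n_subjects, n_eeg)) + labels[:, None] * 0.3
eye = rng.normal(size=(n_subjects, n_eye)) + labels[:, None] * 0.3

# Learn a shared low-dimensional space for the two views (stand-in for GSCCA)
# and concatenate the canonical variates as the fused representation.
cca = CCA(n_components=5)
eeg_c, eye_c = cca.fit_transform(eeg, eye)
fused = np.hstack([eeg_c, eye_c])

# Classify anxious vs. control subjects from the fused features.
scores = cross_val_score(SVC(kernel="linear"), fused, labels, cv=5)
print(f"5-fold accuracy on synthetic data: {scores.mean():.2f}")
```

Swapping a group-sparse CCA implementation in at the `cca` step would recover the grouped-feature structure the abstract emphasizes; everything else in the pipeline stays the same.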
{
"issue": {
"id": "1BhzoX5mYSY",
"title": "April",
"year": "2022",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "28",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1n2jl9RTLBm",
"doi": "10.1109/TVCG.2020.3023537",
"abstract": "Natural language and visualization are being increasingly deployed together for supporting data analysis in different ways, from multimodal interaction to enriched data summaries and insights. Yet, researchers still lack systematic knowledge on how viewers verbalize their interpretations of visualizations, and how they interpret verbalizations of visualizations in such contexts. We describe two studies aimed at identifying characteristics of data and charts that are relevant in such tasks. The first study asks participants to verbalize what they see in scatterplots that depict various levels of correlations. The second study then asks participants to choose visualizations that match a given verbal description of correlation. We extract key concepts from responses, organize them in a taxonomy and analyze the categorized responses. We observe that participants use a wide range of vocabulary across all scatterplots, but particular concepts are preferred for higher levels of correlation. A comparison between the studies reveals the ambiguity of some of the concepts. We discuss how the results could inform the design of multimodal representations aligned with the data and analytical tasks, and present a research roadmap to deepen the understanding about visualizations and natural language.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Natural language and visualization are being increasingly deployed together for supporting data analysis in different ways, from multimodal interaction to enriched data summaries and insights. Yet, researchers still lack systematic knowledge on how viewers verbalize their interpretations of visualizations, and how they interpret verbalizations of visualizations in such contexts. We describe two studies aimed at identifying characteristics of data and charts that are relevant in such tasks. The first study asks participants to verbalize what they see in scatterplots that depict various levels of correlations. The second study then asks participants to choose visualizations that match a given verbal description of correlation. We extract key concepts from responses, organize them in a taxonomy and analyze the categorized responses. We observe that participants use a wide range of vocabulary across all scatterplots, but particular concepts are preferred for higher levels of correlation. A comparison between the studies reveals the ambiguity of some of the concepts. We discuss how the results could inform the design of multimodal representations aligned with the data and analytical tasks, and present a research roadmap to deepen the understanding about visualizations and natural language.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Natural language and visualization are being increasingly deployed together for supporting data analysis in different ways, from multimodal interaction to enriched data summaries and insights. Yet, researchers still lack systematic knowledge on how viewers verbalize their interpretations of visualizations, and how they interpret verbalizations of visualizations in such contexts. We describe two studies aimed at identifying characteristics of data and charts that are relevant in such tasks. The first study asks participants to verbalize what they see in scatterplots that depict various levels of correlations. The second study then asks participants to choose visualizations that match a given verbal description of correlation. We extract key concepts from responses, organize them in a taxonomy and analyze the categorized responses. We observe that participants use a wide range of vocabulary across all scatterplots, but particular concepts are preferred for higher levels of correlation. A comparison between the studies reveals the ambiguity of some of the concepts. We discuss how the results could inform the design of multimodal representations aligned with the data and analytical tasks, and present a research roadmap to deepen the understanding about visualizations and natural language.",
"title": "Words of Estimative Correlation: Studying Verbalizations of Scatterplots",
"normalizedTitle": "Words of Estimative Correlation: Studying Verbalizations of Scatterplots",
"fno": "09195155",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Analysis",
"Data Mining",
"Data Visualisation",
"Natural Language Processing",
"Data Analysis",
"Multimodal Interaction",
"Data Summaries",
"Systematic Knowledge",
"Verbalizations",
"Scatterplots",
"Natural Language",
"Estimative Correlation",
"Data Visualization",
"Correlation",
"Task Analysis",
"Data Analysis",
"Taxonomy",
"Natural Language Processing",
"Information Visualization",
"Natural Language Generation",
"Natural Language Processing",
"Human Computer Interaction"
],
"authors": [
{
"givenName": "Rafael",
"surname": "Henkin",
"fullName": "Rafael Henkin",
"affiliation": "Centre for Translational Bioinformatics, Queen Mary, University of London, London, U.K.",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Cagatay",
"surname": "Turkay",
"fullName": "Cagatay Turkay",
"affiliation": "Centre for Interdisciplinary Methodologies, University of Warwick, Coventry, U.K.",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "2022-04-01 00:00:00",
"pubType": "trans",
"pages": "1967-1981",
"year": "2022",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/pacificvis/2010/6685/0/05429604",
"title": "A model of symbol lightness discrimination in sparse scatterplots",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2010/05429604/12OmNBSSVnf",
"parentPublication": {
"id": "proceedings/pacificvis/2010/6685/0",
"title": "2010 IEEE Pacific Visualization Symposium (PacificVis 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2014/4258/0/4258a080",
"title": "A Nested Hierarchy of Localized Scatterplots",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2014/4258a080/12OmNy7h3e0",
"parentPublication": {
"id": "proceedings/sibgrapi/2014/4258/0",
"title": "2014 27th SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/09/08047300",
"title": "Cluster-Based Visual Abstraction for Multivariate Scatterplots",
"doi": null,
"abstractUrl": "/journal/tg/2018/09/08047300/13rRUILLkvy",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/12/ttg2013122316",
"title": "Perception of Average Value in Multiclass Scatterplots",
"doi": null,
"abstractUrl": "/journal/tg/2013/12/ttg2013122316/13rRUxASuhA",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/01/08017602",
"title": "Scatterplots: Tasks, Data, and Designs",
"doi": null,
"abstractUrl": "/journal/tg/2018/01/08017602/13rRUy3gn7C",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/06/09695173",
"title": "Roslingifier: Semi-Automated Storytelling for Animated Scatterplots",
"doi": null,
"abstractUrl": "/journal/tg/2023/06/09695173/1AvqJqAJOKY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09904491",
"title": "Seeing What You Believe or Believing What You See? Belief Biases Correlation Estimation",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09904491/1H1gs8qCjdu",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2019/2838/0/283800a062",
"title": "A Technique for Selection and Drawing of Scatterplots for Multi-Dimensional Data Visualization",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2019/283800a062/1cMF8TTAeAw",
"parentPublication": {
"id": "proceedings/iv/2019/2838/0",
"title": "2019 23rd International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08794768",
"title": "Evaluating Perceptual Bias During Geometric Scaling of Scatterplots",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08794768/1cr2ZlCC2xG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/08/09023002",
"title": "Interweaving Multimodal Interaction With Flexible Unit Visualizations for Data Exploration",
"doi": null,
"abstractUrl": "/journal/tg/2021/08/09023002/1hTHRTEQgRG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09187994",
"articleId": "1mXkiNpxvvq",
"__typename": "AdjacentArticleType"
},
"next": null,
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
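The study summarized above hinges on scatterplots that depict controlled levels of correlation. The snippet below is a minimal sketch of how such stimuli can be generated with NumPy and Matplotlib; the correlation levels, sample size, and output filename are illustrative assumptions rather than the authors' study parameters.

```python
# Minimal sketch: generate scatterplot stimuli with specified Pearson correlations.
import numpy as np
import matplotlib.pyplot as plt

def correlated_sample(r, n=100, seed=0):
    """Draw n points from a bivariate normal with Pearson correlation r."""
    rng = np.random.default_rng(seed)
    cov = [[1.0, r], [r, 1.0]]
    return rng.multivariate_normal([0.0, 0.0], cov, size=n)

levels = [0.0, 0.3, 0.6, 0.9]          # assumed correlation levels
fig, axes = plt.subplots(1, len(levels), figsize=(12, 3))
for ax, r in zip(axes, levels):
    xy = correlated_sample(r, seed=int(r * 10))
    ax.scatter(xy[:, 0], xy[:, 1], s=8)
    ax.set_title(f"r = {r:.1f}")
    ax.set_xticks([]); ax.set_yticks([])
fig.tight_layout()
fig.savefig("scatterplot_stimuli.png", dpi=150)   # assumed output path
```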
{
"issue": {
"id": "12OmNxb5hpF",
"title": "Sept.",
"year": "2014",
"issueNum": "09",
"idPrefix": "tg",
"pubType": "journal",
"volume": "20",
"label": "Sept.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwvT9gu",
"doi": "10.1109/TVCG.2014.2327977",
"abstract": "In NURBS-based isogeometric analysis, the basis functions of a 3D model's geometric description also form the basis for the solution space of variational formulations of partial differential equations. In order to visualize the results of a NURBS-based isogeometric analysis, we developed a novel GPU-based multi-pass isosurface visualization technique which performs directly on an equivalent rational Bézier representation without the need for discretization or approximation. Our approach utilizes rasterization to generate a list of intervals along the ray that each potentially contain boundary or isosurface intersections. Depth-sorting this list for each ray allows us to proceed in front-to-back order and enables early ray termination. We detect multiple intersections of a ray with the higher-order surface of the model using a sampling-based root-isolation method. The model's surfaces and the isosurfaces always appear smooth, independent of the zoom level due to our pixel-precise processing scheme. Our adaptive sampling strategy minimizes costs for point evaluations and intersection computations. The implementation shows that the proposed approach interactively visualizes volume meshes containing hundreds of thousands of Bézier elements on current graphics hardware. A comparison to a GPU-based ray casting implementation using spatial data structures indicates that our approach generally performs significantly faster while being more accurate.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In NURBS-based isogeometric analysis, the basis functions of a 3D model's geometric description also form the basis for the solution space of variational formulations of partial differential equations. In order to visualize the results of a NURBS-based isogeometric analysis, we developed a novel GPU-based multi-pass isosurface visualization technique which performs directly on an equivalent rational Bézier representation without the need for discretization or approximation. Our approach utilizes rasterization to generate a list of intervals along the ray that each potentially contain boundary or isosurface intersections. Depth-sorting this list for each ray allows us to proceed in front-to-back order and enables early ray termination. We detect multiple intersections of a ray with the higher-order surface of the model using a sampling-based root-isolation method. The model's surfaces and the isosurfaces always appear smooth, independent of the zoom level due to our pixel-precise processing scheme. Our adaptive sampling strategy minimizes costs for point evaluations and intersection computations. The implementation shows that the proposed approach interactively visualizes volume meshes containing hundreds of thousands of Bézier elements on current graphics hardware. A comparison to a GPU-based ray casting implementation using spatial data structures indicates that our approach generally performs significantly faster while being more accurate.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In NURBS-based isogeometric analysis, the basis functions of a 3D model's geometric description also form the basis for the solution space of variational formulations of partial differential equations. In order to visualize the results of a NURBS-based isogeometric analysis, we developed a novel GPU-based multi-pass isosurface visualization technique which performs directly on an equivalent rational Bézier representation without the need for discretization or approximation. Our approach utilizes rasterization to generate a list of intervals along the ray that each potentially contain boundary or isosurface intersections. Depth-sorting this list for each ray allows us to proceed in front-to-back order and enables early ray termination. We detect multiple intersections of a ray with the higher-order surface of the model using a sampling-based root-isolation method. The model's surfaces and the isosurfaces always appear smooth, independent of the zoom level due to our pixel-precise processing scheme. Our adaptive sampling strategy minimizes costs for point evaluations and intersection computations. The implementation shows that the proposed approach interactively visualizes volume meshes containing hundreds of thousands of Bézier elements on current graphics hardware. A comparison to a GPU-based ray casting implementation using spatial data structures indicates that our approach generally performs significantly faster while being more accurate.",
"title": "Direct Isosurface Ray Casting of NURBS-Based Isogeometric Analysis",
"normalizedTitle": "Direct Isosurface Ray Casting of NURBS-Based Isogeometric Analysis",
"fno": "06846294",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Isosurfaces",
"Splines Mathematics",
"Surface Topography",
"Surface Reconstruction",
"Solid Modeling",
"Casting"
],
"authors": [
{
"givenName": "Andre",
"surname": "Schollmeyer",
"fullName": "Andre Schollmeyer",
"affiliation": "Virtual Reality Syst. Group, Bauhaus-Univ. Weimar, Weimar, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Bernd",
"surname": "Froehlich",
"fullName": "Bernd Froehlich",
"affiliation": "Virtual Reality Syst. Group, Bauhaus-Univ. Weimar, Weimar, Germany",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "09",
"pubDate": "2014-09-01 00:00:00",
"pubType": "trans",
"pages": "1227-1240",
"year": "2014",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/rt/2006/0693/0/04061557",
"title": "Ray Casting of Trimmed NURBS Surfaces on the GPU",
"doi": null,
"abstractUrl": "/proceedings-article/rt/2006/04061557/12OmNBNM8TN",
"parentPublication": {
"id": "proceedings/rt/2006/0693/0",
"title": "IEEE Symposium on Interactive Ray Tracing 2006",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cad-graphics/2013/2576/0/06815006",
"title": "Isogeometric Analysis Based on a Set of Truncated Interpolatory Basis Functions",
"doi": null,
"abstractUrl": "/proceedings-article/cad-graphics/2013/06815006/12OmNvAiSa4",
"parentPublication": {
"id": "proceedings/cad-graphics/2013/2576/0",
"title": "2013 International Conference on Computer-Aided Design and Computer Graphics (CAD/Graphics)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/maee/2013/4975/0/4975a080",
"title": "Research on a New Linear Interpolation Algorithm of NURBS Curve",
"doi": null,
"abstractUrl": "/proceedings-article/maee/2013/4975a080/12OmNxwWoum",
"parentPublication": {
"id": "proceedings/maee/2013/4975/0",
"title": "2013 International Conference on Mechanical and Automation Engineering (MAEE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csa/2015/9961/0/9961a213",
"title": "Isogeometric Analysis: The Influence of Penalty Coefficients in Boundary Condition Treatments",
"doi": null,
"abstractUrl": "/proceedings-article/csa/2015/9961a213/12OmNzyp5YU",
"parentPublication": {
"id": "proceedings/csa/2015/9961/0",
"title": "2015 International Conference on Computer Science and Applications (CSA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2008/03/ttg2008030603",
"title": "Interactive High-Resolution Isosurface Ray Casting on Multicore Processors",
"doi": null,
"abstractUrl": "/journal/tg/2008/03/ttg2008030603/13rRUEgs2LW",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/05/ttg2012050753",
"title": "Direct Isosurface Visualization of Hex-Based High-Order Geometry and Attribute Representations",
"doi": null,
"abstractUrl": "/journal/tg/2012/05/ttg2012050753/13rRUxly8XF",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/01/08493612",
"title": "CPU Isosurface Ray Tracing of Adaptive Mesh Refinement Data",
"doi": null,
"abstractUrl": "/journal/tg/2019/01/08493612/17D45Vw15vd",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cse-euc/2016/3593/0/07982321",
"title": "Data Structure for Supporting Patch Refinement in Adaptive Isogeometric Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/cse-euc/2016/07982321/17D45WLdYRa",
"parentPublication": {
"id": "proceedings/cse-euc/2016/3593/0",
"title": "2016 19th IEEE Intl Conference on Computational Science and Engineering (CSE), IEEE 14th Intl Conference on Embedded and Ubiquitous Computing (EUC), and 15th Intl Symposium on Distributed Computing and Applications for Business Engineering (DCABES)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icisce/2018/5500/0/550000a073",
"title": "A Note on the Convergence of NURBS Curves When Weights Approach Infinity",
"doi": null,
"abstractUrl": "/proceedings-article/icisce/2018/550000a073/17D45WXIkzJ",
"parentPublication": {
"id": "proceedings/icisce/2018/5500/0",
"title": "2018 5th International Conference on Information Science and Control Engineering (ICISCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmcce/2018/8481/0/848100a097",
"title": "Linear Motor Platform Contouring Control Based on NURBS Curve Interpolation",
"doi": null,
"abstractUrl": "/proceedings-article/icmcce/2018/848100a097/17D45XwUAKx",
"parentPublication": {
"id": "proceedings/icmcce/2018/8481/0",
"title": "2018 3rd International Conference on Mechanical, Control and Computer Engineering (ICMCCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "06784056",
"articleId": "13rRUx0xPIK",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06774478",
"articleId": "13rRUwbs2b5",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
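The abstract above mentions detecting multiple ray-surface intersections with a sampling-based root-isolation method. The sketch below illustrates that general idea in one dimension: sample the scalar field along the ray, bracket sign changes, and refine each bracket by bisection. The toy field, sample count, and tolerance are assumptions; the paper's actual pipeline operates on rational Bézier elements on the GPU.

```python
# Minimal 1D sketch of sampling-based root isolation along a ray parameter t.
import numpy as np

def isolate_roots(f, t0, t1, n_samples=64):
    """Return (a, b) intervals where f changes sign between uniform samples."""
    ts = np.linspace(t0, t1, n_samples)
    vals = f(ts)
    brackets = []
    for a, b, fa, fb in zip(ts[:-1], ts[1:], vals[:-1], vals[1:]):
        if fa == 0.0:
            brackets.append((a, a))
        elif fa * fb < 0.0:
            brackets.append((a, b))
    return brackets

def bisect(f, a, b, tol=1e-8):
    """Refine one bracketed root to tolerance tol."""
    fa = f(a)
    while b - a > tol:
        m = 0.5 * (a + b)
        fm = f(m)
        if fa * fm <= 0.0:
            b = m
        else:
            a, fa = m, fm
    return 0.5 * (a + b)

# Toy scalar field along a ray (stand-in for field(ray(t)) - isovalue).
field_along_ray = lambda t: np.sin(8.0 * t) - 0.25
roots = [bisect(field_along_ray, a, b)
         for a, b in isolate_roots(field_along_ray, 0.0, 1.0)]
print("isolated intersections at t =", [round(r, 4) for r in roots])
```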
{
"issue": {
"id": "12OmNC0PGNS",
"title": "Nov.-Dec.",
"year": "2018",
"issueNum": "06",
"idPrefix": "tb",
"pubType": "journal",
"volume": "15",
"label": "Nov.-Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "17D45WZZ7FE",
"doi": "10.1109/TCBB.2017.2688444",
"abstract": "Curve reconstruction from data points is an important issue for advanced medical imaging techniques, such as computer tomography (CT) and magnetic resonance imaging (MRI). The most powerful fitting functions for this purpose are the NURBS (non-uniform rational B-splines). Solving the general reconstruction problem with NURBS requires computing all free variables of the problem (data parameters, breakpoints, control points, and their weights). This leads to a very difficult non-convex, nonlinear, high-dimensional, multimodal, and continuous optimization problem. Previous methods simplify the problem by guessing the values for some variables and computing only the remaining ones. As a result, unavoidable approximations errors are introduced. In this paper, we describe the first method in the literature to solve the full NURBS curve reconstruction problem in all its generality. Our method is based on a combination of two techniques: an immunological approach to perform data parameterization, breakpoint placement, and weight calculation, and least squares minimization to compute the control points. This procedure is repeated iteratively (until no further improvement is achieved) for higher accuracy. The method has been applied to reconstruct some outline curves from MRI brain images with satisfactory results. Comparative work shows that our method outperforms the previous related approaches in the literature for all instances in our benchmark.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Curve reconstruction from data points is an important issue for advanced medical imaging techniques, such as computer tomography (CT) and magnetic resonance imaging (MRI). The most powerful fitting functions for this purpose are the NURBS (non-uniform rational B-splines). Solving the general reconstruction problem with NURBS requires computing all free variables of the problem (data parameters, breakpoints, control points, and their weights). This leads to a very difficult non-convex, nonlinear, high-dimensional, multimodal, and continuous optimization problem. Previous methods simplify the problem by guessing the values for some variables and computing only the remaining ones. As a result, unavoidable approximations errors are introduced. In this paper, we describe the first method in the literature to solve the full NURBS curve reconstruction problem in all its generality. Our method is based on a combination of two techniques: an immunological approach to perform data parameterization, breakpoint placement, and weight calculation, and least squares minimization to compute the control points. This procedure is repeated iteratively (until no further improvement is achieved) for higher accuracy. The method has been applied to reconstruct some outline curves from MRI brain images with satisfactory results. Comparative work shows that our method outperforms the previous related approaches in the literature for all instances in our benchmark.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Curve reconstruction from data points is an important issue for advanced medical imaging techniques, such as computer tomography (CT) and magnetic resonance imaging (MRI). The most powerful fitting functions for this purpose are the NURBS (non-uniform rational B-splines). Solving the general reconstruction problem with NURBS requires computing all free variables of the problem (data parameters, breakpoints, control points, and their weights). This leads to a very difficult non-convex, nonlinear, high-dimensional, multimodal, and continuous optimization problem. Previous methods simplify the problem by guessing the values for some variables and computing only the remaining ones. As a result, unavoidable approximations errors are introduced. In this paper, we describe the first method in the literature to solve the full NURBS curve reconstruction problem in all its generality. Our method is based on a combination of two techniques: an immunological approach to perform data parameterization, breakpoint placement, and weight calculation, and least squares minimization to compute the control points. This procedure is repeated iteratively (until no further improvement is achieved) for higher accuracy. The method has been applied to reconstruct some outline curves from MRI brain images with satisfactory results. Comparative work shows that our method outperforms the previous related approaches in the literature for all instances in our benchmark.",
"title": "Immunological Approach for Full NURBS Reconstruction of Outline Curves from Noisy Data Points in Medical Imaging",
"normalizedTitle": "Immunological Approach for Full NURBS Reconstruction of Outline Curves from Noisy Data Points in Medical Imaging",
"fno": "07888512",
"hasPdf": true,
"idPrefix": "tb",
"keywords": [
"Biomedical MRI",
"Brain",
"Curve Fitting",
"Image Reconstruction",
"Least Squares Approximations",
"Medical Image Processing",
"Splines Mathematics",
"NURBS Curve Reconstruction Problem",
"Immunological Approach",
"Data Parameterization",
"Breakpoint Placement",
"Weight Calculation",
"Outline Curves",
"MRI Brain Images",
"Full NURBS Reconstruction",
"Noisy Data Points",
"Magnetic Resonance Imaging",
"Nonuniform Rational B Splines",
"General Reconstruction Problem",
"Data Parameters",
"Medical Imaging Techniques",
"Fitting Functions",
"Least Squares Minimization",
"Surface Reconstruction",
"Splines Mathematics",
"Surface Topography",
"Image Reconstruction",
"Biomedical Imaging",
"Magnetic Resonance Imaging",
"Medical Imaging",
"Curve Reconstruction",
"NURBS",
"Artificial Immune Systems",
"Metaheuristics",
"Optimization"
],
"authors": [
{
"givenName": "Andrés",
"surname": "Iglesias",
"fullName": "Andrés Iglesias",
"affiliation": "Department of Information Science, Toho University, Funabashi, Japan",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Akemi",
"surname": "Gálvez",
"fullName": "Akemi Gálvez",
"affiliation": "Department of Information Science, Toho University, Funabashi, Japan",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Andreina",
"surname": "Avila",
"fullName": "Andreina Avila",
"affiliation": "Department of Applied Mathematics and Computational Sciences, University of Cantabria, Santander, Spain",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "06",
"pubDate": "2018-11-01 00:00:00",
"pubType": "trans",
"pages": "1929-1942",
"year": "2018",
"issn": "1545-5963",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icisce/2017/3013/0/3013a802",
"title": "A Method for Calculation of Hydrodynamic Coefficients Based on NURBS",
"doi": null,
"abstractUrl": "/proceedings-article/icisce/2017/3013a802/12OmNBzRNpF",
"parentPublication": {
"id": "proceedings/icisce/2017/3013/0",
"title": "2017 4th International Conference on Information Science and Control Engineering (ICISCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cso/2010/6812/1/05533005",
"title": "Representing Railway Wheel Profile Using Quadratic NURBS",
"doi": null,
"abstractUrl": "/proceedings-article/cso/2010/05533005/12OmNvStctH",
"parentPublication": {
"id": "proceedings/cso/2010/6812/1",
"title": "2010 Third International Joint Conference on Computational Science and Optimization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cad-graphics/2015/8020/0/07450396",
"title": "Parameter Estimation of Point Projection on NURBS Curves and Surfaces",
"doi": null,
"abstractUrl": "/proceedings-article/cad-graphics/2015/07450396/12OmNwwd2UN",
"parentPublication": {
"id": "proceedings/cad-graphics/2015/8020/0",
"title": "2015 14th International Conference on Computer-Aided Design and Computer Graphics (CAD/Graphics)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2004/2158/2/01315207",
"title": "Biventricular myocardial kinematics based on tagged MRI from anatomical NURBS models",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2004/01315207/12OmNxw5Bmb",
"parentPublication": {
"id": "proceedings/cvpr/2004/2158/2",
"title": "Proceedings of the 2004 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2004. CVPR 2004.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/maee/2013/4975/0/4975a080",
"title": "Research on a New Linear Interpolation Algorithm of NURBS Curve",
"doi": null,
"abstractUrl": "/proceedings-article/maee/2013/4975a080/12OmNxwWoum",
"parentPublication": {
"id": "proceedings/maee/2013/4975/0",
"title": "2013 International Conference on Mechanical and Automation Engineering (MAEE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/09/06846294",
"title": "Direct Isosurface Ray Casting of NURBS-Based Isogeometric Analysis",
"doi": null,
"abstractUrl": "/journal/tg/2014/09/06846294/13rRUwvT9gu",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/03/08314702",
"title": "Efficient and Anti-Aliased Trimming for Rendering Large NURBS Models",
"doi": null,
"abstractUrl": "/journal/tg/2019/03/08314702/17D45VUZMUW",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icisce/2018/5500/0/550000a073",
"title": "A Note on the Convergence of NURBS Curves When Weights Approach Infinity",
"doi": null,
"abstractUrl": "/proceedings-article/icisce/2018/550000a073/17D45WXIkzJ",
"parentPublication": {
"id": "proceedings/icisce/2018/5500/0",
"title": "2018 5th International Conference on Information Science and Control Engineering (ICISCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmcce/2018/8481/0/848100a097",
"title": "Linear Motor Platform Contouring Control Based on NURBS Curve Interpolation",
"doi": null,
"abstractUrl": "/proceedings-article/icmcce/2018/848100a097/17D45XwUAKx",
"parentPublication": {
"id": "proceedings/icmcce/2018/8481/0",
"title": "2018 3rd International Conference on Mechanical, Control and Computer Engineering (ICMCCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wcmeim/2019/5045/0/504500a645",
"title": "Research on Adaptive Feedrate Planning of NURBS Curves for CNC System",
"doi": null,
"abstractUrl": "/proceedings-article/wcmeim/2019/504500a645/1hHLnPQuEG4",
"parentPublication": {
"id": "proceedings/wcmeim/2019/5045/0",
"title": "2019 2nd World Conference on Mechanical Engineering and Intelligent Manufacturing (WCMEIM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "07782378",
"articleId": "17D45WwsQ5w",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07878613",
"articleId": "17D45XERmlP",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
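In the method summarized above, an immunological algorithm searches for the data parameters, breakpoints (knots), and weights, while the control points are obtained by least squares. The sketch below shows only that least-squares sub-step under the assumption that parameters, knots, and weights are already fixed; the degree, knot vector, weights, and test curve are illustrative choices, not the paper's.

```python
# Minimal sketch: least-squares control points of a rational B-spline curve,
# with parameters, knots, and weights assumed fixed (the part the paper's
# immunological algorithm actually searches for).
import numpy as np

def bspline_basis(i, p, t, knots):
    """Cox-de Boor recursion: value of basis function N_{i,p} at parameter t."""
    if p == 0:
        return 1.0 if knots[i] <= t < knots[i + 1] else 0.0
    left = right = 0.0
    d1 = knots[i + p] - knots[i]
    if d1 > 0:
        left = (t - knots[i]) / d1 * bspline_basis(i, p - 1, t, knots)
    d2 = knots[i + p + 1] - knots[i + 1]
    if d2 > 0:
        right = (knots[i + p + 1] - t) / d2 * bspline_basis(i + 1, p - 1, t, knots)
    return left + right

def fit_control_points(points, params, knots, weights, degree):
    """Solve min ||B P - points||^2 for the control points P of a NURBS curve."""
    n_ctrl = len(knots) - degree - 1
    B = np.zeros((len(points), n_ctrl))
    for r, t in enumerate(params):
        basis = np.array([bspline_basis(i, degree, t, knots) for i in range(n_ctrl)])
        rational = basis * weights
        B[r] = rational / rational.sum()          # rational (NURBS) basis row
    ctrl, *_ = np.linalg.lstsq(B, points, rcond=None)
    return ctrl

# Noisy samples of a quarter circle as the data points to reconstruct.
rng = np.random.default_rng(1)
params = np.linspace(0.0, 1.0 - 1e-9, 40)          # assumed parameterization
data = np.c_[np.cos(params * np.pi / 2), np.sin(params * np.pi / 2)]
data += rng.normal(scale=0.01, size=data.shape)

degree = 3
knots = np.array([0, 0, 0, 0, 0.5, 1, 1, 1, 1], dtype=float)   # one interior breakpoint
weights = np.ones(len(knots) - degree - 1)                     # uniform weights for the sketch
ctrl = fit_control_points(data, params, knots, weights, degree)
print("fitted control points:\n", np.round(ctrl, 3))
```

In the full problem the outer search proposes new parameters, knots, and weights, re-runs this linear solve, and keeps the candidate with the lowest fitting error.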
{
"issue": {
"id": "12OmNBl6EK8",
"title": "Nov.-Dec.",
"year": "2011",
"issueNum": "06",
"idPrefix": "so",
"pubType": "magazine",
"volume": "28",
"label": "Nov.-Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwghd7n",
"doi": "10.1109/MS.2011.114",
"abstract": "Coupled climate models exhibit scientific, numerical, and architectural variability. This variability introduces requirements that give rise to complexity. However, techniques exist that can tame this complexity; one such technique is feature analysis. As climate model fidelity and complexity increase, the climate-modeling community should adopt a systematic way to deal with software variability.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Coupled climate models exhibit scientific, numerical, and architectural variability. This variability introduces requirements that give rise to complexity. However, techniques exist that can tame this complexity; one such technique is feature analysis. As climate model fidelity and complexity increase, the climate-modeling community should adopt a systematic way to deal with software variability.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Coupled climate models exhibit scientific, numerical, and architectural variability. This variability introduces requirements that give rise to complexity. However, techniques exist that can tame this complexity; one such technique is feature analysis. As climate model fidelity and complexity increase, the climate-modeling community should adopt a systematic way to deal with software variability.",
"title": "Managing Software Complexity and Variability in Coupled Climate Models",
"normalizedTitle": "Managing Software Complexity and Variability in Coupled Climate Models",
"fno": "mso2011060043",
"hasPdf": true,
"idPrefix": "so",
"keywords": [
"Climate Modeling",
"Earth And Atmospheric Sciences",
"Automatic Programming",
"Domain Engineering",
"Reusable Software",
"Software Engineering"
],
"authors": [
{
"givenName": "Spencer",
"surname": "Rugaber",
"fullName": "Spencer Rugaber",
"affiliation": "Georgia Institute of Technology",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Rocky",
"surname": "Dunlap",
"fullName": "Rocky Dunlap",
"affiliation": "Georgia Institute of Technology",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Leo",
"surname": "Mark",
"fullName": "Leo Mark",
"affiliation": "Georgia Institute of Technology",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Sameer",
"surname": "Ansari",
"fullName": "Sameer Ansari",
"affiliation": "Georgia Institute of Technology",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "06",
"pubDate": "2011-11-01 00:00:00",
"pubType": "mags",
"pages": "43-48",
"year": "2011",
"issn": "0740-7459",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/sccompanion/2012/4956/0/4956a483",
"title": "Exploratory Climate Data Visualization and Analysis Using DV3D and UVCDAT",
"doi": null,
"abstractUrl": "/proceedings-article/sccompanion/2012/4956a483/12OmNwtn3BU",
"parentPublication": {
"id": "proceedings/sccompanion/2012/4956/0",
"title": "2012 SC Companion: High Performance Computing, Networking Storage and Analysis",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2009/06/ttg2009061375",
"title": "Visual Exploration of Climate Variability Changes Using Wavelet Analysis",
"doi": null,
"abstractUrl": "/journal/tg/2009/06/ttg2009061375/13rRUB6Sq0v",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/so/2011/06/mso2011060036",
"title": "Clear Climate Code: Rewriting Legacy Science Software for Clarity",
"doi": null,
"abstractUrl": "/magazine/so/2011/06/mso2011060036/13rRUwbs2eL",
"parentPublication": {
"id": "mags/so",
"title": "IEEE Software",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cs/2015/06/mcs2015060009",
"title": "Climate Computing: The State of Play",
"doi": null,
"abstractUrl": "/magazine/cs/2015/06/mcs2015060009/13rRUyZaxu4",
"parentPublication": {
"id": "mags/cs",
"title": "Computing in Science & Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cs/2011/05/mcs2011050036",
"title": "Climate Change Modeling: Computational Opportunities and Challenges",
"doi": null,
"abstractUrl": "/magazine/cs/2011/05/mcs2011050036/13rRUyoPSSG",
"parentPublication": {
"id": "mags/cs",
"title": "Computing in Science & Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/03/08305502",
"title": "Exploring Variability within Ensembles of Decadal Climate Predictions",
"doi": null,
"abstractUrl": "/journal/tg/2019/03/08305502/17D45WWzW55",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdmw/2018/9288/0/928800a758",
"title": "Extreme Values from Spatiotemporal Chaos: Precipitation Extremes and Climate Variability",
"doi": null,
"abstractUrl": "/proceedings-article/icdmw/2018/928800a758/18jXFBH7KI8",
"parentPublication": {
"id": "proceedings/icdmw/2018/9288/0",
"title": "2018 IEEE International Conference on Data Mining Workshops (ICDMW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bigcomp/2023/7578/0/757800a013",
"title": "Self-supervised learning for climate downscaling",
"doi": null,
"abstractUrl": "/proceedings-article/bigcomp/2023/757800a013/1LFLH6rsV7q",
"parentPublication": {
"id": "proceedings/bigcomp/2023/7578/0",
"title": "2023 IEEE International Conference on Big Data and Smart Computing (BigComp)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/visap/2019/5027/0/08900829",
"title": "Data Manifestation: Merging the Human World & Global Climate Change",
"doi": null,
"abstractUrl": "/proceedings-article/visap/2019/08900829/1eXazcVttni",
"parentPublication": {
"id": "proceedings/visap/2019/5027/0",
"title": "2019 IEEE VIS Arts Program (VISAP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cs/2021/03/09384262",
"title": "Bringing the Future Into Focus: Benefits and Challenges of High-Resolution Global Climate Change Simulations",
"doi": null,
"abstractUrl": "/magazine/cs/2021/03/09384262/1scDqsJ2diM",
"parentPublication": {
"id": "mags/cs",
"title": "Computing in Science & Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "mso2011060036",
"articleId": "13rRUwbs2eL",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "mso2011060049",
"articleId": "13rRUy0HYPl",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNzaQoMg",
"title": "Sept.-Oct.",
"year": "2015",
"issueNum": "05",
"idPrefix": "cs",
"pubType": "magazine",
"volume": "17",
"label": "Sept.-Oct.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUy0ZzWh",
"doi": "10.1109/MCSE.2015.101",
"abstract": "Developing the capability to project how climate change will affect a particular region--any region--on Earth is a high-performance computing problem with enormous economic and health impacts. Techniques to resolve regional-scale features in global climate models are being developed and validated on leadership computing systems. In time, these regional-scale-resolving global climate models will arm local and state decision makers with the information needed to prepare for both short- and long-term impacts of climate change.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Developing the capability to project how climate change will affect a particular region--any region--on Earth is a high-performance computing problem with enormous economic and health impacts. Techniques to resolve regional-scale features in global climate models are being developed and validated on leadership computing systems. In time, these regional-scale-resolving global climate models will arm local and state decision makers with the information needed to prepare for both short- and long-term impacts of climate change.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Developing the capability to project how climate change will affect a particular region--any region--on Earth is a high-performance computing problem with enormous economic and health impacts. Techniques to resolve regional-scale features in global climate models are being developed and validated on leadership computing systems. In time, these regional-scale-resolving global climate models will arm local and state decision makers with the information needed to prepare for both short- and long-term impacts of climate change.",
"title": "Putting Regional Climate Prediction in Reach",
"normalizedTitle": "Putting Regional Climate Prediction in Reach",
"fno": "mcs2015050049",
"hasPdf": true,
"idPrefix": "cs",
"keywords": [
"Climatology",
"Decision Making",
"Environmental Science Computing",
"Geophysics Computing",
"Parallel Processing",
"Regional Climate Prediction",
"Climate Change",
"Earth",
"High Performance Computing Problem",
"Health Impacts",
"Regional Scale Features",
"Leadership Computing Systems",
"Regional Scale Resolving Global Climate Models",
"State Decision Makers",
"Local Decision Makers",
"Meteorology",
"Computational Modeling",
"Atmospheric Modeling",
"Predictive Models",
"Spatial Resolution",
"Data Models",
"Numerical Models",
"Climate Change",
"High Performance Computing",
"HPC",
"Supercomputers",
"Leadership",
"Climate Models",
"Scientific Computing"
],
"authors": [
{
"givenName": "Laura",
"surname": "Wolf",
"fullName": "Laura Wolf",
"affiliation": "Argonne National Laboratory",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jim",
"surname": "Collins",
"fullName": "Jim Collins",
"affiliation": "Argonne National Laboratory",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "05",
"pubDate": "2015-09-01 00:00:00",
"pubType": "mags",
"pages": "49-51",
"year": "2015",
"issn": "1521-9615",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iciii/2010/4279/1/4279a209",
"title": "Carbon Management Strategy of Tourism in Response to Climate Change",
"doi": null,
"abstractUrl": "/proceedings-article/iciii/2010/4279a209/12OmNxETalL",
"parentPublication": {
"id": "proceedings/iciii/2010/4279/1",
"title": "International Conference on Information Management, Innovation Management and Industrial Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sustainit/2013/56/0/06685204",
"title": "A greenhouse gas accounting tool for regional and municipal climate change management",
"doi": null,
"abstractUrl": "/proceedings-article/sustainit/2013/06685204/12OmNz2C1zU",
"parentPublication": {
"id": "proceedings/sustainit/2013/56/0",
"title": "2013 Sustainable Internet and ICT for Sustainability (SustainIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ettandgrs/2008/3563/1/3563a289",
"title": "Numerical Simulations of the Effect of Organic Carbon on Regional Climate in East Asia",
"doi": null,
"abstractUrl": "/proceedings-article/ettandgrs/2008/3563a289/12OmNzBwGvh",
"parentPublication": {
"id": "proceedings/ettandgrs/2008/3563/1",
"title": "Education Technology and Training & Geoscience and Remote Sensing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cs/2013/05/mcs2013050032",
"title": "Climate Informatics: Accelerating Discovering in Climate Science with Machine Learning",
"doi": null,
"abstractUrl": "/magazine/cs/2013/05/mcs2013050032/13rRUy2YLOR",
"parentPublication": {
"id": "mags/cs",
"title": "Computing in Science & Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cs/2011/05/mcs2011050036",
"title": "Climate Change Modeling: Computational Opportunities and Challenges",
"doi": null,
"abstractUrl": "/magazine/cs/2011/05/mcs2011050036/13rRUyoPSSG",
"parentPublication": {
"id": "mags/cs",
"title": "Computing in Science & Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798076",
"title": "Climate Change on Your Plate: A VR Seafood Buffet Experience",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798076/1cJ11aCNS7u",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdmw/2019/4896/0/489600a252",
"title": "Climate Change Perception in Scientific and Public Sphere",
"doi": null,
"abstractUrl": "/proceedings-article/icdmw/2019/489600a252/1gAwRMf6b3q",
"parentPublication": {
"id": "proceedings/icdmw/2019/4896/0",
"title": "2019 International Conference on Data Mining Workshops (ICDMW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdmw/2019/4896/0/489600a296",
"title": "Climate Data Analytics Applied to Sugar Cane Crop in the French West Indies",
"doi": null,
"abstractUrl": "/proceedings-article/icdmw/2019/489600a296/1gAx0WpNpm0",
"parentPublication": {
"id": "proceedings/icdmw/2019/4896/0",
"title": "2019 International Conference on Data Mining Workshops (ICDMW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cs/2021/03/09384262",
"title": "Bringing the Future Into Focus: Benefits and Challenges of High-Resolution Global Climate Change Simulations",
"doi": null,
"abstractUrl": "/magazine/cs/2021/03/09384262/1scDqsJ2diM",
"parentPublication": {
"id": "mags/cs",
"title": "Computing in Science & Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hipc/2020/2292/0/229200z019",
"title": "Computing and Data Challenges in Climate Change",
"doi": null,
"abstractUrl": "/proceedings-article/hipc/2020/229200z019/1taEYrNTNGo",
"parentPublication": {
"id": "proceedings/hipc/2020/2292/0",
"title": "2020 IEEE 27th International Conference on High Performance Computing, Data, and Analytics (HiPC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "mcs2015050044",
"articleId": "13rRUygT7BH",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "mcs2015050052",
"articleId": "13rRUxjyXej",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNzZ5oaw",
"title": "Nov.-Dec.",
"year": "2015",
"issueNum": "06",
"idPrefix": "cs",
"pubType": "magazine",
"volume": "17",
"label": "Nov.-Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUyYBlcf",
"doi": "10.1109/MCSE.2015.128",
"abstract": "Understanding changes in climate extremes is an urgent challenge. Topic modeling techniques from natural language processing can help scientists learn climate patterns from data. The authors' work extracts global climate patterns from multivariate climate data, modeling relations between variables via latent topics and discovering the probability of each climate topic appearing at different geographical locations.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Understanding changes in climate extremes is an urgent challenge. Topic modeling techniques from natural language processing can help scientists learn climate patterns from data. The authors' work extracts global climate patterns from multivariate climate data, modeling relations between variables via latent topics and discovering the probability of each climate topic appearing at different geographical locations.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Understanding changes in climate extremes is an urgent challenge. Topic modeling techniques from natural language processing can help scientists learn climate patterns from data. The authors' work extracts global climate patterns from multivariate climate data, modeling relations between variables via latent topics and discovering the probability of each climate topic appearing at different geographical locations.",
"title": "Can Topic Modeling Shed Light on Climate Extremes?",
"normalizedTitle": "Can Topic Modeling Shed Light on Climate Extremes?",
"fno": "mcs2015060043",
"hasPdf": true,
"idPrefix": "cs",
"keywords": [
"Meteorology",
"Hidden Markov Models",
"Data Models",
"Tensile Stress",
"Computational Modeling",
"Atmospheric Modeling",
"Scientific Computing",
"Climate Informatics",
"Climate Extremes",
"Machine Learning",
"Unsupervised Learning",
"Topic Models",
"Latent Dirichlet Allocation"
],
"authors": [
{
"givenName": "Cheng",
"surname": "Tang",
"fullName": "Cheng Tang",
"affiliation": "George Washington University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Claire",
"surname": "Monteleoni",
"fullName": "Claire Monteleoni",
"affiliation": "George Washington University",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "06",
"pubDate": "2015-11-01 00:00:00",
"pubType": "mags",
"pages": "43-52",
"year": "2015",
"issn": "1521-9615",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/e-science/2015/9325/0/9325a108",
"title": "From HPC Performance to Climate Modeling: Transforming Methods for HPC Predictions into Models of Extreme Climate Conditions",
"doi": null,
"abstractUrl": "/proceedings-article/e-science/2015/9325a108/12OmNB06l60",
"parentPublication": {
"id": "proceedings/e-science/2015/9325/0",
"title": "2015 IEEE 11th International Conference on e-Science (e-Science)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdmw/2017/3800/0/3800a303",
"title": "Deriving Data-Driven Insights from Climate Extreme Indices for the Continental US",
"doi": null,
"abstractUrl": "/proceedings-article/icdmw/2017/3800a303/12OmNqyUUyR",
"parentPublication": {
"id": "proceedings/icdmw/2017/3800/0",
"title": "2017 IEEE International Conference on Data Mining Workshops (ICDMW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icsiit/2017/9899/0/9899a359",
"title": "On Estimation and Prediction of Simple Model and Spatial Hierarchical Model for Temperature Extremes",
"doi": null,
"abstractUrl": "/proceedings-article/icsiit/2017/9899a359/12OmNx57HGx",
"parentPublication": {
"id": "proceedings/icsiit/2017/9899/0",
"title": "2017 International Conference on Soft Computing, Intelligent System and Information Technology (ICSIIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdmw/2013/3142/0/3143b020",
"title": "Mining Semantic Time Period Similarity in Spatio-Temporal Climate Data",
"doi": null,
"abstractUrl": "/proceedings-article/icdmw/2013/3143b020/12OmNyoSbet",
"parentPublication": {
"id": "proceedings/icdmw/2013/3142/0",
"title": "2013 IEEE 13th International Conference on Data Mining Workshops (ICDMW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2019/05/08338424",
"title": "Anchor-Free Correlated Topic Modeling",
"doi": null,
"abstractUrl": "/journal/tp/2019/05/08338424/13rRUwjXZTo",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cs/2015/06/mcs2015060019",
"title": "Scalable Multivariate Time-Series Models for Climate Informatics",
"doi": null,
"abstractUrl": "/magazine/cs/2015/06/mcs2015060019/13rRUxD9h19",
"parentPublication": {
"id": "mags/cs",
"title": "Computing in Science & Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2017/2715/0/08258063",
"title": "Discovering scientific influence using cross-domain dynamic topic modeling",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2017/08258063/17D45WgziNs",
"parentPublication": {
"id": "proceedings/big-data/2017/2715/0",
"title": "2017 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdmw/2018/9288/0/928800a758",
"title": "Extreme Values from Spatiotemporal Chaos: Precipitation Extremes and Climate Variability",
"doi": null,
"abstractUrl": "/proceedings-article/icdmw/2018/928800a758/18jXFBH7KI8",
"parentPublication": {
"id": "proceedings/icdmw/2018/9288/0",
"title": "2018 IEEE International Conference on Data Mining Workshops (ICDMW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdmw/2019/4896/0/489600a303",
"title": "Evaluating Carbon Extremes in a Coupled Climate-Carbon Cycle Simulation",
"doi": null,
"abstractUrl": "/proceedings-article/icdmw/2019/489600a303/1gAwXq9oMgg",
"parentPublication": {
"id": "proceedings/icdmw/2019/4896/0",
"title": "2019 International Conference on Data Mining Workshops (ICDMW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cs/2021/03/09384262",
"title": "Bringing the Future Into Focus: Benefits and Challenges of High-Resolution Global Climate Change Simulations",
"doi": null,
"abstractUrl": "/magazine/cs/2021/03/09384262/1scDqsJ2diM",
"parentPublication": {
"id": "mags/cs",
"title": "Computing in Science & Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "mcs2015060035",
"articleId": "13rRUwhpBJG",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "mcs2015060053",
"articleId": "13rRUILLkHk",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNzZ5oaw",
"title": "Nov.-Dec.",
"year": "2015",
"issueNum": "06",
"idPrefix": "cs",
"pubType": "magazine",
"volume": "17",
"label": "Nov.-Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUyZaxu4",
"doi": "10.1109/MCSE.2015.109",
"abstract": "Climate models represent a large variety of processes on different time and space scalesâa canonical example of multiphysics, multiscale modeling. In addition, the system is physically characterized by sensitive dependence on initial conditions and natural stochastic variability, with very long integrations needed to extract signals of climate change. Weak scaling, I/O, and memory-bound multiphysics codes present particular challenges to computational performance. The author presents trends in climate science that are driving models toward higher resolution, greater complexity, and larger ensembles, all of which present computing challenges. He also discusses the prospects for adapting these models to novel hardware and programming models.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Climate models represent a large variety of processes on different time and space scalesâa canonical example of multiphysics, multiscale modeling. In addition, the system is physically characterized by sensitive dependence on initial conditions and natural stochastic variability, with very long integrations needed to extract signals of climate change. Weak scaling, I/O, and memory-bound multiphysics codes present particular challenges to computational performance. The author presents trends in climate science that are driving models toward higher resolution, greater complexity, and larger ensembles, all of which present computing challenges. He also discusses the prospects for adapting these models to novel hardware and programming models.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Climate models represent a large variety of processes on different time and space scalesâa canonical example of multiphysics, multiscale modeling. In addition, the system is physically characterized by sensitive dependence on initial conditions and natural stochastic variability, with very long integrations needed to extract signals of climate change. Weak scaling, I/O, and memory-bound multiphysics codes present particular challenges to computational performance. The author presents trends in climate science that are driving models toward higher resolution, greater complexity, and larger ensembles, all of which present computing challenges. He also discusses the prospects for adapting these models to novel hardware and programming models.",
"title": "Climate Computing: The State of Play",
"normalizedTitle": "Climate Computing: The State of Play",
"fno": "mcs2015060009",
"hasPdf": true,
"idPrefix": "cs",
"keywords": [
"Climatology",
"Geophysics Computing",
"Climate Computing",
"Climate Science",
"Climate Models",
"Meteorology",
"Computational Modeling",
"Atmospheric Modeling",
"Biological System Modeling",
"Uncertainty",
"Data Models",
"Earth",
"Climate Change",
"Earth System Science",
"Climate Science",
"High Performance Computing",
"HPC",
"Big Data",
"Scientific Computing"
],
"authors": [
{
"givenName": "V.",
"surname": "Balaji",
"fullName": "V. Balaji",
"affiliation": "Cooperative Institute for Climate Science, Princeton University",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "06",
"pubDate": "2015-11-01 00:00:00",
"pubType": "mags",
"pages": "9-13",
"year": "2015",
"issn": "1521-9615",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/arith/2017/1965/0/1965a122",
"title": "Large Scale Numerical Simulations of the Climate",
"doi": null,
"abstractUrl": "/proceedings-article/arith/2017/1965a122/12OmNAkWvoH",
"parentPublication": {
"id": "proceedings/arith/2017/1965/0",
"title": "2017 IEEE 24th Symposium on Computer Arithmetic (ARITH)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/so/2012/05/mso2012050073",
"title": "Sharing Satellite Observations with the Climate-Modeling Community: Software and Architecture",
"doi": null,
"abstractUrl": "/magazine/so/2012/05/mso2012050073/13rRUNvgz82",
"parentPublication": {
"id": "mags/so",
"title": "IEEE Software",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cs/2015/05/mcs2015050049",
"title": "Putting Regional Climate Prediction in Reach",
"doi": null,
"abstractUrl": "/magazine/cs/2015/05/mcs2015050049/13rRUy0ZzWh",
"parentPublication": {
"id": "mags/cs",
"title": "Computing in Science & Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cs/2013/05/mcs2013050032",
"title": "Climate Informatics: Accelerating Discovering in Climate Science with Machine Learning",
"doi": null,
"abstractUrl": "/magazine/cs/2013/05/mcs2013050032/13rRUy2YLOR",
"parentPublication": {
"id": "mags/cs",
"title": "Computing in Science & Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cs/2011/05/mcs2011050036",
"title": "Climate Change Modeling: Computational Opportunities and Challenges",
"doi": null,
"abstractUrl": "/magazine/cs/2011/05/mcs2011050036/13rRUyoPSSG",
"parentPublication": {
"id": "mags/cs",
"title": "Computing in Science & Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bigcomp/2023/7578/0/757800a013",
"title": "Self-supervised learning for climate downscaling",
"doi": null,
"abstractUrl": "/proceedings-article/bigcomp/2023/757800a013/1LFLH6rsV7q",
"parentPublication": {
"id": "proceedings/bigcomp/2023/7578/0",
"title": "2023 IEEE International Conference on Big Data and Smart Computing (BigComp)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdmw/2019/4896/0/489600a252",
"title": "Climate Change Perception in Scientific and Public Sphere",
"doi": null,
"abstractUrl": "/proceedings-article/icdmw/2019/489600a252/1gAwRMf6b3q",
"parentPublication": {
"id": "proceedings/icdmw/2019/4896/0",
"title": "2019 International Conference on Data Mining Workshops (ICDMW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2021/01/09281098",
"title": "Dynamic 3-D Visualization of Climate Model Development and Results",
"doi": null,
"abstractUrl": "/magazine/cg/2021/01/09281098/1phO0N1Fhte",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cs/2021/03/09384262",
"title": "Bringing the Future Into Focus: Benefits and Challenges of High-Resolution Global Climate Change Simulations",
"doi": null,
"abstractUrl": "/magazine/cs/2021/03/09384262/1scDqsJ2diM",
"parentPublication": {
"id": "mags/cs",
"title": "Computing in Science & Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hipc/2020/2292/0/229200z019",
"title": "Computing and Data Challenges in Climate Change",
"doi": null,
"abstractUrl": "/proceedings-article/hipc/2020/229200z019/1taEYrNTNGo",
"parentPublication": {
"id": "proceedings/hipc/2020/2292/0",
"title": "2020 IEEE 27th International Conference on High Performance Computing, Data, and Analytics (HiPC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "mcs2015060006",
"articleId": "13rRUxYIMZ4",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "mcs2015060014",
"articleId": "13rRUxly91i",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "1qnQQML0BmE",
"title": "Jan.-Feb.",
"year": "2021",
"issueNum": "01",
"idPrefix": "cg",
"pubType": "magazine",
"volume": "41",
"label": "Jan.-Feb.",
"downloadables": {
"hasCover": true,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1phO0N1Fhte",
"doi": "10.1109/MCG.2020.3042587",
"abstract": "Climate models play a significant role in the understanding of climate change, and the effective presentation and interpretation of their results is important for both the scientific community and the general public. In the case of the latter audience—which has become increasingly concerned with the implications of climate change for society—there is a requirement for visualizations which are compelling and engaging. We describe the use of ParaView, a well-established visualization application, to produce images and animations of results from a large set of modeling experiments, and their use in the promulgation of climate research results. Visualization can also make useful contributions to development, particularly for complex large-scale applications such as climate models. We present early results from the construction of a next-generation climate model which has been designed for use on exascale compute platforms, and show how visualization has helped in the development process, particularly with regard to higher model resolutions and novel data representations.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Climate models play a significant role in the understanding of climate change, and the effective presentation and interpretation of their results is important for both the scientific community and the general public. In the case of the latter audience—which has become increasingly concerned with the implications of climate change for society—there is a requirement for visualizations which are compelling and engaging. We describe the use of ParaView, a well-established visualization application, to produce images and animations of results from a large set of modeling experiments, and their use in the promulgation of climate research results. Visualization can also make useful contributions to development, particularly for complex large-scale applications such as climate models. We present early results from the construction of a next-generation climate model which has been designed for use on exascale compute platforms, and show how visualization has helped in the development process, particularly with regard to higher model resolutions and novel data representations.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Climate models play a significant role in the understanding of climate change, and the effective presentation and interpretation of their results is important for both the scientific community and the general public. In the case of the latter audience—which has become increasingly concerned with the implications of climate change for society—there is a requirement for visualizations which are compelling and engaging. We describe the use of ParaView, a well-established visualization application, to produce images and animations of results from a large set of modeling experiments, and their use in the promulgation of climate research results. Visualization can also make useful contributions to development, particularly for complex large-scale applications such as climate models. We present early results from the construction of a next-generation climate model which has been designed for use on exascale compute platforms, and show how visualization has helped in the development process, particularly with regard to higher model resolutions and novel data representations.",
"title": "Dynamic 3-D Visualization of Climate Model Development and Results",
"normalizedTitle": "Dynamic 3-D Visualization of Climate Model Development and Results",
"fno": "09281098",
"hasPdf": true,
"idPrefix": "cg",
"keywords": [
"Climatology",
"Data Visualisation",
"Geophysics Computing",
"Parallel Processing",
"Modeling Experiments",
"Climate Research Results",
"Next Generation Climate Model",
"Higher Model Resolutions",
"Novel Data Representations",
"Climate Model Development",
"Climate Change",
"Scientific Community",
"Visualization Application",
"Para View",
"Meteorology",
"Computational Modeling",
"Data Models",
"Atmospheric Modeling",
"Data Visualization",
"Geoscience",
"Climate Change"
],
"authors": [
{
"givenName": "Jeremy",
"surname": "Walton",
"fullName": "Jeremy Walton",
"affiliation": "Met Office Hadley Centre for Climate Science and Services, Exeter, U.K.",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Samantha",
"surname": "Adams",
"fullName": "Samantha Adams",
"affiliation": "Met Office Informatics Lab, Exeter, U.K.",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Wolfgang",
"surname": "Hayek",
"fullName": "Wolfgang Hayek",
"affiliation": "National Institute of Water and Atmospheric Research, Wellington, New Zealand",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Piotr",
"surname": "Florek",
"fullName": "Piotr Florek",
"affiliation": "Met Office Hadley Centre for Climate Science and Services, Exeter, U.K.",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Harold",
"surname": "Dyson",
"fullName": "Harold Dyson",
"affiliation": "Met Office Hadley Centre for Climate Science and Services, Exeter, U.K.",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2021-01-01 00:00:00",
"pubType": "mags",
"pages": "17-25",
"year": "2021",
"issn": "0272-1716",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iciii/2010/4279/1/4279a209",
"title": "Carbon Management Strategy of Tourism in Response to Climate Change",
"doi": null,
"abstractUrl": "/proceedings-article/iciii/2010/4279a209/12OmNxETalL",
"parentPublication": {
"id": "proceedings/iciii/2010/4279/1",
"title": "International Conference on Information Management, Innovation Management and Industrial Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2017/02/mcg2017020054",
"title": "VisAdapt: A Visualization Tool to Support Climate Change Adaptation",
"doi": null,
"abstractUrl": "/magazine/cg/2017/02/mcg2017020054/13rRUIJcWg1",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cs/2013/05/mcs2013050032",
"title": "Climate Informatics: Accelerating Discovering in Climate Science with Machine Learning",
"doi": null,
"abstractUrl": "/magazine/cs/2013/05/mcs2013050032/13rRUy2YLOR",
"parentPublication": {
"id": "mags/cs",
"title": "Computing in Science & Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cs/2011/05/mcs2011050036",
"title": "Climate Change Modeling: Computational Opportunities and Challenges",
"doi": null,
"abstractUrl": "/magazine/cs/2011/05/mcs2011050036/13rRUyoPSSG",
"parentPublication": {
"id": "mags/cs",
"title": "Computing in Science & Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdmw/2019/4896/0/489600a252",
"title": "Climate Change Perception in Scientific and Public Sphere",
"doi": null,
"abstractUrl": "/proceedings-article/icdmw/2019/489600a252/1gAwRMf6b3q",
"parentPublication": {
"id": "proceedings/icdmw/2019/4896/0",
"title": "2019 International Conference on Data Mining Workshops (ICDMW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdmw/2019/4896/0/489600a296",
"title": "Climate Data Analytics Applied to Sugar Cane Crop in the French West Indies",
"doi": null,
"abstractUrl": "/proceedings-article/icdmw/2019/489600a296/1gAx0WpNpm0",
"parentPublication": {
"id": "proceedings/icdmw/2019/4896/0",
"title": "2019 International Conference on Data Mining Workshops (ICDMW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2021/01/09325141",
"title": "Visualization of Climate Change",
"doi": null,
"abstractUrl": "/magazine/cg/2021/01/09325141/1qnQSeB3gME",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2021/01/09325132",
"title": "Visualization of Climate Science Simulation Data",
"doi": null,
"abstractUrl": "/magazine/cg/2021/01/09325132/1qnQT22F5zq",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cs/2021/03/09384262",
"title": "Bringing the Future Into Focus: Benefits and Challenges of High-Resolution Global Climate Change Simulations",
"doi": null,
"abstractUrl": "/magazine/cs/2021/03/09384262/1scDqsJ2diM",
"parentPublication": {
"id": "mags/cs",
"title": "Computing in Science & Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/asonam/2020/1056/0/09381419",
"title": "Affective Polarization in Online Climate Change Discourse on Twitter",
"doi": null,
"abstractUrl": "/proceedings-article/asonam/2020/09381419/1semx89mBhK",
"parentPublication": {
"id": "proceedings/asonam/2020/1056/0",
"title": "2020 IEEE/ACM International Conference on Advances in Social Networks Analysis and Mining (ASONAM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09325141",
"articleId": "1qnQSeB3gME",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09273215",
"articleId": "1pb9IymEzcI",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1qnQR9hQEs8",
"name": "mcg202101-09281098s1-supp1-3042587.avi",
"location": "https://www.computer.org/csdl/api/v1/extra/mcg202101-09281098s1-supp1-3042587.avi",
"extension": "avi",
"size": "3.73 MB",
"__typename": "WebExtraType"
},
{
"id": "1qnQQZ39zBS",
"name": "mcg202101-09281098s1-supp2-3042587.avi",
"location": "https://www.computer.org/csdl/api/v1/extra/mcg202101-09281098s1-supp2-3042587.avi",
"extension": "avi",
"size": "2.09 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "1zw1icCEnRK",
"title": "Nov.-Dec.",
"year": "2021",
"issueNum": "06",
"idPrefix": "cs",
"pubType": "magazine",
"volume": "23",
"label": "Nov.-Dec.",
"downloadables": {
"hasCover": true,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1xH5FqO6mtO",
"doi": "10.1109/MCSE.2021.3119509",
"abstract": "Over the last 30 years, the Computational Science Graduate Fellowship (CSGF) program has played an integral role in preparing a large and diverse community of computational scientists to push the limits of high-performance computing (HPC). To celebrate the CSGF program’s enduring influence, I share my perspective from the climate modeling community, which has used HPC to better understand the earth’s climate system. While the benefits of HPC in climate science have been enormous, rapid computing advances have brought new challenges. One difficulty is quality assurance—ensuring that large and complex codes running on multiple platforms are correct. A second is mitigating the increasingly large data volumes. Here, I describe our software quality assurance framework and strategies to reduce climate data volumes. In these complementary efforts, our focus has been preserving the scientific integrity of the simulation output data, a priority shared across HPC model simulation disciplines.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Over the last 30 years, the Computational Science Graduate Fellowship (CSGF) program has played an integral role in preparing a large and diverse community of computational scientists to push the limits of high-performance computing (HPC). To celebrate the CSGF program’s enduring influence, I share my perspective from the climate modeling community, which has used HPC to better understand the earth’s climate system. While the benefits of HPC in climate science have been enormous, rapid computing advances have brought new challenges. One difficulty is quality assurance—ensuring that large and complex codes running on multiple platforms are correct. A second is mitigating the increasingly large data volumes. Here, I describe our software quality assurance framework and strategies to reduce climate data volumes. In these complementary efforts, our focus has been preserving the scientific integrity of the simulation output data, a priority shared across HPC model simulation disciplines.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Over the last 30 years, the Computational Science Graduate Fellowship (CSGF) program has played an integral role in preparing a large and diverse community of computational scientists to push the limits of high-performance computing (HPC). To celebrate the CSGF program’s enduring influence, I share my perspective from the climate modeling community, which has used HPC to better understand the earth’s climate system. While the benefits of HPC in climate science have been enormous, rapid computing advances have brought new challenges. One difficulty is quality assurance—ensuring that large and complex codes running on multiple platforms are correct. A second is mitigating the increasingly large data volumes. Here, I describe our software quality assurance framework and strategies to reduce climate data volumes. In these complementary efforts, our focus has been preserving the scientific integrity of the simulation output data, a priority shared across HPC model simulation disciplines.",
"title": "On Preserving Scientific Integrity for Climate Model Data in the HPC Era",
"normalizedTitle": "On Preserving Scientific Integrity for Climate Model Data in the HPC Era",
"fno": "09573307",
"hasPdf": true,
"idPrefix": "cs",
"keywords": [
"Climatology",
"Data Analysis",
"Geophysics Computing",
"Parallel Processing",
"Quality Assurance",
"Data Volumes",
"Software Quality Assurance Framework",
"Climate Data Volumes",
"Simulation Output Data",
"HPC Model Simulation Disciplines",
"Climate Model Data",
"Computational Science Graduate Fellowship Program",
"Computational Scientists",
"High Performance Computing",
"CSGF Program",
"Climate Modeling Community",
"Climate Science",
"Data Models",
"Codes",
"Meteorology",
"Computational Modeling",
"Atmospheric Modeling",
"Earth"
],
"authors": [
{
"givenName": "Allison H.",
"surname": "Baker",
"fullName": "Allison H. Baker",
"affiliation": "National Center for Atmospheric Research, Boulder, CO, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "06",
"pubDate": "2021-11-01 00:00:00",
"pubType": "mags",
"pages": "16-24",
"year": "2021",
"issn": "1521-9615",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/e-science/2015/9325/0/9325a108",
"title": "From HPC Performance to Climate Modeling: Transforming Methods for HPC Predictions into Models of Extreme Climate Conditions",
"doi": null,
"abstractUrl": "/proceedings-article/e-science/2015/9325a108/12OmNB06l60",
"parentPublication": {
"id": "proceedings/e-science/2015/9325/0",
"title": "2015 IEEE 11th International Conference on e-Science (e-Science)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2016/9005/0/07841078",
"title": "Next-gen tools for big scientific data: ARM data center example",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2016/07841078/12OmNyPQ4MV",
"parentPublication": {
"id": "proceedings/big-data/2016/9005/0",
"title": "2016 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/simultech/2014/060/0/07095124",
"title": "Using fuzzy cognitive mapping and nonlinear Hebbian Learning for modeling, simulation and assessment of the climate system, based on a planetary boundaries framework",
"doi": null,
"abstractUrl": "/proceedings-article/simultech/2014/07095124/12OmNypIYxl",
"parentPublication": {
"id": "proceedings/simultech/2014/060/0",
"title": "2014 International Conference on Simulation and Modeling Methodologies, Technologies and Applications (SIMULTECH)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cs/2013/05/mcs2013050032",
"title": "Climate Informatics: Accelerating Discovering in Climate Science with Machine Learning",
"doi": null,
"abstractUrl": "/magazine/cs/2013/05/mcs2013050032/13rRUy2YLOR",
"parentPublication": {
"id": "mags/cs",
"title": "Computing in Science & Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cs/2015/06/mcs2015060009",
"title": "Climate Computing: The State of Play",
"doi": null,
"abstractUrl": "/magazine/cs/2015/06/mcs2015060009/13rRUyZaxu4",
"parentPublication": {
"id": "mags/cs",
"title": "Computing in Science & Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cs/2011/05/mcs2011050036",
"title": "Climate Change Modeling: Computational Opportunities and Challenges",
"doi": null,
"abstractUrl": "/magazine/cs/2011/05/mcs2011050036/13rRUyoPSSG",
"parentPublication": {
"id": "mags/cs",
"title": "Computing in Science & Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/e-science/2018/9156/0/915600a271",
"title": "Increasing parallelism in climate models via additional component concurrency",
"doi": null,
"abstractUrl": "/proceedings-article/e-science/2018/915600a271/17D45XlyDvn",
"parentPublication": {
"id": "proceedings/e-science/2018/9156/0",
"title": "2018 IEEE 14th International Conference on e-Science (e-Science)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cluster/2018/8319/0/831900a114",
"title": "SciDP: Support HPC and Big Data Applications via Integrated Scientific Data Processing",
"doi": null,
"abstractUrl": "/proceedings-article/cluster/2018/831900a114/17D45XwUAIs",
"parentPublication": {
"id": "proceedings/cluster/2018/8319/0",
"title": "2018 IEEE International Conference on Cluster Computing (CLUSTER)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/drbsd-5/2019/6017/0/601700a016",
"title": "A Collaborative Effort to Improve Lossy Compression Methods for Climate Data",
"doi": null,
"abstractUrl": "/proceedings-article/drbsd-5/2019/601700a016/1gAwQswkjfO",
"parentPublication": {
"id": "proceedings/drbsd-5/2019/6017/0",
"title": "2019 IEEE/ACM 5th International Workshop on Data Analysis and Reduction for Big Scientific Data (DRBSD-5)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cs/2021/03/09384262",
"title": "Bringing the Future Into Focus: Benefits and Challenges of High-Resolution Global Climate Change Simulations",
"doi": null,
"abstractUrl": "/magazine/cs/2021/03/09384262/1scDqsJ2diM",
"parentPublication": {
"id": "mags/cs",
"title": "Computing in Science & Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09580661",
"articleId": "1xPo0IgHEdi",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09568724",
"articleId": "1xDLGfzwqGs",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNzxgHw9",
"title": "September/October",
"year": "2007",
"issueNum": "05",
"idPrefix": "tg",
"pubType": "journal",
"volume": "13",
"label": "September/October",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwkxc5j",
"doi": "10.1109/TVCG.2007.1065",
"abstract": "We present a simple and fast mesh denoising method, which can remove noise effectively while preserving mesh features such as sharp edges and corners. The method consists of two stages. First, noisy face normals are filtered iteratively by weighted averaging of neighboring face normals. Second, vertex positions are iteratively updated to agree with the denoised face normals. The weight function used during normal filtering is much simpler than that used in previous similar approaches, being simply a trimmed quadratic. This makes the algorithm both fast and simple to implement. Vertex position updating is based on the integration of surface normals using a least-squares error criterion. Like previous algorithms, we solve the least-squares problem by gradient descent; whereas previous methods needed user input to determine the iteration step size, we determine it automatically. In addition, we prove the convergence of the vertex position updating approach. Analysis and experiments show the advantages of our proposed method over various earlier surface denoising methods.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a simple and fast mesh denoising method, which can remove noise effectively while preserving mesh features such as sharp edges and corners. The method consists of two stages. First, noisy face normals are filtered iteratively by weighted averaging of neighboring face normals. Second, vertex positions are iteratively updated to agree with the denoised face normals. The weight function used during normal filtering is much simpler than that used in previous similar approaches, being simply a trimmed quadratic. This makes the algorithm both fast and simple to implement. Vertex position updating is based on the integration of surface normals using a least-squares error criterion. Like previous algorithms, we solve the least-squares problem by gradient descent; whereas previous methods needed user input to determine the iteration step size, we determine it automatically. In addition, we prove the convergence of the vertex position updating approach. Analysis and experiments show the advantages of our proposed method over various earlier surface denoising methods.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a simple and fast mesh denoising method, which can remove noise effectively while preserving mesh features such as sharp edges and corners. The method consists of two stages. First, noisy face normals are filtered iteratively by weighted averaging of neighboring face normals. Second, vertex positions are iteratively updated to agree with the denoised face normals. The weight function used during normal filtering is much simpler than that used in previous similar approaches, being simply a trimmed quadratic. This makes the algorithm both fast and simple to implement. Vertex position updating is based on the integration of surface normals using a least-squares error criterion. Like previous algorithms, we solve the least-squares problem by gradient descent; whereas previous methods needed user input to determine the iteration step size, we determine it automatically. In addition, we prove the convergence of the vertex position updating approach. Analysis and experiments show the advantages of our proposed method over various earlier surface denoising methods.",
"title": "Fast and Effective Feature-Preserving Mesh Denoising",
"normalizedTitle": "Fast and Effective Feature-Preserving Mesh Denoising",
"fno": "04276075",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Feature Extraction",
"Filtering Theory",
"Image Denoising",
"Least Squares Approximations",
"Feature Preserving Mesh Denoising",
"Noisy Face Normals",
"Weighted Averaging",
"Normal Filtering",
"Least Squares Error Criterion",
"Surface Denoising Methods",
"Noise Reduction",
"Smoothing Methods",
"Filters",
"Filtering",
"Solid Modeling",
"Noise Measurement",
"Sun",
"Iterative Algorithms",
"Convergence",
"Design Automation",
"Mesh Smoothing",
"Mesh Denoising",
"Feature Preservation"
],
"authors": [
{
"givenName": "Xianfang",
"surname": "Sun",
"fullName": "Xianfang Sun",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Paul L.",
"surname": "Rosin",
"fullName": "Paul L. Rosin",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ralph",
"surname": "Martin",
"fullName": "Ralph Martin",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Frank",
"surname": "Langbein",
"fullName": "Frank Langbein",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "05",
"pubDate": "2007-09-01 00:00:00",
"pubType": "trans",
"pages": "925-938",
"year": "2007",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/cad-cg/2005/2473/0/24730275",
"title": "Feature-Preserving Mesh Denoising via Bilateral Normal Filtering",
"doi": null,
"abstractUrl": "/proceedings-article/cad-cg/2005/24730275/12OmNBJNL1l",
"parentPublication": {
"id": "proceedings/cad-cg/2005/2473/0",
"title": "Ninth International Conference on Computer Aided Design and Computer Graphics (CAD-CG'05)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2015/8332/0/8332a046",
"title": "Global Mesh Denoising with Fairness",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2015/8332a046/12OmNBtUdJJ",
"parentPublication": {
"id": "proceedings/3dv/2015/8332/0",
"title": "2015 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2010/8420/0/05720332",
"title": "Mesh Denoising Using Quadric Error Metric",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2010/05720332/12OmNxecRQw",
"parentPublication": {
"id": "proceedings/sibgrapi/2010/8420/0",
"title": "2010 23rd SIBGRAPI Conference on Graphics, Patterns and Images",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2016/5407/0/5407a083",
"title": "Robust Feature-Preserving Denoising of 3D Point Clouds",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2016/5407a083/12OmNyRxFIQ",
"parentPublication": {
"id": "proceedings/3dv/2016/5407/0",
"title": "2016 Fourth International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/03/07328329",
"title": "A Robust Scheme for Feature-Preserving Mesh Denoising",
"doi": null,
"abstractUrl": "/journal/tg/2016/03/07328329/13rRUwIF69l",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/01/06822598",
"title": "Bi-Normal Filtering for Mesh Denoising",
"doi": null,
"abstractUrl": "/journal/tg/2015/01/06822598/13rRUxYINff",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2010/02/ttg2010020312",
"title": "Robust Feature-Preserving Mesh Denoising Based on Consistent Subneighborhoods",
"doi": null,
"abstractUrl": "/journal/tg/2010/02/ttg2010020312/13rRUyp7tWS",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/03/08283576",
"title": "Feature Preserving Mesh Denoising Based on Graph Spectral Processing",
"doi": null,
"abstractUrl": "/journal/tg/2019/03/08283576/17D45XcttjZ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2018/9264/0/926400a001",
"title": "Adaptive Patches for Mesh Denoising",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2018/926400a001/17D45XvMce1",
"parentPublication": {
"id": "proceedings/sibgrapi/2018/9264/0",
"title": "2018 31st SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/08/09296808",
"title": "Mesh Denoising With Facet Graph Convolutions",
"doi": null,
"abstractUrl": "/journal/tg/2022/08/09296808/1pDnJLfMBWg",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": null,
"next": {
"fno": "v0849",
"articleId": "13rRUxNW1Zd",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNyQphh4",
"title": "Aug.",
"year": "2018",
"issueNum": "08",
"idPrefix": "tg",
"pubType": "journal",
"volume": "24",
"label": "Aug.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUx0PqpA",
"doi": "10.1109/TVCG.2017.2740384",
"abstract": "This paper presents a two-stage mesh denoising algorithm. Unlike other traditional averaging approaches, our approach uses an element-based normal voting tensor to compute smooth surfaces. By introducing a binary optimization on the proposed tensor together with a local binary neighborhood concept, our algorithm better retains sharp features and produces smoother umbilical regions than previous approaches. On top of that, we provide a stochastic analysis on the different kinds of noise based on the average edge length. The quantitative results demonstrate that the performance of our method is better compared to state-of-the-art smoothing approaches.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper presents a two-stage mesh denoising algorithm. Unlike other traditional averaging approaches, our approach uses an element-based normal voting tensor to compute smooth surfaces. By introducing a binary optimization on the proposed tensor together with a local binary neighborhood concept, our algorithm better retains sharp features and produces smoother umbilical regions than previous approaches. On top of that, we provide a stochastic analysis on the different kinds of noise based on the average edge length. The quantitative results demonstrate that the performance of our method is better compared to state-of-the-art smoothing approaches.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper presents a two-stage mesh denoising algorithm. Unlike other traditional averaging approaches, our approach uses an element-based normal voting tensor to compute smooth surfaces. By introducing a binary optimization on the proposed tensor together with a local binary neighborhood concept, our algorithm better retains sharp features and produces smoother umbilical regions than previous approaches. On top of that, we provide a stochastic analysis on the different kinds of noise based on the average edge length. The quantitative results demonstrate that the performance of our method is better compared to state-of-the-art smoothing approaches.",
"title": "Mesh Denoising Based on Normal Voting Tensor and Binary Optimization",
"normalizedTitle": "Mesh Denoising Based on Normal Voting Tensor and Binary Optimization",
"fno": "08012522",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Face",
"Noise Reduction",
"Smoothing Methods",
"Tensile Stress",
"Noise Measurement",
"Surface Treatment",
"Optimization",
"Geometry Processing",
"Mesh Smoothing",
"Normal Voting Tensor",
"Eigenvalue Binary Optimization",
"Noise Analysis"
],
"authors": [
{
"givenName": "Sunil Kumar",
"surname": "Yadav",
"fullName": "Sunil Kumar Yadav",
"affiliation": "Department of Mathematics and Computer Science, Freie Universität Berlin, Berlin, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ulrich",
"surname": "Reitebuch",
"fullName": "Ulrich Reitebuch",
"affiliation": "Department of Mathematics and Computer Science, Freie Universität Berlin, Berlin, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Konrad",
"surname": "Polthier",
"fullName": "Konrad Polthier",
"affiliation": "Department of Mathematics and Computer Science, Freie Universität Berlin, Berlin, Germany",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "08",
"pubDate": "2018-08-01 00:00:00",
"pubType": "trans",
"pages": "2366-2379",
"year": "2018",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2017/0457/0/0457d843",
"title": "Simultaneous Visual Data Completion and Denoising Based on Tensor Rank and Total Variation Minimization and Its Primal-Dual Splitting Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457d843/12OmNAIMO9d",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdm/2013/5108/0/5108b037",
"title": "Walk 'n' Merge: A Scalable Algorithm for Boolean Tensor Factorization",
"doi": null,
"abstractUrl": "/proceedings-article/icdm/2013/5108b037/12OmNCfjeAz",
"parentPublication": {
"id": "proceedings/icdm/2013/5108/0",
"title": "2013 IEEE International Conference on Data Mining (ICDM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460336",
"title": "Road marking recognition for map generation using sparse tensor voting",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460336/12OmNxGAKVZ",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2014/5118/0/5118c949",
"title": "Decomposable Nonlocal Tensor Dictionary Learning for Multispectral Image Denoising",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2014/5118c949/12OmNxZ2GjN",
"parentPublication": {
"id": "proceedings/cvpr/2014/5118/0",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2012/1611/0/06238926",
"title": "Probabilistic tensor voting for robust perceptual grouping",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2012/06238926/12OmNxwENs4",
"parentPublication": {
"id": "proceedings/cvprw/2012/1611/0",
"title": "2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2015/8391/0/8391a442",
"title": "Low-Rank Tensor Approximation with Laplacian Scale Mixture Modeling for Multiframe Image Denoising",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391a442/12OmNxxNbR9",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2017/0457/0/0457f901",
"title": "Hyper-Laplacian Regularized Unidirectional Low-Rank Tensor Recovery for Multispectral Image Denoising",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457f901/12OmNyPQ4S0",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2016/8851/0/8851b692",
"title": "Multispectral Images Denoising by Intrinsic Tensor Sparsity Regularization",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2016/8851b692/12OmNzmtWIy",
"parentPublication": {
"id": "proceedings/cvpr/2016/8851/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/01/06822598",
"title": "Bi-Normal Filtering for Mesh Denoising",
"doi": null,
"abstractUrl": "/journal/tg/2015/01/06822598/13rRUxYINff",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2019/0858/0/09005626",
"title": "IEEE BigData 2019 Cup: Binary Classification via Tensor Completion",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2019/09005626/1hJsv6O2W08",
"parentPublication": {
"id": "proceedings/big-data/2019/0858/0",
"title": "2019 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "07967887",
"articleId": "13rRUwI5TR5",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07990536",
"articleId": "13rRUxcbnCx",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNBpEeNH",
"title": "Jan.",
"year": "2015",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "21",
"label": "Jan.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxYINff",
"doi": "10.1109/TVCG.2014.2326872",
"abstract": "Most mesh denoising techniques utilize only either the facet normal field or the vertex normal field of a mesh surface. The two normal fields, though contain some redundant geometry information of the same model, can provide additional information that the other field lacks. Thus, considering only one normal field is likely to overlook some geometric features. In this paper, we take advantage of the piecewise consistent property of the two normal fields and propose an effective framework in which they are filtered and integrated using a novel method to guide the denoising process. Our key observation is that, decomposing the inconsistent field at challenging regions into multiple piecewise consistent fields makes the two fields complementary to each other and produces better results. Our approach consists of three steps: vertex classification, bi-normal filtering, and vertex position update. The classification step allows us to filter the two fields on a piecewise smooth surface rather than a surface that is smooth everywhere. Based on the piecewise consistence of the two normal fields, we filtered them using a piecewise smooth region clustering strategy. To benefit from the bi-normal filtering, we design a quadratic optimization algorithm for vertex position update. Experimental results on synthetic and real data show that our algorithm achieves higher quality results than current approaches on surfaces with multifarious geometric features and irregular surface sampling.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Most mesh denoising techniques utilize only either the facet normal field or the vertex normal field of a mesh surface. The two normal fields, though contain some redundant geometry information of the same model, can provide additional information that the other field lacks. Thus, considering only one normal field is likely to overlook some geometric features. In this paper, we take advantage of the piecewise consistent property of the two normal fields and propose an effective framework in which they are filtered and integrated using a novel method to guide the denoising process. Our key observation is that, decomposing the inconsistent field at challenging regions into multiple piecewise consistent fields makes the two fields complementary to each other and produces better results. Our approach consists of three steps: vertex classification, bi-normal filtering, and vertex position update. The classification step allows us to filter the two fields on a piecewise smooth surface rather than a surface that is smooth everywhere. Based on the piecewise consistence of the two normal fields, we filtered them using a piecewise smooth region clustering strategy. To benefit from the bi-normal filtering, we design a quadratic optimization algorithm for vertex position update. Experimental results on synthetic and real data show that our algorithm achieves higher quality results than current approaches on surfaces with multifarious geometric features and irregular surface sampling.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Most mesh denoising techniques utilize only either the facet normal field or the vertex normal field of a mesh surface. The two normal fields, though contain some redundant geometry information of the same model, can provide additional information that the other field lacks. Thus, considering only one normal field is likely to overlook some geometric features. In this paper, we take advantage of the piecewise consistent property of the two normal fields and propose an effective framework in which they are filtered and integrated using a novel method to guide the denoising process. Our key observation is that, decomposing the inconsistent field at challenging regions into multiple piecewise consistent fields makes the two fields complementary to each other and produces better results. Our approach consists of three steps: vertex classification, bi-normal filtering, and vertex position update. The classification step allows us to filter the two fields on a piecewise smooth surface rather than a surface that is smooth everywhere. Based on the piecewise consistence of the two normal fields, we filtered them using a piecewise smooth region clustering strategy. To benefit from the bi-normal filtering, we design a quadratic optimization algorithm for vertex position update. Experimental results on synthetic and real data show that our algorithm achieves higher quality results than current approaches on surfaces with multifarious geometric features and irregular surface sampling.",
"title": "Bi-Normal Filtering for Mesh Denoising",
"normalizedTitle": "Bi-Normal Filtering for Mesh Denoising",
"fno": "06822598",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Noise Reduction",
"Noise",
"Optimization",
"Surface Treatment",
"Noise Measurement",
"Geometry",
"Smoothing Methods",
"Piecewise Smooth Surface",
"Mesh Denoising",
"Bi Normal Filtering",
"Feature Preserving",
"Piecewise Consistence"
],
"authors": [
{
"givenName": "Mingqiang",
"surname": "Wei",
"fullName": "Mingqiang Wei",
"affiliation": "Department of Computer Science and Engineering, the Chinese University of Hong Kong, Hong Kong",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jinze",
"surname": "Yu",
"fullName": "Jinze Yu",
"affiliation": "School of Computing, National University of Singapore, Singapore",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Wai-Man",
"surname": "Pang",
"fullName": "Wai-Man Pang",
"affiliation": "Department of Computer Science, Caritas Institute of Higher Education, Hong Kong",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jun",
"surname": "Wang",
"fullName": "Jun Wang",
"affiliation": "College of Electronic and Information Engineering, Nanjing University of Aeronautics and Astronautics, Nanjing, Jiangsu",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jing",
"surname": "Qin",
"fullName": "Jing Qin",
"affiliation": "College of Electronic and Information Engineering, Nanjing University of Aeronautics and Astronautics, Nanjing, Jiangsu",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ligang",
"surname": "Liu",
"fullName": "Ligang Liu",
"affiliation": "Human-Computer Interaction Center, Shenzhen Institutes of Advanced Technology, Chinese Academy of Sciences, 1068 Xueyuan Avenue, Shenzhen, Guangdong",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Pheng-Ann",
"surname": "Heng",
"fullName": "Pheng-Ann Heng",
"affiliation": "Department of Computer Science and Engineering, the Chinese University of Hong Kong, Hong Kong",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2015-01-01 00:00:00",
"pubType": "trans",
"pages": "43-55",
"year": "2015",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/cad-cg/2005/2473/0/24730275",
"title": "Feature-Preserving Mesh Denoising via Bilateral Normal Filtering",
"doi": null,
"abstractUrl": "/proceedings-article/cad-cg/2005/24730275/12OmNBJNL1l",
"parentPublication": {
"id": "proceedings/cad-cg/2005/2473/0",
"title": "Ninth International Conference on Computer Aided Design and Computer Graphics (CAD-CG'05)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2015/8332/0/8332a046",
"title": "Global Mesh Denoising with Fairness",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2015/8332a046/12OmNBtUdJJ",
"parentPublication": {
"id": "proceedings/3dv/2015/8332/0",
"title": "2015 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2010/8420/0/05720332",
"title": "Mesh Denoising Using Quadric Error Metric",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2010/05720332/12OmNxecRQw",
"parentPublication": {
"id": "proceedings/sibgrapi/2010/8420/0",
"title": "2010 23rd SIBGRAPI Conference on Graphics, Patterns and Images",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2007/05/04276075",
"title": "Fast and Effective Feature-Preserving Mesh Denoising",
"doi": null,
"abstractUrl": "/journal/tg/2007/05/04276075/13rRUwkxc5j",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/08/08012522",
"title": "Mesh Denoising Based on Normal Voting Tensor and Binary Optimization",
"doi": null,
"abstractUrl": "/journal/tg/2018/08/08012522/13rRUx0PqpA",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/06/08344461",
"title": "Robust and High Fidelity Mesh Denoising",
"doi": null,
"abstractUrl": "/journal/tg/2019/06/08344461/13rRUxcbnHm",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/10/08434353",
"title": "Mesh Denoising Guided by Patch Normal Co-Filtering via Kernel Low-Rank Recovery",
"doi": null,
"abstractUrl": "/journal/tg/2019/10/08434353/13rRUy2YLYE",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2018/9264/0/926400a001",
"title": "Adaptive Patches for Mesh Denoising",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2018/926400a001/17D45XvMce1",
"parentPublication": {
"id": "proceedings/sibgrapi/2018/9264/0",
"title": "2018 31st SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/10/09115285",
"title": "DNF-Net: A Deep Normal Filtering Network for Mesh Denoising",
"doi": null,
"abstractUrl": "/journal/tg/2021/10/09115285/1kzC0PMrQXu",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/12/09127881",
"title": "Selective Guidance Normal Filter for Geometric Texture Removal",
"doi": null,
"abstractUrl": "/journal/tg/2021/12/09127881/1l3ut5TpoCA",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "06819054",
"articleId": "13rRUzpzeB5",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06824802",
"articleId": "13rRUwInvsT",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
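The record above describes a three-step pipeline (vertex classification, bi-normal filtering, vertex position update). The snippet below is a minimal, hypothetical sketch of only the final step, using the classic iterative vertex update that many normal-based denoisers employ; it is not the paper's quadratic solver, and the names (`update_vertices`, `lam`) are illustrative assumptions.

```python
import numpy as np

def update_vertices(V, F, N_filtered, iters=20, lam=0.1):
    """V: (n, 3) vertices, F: (m, 3) triangle indices, N_filtered: (m, 3) unit face normals."""
    V = V.astype(float).copy()
    for _ in range(iters):
        disp = np.zeros_like(V)
        count = np.zeros(len(V))
        for face, n in zip(F, N_filtered):
            centroid = V[face].mean(axis=0)
            for v in face:
                # Pull each vertex toward the plane through the face centroid
                # whose orientation is the (already filtered) face normal.
                disp[v] += n * np.dot(n, centroid - V[v])
                count[v] += 1
        V += lam * disp / np.maximum(count, 1.0)[:, None]
    return V
```

In practice this step is run after the normals have been filtered, so the vertex positions gradually conform to the denoised normal field.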
{
"issue": {
"id": "12OmNAHW0Jc",
"title": "June",
"year": "2019",
"issueNum": "06",
"idPrefix": "tg",
"pubType": "journal",
"volume": "25",
"label": "June",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxcbnHm",
"doi": "10.1109/TVCG.2018.2828818",
"abstract": "This paper presents a simple and effective two-stage mesh denoising algorithm, where in the first stage, face normal filtering is done by using bilateral normal filtering in a robust statistics framework. Tukey's bi-weight function is used as similarity function in the bilateral weighting, which is a robust estimator and stops the diffusion at sharp edges to retain features and removes noise from flat regions effectively. In the second stage, an edge-weighted Laplace operator is introduced to compute a differential coordinate. This differential coordinate helps the algorithm to produce a high-quality mesh without any face normal flips and makes the method robust against high-intensity noise.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper presents a simple and effective two-stage mesh denoising algorithm, where in the first stage, face normal filtering is done by using bilateral normal filtering in a robust statistics framework. Tukey's bi-weight function is used as similarity function in the bilateral weighting, which is a robust estimator and stops the diffusion at sharp edges to retain features and removes noise from flat regions effectively. In the second stage, an edge-weighted Laplace operator is introduced to compute a differential coordinate. This differential coordinate helps the algorithm to produce a high-quality mesh without any face normal flips and makes the method robust against high-intensity noise.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper presents a simple and effective two-stage mesh denoising algorithm, where in the first stage, face normal filtering is done by using bilateral normal filtering in a robust statistics framework. Tukey's bi-weight function is used as similarity function in the bilateral weighting, which is a robust estimator and stops the diffusion at sharp edges to retain features and removes noise from flat regions effectively. In the second stage, an edge-weighted Laplace operator is introduced to compute a differential coordinate. This differential coordinate helps the algorithm to produce a high-quality mesh without any face normal flips and makes the method robust against high-intensity noise.",
"title": "Robust and High Fidelity Mesh Denoising",
"normalizedTitle": "Robust and High Fidelity Mesh Denoising",
"fno": "08344461",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Face",
"Robustness",
"Noise Reduction",
"Noise Measurement",
"Geometry",
"Surface Treatment",
"Smoothing Methods",
"Robust Statistics",
"Face Normal Processing",
"Tukeys Bi Weight Function",
"High Fidelity Mesh",
"Differential Coordinate"
],
"authors": [
{
"givenName": "Sunil Kumar",
"surname": "Yadav",
"fullName": "Sunil Kumar Yadav",
"affiliation": "Department of Mathematics and Computer Science, Freie Universität, Berlin, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ulrich",
"surname": "Reitebuch",
"fullName": "Ulrich Reitebuch",
"affiliation": "Department of Mathematics and Computer Science, Freie Universität, Berlin, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Konrad",
"surname": "Polthier",
"fullName": "Konrad Polthier",
"affiliation": "Department of Mathematics and Computer Science, Freie Universität, Berlin, Germany",
"__typename": "ArticleAuthorType"
}
],
"replicability": {
"isEnabled": true,
"codeDownloadUrl": "https://github.com/Sunil7545/RoFi_replicabilty_repository/archive/master.zip",
"codeRepositoryUrl": "https://github.com/Sunil7545/RoFi_replicabilty_repository",
"__typename": "ArticleReplicabilityType"
},
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "06",
"pubDate": "2019-06-01 00:00:00",
"pubType": "trans",
"pages": "2304-2310",
"year": "2019",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/cad-cg/2005/2473/0/24730275",
"title": "Feature-Preserving Mesh Denoising via Bilateral Normal Filtering",
"doi": null,
"abstractUrl": "/proceedings-article/cad-cg/2005/24730275/12OmNBJNL1l",
"parentPublication": {
"id": "proceedings/cad-cg/2005/2473/0",
"title": "Ninth International Conference on Computer Aided Design and Computer Graphics (CAD-CG'05)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2015/8332/0/8332a046",
"title": "Global Mesh Denoising with Fairness",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2015/8332a046/12OmNBtUdJJ",
"parentPublication": {
"id": "proceedings/3dv/2015/8332/0",
"title": "2015 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2016/5407/0/5407a083",
"title": "Robust Feature-Preserving Denoising of 3D Point Clouds",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2016/5407a083/12OmNyRxFIQ",
"parentPublication": {
"id": "proceedings/3dv/2016/5407/0",
"title": "2016 Fourth International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/10/ttg2011101521",
"title": "Bilateral Normal Filtering for Mesh Denoising",
"doi": null,
"abstractUrl": "/journal/tg/2011/10/ttg2011101521/13rRUwjXZS9",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/08/08012522",
"title": "Mesh Denoising Based on Normal Voting Tensor and Binary Optimization",
"doi": null,
"abstractUrl": "/journal/tg/2018/08/08012522/13rRUx0PqpA",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/01/06822598",
"title": "Bi-Normal Filtering for Mesh Denoising",
"doi": null,
"abstractUrl": "/journal/tg/2015/01/06822598/13rRUxYINff",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/08/07990536",
"title": "Mesh Denoising with (Geo)Metric Fidelity",
"doi": null,
"abstractUrl": "/journal/tg/2018/08/07990536/13rRUxcbnCx",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/10/08434353",
"title": "Mesh Denoising Guided by Patch Normal Co-Filtering via Kernel Low-Rank Recovery",
"doi": null,
"abstractUrl": "/journal/tg/2019/10/08434353/13rRUy2YLYE",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2018/9264/0/926400a001",
"title": "Adaptive Patches for Mesh Denoising",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2018/926400a001/17D45XvMce1",
"parentPublication": {
"id": "proceedings/sibgrapi/2018/9264/0",
"title": "2018 31st SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccst/2020/8138/0/813800a123",
"title": "Point Cloud Denoising Algorithm Based on Noise Classification",
"doi": null,
"abstractUrl": "/proceedings-article/iccst/2020/813800a123/1p1gtJQEUPC",
"parentPublication": {
"id": "proceedings/iccst/2020/8138/0",
"title": "2020 International Conference on Culture-oriented Science & Technology (ICCST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "08352725",
"articleId": "13rRUxOdD2M",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08352050",
"articleId": "13rRUwwJWFW",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "19EzSSiMIik",
"name": "ttg201906-08344461s1.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg201906-08344461s1.zip",
"extension": "zip",
"size": "1.12 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
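The abstract above hinges on Tukey's bi-weight acting as a robust similarity weight inside a bilateral face-normal filter. The sketch below shows only that idea under simplifying assumptions (the spatial term is dropped, one smoothing pass); `bilateral_normal_step` and its parameters are hypothetical names, not the authors' code.

```python
import numpy as np

def tukey_biweight(r, c):
    """Tukey's bi-weight: near-full weight for small residuals, exactly zero beyond the cutoff c."""
    w = np.zeros_like(r, dtype=float)
    inside = np.abs(r) <= c
    w[inside] = (1.0 - (r[inside] / c) ** 2) ** 2
    return w

def bilateral_normal_step(N, nbrs, c=0.8):
    """One pass of robust averaging over unit face normals.

    N    : (m, 3) unit face normals
    nbrs : list of neighbour-face index lists (e.g., faces sharing an edge)
    Because the bi-weight vanishes for large normal differences, averaging
    stops across sharp edges, which is what preserves features.
    """
    out = N.copy()
    for i, js in enumerate(nbrs):
        if not js:
            continue
        residuals = np.linalg.norm(N[js] - N[i], axis=1)
        w = tukey_biweight(residuals, c)
        acc = N[i] + (w[:, None] * N[js]).sum(axis=0)  # the face itself gets weight 1
        out[i] = acc / (np.linalg.norm(acc) + 1e-12)
    return out
```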
{
"issue": {
"id": "12OmNAPBbfM",
"title": "Oct.",
"year": "2019",
"issueNum": "10",
"idPrefix": "tg",
"pubType": "journal",
"volume": "25",
"label": "Oct.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUy2YLYE",
"doi": "10.1109/TVCG.2018.2865363",
"abstract": "Mesh denoising is a classical, yet not well-solved problem in digital geometry processing. The challenge arises from noise removal with the minimal disturbance of surface intrinsic properties (e.g., sharp features and shallow details). We propose a new patch normal co-filter (PcFilter) for mesh denoising. It is inspired by the geometry statistics which show that surface patches with similar intrinsic properties exist on the underlying surface of a noisy mesh. We model the PcFilter as a low-rank matrix recovery problem of similar-patch collaboration, aiming at removing different levels of noise, yet preserving various surface features. We generalize our model to pursue the low-rank matrix recovery in the kernel space for handling the nonlinear structure contained in the data. By making use of the block coordinate descent minimization and the specifics of a proximal based coordinate descent method, we optimize the nonlinear and nonconvex objective function efficiently. The detailed quantitative and qualitative results on synthetic and real data show that the PcFilter competes favorably with the state-of-the-art methods in surface accuracy and noise-robustness.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Mesh denoising is a classical, yet not well-solved problem in digital geometry processing. The challenge arises from noise removal with the minimal disturbance of surface intrinsic properties (e.g., sharp features and shallow details). We propose a new patch normal co-filter (PcFilter) for mesh denoising. It is inspired by the geometry statistics which show that surface patches with similar intrinsic properties exist on the underlying surface of a noisy mesh. We model the PcFilter as a low-rank matrix recovery problem of similar-patch collaboration, aiming at removing different levels of noise, yet preserving various surface features. We generalize our model to pursue the low-rank matrix recovery in the kernel space for handling the nonlinear structure contained in the data. By making use of the block coordinate descent minimization and the specifics of a proximal based coordinate descent method, we optimize the nonlinear and nonconvex objective function efficiently. The detailed quantitative and qualitative results on synthetic and real data show that the PcFilter competes favorably with the state-of-the-art methods in surface accuracy and noise-robustness.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Mesh denoising is a classical, yet not well-solved problem in digital geometry processing. The challenge arises from noise removal with the minimal disturbance of surface intrinsic properties (e.g., sharp features and shallow details). We propose a new patch normal co-filter (PcFilter) for mesh denoising. It is inspired by the geometry statistics which show that surface patches with similar intrinsic properties exist on the underlying surface of a noisy mesh. We model the PcFilter as a low-rank matrix recovery problem of similar-patch collaboration, aiming at removing different levels of noise, yet preserving various surface features. We generalize our model to pursue the low-rank matrix recovery in the kernel space for handling the nonlinear structure contained in the data. By making use of the block coordinate descent minimization and the specifics of a proximal based coordinate descent method, we optimize the nonlinear and nonconvex objective function efficiently. The detailed quantitative and qualitative results on synthetic and real data show that the PcFilter competes favorably with the state-of-the-art methods in surface accuracy and noise-robustness.",
"title": "Mesh Denoising Guided by Patch Normal Co-Filtering via Kernel Low-Rank Recovery",
"normalizedTitle": "Mesh Denoising Guided by Patch Normal Co-Filtering via Kernel Low-Rank Recovery",
"fno": "08434353",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Computational Geometry",
"Concave Programming",
"Convex Programming",
"Gradient Methods",
"Image Denoising",
"Image Filtering",
"Image Reconstruction",
"Image Representation",
"Matrix Algebra",
"Minimisation",
"Statistical Analysis",
"Nonconvex Objective Function",
"Nonlinear Objective Function",
"Proximal Based Coordinate Descent Method",
"Block Coordinate Descent Minimization",
"Surface Features",
"Similar Patch Collaboration",
"Low Rank Matrix Recovery Problem",
"Surface Patches",
"Geometry Statistics",
"Pc Filter",
"Patch Normal Co Filter",
"Surface Intrinsic Properties",
"Minimal Disturbance",
"Digital Geometry Processing",
"Mesh Denoising",
"Kernel Low Rank Recovery",
"Noise Reduction",
"Noise Measurement",
"Three Dimensional Displays",
"Surface Treatment",
"Geometry",
"Surface Reconstruction",
"Kernel",
"Mesh Denoising",
"Patch Normal Co Filtering",
"Kernel Low Rank Recovery",
"Self Similarity"
],
"authors": [
{
"givenName": "Mingqiang",
"surname": "Wei",
"fullName": "Mingqiang Wei",
"affiliation": "College of Computer Science and Technology, Nanjing University of Aeronautics and Astronautics, Nanjing, Jiangsu, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jin",
"surname": "Huang",
"fullName": "Jin Huang",
"affiliation": "College of Mechanical and Electrical Engineering, Nanjing University of Aeronautics and Astronautics, Nanjing, Jiangsu, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xingyu",
"surname": "Xie",
"fullName": "Xingyu Xie",
"affiliation": "College of Mechanical and Electrical Engineering, Nanjing University of Aeronautics and Astronautics, Nanjing, Jiangsu, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ligang",
"surname": "Liu",
"fullName": "Ligang Liu",
"affiliation": "University of Technology and Science of China, Hefei, Anhui, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jun",
"surname": "Wang",
"fullName": "Jun Wang",
"affiliation": "College of Mechanical and Electrical Engineering, Nanjing University of Aeronautics and Astronautics, Nanjing, Jiangsu, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jing",
"surname": "Qin",
"fullName": "Jing Qin",
"affiliation": "Hong Kong Polytechnical University, Kowloon, Hong Kong, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "10",
"pubDate": "2019-10-01 00:00:00",
"pubType": "trans",
"pages": "2910-2926",
"year": "2019",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iccv/2015/8391/0/8391a603",
"title": "External Patch Prior Guided Internal Clustering for Image Denoising",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391a603/12OmNBpEeUR",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2013/5099/0/5099a187",
"title": "Normal Correction towards Smoothing Point-Based Surfaces",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2013/5099a187/12OmNwFicSu",
"parentPublication": {
"id": "proceedings/sibgrapi/2013/5099/0",
"title": "2013 XXVI Conference on Graphics, Patterns and Images",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/10/ttg2011101521",
"title": "Bilateral Normal Filtering for Mesh Denoising",
"doi": null,
"abstractUrl": "/journal/tg/2011/10/ttg2011101521/13rRUwjXZS9",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/08/08012522",
"title": "Mesh Denoising Based on Normal Voting Tensor and Binary Optimization",
"doi": null,
"abstractUrl": "/journal/tg/2018/08/08012522/13rRUx0PqpA",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/01/06822598",
"title": "Bi-Normal Filtering for Mesh Denoising",
"doi": null,
"abstractUrl": "/journal/tg/2015/01/06822598/13rRUxYINff",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/06/08344461",
"title": "Robust and High Fidelity Mesh Denoising",
"doi": null,
"abstractUrl": "/journal/tg/2019/06/08344461/13rRUxcbnHm",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2018/9264/0/926400a001",
"title": "Adaptive Patches for Mesh Denoising",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2018/926400a001/17D45XvMce1",
"parentPublication": {
"id": "proceedings/sibgrapi/2018/9264/0",
"title": "2018 31st SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/11/08730533",
"title": "Multi-Patch Collaborative Point Cloud Denoising via Low-Rank Recovery with Graph Constraint",
"doi": null,
"abstractUrl": "/journal/tg/2020/11/08730533/1aAxaVT7HtS",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/10/09115285",
"title": "DNF-Net: A Deep Normal Filtering Network for Mesh Denoising",
"doi": null,
"abstractUrl": "/journal/tg/2021/10/09115285/1kzC0PMrQXu",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/08/09296808",
"title": "Mesh Denoising With Facet Graph Convolutions",
"doi": null,
"abstractUrl": "/journal/tg/2022/08/09296808/1pDnJLfMBWg",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "08439008",
"articleId": "13rRUIJuxpE",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08421591",
"articleId": "13rRUEgs2Mb",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
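The core mechanism in the PcFilter record above is recovering a low-rank matrix assembled from similar patch normals. The fragment below is a heavily simplified stand-in: plain singular-value thresholding in the original (non-kernel) space, with no block coordinate descent; `denoise_patch_matrix` and its objective are assumptions for illustration only.

```python
import numpy as np

def svt(M, tau):
    """Singular-value thresholding: the proximal operator of the nuclear norm."""
    U, s, Vt = np.linalg.svd(M, full_matrices=False)
    return (U * np.maximum(s - tau, 0.0)) @ Vt

def denoise_patch_matrix(M, tau=0.5):
    """Closed-form minimiser of 0.5 * ||X - M||_F^2 + tau * ||X||_*.

    M stacks the flattened normals of patches judged similar, one patch per
    row; shrinking small singular values keeps the structure the patches
    share and discards per-patch noise.
    """
    return svt(M, tau)

# Example: 40 similar patches of 16 face normals each -> a (40, 48) matrix.
# X_clean = denoise_patch_matrix(np.random.randn(40, 48), tau=2.0)
```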
{
"issue": {
"id": "1ECXHMu0OWc",
"title": "Aug.",
"year": "2022",
"issueNum": "08",
"idPrefix": "tg",
"pubType": "journal",
"volume": "28",
"label": "Aug.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1pDnJLfMBWg",
"doi": "10.1109/TVCG.2020.3045490",
"abstract": "We examine the problem of mesh denoising, which consists of removing noise from corrupted 3D meshes while preserving existing geometric features. Most mesh denoising methods require a lot of mesh-specific parameter fine-tuning, to account for specific features and noise types. In recent years, data-driven methods have demonstrated their robustness and effectiveness with respect to noise and feature properties on a wide variety of geometry and image problems. Most existing mesh denoising methods still use hand-crafted features, and locally denoise facets rather than examine the mesh globally. In this work, we propose the use of a fully end-to-end learning strategy based on graph convolutions, where meaningful features are learned directly by our network. It operates on a graph of facets, directly on the existing topology of the mesh, without resampling, and follows a multi-scale design to extract geometric features at different resolution levels. Similar to most recent pipelines, given a noisy mesh, we first denoise face normals with our novel approach, then update vertex positions accordingly. Our method performs significantly better than the current state-of-the-art learning-based methods. Additionally, we show that it can be trained on noisy data, without explicit correspondence between noisy and ground-truth facets. We also propose a multi-scale denoising strategy, better suited to correct noise with a low spatial frequency.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We examine the problem of mesh denoising, which consists of removing noise from corrupted 3D meshes while preserving existing geometric features. Most mesh denoising methods require a lot of mesh-specific parameter fine-tuning, to account for specific features and noise types. In recent years, data-driven methods have demonstrated their robustness and effectiveness with respect to noise and feature properties on a wide variety of geometry and image problems. Most existing mesh denoising methods still use hand-crafted features, and locally denoise facets rather than examine the mesh globally. In this work, we propose the use of a fully end-to-end learning strategy based on graph convolutions, where meaningful features are learned directly by our network. It operates on a graph of facets, directly on the existing topology of the mesh, without resampling, and follows a multi-scale design to extract geometric features at different resolution levels. Similar to most recent pipelines, given a noisy mesh, we first denoise face normals with our novel approach, then update vertex positions accordingly. Our method performs significantly better than the current state-of-the-art learning-based methods. Additionally, we show that it can be trained on noisy data, without explicit correspondence between noisy and ground-truth facets. We also propose a multi-scale denoising strategy, better suited to correct noise with a low spatial frequency.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We examine the problem of mesh denoising, which consists of removing noise from corrupted 3D meshes while preserving existing geometric features. Most mesh denoising methods require a lot of mesh-specific parameter fine-tuning, to account for specific features and noise types. In recent years, data-driven methods have demonstrated their robustness and effectiveness with respect to noise and feature properties on a wide variety of geometry and image problems. Most existing mesh denoising methods still use hand-crafted features, and locally denoise facets rather than examine the mesh globally. In this work, we propose the use of a fully end-to-end learning strategy based on graph convolutions, where meaningful features are learned directly by our network. It operates on a graph of facets, directly on the existing topology of the mesh, without resampling, and follows a multi-scale design to extract geometric features at different resolution levels. Similar to most recent pipelines, given a noisy mesh, we first denoise face normals with our novel approach, then update vertex positions accordingly. Our method performs significantly better than the current state-of-the-art learning-based methods. Additionally, we show that it can be trained on noisy data, without explicit correspondence between noisy and ground-truth facets. We also propose a multi-scale denoising strategy, better suited to correct noise with a low spatial frequency.",
"title": "Mesh Denoising With Facet Graph Convolutions",
"normalizedTitle": "Mesh Denoising With Facet Graph Convolutions",
"fno": "09296808",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Computational Geometry",
"Computer Graphics",
"Feature Extraction",
"Image Denoising",
"Learning Artificial Intelligence",
"Mesh Generation",
"Image Problems",
"Existing Mesh",
"Hand Crafted Features",
"Locally Denoise Facets",
"End To End Learning Strategy",
"Existing Topology",
"Geometric Features",
"Noisy Mesh",
"Ground Truth Facets",
"Multiscale Denoising Strategy",
"Facet Graph Convolutions",
"Removing Noise",
"Corrupted 3 D Meshes",
"Mesh Denoising Methods",
"Mesh Specific Parameter Fine Tuning",
"Data Driven Methods",
"Feature Properties",
"Geometry",
"Noise Reduction",
"Faces",
"Shape",
"Noise Measurement",
"Three Dimensional Displays",
"Surface Treatment",
"Optimization",
"Mesh Denoising",
"Normal Filtering",
"Graph Convolution",
"Feature Preserving",
"Geometric Deep Learning"
],
"authors": [
{
"givenName": "Matthieu",
"surname": "Armando",
"fullName": "Matthieu Armando",
"affiliation": "Inria, CNRS, Grenoble INP, LJK, University Grenoble Alpes, Grenoble, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jean-Sébastien",
"surname": "Franco",
"fullName": "Jean-Sébastien Franco",
"affiliation": "Inria, CNRS, Grenoble INP, LJK, University Grenoble Alpes, Grenoble, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Edmond",
"surname": "Boyer",
"fullName": "Edmond Boyer",
"affiliation": "Inria, CNRS, Grenoble INP, LJK, University Grenoble Alpes, Grenoble, France",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "08",
"pubDate": "2022-08-01 00:00:00",
"pubType": "trans",
"pages": "2999-3012",
"year": "2022",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/cvprw/2012/1611/0/06238917",
"title": "Similarity based filtering of point clouds",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2012/06238917/12OmNvk7JOA",
"parentPublication": {
"id": "proceedings/cvprw/2012/1611/0",
"title": "2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2010/8420/0/05720332",
"title": "Mesh Denoising Using Quadric Error Metric",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2010/05720332/12OmNxecRQw",
"parentPublication": {
"id": "proceedings/sibgrapi/2010/8420/0",
"title": "2010 23rd SIBGRAPI Conference on Graphics, Patterns and Images",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/03/07328329",
"title": "A Robust Scheme for Feature-Preserving Mesh Denoising",
"doi": null,
"abstractUrl": "/journal/tg/2016/03/07328329/13rRUwIF69l",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/08/08012522",
"title": "Mesh Denoising Based on Normal Voting Tensor and Binary Optimization",
"doi": null,
"abstractUrl": "/journal/tg/2018/08/08012522/13rRUx0PqpA",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/01/06822598",
"title": "Bi-Normal Filtering for Mesh Denoising",
"doi": null,
"abstractUrl": "/journal/tg/2015/01/06822598/13rRUxYINff",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/06/08344461",
"title": "Robust and High Fidelity Mesh Denoising",
"doi": null,
"abstractUrl": "/journal/tg/2019/06/08344461/13rRUxcbnHm",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/10/08434353",
"title": "Mesh Denoising Guided by Patch Normal Co-Filtering via Kernel Low-Rank Recovery",
"doi": null,
"abstractUrl": "/journal/tg/2019/10/08434353/13rRUy2YLYE",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/03/08283576",
"title": "Feature Preserving Mesh Denoising Based on Graph Spectral Processing",
"doi": null,
"abstractUrl": "/journal/tg/2019/03/08283576/17D45XcttjZ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2018/9264/0/926400a001",
"title": "Adaptive Patches for Mesh Denoising",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2018/926400a001/17D45XvMce1",
"parentPublication": {
"id": "proceedings/sibgrapi/2018/9264/0",
"title": "2018 31st SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2022/9062/0/09956054",
"title": "DMD-Net: Deep Mesh Denoising Network",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2022/09956054/1IHqmwf4Sk0",
"parentPublication": {
"id": "proceedings/icpr/2022/9062/0",
"title": "2022 26th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09307276",
"articleId": "1pOZrP70lGM",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09286680",
"articleId": "1por35qBdQs",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1ECXQLAVaYU",
"name": "ttg202208-09296808s1-supp1-3045490.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg202208-09296808s1-supp1-3045490.pdf",
"extension": "pdf",
"size": "45.5 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
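The record above learns features with graph convolutions over the facet (face-adjacency) graph. Below is a single mean-aggregation layer over that graph as a minimal sketch; it is not the paper's multi-scale network, and `facet_graph_conv`, `W_self`, and `W_nbr` are illustrative placeholders.

```python
from collections import defaultdict
import numpy as np

def facet_adjacency(F):
    """Faces sharing an edge are neighbours; returns one neighbour-index list per face."""
    edge2faces = defaultdict(list)
    for fi, (a, b, c) in enumerate(F):
        for e in ((a, b), (b, c), (c, a)):
            edge2faces[tuple(sorted(e))].append(fi)
    nbrs = [[] for _ in range(len(F))]
    for faces in edge2faces.values():
        for i in faces:
            nbrs[i].extend(j for j in faces if j != i)
    return nbrs

def facet_graph_conv(X, nbrs, W_self, W_nbr):
    """One graph-convolution layer on per-face features X of shape (m, d)."""
    out = X @ W_self
    for i, js in enumerate(nbrs):
        if js:
            out[i] += X[js].mean(axis=0) @ W_nbr  # aggregate edge-adjacent faces
    return np.maximum(out, 0.0)  # ReLU
```

Stacking several such layers, with pooling between resolutions, gives the multi-scale flavour the abstract describes; a real implementation would use a deep-learning framework rather than NumPy.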
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1I1KuH1xVF6",
"doi": "10.1109/TVCG.2022.3219762",
"abstract": "A systematic review (SR) is essential with up-to-date research evidence to support clinical decisions and practices. However, the growing literature volume makes it challenging for SR reviewers and clinicians to discover useful information efficiently. Many human-in-the-loop information retrieval approaches (HIR) have been proposed to rank documents semantically similar to users' queries and provide interactive visualizations to facilitate document retrieval. Given that the queries are mainly composed of keywords and keyphrases retrieving documents that are semantically similar to a query does not necessarily respond to the clinician's need. Clinicians still have to review many documents to find the solution. The problem motivates us to develop a visual analytics system, DocFlow, to facilitate information-seeking. One of the features of our DocFlow is accepting natural language questions. The detailed description enables retrieving documents that can answer users' questions. Additionally, clinicians often categorize documents based on their backgrounds and with different purposes (e.g., populations, treatments). Since the criteria are unknown and cannot be pre-defined in advance, existing methods can only achieve categorization by considering the entire information in documents. In contrast, by locating answers in each document, our DocFlow can intelligently categorize documents based on users' questions. The second feature of our DocFlow is a flexible interface where users can arrange a sequence of questions to customize their rules for document retrieval and categorization. The two features of this visual analytics system support a flexible information-seeking process. The case studies and the feedback from domain experts demonstrate the usefulness and effectiveness of our DocFlow.",
"abstracts": [
{
"abstractType": "Regular",
"content": "A systematic review (SR) is essential with up-to-date research evidence to support clinical decisions and practices. However, the growing literature volume makes it challenging for SR reviewers and clinicians to discover useful information efficiently. Many human-in-the-loop information retrieval approaches (HIR) have been proposed to rank documents semantically similar to users' queries and provide interactive visualizations to facilitate document retrieval. Given that the queries are mainly composed of keywords and keyphrases retrieving documents that are semantically similar to a query does not necessarily respond to the clinician's need. Clinicians still have to review many documents to find the solution. The problem motivates us to develop a visual analytics system, DocFlow, to facilitate information-seeking. One of the features of our DocFlow is accepting natural language questions. The detailed description enables retrieving documents that can answer users' questions. Additionally, clinicians often categorize documents based on their backgrounds and with different purposes (e.g., populations, treatments). Since the criteria are unknown and cannot be pre-defined in advance, existing methods can only achieve categorization by considering the entire information in documents. In contrast, by locating answers in each document, our DocFlow can intelligently categorize documents based on users' questions. The second feature of our DocFlow is a flexible interface where users can arrange a sequence of questions to customize their rules for document retrieval and categorization. The two features of this visual analytics system support a flexible information-seeking process. The case studies and the feedback from domain experts demonstrate the usefulness and effectiveness of our DocFlow.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A systematic review (SR) is essential with up-to-date research evidence to support clinical decisions and practices. However, the growing literature volume makes it challenging for SR reviewers and clinicians to discover useful information efficiently. Many human-in-the-loop information retrieval approaches (HIR) have been proposed to rank documents semantically similar to users' queries and provide interactive visualizations to facilitate document retrieval. Given that the queries are mainly composed of keywords and keyphrases retrieving documents that are semantically similar to a query does not necessarily respond to the clinician's need. Clinicians still have to review many documents to find the solution. The problem motivates us to develop a visual analytics system, DocFlow, to facilitate information-seeking. One of the features of our DocFlow is accepting natural language questions. The detailed description enables retrieving documents that can answer users' questions. Additionally, clinicians often categorize documents based on their backgrounds and with different purposes (e.g., populations, treatments). Since the criteria are unknown and cannot be pre-defined in advance, existing methods can only achieve categorization by considering the entire information in documents. In contrast, by locating answers in each document, our DocFlow can intelligently categorize documents based on users' questions. The second feature of our DocFlow is a flexible interface where users can arrange a sequence of questions to customize their rules for document retrieval and categorization. The two features of this visual analytics system support a flexible information-seeking process. The case studies and the feedback from domain experts demonstrate the usefulness and effectiveness of our DocFlow.",
"title": "DocFlow: A Visual Analytics System for Question-based Document Retrieval and Categorization",
"normalizedTitle": "DocFlow: A Visual Analytics System for Question-based Document Retrieval and Categorization",
"fno": "09939115",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Systematics",
"Visual Analytics",
"Task Analysis",
"Semantics",
"Human In The Loop",
"Natural Languages",
"Bit Error Rate",
"Biomedical Systematic Review",
"Evidence Based Practice",
"Human In The Loop Information Retrieval",
"Question Based Document Retrieval",
"Question Based Document Categorization"
],
"authors": [
{
"givenName": "Rui",
"surname": "Qiu",
"fullName": "Rui Qiu",
"affiliation": "Computer Science and Engineering, The Ohio State University, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yamei",
"surname": "Tu",
"fullName": "Yamei Tu",
"affiliation": "Computer Science and Engineering, The Ohio State University, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yu-Shuen",
"surname": "Wang",
"fullName": "Yu-Shuen Wang",
"affiliation": "Computer Science, National Yang Ming Chiao Tung University, Taiwan",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Po-Yin",
"surname": "Yen",
"fullName": "Po-Yin Yen",
"affiliation": "Institute for Informatics, Washington University School of Medicine, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Han-Wei",
"surname": "Shen",
"fullName": "Han-Wei Shen",
"affiliation": "Computer Science and Engineering, The Ohio State University, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-11-01 00:00:00",
"pubType": "trans",
"pages": "1-15",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vast/2014/6227/0/07042511",
"title": "VisIRR: Visual analytics for information retrieval and recommendation with large-scale document data",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2014/07042511/12OmNASraHn",
"parentPublication": {
"id": "proceedings/vast/2014/6227/0",
"title": "2014 IEEE Conference on Visual Analytics Science and Technology (VAST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460915",
"title": "Logo spotting for document categorization",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460915/12OmNBInLk9",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wmwa/2009/3646/0/3646a197",
"title": "Web Document Categorization Algorithm Using LDE and MA",
"doi": null,
"abstractUrl": "/proceedings-article/wmwa/2009/3646a197/12OmNBmf3bO",
"parentPublication": {
"id": "proceedings/wmwa/2009/3646/0",
"title": "Web Mining and Web-based Application, Pacific-Asia Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccsit/2008/3308/0/3308a367",
"title": "A New Retrieval Ranking Method based on Document Retrieval Expected Value in Chinese Document",
"doi": null,
"abstractUrl": "/proceedings-article/iccsit/2008/3308a367/12OmNBqdr2Y",
"parentPublication": {
"id": "proceedings/iccsit/2008/3308/0",
"title": "2008 International Conference on Computer Science and Information Technology",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fbie/2008/3561/0/3561a437",
"title": "Research on Medical Document Categorization",
"doi": null,
"abstractUrl": "/proceedings-article/fbie/2008/3561a437/12OmNx7G5Sj",
"parentPublication": {
"id": "proceedings/fbie/2008/3561/0",
"title": "2008 International Seminar on Future Biomedical Information Engineering (FBIE 2008)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iitaw/2008/3505/0/3505a601",
"title": "Kernel Discriminant Analysis Algorithm for Document Categorization",
"doi": null,
"abstractUrl": "/proceedings-article/iitaw/2008/3505a601/12OmNyiUBoZ",
"parentPublication": {
"id": "proceedings/iitaw/2008/3505/0",
"title": "2008 International Symposium on Intelligent Information Technology Application Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dexa/2001/1230/0/12300270",
"title": "Document Categorization and Retrieval Using Semantic Microfeatures and Growing Cell Structures",
"doi": null,
"abstractUrl": "/proceedings-article/dexa/2001/12300270/12OmNzaQoaP",
"parentPublication": {
"id": "proceedings/dexa/2001/1230/0",
"title": "12th International Workshop on Database and Expert Systems Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2002/06/i0838",
"title": "Imaged Document Text Retrieval Without OCR",
"doi": null,
"abstractUrl": "/journal/tp/2002/06/i0838/13rRUzphDyR",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aike/2021/3736/0/373600a029",
"title": "Towards Intelligent Legal Advisors for Document Retrieval and Question-Answering in German Legal Documents",
"doi": null,
"abstractUrl": "/proceedings-article/aike/2021/373600a029/1BrADc8Hd5K",
"parentPublication": {
"id": "proceedings/aike/2021/3736/0",
"title": "2021 IEEE Fourth International Conference on Artificial Intelligence and Knowledge Engineering (AIKE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/escience/2019/2451/0/245100a533",
"title": "Iterative Document Retrieval via Deep Learning Approaches for Biomedical Question Answering",
"doi": null,
"abstractUrl": "/proceedings-article/escience/2019/245100a533/1ike1bwk62I",
"parentPublication": {
"id": "proceedings/escience/2019/2451/0",
"title": "2019 15th International Conference on eScience (eScience)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09938388",
"articleId": "1I05BGZpHZ6",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09940545",
"articleId": "1I6O5QqMxQ4",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1I6O5iENFi8",
"name": "ttg555501-09939115s1-supp1-3219762.mov",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09939115s1-supp1-3219762.mov",
"extension": "mov",
"size": "140 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
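DocFlow, as described above, answers natural-language questions inside each document; the snippet below only sketches the simpler retrieval skeleton (ranking documents against a question by TF-IDF cosine similarity with scikit-learn). The function name and parameters are assumptions, not the system's API.

```python
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

def rank_documents(question, documents, top_k=5):
    """Return (document index, similarity score) pairs, best match first."""
    vectorizer = TfidfVectorizer(stop_words="english")
    doc_matrix = vectorizer.fit_transform(documents)   # (n_docs, vocab)
    query_vec = vectorizer.transform([question])       # (1, vocab)
    scores = cosine_similarity(query_vec, doc_matrix)[0]
    order = scores.argsort()[::-1][:top_k]
    return [(int(i), float(scores[i])) for i in order]

# Example with a hypothetical corpus of abstracts:
# rank_documents("Which treatments reduce hospital readmission?", abstracts)
```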
{
"issue": {
"id": "1zBamVZHyne",
"title": "Jan.",
"year": "2022",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "28",
"label": "Jan.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1xlw2uJhEXe",
"doi": "10.1109/TVCG.2021.3114836",
"abstract": "Machine learning (ML) is increasingly applied to Electronic Health Records (EHRs) to solve clinical prediction tasks. Although many ML models perform promisingly, issues with model transparency and interpretability limit their adoption in clinical practice. Directly using existing explainable ML techniques in clinical settings can be challenging. Through literature surveys and collaborations with six clinicians with an average of 17 years of clinical experience, we identified three key challenges, including clinicians' unfamiliarity with ML features, lack of contextual information, and the need for cohort-level evidence. Following an iterative design process, we further designed and developed VBridge, a visual analytics tool that seamlessly incorporates ML explanations into clinicians' decision-making workflow. The system includes a novel hierarchical display of contribution-based feature explanations and enriched interactions that <italic>connect the dots</italic> between ML features, explanations, and data. We demonstrated the effectiveness of VBridge through two case studies and expert interviews with four clinicians, showing that visually associating model explanations with patients' situational records can help clinicians better interpret and use model predictions when making clinician decisions. We further derived a list of design implications for developing future explainable ML tools to support clinical decision-making.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Machine learning (ML) is increasingly applied to Electronic Health Records (EHRs) to solve clinical prediction tasks. Although many ML models perform promisingly, issues with model transparency and interpretability limit their adoption in clinical practice. Directly using existing explainable ML techniques in clinical settings can be challenging. Through literature surveys and collaborations with six clinicians with an average of 17 years of clinical experience, we identified three key challenges, including clinicians' unfamiliarity with ML features, lack of contextual information, and the need for cohort-level evidence. Following an iterative design process, we further designed and developed VBridge, a visual analytics tool that seamlessly incorporates ML explanations into clinicians' decision-making workflow. The system includes a novel hierarchical display of contribution-based feature explanations and enriched interactions that <italic>connect the dots</italic> between ML features, explanations, and data. We demonstrated the effectiveness of VBridge through two case studies and expert interviews with four clinicians, showing that visually associating model explanations with patients' situational records can help clinicians better interpret and use model predictions when making clinician decisions. We further derived a list of design implications for developing future explainable ML tools to support clinical decision-making.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Machine learning (ML) is increasingly applied to Electronic Health Records (EHRs) to solve clinical prediction tasks. Although many ML models perform promisingly, issues with model transparency and interpretability limit their adoption in clinical practice. Directly using existing explainable ML techniques in clinical settings can be challenging. Through literature surveys and collaborations with six clinicians with an average of 17 years of clinical experience, we identified three key challenges, including clinicians' unfamiliarity with ML features, lack of contextual information, and the need for cohort-level evidence. Following an iterative design process, we further designed and developed VBridge, a visual analytics tool that seamlessly incorporates ML explanations into clinicians' decision-making workflow. The system includes a novel hierarchical display of contribution-based feature explanations and enriched interactions that connect the dots between ML features, explanations, and data. We demonstrated the effectiveness of VBridge through two case studies and expert interviews with four clinicians, showing that visually associating model explanations with patients' situational records can help clinicians better interpret and use model predictions when making clinician decisions. We further derived a list of design implications for developing future explainable ML tools to support clinical decision-making.",
"title": "VBridge: Connecting the Dots Between Features and Data to Explain Healthcare Models",
"normalizedTitle": "VBridge: Connecting the Dots Between Features and Data to Explain Healthcare Models",
"fno": "09555810",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Predictive Models",
"Decision Making",
"Tools",
"Computational Modeling",
"Visual Analytics",
"Hospitals",
"Task Analysis",
"Explainable Artificial Intelligence",
"Healthcare",
"Visual Analytics",
"Decision Making"
],
"authors": [
{
"givenName": "Furui",
"surname": "Cheng",
"fullName": "Furui Cheng",
"affiliation": "Hong Kong University of Science and Technology, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Dongyu",
"surname": "Liu",
"fullName": "Dongyu Liu",
"affiliation": "Massachusetts Institute of Technology, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Fan",
"surname": "Du",
"fullName": "Fan Du",
"affiliation": "Adobe Research, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yanna",
"surname": "Lin",
"fullName": "Yanna Lin",
"affiliation": "Hong Kong University of Science and Technology, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Alexandra",
"surname": "Zytek",
"fullName": "Alexandra Zytek",
"affiliation": "Massachusetts Institute of Technology, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Haomin",
"surname": "Li",
"fullName": "Haomin Li",
"affiliation": "Children's Hospital of Zhejiang University School of Medicine, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Huamin",
"surname": "Qu",
"fullName": "Huamin Qu",
"affiliation": "Hong Kong University of Science and Technology, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Kalyan",
"surname": "Veeramachaneni",
"fullName": "Kalyan Veeramachaneni",
"affiliation": "Massachusetts Institute of Technology, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-01-01 00:00:00",
"pubType": "trans",
"pages": "378-388",
"year": "2022",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tg/2019/03/08304678",
"title": "KAVAGait: Knowledge-Assisted Visual Analytics for Clinical Gait Analysis",
"doi": null,
"abstractUrl": "/journal/tg/2019/03/08304678/17D45WaTkk5",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vast/2009/5283/0/05333023",
"title": "Connecting the dots in visual analysis",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2009/05333023/1HJyHYJk7eg",
"parentPublication": {
"id": "proceedings/vast/2009/5283/0",
"title": "2009 IEEE Symposium on Visual Analytics Science and Technology",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ai/5555/01/09971460",
"title": "User-Centric Explainability in Healthcare: A Knowledge-Level Perspective of Informed Machine Learning",
"doi": null,
"abstractUrl": "/journal/ai/5555/01/09971460/1ISVU8Rd528",
"parentPublication": {
"id": "trans/ai",
"title": "IEEE Transactions on Artificial Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/e-science/2022/6124/0/612400a431",
"title": "PiMS: A Pre-ML Labelling Tool",
"doi": null,
"abstractUrl": "/proceedings-article/e-science/2022/612400a431/1J6hpa8Fh8k",
"parentPublication": {
"id": "proceedings/e-science/2022/6124/0",
"title": "2022 IEEE 18th International Conference on e-Science (e-Science)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/trex/2022/9356/0/935600a008",
"title": "Trustworthy Visual Analytics in Clinical Gait Analysis: A Case Study for Patients with Cerebral Palsy",
"doi": null,
"abstractUrl": "/proceedings-article/trex/2022/935600a008/1J9BkDHcAz6",
"parentPublication": {
"id": "proceedings/trex/2022/9356/0",
"title": "2022 IEEE Workshop on TRust and EXpertise in Visual Analytics (TREX)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vahc/2022/0103/0/10108523",
"title": "Evaluation of Data Visualizations for an Electronic Patient Preferences Tool for Older Adults Diagnosed with Hematologic Malignancies",
"doi": null,
"abstractUrl": "/proceedings-article/vahc/2022/10108523/1MIgSlSuYw0",
"parentPublication": {
"id": "proceedings/vahc/2022/0103/0",
"title": "2022 Workshop on Visual Analytics in Healthcare (VAHC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bracis/2019/4253/0/425300a467",
"title": "Argumentation-Based Agents that Explain Their Decisions",
"doi": null,
"abstractUrl": "/proceedings-article/bracis/2019/425300a467/1fHkH71PDFu",
"parentPublication": {
"id": "proceedings/bracis/2019/4253/0",
"title": "2019 8th Brazilian Conference on Intelligent Systems (BRACIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2019/1867/0/08983360",
"title": "Patient Activity Monitoring Based on Real-Time Location Data",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2019/08983360/1hguccNTnfG",
"parentPublication": {
"id": "proceedings/bibm/2019/1867/0",
"title": "2019 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ichi/2020/5382/0/09374365",
"title": "Machine Learning Based Clinical Decision Support and Clinician Trust",
"doi": null,
"abstractUrl": "/proceedings-article/ichi/2020/09374365/1rUIXSTum4M",
"parentPublication": {
"id": "proceedings/ichi/2020/5382/0",
"title": "2020 IEEE International Conference on Healthcare Informatics (ICHI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/trex/2021/1817/0/181700a052",
"title": "How to deal with Uncertainty in Machine Learning for Medical Imaging?",
"doi": null,
"abstractUrl": "/proceedings-article/trex/2021/181700a052/1yQB6pOqNNK",
"parentPublication": {
"id": "proceedings/trex/2021/1817/0",
"title": "2021 IEEE Workshop on TRust and EXpertise in Visual Analytics (TREX)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09552229",
"articleId": "1xic387kwVy",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09552906",
"articleId": "1xic46x3fmU",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1zBb0CGCySQ",
"name": "ttg202201-09555810s1-supp2-3114836.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg202201-09555810s1-supp2-3114836.pdf",
"extension": "pdf",
"size": "437 kB",
"__typename": "WebExtraType"
},
{
"id": "1zBb0ITwDgk",
"name": "ttg202201-09555810s1-supp1-3114836.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg202201-09555810s1-supp1-3114836.mp4",
"extension": "mp4",
"size": "14.2 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
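The VBridge record above describes a hierarchical display of contribution-based feature explanations. The following Python sketch illustrates one plausible way such per-feature contribution scores could be rolled up into clinically meaningful groups for a collapsed/expanded view; the feature names, grouping scheme, and scores are hypothetical, and this is not the authors' implementation.

```python
# Illustrative sketch (not VBridge's code): aggregating per-feature
# contribution scores into a hierarchy of clinically meaningful groups.
# All feature names, groups, and scores below are invented.

from collections import defaultdict

# Hypothetical per-feature contributions from some attribution method
# (e.g., Shapley-style values); positive values push the prediction up.
contributions = {
    "lab/lactate_mean_24h": 0.21,
    "lab/creatinine_max_24h": 0.08,
    "vitals/heart_rate_mean_6h": 0.12,
    "vitals/spo2_min_6h": -0.05,
    "demographics/age": 0.03,
}

def group_of(feature_name: str) -> str:
    """Top-level group is the prefix before the first '/'."""
    return feature_name.split("/", 1)[0]

def aggregate(contribs: dict[str, float]) -> dict[str, float]:
    """Sum feature contributions within each top-level group."""
    totals: dict[str, float] = defaultdict(float)
    for name, value in contribs.items():
        totals[group_of(name)] += value
    return dict(totals)

if __name__ == "__main__":
    group_totals = aggregate(contributions)
    # Collapsed view: groups first, most influential first ...
    for group, total in sorted(group_totals.items(), key=lambda kv: -abs(kv[1])):
        print(f"{group:<14} total contribution {total:+.2f}")
        # ... then the expanded, per-feature view inside each group.
        members = {k: v for k, v in contributions.items() if group_of(k) == group}
        for name, value in sorted(members.items(), key=lambda kv: -abs(kv[1])):
            print(f"  {name:<32} {value:+.2f}")
```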
{
"issue": {
"id": "12OmNySXEX8",
"title": "May-June",
"year": "2018",
"issueNum": "03",
"idPrefix": "tb",
"pubType": "journal",
"volume": "15",
"label": "May-June",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUyXKxT5",
"doi": "10.1109/TCBB.2017.2695187",
"abstract": "Phenotypes and diseases are often determined by the complex interactions between genetic factors and environmental factors (EFs). However, compared with protein-coding genes and microRNAs, there is a paucity of computational methods for understanding the associations between long non-coding RNAs (lncRNAs) and EFs. In this study, we focused on the associations between lncRNA and EFs. By using the common miRNA partners of any pair of lncRNA and EF, based on the competing endogenous RNA (ceRNA) hypothesis and the technique of resources transfer within the experimentally-supported lncRNA-miRNA and miRNA-EF association bipartite networks, we propose an algorithm for predicting new lncRNA-EF associations. Results show that, compared with another recently-proposed method, our approach is capable of predicting more credible lncRNA-EF associations. These results support the validity of our approach to predict biologically significant associations, which could lead to a better understanding of the molecular processes.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Phenotypes and diseases are often determined by the complex interactions between genetic factors and environmental factors (EFs). However, compared with protein-coding genes and microRNAs, there is a paucity of computational methods for understanding the associations between long non-coding RNAs (lncRNAs) and EFs. In this study, we focused on the associations between lncRNA and EFs. By using the common miRNA partners of any pair of lncRNA and EF, based on the competing endogenous RNA (ceRNA) hypothesis and the technique of resources transfer within the experimentally-supported lncRNA-miRNA and miRNA-EF association bipartite networks, we propose an algorithm for predicting new lncRNA-EF associations. Results show that, compared with another recently-proposed method, our approach is capable of predicting more credible lncRNA-EF associations. These results support the validity of our approach to predict biologically significant associations, which could lead to a better understanding of the molecular processes.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Phenotypes and diseases are often determined by the complex interactions between genetic factors and environmental factors (EFs). However, compared with protein-coding genes and microRNAs, there is a paucity of computational methods for understanding the associations between long non-coding RNAs (lncRNAs) and EFs. In this study, we focused on the associations between lncRNA and EFs. By using the common miRNA partners of any pair of lncRNA and EF, based on the competing endogenous RNA (ceRNA) hypothesis and the technique of resources transfer within the experimentally-supported lncRNA-miRNA and miRNA-EF association bipartite networks, we propose an algorithm for predicting new lncRNA-EF associations. Results show that, compared with another recently-proposed method, our approach is capable of predicting more credible lncRNA-EF associations. These results support the validity of our approach to predict biologically significant associations, which could lead to a better understanding of the molecular processes.",
"title": "A Bipartite Network and Resource Transfer-Based Approach to Infer lncRNA-Environmental Factor Associations",
"normalizedTitle": "A Bipartite Network and Resource Transfer-Based Approach to Infer lncRNA-Environmental Factor Associations",
"fno": "07903695",
"hasPdf": true,
"idPrefix": "tb",
"keywords": [
"Bioinformatics",
"Cellular Biophysics",
"Diseases",
"Genetics",
"Genomics",
"Molecular Biophysics",
"Molecular Configurations",
"Proteins",
"RNA",
"Resource Transfer",
"Lnc RNA Environmental Factor Associations",
"Diseases",
"Complex Interactions",
"Genetic Factors",
"Environmental Factors",
"E Fs",
"Protein Coding Genes",
"Noncoding RN As",
"Common Mi RNA Partners",
"Competing Endogenous RNA Hypothesis",
"Resources Transfer",
"Credible Lnc RNA EF Associations",
"Biologically Significant Associations",
"Molecular Processes",
"Mi RNA EF Association Bipartite Networks",
"Diseases",
"Bioinformatics",
"Proteins",
"RNA",
"Genomics",
"Environmental Factors",
"Lnc RNA",
"M RNA",
"Bipartite Network",
"Resource Transfer"
],
"authors": [
{
"givenName": "Jie",
"surname": "Zhou",
"fullName": "Jie Zhou",
"affiliation": "Guangdong Key Laboratory of Computer Network, School of Computer Science and Engineering, South China University of Technology, Guangzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yuan-Yuan",
"surname": "Shi",
"fullName": "Yuan-Yuan Shi",
"affiliation": "Guangdong Key Laboratory of Computer Network, School of Computer Science and Engineering, South China University of Technology, Guangzhou, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "03",
"pubDate": "2018-05-01 00:00:00",
"pubType": "trans",
"pages": "753-759",
"year": "2018",
"issn": "1545-5963",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/bibm/2017/3050/0/08217630",
"title": "Combining diffusion and HeteSim features for accurate prediction of protein-lncRNA interactions",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2017/08217630/12OmNwDSdkc",
"parentPublication": {
"id": "proceedings/bibm/2017/3050/0",
"title": "2017 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tb/2017/04/07447729",
"title": "Inferring MicroRNA-Disease Associations by Random Walk on a Heterogeneous Network with Multiple Data Sources",
"doi": null,
"abstractUrl": "/journal/tb/2017/04/07447729/13rRUxN5eC7",
"parentPublication": {
"id": "trans/tb",
"title": "IEEE/ACM Transactions on Computational Biology and Bioinformatics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2022/6819/0/09995182",
"title": "Predicting LncRNA-Disease Associations Based on LncRNA-MiRNA-Disease Multilayer Association Network and Bipartite Network Recommendation",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2022/09995182/1JC2rl3s9qw",
"parentPublication": {
"id": "proceedings/bibm/2022/6819/0",
"title": "2022 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tb/2021/03/08807138",
"title": "ILDMSF: Inferring Associations Between Long Non-Coding RNA and Disease Based on Multi-Similarity Fusion",
"doi": null,
"abstractUrl": "/journal/tb/2021/03/08807138/1cG5WCJy2Ry",
"parentPublication": {
"id": "trans/tb",
"title": "IEEE/ACM Transactions on Computational Biology and Bioinformatics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tb/2021/04/08897023",
"title": "LDAH2V: Exploring Meta-Paths Across Multiple Networks for lncRNA-Disease Association Prediction",
"doi": null,
"abstractUrl": "/journal/tb/2021/04/08897023/1eTOrcv0Gha",
"parentPublication": {
"id": "trans/tb",
"title": "IEEE/ACM Transactions on Computational Biology and Bioinformatics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tb/2021/06/09001212",
"title": "MHRWR: Prediction of lncRNA-Disease Associations Based on Multiple Heterogeneous Networks",
"doi": null,
"abstractUrl": "/journal/tb/2021/06/09001212/1hwsZTKT6us",
"parentPublication": {
"id": "trans/tb",
"title": "IEEE/ACM Transactions on Computational Biology and Bioinformatics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2020/6215/0/09313139",
"title": "Prediction of LncRNA-Disease Associations Based on Network Representation Learning",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2020/09313139/1qmfMsBX4ek",
"parentPublication": {
"id": "proceedings/bibm/2020/6215/0",
"title": "2020 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tb/2022/04/09395216",
"title": "Graph Convolutional Auto-Encoders for Predicting Novel lncRNA-Disease Associations",
"doi": null,
"abstractUrl": "/journal/tb/2022/04/09395216/1sypWaBmk5q",
"parentPublication": {
"id": "trans/tb",
"title": "IEEE/ACM Transactions on Computational Biology and Bioinformatics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tb/2022/06/09541087",
"title": "DHNLDA: A Novel Deep Hierarchical Network Based Method for Predicting lncRNA-Disease Associations",
"doi": null,
"abstractUrl": "/journal/tb/2022/06/09541087/1x3fKVMzj7W",
"parentPublication": {
"id": "trans/tb",
"title": "IEEE/ACM Transactions on Computational Biology and Bioinformatics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tb/2023/01/09657207",
"title": "HEGANLDA: A Computational Model for Predicting Potential Lncrna-Disease Associations Based On Multiple Heterogeneous Networks",
"doi": null,
"abstractUrl": "/journal/tb/2023/01/09657207/1zw1cXVOn6g",
"parentPublication": {
"id": "trans/tb",
"title": "IEEE/ACM Transactions on Computational Biology and Bioinformatics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "07737072",
"articleId": "13rRUwInvxe",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07845626",
"articleId": "13rRUxC0SuH",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
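The record above outlines an algorithm that scores lncRNA-EF pairs by transferring resources through their shared miRNA partners in two bipartite networks. As a rough illustration only, and not the paper's exact resource-transfer formulation, the sketch below scores each pair by its common miRNA partners, down-weighting high-degree miRNAs; all associations in the toy data are invented.

```python
# Simplified sketch of scoring lncRNA-EF pairs through shared miRNA
# partners in two bipartite networks (lncRNA-miRNA and miRNA-EF).
# NOT the paper's exact resource-transfer formula; it only conveys the
# general idea that pairs sharing many (low-degree) miRNA partners
# receive higher scores. The associations below are made up.

from collections import defaultdict

lnc_to_mir = {
    "lncA": {"miR-1", "miR-2"},
    "lncB": {"miR-2", "miR-3"},
}
ef_to_mir = {
    "smoking": {"miR-2"},
    "arsenic": {"miR-1", "miR-3"},
}

# Degree of each miRNA across both networks (used to down-weight hubs).
mir_degree: dict[str, int] = defaultdict(int)
for partners in list(lnc_to_mir.values()) + list(ef_to_mir.values()):
    for m in partners:
        mir_degree[m] += 1

def score(lnc: str, ef: str) -> float:
    """Sum 1/degree over the miRNAs shared by the lncRNA and the EF."""
    shared = lnc_to_mir[lnc] & ef_to_mir[ef]
    return sum(1.0 / mir_degree[m] for m in shared)

if __name__ == "__main__":
    for lnc in lnc_to_mir:
        for ef in ef_to_mir:
            print(f"{lnc} - {ef}: {score(lnc, ef):.2f}")
```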
{
"issue": {
"id": "1vQz59P5p84",
"title": "July-Aug.",
"year": "2021",
"issueNum": "04",
"idPrefix": "tb",
"pubType": "journal",
"volume": "18",
"label": "July-Aug.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1dM288eZYJO",
"doi": "10.1109/TCBB.2019.2944825",
"abstract": "Sparse canonical correlation analysis (SCCA) is a bi-multivariate technique used in imaging genetics to identify complex multi-SNP-multi-QT associations. However, the traditional SCCA algorithm has been designed to seek a linear correlation between the SNP genotype and brain imaging phenotype, ignoring the discriminant similarity information between within-class subjects in brain imaging genetics association analysis. In addition, multi-modality brain imaging phenotypes are extracted from different perspectives and imaging markers from the same region consistently showing up in multimodalities may provide more insights for the mechanistic understanding of diseases. In this paper, a novel multi-modality discriminant SCCA algorithm (MD-SCCA) is proposed to overcome these limitations as well as to improve learning results by incorporating valuable discriminant similarity information into the SCCA algorithm. Specifically, we first extract the discriminant similarity information between within-class subjects by the sparse representation. Second, the discriminant similarity information is enforced within SCCA to construct a discriminant SCCA algorithm (D-SCCA). At last, the MD-SCCA algorithm is adopted to fully explore the relationships among different modalities of different subjects. In experiments, both synthetic dataset and real data from the Alzheimer's Disease Neuroimaging Initiative database are used to test the performance of our algorithm. The empirical results have demonstrated that the proposed algorithm not only produces improved cross-validation performances but also identifies consistent cross-modality imaging genetic biomarkers.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Sparse canonical correlation analysis (SCCA) is a bi-multivariate technique used in imaging genetics to identify complex multi-SNP-multi-QT associations. However, the traditional SCCA algorithm has been designed to seek a linear correlation between the SNP genotype and brain imaging phenotype, ignoring the discriminant similarity information between within-class subjects in brain imaging genetics association analysis. In addition, multi-modality brain imaging phenotypes are extracted from different perspectives and imaging markers from the same region consistently showing up in multimodalities may provide more insights for the mechanistic understanding of diseases. In this paper, a novel multi-modality discriminant SCCA algorithm (MD-SCCA) is proposed to overcome these limitations as well as to improve learning results by incorporating valuable discriminant similarity information into the SCCA algorithm. Specifically, we first extract the discriminant similarity information between within-class subjects by the sparse representation. Second, the discriminant similarity information is enforced within SCCA to construct a discriminant SCCA algorithm (D-SCCA). At last, the MD-SCCA algorithm is adopted to fully explore the relationships among different modalities of different subjects. In experiments, both synthetic dataset and real data from the Alzheimer's Disease Neuroimaging Initiative database are used to test the performance of our algorithm. The empirical results have demonstrated that the proposed algorithm not only produces improved cross-validation performances but also identifies consistent cross-modality imaging genetic biomarkers.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Sparse canonical correlation analysis (SCCA) is a bi-multivariate technique used in imaging genetics to identify complex multi-SNP-multi-QT associations. However, the traditional SCCA algorithm has been designed to seek a linear correlation between the SNP genotype and brain imaging phenotype, ignoring the discriminant similarity information between within-class subjects in brain imaging genetics association analysis. In addition, multi-modality brain imaging phenotypes are extracted from different perspectives and imaging markers from the same region consistently showing up in multimodalities may provide more insights for the mechanistic understanding of diseases. In this paper, a novel multi-modality discriminant SCCA algorithm (MD-SCCA) is proposed to overcome these limitations as well as to improve learning results by incorporating valuable discriminant similarity information into the SCCA algorithm. Specifically, we first extract the discriminant similarity information between within-class subjects by the sparse representation. Second, the discriminant similarity information is enforced within SCCA to construct a discriminant SCCA algorithm (D-SCCA). At last, the MD-SCCA algorithm is adopted to fully explore the relationships among different modalities of different subjects. In experiments, both synthetic dataset and real data from the Alzheimer's Disease Neuroimaging Initiative database are used to test the performance of our algorithm. The empirical results have demonstrated that the proposed algorithm not only produces improved cross-validation performances but also identifies consistent cross-modality imaging genetic biomarkers.",
"title": "Identify Consistent Cross-Modality Imaging Genetic Patterns via Discriminant Sparse Canonical Correlation Analysis",
"normalizedTitle": "Identify Consistent Cross-Modality Imaging Genetic Patterns via Discriminant Sparse Canonical Correlation Analysis",
"fno": "08854121",
"hasPdf": true,
"idPrefix": "tb",
"keywords": [
"Biomedical MRI",
"Brain",
"Correlation Methods",
"Diseases",
"Genetics",
"Neurophysiology",
"Consistent Cross Modality Imaging Genetic Patterns",
"Complex Multi SNP Multi QT Associations",
"Linear Correlation",
"Within Class Subjects",
"Brain Imaging Genetics Association Analysis",
"Multimodality Brain Imaging Phenotypes",
"Imaging Markers",
"Sparse Representation",
"D SCCA",
"MD SCCA Algorithm",
"Genetic Biomarkers",
"Discriminant Sparse Canonical Correlation Analysis",
"Bimultivariate Technique",
"Multi SNP Multi QT Associations",
"SNP Genotype",
"Discriminant Similarity Information",
"Multimodality Discriminant SCCA Algorithm",
"Alzheimers Disease Neuroimaging Initiative Database",
"Imaging",
"Correlation",
"Genetics",
"Brain",
"Diseases",
"Data Mining",
"Sparse Matrices",
"Imaging Genetics",
"Sparse Canonical Correlation Analysis",
"Multi Modality",
"Multi SNP",
"Alzheimers Disease"
],
"authors": [
{
"givenName": "Meiling",
"surname": "Wang",
"fullName": "Meiling Wang",
"affiliation": "MIIT Key Laboratory of Pattern Analysis and Machine Intelligence, College of Computer Science and Technology, Nanjing University of Aeronautics and Astronautics, Nanjing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Wei",
"surname": "Shao",
"fullName": "Wei Shao",
"affiliation": "MIIT Key Laboratory of Pattern Analysis and Machine Intelligence, College of Computer Science and Technology, Nanjing University of Aeronautics and Astronautics, Nanjing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xiaoke",
"surname": "Hao",
"fullName": "Xiaoke Hao",
"affiliation": "School of Artificial Intelligence, Hebei University of Technology, Tianjin, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Li",
"surname": "Shen",
"fullName": "Li Shen",
"affiliation": "Department of Biostatistics, Epidemiology and Informatics, Perelman School of Medicine, University of Pennsylvania, Philadelphia, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Daoqiang",
"surname": "Zhang",
"fullName": "Daoqiang Zhang",
"affiliation": "MIIT Key Laboratory of Pattern Analysis and Machine Intelligence, College of Computer Science and Technology, Nanjing University of Aeronautics and Astronautics, Nanjing, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "2021-07-01 00:00:00",
"pubType": "trans",
"pages": "1549-1561",
"year": "2021",
"issn": "1545-5963",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/bibm/2016/1611/0/07822605",
"title": "Sparse Canonical Correlation Analysis via truncated ℓ1-norm with application to brain imaging genetics",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2016/07822605/12OmNAYoKko",
"parentPublication": {
"id": "proceedings/bibm/2016/1611/0",
"title": "2016 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cia/2015/7771/0/07400866",
"title": "Brain Tumor Segmentation in Multi-modality MRIs Using Multiple Classifier System and Spatial Constraint",
"doi": null,
"abstractUrl": "/proceedings-article/cia/2015/07400866/12OmNwDAC7A",
"parentPublication": {
"id": "proceedings/cia/2015/7771/0",
"title": "2015 3rd International Conference on Computer, Information and Application (CIA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2021/0126/0/09669899",
"title": "Improved Multi-task SCCA for Brain Imaging Genetics via Joint Consideration of the Diagnosis, Parameter Decomposition and Network Constraints",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2021/09669899/1A9Vftihh28",
"parentPublication": {
"id": "proceedings/bibm/2021/0126/0",
"title": "2021 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2022/6819/0/09995342",
"title": "Preference Matrix Guided Sparse Canonical Correlation Analysis for Genetic Study of Quantitative Traits in Alzheimer’s Disease",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2022/09995342/1JC28NqSXCg",
"parentPublication": {
"id": "proceedings/bibm/2022/6819/0",
"title": "2022 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2022/6819/0/09995261",
"title": "Finding the Most Transferable Tasks for Brain Image Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2022/09995261/1JC3fl4ZEXu",
"parentPublication": {
"id": "proceedings/bibm/2022/6819/0",
"title": "2022 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/10049688",
"title": "Accurate Registration of Cross-Modality Geometry via Consistent Clustering",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/10049688/1KYoraK6mLm",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tb/2021/01/08869839",
"title": "Multi-Task Sparse Canonical Correlation Analysis with Application to Multi-Modal Brain Imaging Genetics",
"doi": null,
"abstractUrl": "/journal/tb/2021/01/08869839/1e9gXzjOFKU",
"parentPublication": {
"id": "trans/tb",
"title": "IEEE/ACM Transactions on Computational Biology and Bioinformatics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300k0610",
"title": "DUAL-GLOW: Conditional Flow-Based Generative Model for Modality Transfer",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300k0610/1hQqkzaPpJe",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2020/6215/0/09313503",
"title": "Mining High-order Multimodal Brain Image Associations via Sparse Tensor Canonical Correlation Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2020/09313503/1qmfYHkjL20",
"parentPublication": {
"id": "proceedings/bibm/2020/6215/0",
"title": "2020 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2020/6215/0/09313246",
"title": "The group sparse canonical correlation analysis method in the imaging genetics research",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2020/09313246/1qmg2MFjlC0",
"parentPublication": {
"id": "proceedings/bibm/2020/6215/0",
"title": "2020 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "08880514",
"articleId": "1emy1qgNGk8",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08892647",
"articleId": "1eJQXBatzaM",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
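The record above builds on sparse canonical correlation analysis (SCCA). The sketch below shows a generic SCCA via alternating soft-thresholded power iterations on synthetic data; it is an assumed, simplified baseline and deliberately omits the paper's discriminant-similarity and multi-modality terms.

```python
# Generic sparse CCA sketch (alternating soft-thresholded power
# iterations), in the spirit of penalized matrix decomposition.
# This is a simplified stand-in, not the paper's MD-SCCA.

import numpy as np

def soft_threshold(a: np.ndarray, lam: float) -> np.ndarray:
    """Elementwise soft-thresholding (proximal operator of the L1 penalty)."""
    return np.sign(a) * np.maximum(np.abs(a) - lam, 0.0)

def sparse_cca(X: np.ndarray, Y: np.ndarray, lam_u: float = 0.1,
               lam_v: float = 0.1, iters: int = 100):
    """Return sparse canonical weight vectors (u, v) for centered X (n x p), Y (n x q)."""
    C = (X.T @ Y) / X.shape[0]                 # empirical cross-covariance
    _, _, Vt = np.linalg.svd(C, full_matrices=False)
    v = Vt[0]                                  # initialize with leading right singular vector
    u = np.zeros(X.shape[1])
    for _ in range(iters):
        u = soft_threshold(C @ v, lam_u)
        if np.linalg.norm(u) > 0:
            u /= np.linalg.norm(u)
        v = soft_threshold(C.T @ u, lam_v)
        if np.linalg.norm(v) > 0:
            v /= np.linalg.norm(v)
    return u, v

if __name__ == "__main__":
    rng = np.random.default_rng(1)
    n = 200
    z = rng.standard_normal(n)                 # shared latent signal
    X = rng.standard_normal((n, 10))           # hypothetical genotype-like features
    Y = rng.standard_normal((n, 8))            # hypothetical imaging quantitative traits
    X[:, 0] += 2 * z
    Y[:, 3] += 2 * z
    X -= X.mean(axis=0)
    Y -= Y.mean(axis=0)
    u, v = sparse_cca(X, Y)
    print("strongest X weight at index", int(np.argmax(np.abs(u))))   # expect 0
    print("strongest Y weight at index", int(np.argmax(np.abs(v))))   # expect 3
```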
{
"issue": {
"id": "12OmNCaLEju",
"title": "Jan.",
"year": "2018",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "24",
"label": "Jan.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxD9gXN",
"doi": "10.1109/TVCG.2017.2743859",
"abstract": "We extend the popular brushing and linking technique by incorporating personal agency in the interaction. We map existing research related to brushing and linking into a design space that deconstructs the interaction technique into three components: source (what is being brushed), link (the expression of relationship between source and target), and target (what is revealed as related to the source). Using this design space, we created MyBrush, a unified interface that offers personal agency over brushing and linking by giving people the flexibility to configure the source, link, and target of multiple brushes. The results of three focus groups demonstrate that people with different backgrounds leveraged personal agency in different ways, including performing complex tasks and showing links explicitly. We reflect on these results, paving the way for future research on the role of personal agency in information visualization.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We extend the popular brushing and linking technique by incorporating personal agency in the interaction. We map existing research related to brushing and linking into a design space that deconstructs the interaction technique into three components: source (what is being brushed), link (the expression of relationship between source and target), and target (what is revealed as related to the source). Using this design space, we created MyBrush, a unified interface that offers personal agency over brushing and linking by giving people the flexibility to configure the source, link, and target of multiple brushes. The results of three focus groups demonstrate that people with different backgrounds leveraged personal agency in different ways, including performing complex tasks and showing links explicitly. We reflect on these results, paving the way for future research on the role of personal agency in information visualization.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We extend the popular brushing and linking technique by incorporating personal agency in the interaction. We map existing research related to brushing and linking into a design space that deconstructs the interaction technique into three components: source (what is being brushed), link (the expression of relationship between source and target), and target (what is revealed as related to the source). Using this design space, we created MyBrush, a unified interface that offers personal agency over brushing and linking by giving people the flexibility to configure the source, link, and target of multiple brushes. The results of three focus groups demonstrate that people with different backgrounds leveraged personal agency in different ways, including performing complex tasks and showing links explicitly. We reflect on these results, paving the way for future research on the role of personal agency in information visualization.",
"title": "MyBrush: Brushing and Linking with Personal Agency",
"normalizedTitle": "MyBrush: Brushing and Linking with Personal Agency",
"fno": "08017621",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Joining Processes",
"Visualization",
"Data Visualization",
"Brushes",
"Image Color Analysis",
"Shape",
"Complexity Theory",
"Brushing",
"Linking",
"Personal Agency",
"Coordinated Multiple Views",
"Interaction",
"Design Space",
"Information Visualization"
],
"authors": [
{
"givenName": "Philipp",
"surname": "Koytek",
"fullName": "Philipp Koytek",
"affiliation": "University of CalgaryAugsburg University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Charles",
"surname": "Perin",
"fullName": "Charles Perin",
"affiliation": "City, University of LondonUniversity of Calgary",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jo",
"surname": "Vermeulen",
"fullName": "Jo Vermeulen",
"affiliation": "University of Calgary",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Elisabeth",
"surname": "André",
"fullName": "Elisabeth André",
"affiliation": "Augsburg University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Sheelagh",
"surname": "Carpendale",
"fullName": "Sheelagh Carpendale",
"affiliation": "University of Calgary",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2018-01-01 00:00:00",
"pubType": "trans",
"pages": "605-615",
"year": "2018",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/bigmm/2017/6549/0/07966709",
"title": "Large-Scale Endoscopic Image and Video Linking with Gradient-Based Signatures",
"doi": null,
"abstractUrl": "/proceedings-article/bigmm/2017/07966709/12OmNAHEpBD",
"parentPublication": {
"id": "proceedings/bigmm/2017/6549/0",
"title": "2017 IEEE Third International Conference on Multimedia Big Data (BigMM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2011/935/0/05742380",
"title": "Collaborative information linking: Bridging knowledge gaps between users by linking across applications",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2011/05742380/12OmNAXglSL",
"parentPublication": {
"id": "proceedings/pacificvis/2011/935/0",
"title": "2011 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2015/8391/0/8391e615",
"title": "Semantic Video Entity Linking Based on Visual Content and Metadata",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391e615/12OmNqG0SQI",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ldav/2014/5215/0/07013201",
"title": "Data-parallel halo finding with variable linking lengths",
"doi": null,
"abstractUrl": "/proceedings-article/ldav/2014/07013201/12OmNqzu6NX",
"parentPublication": {
"id": "proceedings/ldav/2014/5215/0",
"title": "2014 IEEE 4th Symposium on Large Data Analysis and Visualization (LDAV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/infvis/2003/8154/0/01249024",
"title": "Compound brushing [dynamic data visualization]",
"doi": null,
"abstractUrl": "/proceedings-article/infvis/2003/01249024/12OmNvAiSFw",
"parentPublication": {
"id": "proceedings/infvis/2003/8154/0",
"title": "IEEE Symposium on Information Visualization 2003",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/eisic/2013/5062/0/06657131",
"title": "Semantic Linking and Contextualization for Social Forensic Text Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/eisic/2013/06657131/12OmNvrMUh8",
"parentPublication": {
"id": "proceedings/eisic/2013/5062/0",
"title": "2013 European Intelligence and Security Informatics Conference (EISIC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-infovis/2003/2055/0/01249024",
"title": "Compound brushing [dynamic data visualization]",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-infovis/2003/01249024/12OmNyOHG3V",
"parentPublication": {
"id": "proceedings/ieee-infovis/2003/2055/0",
"title": "Information Visualization, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2015/02/06823700",
"title": "Entity Linking with a Knowledge Base: Issues, Techniques, and Solutions",
"doi": null,
"abstractUrl": "/journal/tk/2015/02/06823700/13rRUwbs2bs",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/12/ttg2013122189",
"title": "Supporting Awareness through Collaborative Brushing and Linking of Tabular Data",
"doi": null,
"abstractUrl": "/journal/tg/2013/12/ttg2013122189/13rRUwdrdSz",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/03/08302598",
"title": "Smart Brushing for Parallel Coordinates",
"doi": null,
"abstractUrl": "/journal/tg/2019/03/08302598/17D45WaTkk4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "08017584",
"articleId": "13rRUyueghe",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08017644",
"articleId": "13rRUNvgz4o",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXWRG1",
"name": "ttg201801-08017621s1.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg201801-08017621s1.zip",
"extension": "zip",
"size": "62.4 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
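The MyBrush record above decomposes brushing into a source, a link, and a target. The sketch below encodes that design space as an explicit data structure applied to toy records; the field names and data are illustrative only, not the MyBrush implementation.

```python
# Sketch (not MyBrush itself) of a brush as an explicit
# (source, link, target) triple so each part can be configured
# independently. Toy data and names are invented.

from dataclasses import dataclass
from typing import Callable

Record = dict  # one data item shared by all linked views

@dataclass
class Brush:
    source: Callable[[Record], bool]          # what is being brushed
    link: Callable[[Record, Record], bool]    # how sources relate to targets
    target: str                               # which view highlights the result

def apply_brush(brush: Brush, data: list[Record]) -> list[Record]:
    """Return the records the target view should highlight."""
    brushed = [r for r in data if brush.source(r)]
    return [r for r in data if any(brush.link(s, r) for s in brushed)]

if __name__ == "__main__":
    data = [
        {"id": 1, "age": 34, "dept": "ICU"},
        {"id": 2, "age": 61, "dept": "ICU"},
        {"id": 3, "age": 58, "dept": "ER"},
    ]
    # Brush records older than 50 in one view; link by same department;
    # highlight the linked records in a (hypothetical) table view.
    brush = Brush(
        source=lambda r: r["age"] > 50,
        link=lambda s, r: s["dept"] == r["dept"],
        target="table",
    )
    print([r["id"] for r in apply_brush(brush, data)])   # -> [1, 2, 3]
```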
{
"issue": {
"id": "12OmNwGqBqg",
"title": "November/December",
"year": "2009",
"issueNum": "06",
"idPrefix": "tg",
"pubType": "journal",
"volume": "15",
"label": "November/December",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxNW1TQ",
"doi": "10.1109/TVCG.2009.179",
"abstract": "In this paper, we present a novel parallel coordinates design integrated with points (Scattering Points in Parallel Coordinates, SPPC), by taking advantage of both parallel coordinates and scatterplots. Different from most multiple views visualization frameworks involving parallel coordinates where each visualization type occupies an individual window, we convert two selected neighboring coordinate axes into a scatterplot directly. Multidimensional scaling is adopted to allow converting multiple axes into a single subplot. The transition between two visual types is designed in a seamless way. In our work, a series of interaction tools has been developed. Uniform brushing functionality is implemented to allow the user to perform data selection on both points and parallel coordinate polylines without explicitly switching tools. A GPU accelerated Dimensional Incremental Multidimensional Scaling (DIMDS) has been developed to significantly improve the system performance. Our case study shows that our scheme is more efficient than traditional multi-view methods in performing visual analysis tasks.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, we present a novel parallel coordinates design integrated with points (Scattering Points in Parallel Coordinates, SPPC), by taking advantage of both parallel coordinates and scatterplots. Different from most multiple views visualization frameworks involving parallel coordinates where each visualization type occupies an individual window, we convert two selected neighboring coordinate axes into a scatterplot directly. Multidimensional scaling is adopted to allow converting multiple axes into a single subplot. The transition between two visual types is designed in a seamless way. In our work, a series of interaction tools has been developed. Uniform brushing functionality is implemented to allow the user to perform data selection on both points and parallel coordinate polylines without explicitly switching tools. A GPU accelerated Dimensional Incremental Multidimensional Scaling (DIMDS) has been developed to significantly improve the system performance. Our case study shows that our scheme is more efficient than traditional multi-view methods in performing visual analysis tasks.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, we present a novel parallel coordinates design integrated with points (Scattering Points in Parallel Coordinates, SPPC), by taking advantage of both parallel coordinates and scatterplots. Different from most multiple views visualization frameworks involving parallel coordinates where each visualization type occupies an individual window, we convert two selected neighboring coordinate axes into a scatterplot directly. Multidimensional scaling is adopted to allow converting multiple axes into a single subplot. The transition between two visual types is designed in a seamless way. In our work, a series of interaction tools has been developed. Uniform brushing functionality is implemented to allow the user to perform data selection on both points and parallel coordinate polylines without explicitly switching tools. A GPU accelerated Dimensional Incremental Multidimensional Scaling (DIMDS) has been developed to significantly improve the system performance. Our case study shows that our scheme is more efficient than traditional multi-view methods in performing visual analysis tasks.",
"title": "Scattering Points in Parallel Coordinates",
"normalizedTitle": "Scattering Points in Parallel Coordinates",
"fno": "ttg2009061001",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Parallel Coordinates",
"Scatterplots",
"Information Visualization",
"Multidimensional Scaling"
],
"authors": [
{
"givenName": "Xiaoru",
"surname": "Yuan",
"fullName": "Xiaoru Yuan",
"affiliation": "Key Laboratory of Machine Perception & Peking University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Peihong",
"surname": "Guo",
"fullName": "Peihong Guo",
"affiliation": "Key Laboratory of Machine Perception & Peking University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "He",
"surname": "Xiao",
"fullName": "He Xiao",
"affiliation": "Key Laboratory of Machine Perception & Peking University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hong",
"surname": "Zhou",
"fullName": "Hong Zhou",
"affiliation": "Hong Kong University of Science and Technology",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Huamin",
"surname": "Qu",
"fullName": "Huamin Qu",
"affiliation": "Hong Kong University of Science and Technology",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "06",
"pubDate": "2009-11-01 00:00:00",
"pubType": "trans",
"pages": "1001-1008",
"year": "2009",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/waim/2008/3185/0/3185a165",
"title": "Advanced Star Coordinates",
"doi": null,
"abstractUrl": "/proceedings-article/waim/2008/3185a165/12OmNAYoKoO",
"parentPublication": {
"id": "proceedings/waim/2008/3185/0",
"title": "Web-Age Information Management, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2009/3733/0/3733a025",
"title": "Many-to-Many Relational Parallel Coordinates Displays",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2009/3733a025/12OmNB06l2c",
"parentPublication": {
"id": "proceedings/iv/2009/3733/0",
"title": "2009 13th International Conference Information Visualisation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccis/2012/4789/0/4789a179",
"title": "The Polar Parallel Coordinates Method for Time-Series Data Visualization",
"doi": null,
"abstractUrl": "/proceedings-article/iccis/2012/4789a179/12OmNC8MsuO",
"parentPublication": {
"id": "proceedings/iccis/2012/4789/0",
"title": "2012 Fourth International Conference on Computational and Information Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/waina/2013/4952/0/4952b088",
"title": "Network Data Visualization Using Parallel Coordinates Version of Time-tunnel with 2Dto2D Visualization for Intrusion Detection",
"doi": null,
"abstractUrl": "/proceedings-article/waina/2013/4952b088/12OmNCd2ryb",
"parentPublication": {
"id": "proceedings/waina/2013/4952/0",
"title": "2013 27th International Conference on Advanced Information Networking and Applications Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pdcat/2011/4564/0/4564a156",
"title": "The Multidimensional Scaling and Barycentric Coordinates Based Distributed Localization in Wireless Sensor Networks",
"doi": null,
"abstractUrl": "/proceedings-article/pdcat/2011/4564a156/12OmNqESuen",
"parentPublication": {
"id": "proceedings/pdcat/2011/4564/0",
"title": "Parallel and Distributed Computing Applications and Technologies, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cis/2017/4822/0/482201a419",
"title": "Segmental Offset-Mapping Parallel Coordinates for Multidimensional Integer Dataset",
"doi": null,
"abstractUrl": "/proceedings-article/cis/2017/482201a419/12OmNyrqzG9",
"parentPublication": {
"id": "proceedings/cis/2017/4822/0",
"title": "2017 13th International Conference on Computational Intelligence and Security (CIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-infovis/2005/2790/0/27900020",
"title": "An Interactive 3D Integration of Parallel Coordinates and Star Glyphs",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-infovis/2005/27900020/12OmNzkMlUx",
"parentPublication": {
"id": "proceedings/ieee-infovis/2005/2790/0",
"title": "Information Visualization, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2010/06/ttg2010061017",
"title": "Pargnostics: Screen-Space Metrics for Parallel Coordinates",
"doi": null,
"abstractUrl": "/journal/tg/2010/06/ttg2010061017/13rRUxYINf6",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2009/06/ttg2009061531",
"title": "Continuous Parallel Coordinates",
"doi": null,
"abstractUrl": "/journal/tg/2009/06/ttg2009061531/13rRUxZRbnX",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/12/ttg2011121912",
"title": "Features in Continuous Parallel Coordinates",
"doi": null,
"abstractUrl": "/journal/tg/2011/12/ttg2011121912/13rRUyYBlgy",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2009060993",
"articleId": "13rRUEgs2tm",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2009061009",
"articleId": "13rRUwfZC0b",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXWRN2",
"name": "ttg2009061001s.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2009061001s.mp4",
"extension": "mp4",
"size": "43.1 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
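The SPPC record above converts selected parallel-coordinate axes into a scatterplot, using multidimensional scaling when more than two axes are selected. The sketch below shows that conversion with classical (Torgerson) MDS in NumPy; it is an assumed simplification and does not include the paper's GPU-accelerated dimensional-incremental MDS or its interaction tools.

```python
# Sketch of SPPC's core idea: records on the selected axes are
# re-embedded as 2-D scatterplot positions, here via classical MDS.
# This is an illustrative simplification, not the paper's implementation.

import numpy as np

def classical_mds(X: np.ndarray, dims: int = 2) -> np.ndarray:
    """Embed rows of X into `dims` dimensions via classical (Torgerson) MDS."""
    n = X.shape[0]
    D2 = np.square(np.linalg.norm(X[:, None, :] - X[None, :, :], axis=-1))
    J = np.eye(n) - np.ones((n, n)) / n          # double-centering matrix
    B = -0.5 * J @ D2 @ J
    w, V = np.linalg.eigh(B)                     # eigenvalues in ascending order
    idx = np.argsort(w)[::-1][:dims]
    return V[:, idx] * np.sqrt(np.maximum(w[idx], 0.0))

def axes_to_scatter(data: np.ndarray, selected_axes: list[int]) -> np.ndarray:
    """Turn the selected parallel-coordinate axes into 2-D scatter positions.

    With exactly two axes this is just the normalized value pair; with
    more axes the selected columns are embedded with MDS.
    """
    cols = data[:, selected_axes]
    mins, maxs = cols.min(axis=0), cols.max(axis=0)
    cols = (cols - mins) / np.where(maxs > mins, maxs - mins, 1.0)
    if len(selected_axes) == 2:
        return cols
    return classical_mds(cols)

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    data = rng.standard_normal((100, 5))           # 100 records, 5 dimensions
    print(axes_to_scatter(data, [1, 2]).shape)     # (100, 2): plain scatterplot
    print(axes_to_scatter(data, [0, 2, 4]).shape)  # (100, 2): MDS of three axes
```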
{
"issue": {
"id": "12OmNxI0KAU",
"title": "June",
"year": "2018",
"issueNum": "06",
"idPrefix": "tg",
"pubType": "journal",
"volume": "24",
"label": "June",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxly9e1",
"doi": "10.1109/TVCG.2017.2698041",
"abstract": "We address the problem of visualizing multivariate correlations in parallel coordinates. We focus on multivariate correlation in the form of linear relationships between multiple variables. Traditional parallel coordinates are well prepared to show negative correlations between two attributes by distinct visual patterns. However, it is difficult to recognize positive correlations in parallel coordinates. Furthermore, there is no support to highlight multivariate correlations in parallel coordinates. In this paper, we exploit the indexed point representation of p -flats (planes in multidimensional data) to visualize local multivariate correlations in parallel coordinates. Our method yields clear visual signatures for negative and positive correlations alike, and it supports large datasets. All information is shown in a unified parallel coordinates framework, which leads to easy and familiar user interactions for analysts who have experience with traditional parallel coordinates. The usefulness of our method is demonstrated through examples of typical multidimensional datasets.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We address the problem of visualizing multivariate correlations in parallel coordinates. We focus on multivariate correlation in the form of linear relationships between multiple variables. Traditional parallel coordinates are well prepared to show negative correlations between two attributes by distinct visual patterns. However, it is difficult to recognize positive correlations in parallel coordinates. Furthermore, there is no support to highlight multivariate correlations in parallel coordinates. In this paper, we exploit the indexed point representation of p -flats (planes in multidimensional data) to visualize local multivariate correlations in parallel coordinates. Our method yields clear visual signatures for negative and positive correlations alike, and it supports large datasets. All information is shown in a unified parallel coordinates framework, which leads to easy and familiar user interactions for analysts who have experience with traditional parallel coordinates. The usefulness of our method is demonstrated through examples of typical multidimensional datasets.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We address the problem of visualizing multivariate correlations in parallel coordinates. We focus on multivariate correlation in the form of linear relationships between multiple variables. Traditional parallel coordinates are well prepared to show negative correlations between two attributes by distinct visual patterns. However, it is difficult to recognize positive correlations in parallel coordinates. Furthermore, there is no support to highlight multivariate correlations in parallel coordinates. In this paper, we exploit the indexed point representation of p -flats (planes in multidimensional data) to visualize local multivariate correlations in parallel coordinates. Our method yields clear visual signatures for negative and positive correlations alike, and it supports large datasets. All information is shown in a unified parallel coordinates framework, which leads to easy and familiar user interactions for analysts who have experience with traditional parallel coordinates. The usefulness of our method is demonstrated through examples of typical multidimensional datasets.",
"title": "Indexed-Points Parallel Coordinates Visualization of Multivariate Correlations",
"normalizedTitle": "Indexed-Points Parallel Coordinates Visualization of Multivariate Correlations",
"fno": "07911335",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualization",
"Correlation",
"Visualization",
"Two Dimensional Displays",
"Geometry",
"Brushes",
"Shape",
"Multidimensional Data Visualization",
"Multivariate Correlations",
"Parallel Coordinates"
],
"authors": [
{
"givenName": "Liang",
"surname": "Zhou",
"fullName": "Liang Zhou",
"affiliation": "Visualization Research Center (VISUS), University of Stuttgart, Stuttgart, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Daniel",
"surname": "Weiskopf",
"fullName": "Daniel Weiskopf",
"affiliation": "Visualization Research Center (VISUS), University of Stuttgart, Stuttgart, Germany",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "06",
"pubDate": "2018-06-01 00:00:00",
"pubType": "trans",
"pages": "1997-2010",
"year": "2018",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/waim/2008/3185/0/3185a165",
"title": "Advanced Star Coordinates",
"doi": null,
"abstractUrl": "/proceedings-article/waim/2008/3185a165/12OmNAYoKoO",
"parentPublication": {
"id": "proceedings/waim/2008/3185/0",
"title": "Web-Age Information Management, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/09/06171180",
"title": "Scalable Multivariate Volume Visualization and Analysis Based on Dimension Projection and Parallel Coordinates",
"doi": null,
"abstractUrl": "/journal/tg/2012/09/06171180/13rRUwwJWFL",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/08/07930525",
"title": "The LloydRelaxer: An Approach to Minimize Scaling Effects for Multivariate Projections",
"doi": null,
"abstractUrl": "/journal/tg/2018/08/07930525/13rRUxBrGh8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2009/06/ttg2009061001",
"title": "Scattering Points in Parallel Coordinates",
"doi": null,
"abstractUrl": "/journal/tg/2009/06/ttg2009061001/13rRUxNW1TQ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2009/06/ttg2009061531",
"title": "Continuous Parallel Coordinates",
"doi": null,
"abstractUrl": "/journal/tg/2009/06/ttg2009061531/13rRUxZRbnX",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/01/08440845",
"title": "Shape-preserving Star Coordinates",
"doi": null,
"abstractUrl": "/journal/tg/2019/01/08440845/17D45WYQJ9Z",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/03/08302598",
"title": "Smart Brushing for Parallel Coordinates",
"doi": null,
"abstractUrl": "/journal/tg/2019/03/08302598/17D45WaTkk4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vissoft/2018/8292/0/829200a012",
"title": "Detecting Bad Smells in Software Systems with Linked Multivariate Visualizations",
"doi": null,
"abstractUrl": "/proceedings-article/vissoft/2018/829200a012/17D45WrVg8H",
"parentPublication": {
"id": "proceedings/vissoft/2018/8292/0",
"title": "2018 IEEE Working Conference on Software Visualization (VISSOFT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2019/2838/0/283800a145",
"title": "A Study on 2D and 3D Parallel Coordinates for Pattern Identification in Temporal Multivariate Data",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2019/283800a145/1cMFbhiN7xK",
"parentPublication": {
"id": "proceedings/iv/2019/2838/0",
"title": "2019 23rd International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis/2019/4941/0/08933632",
"title": "Conditional Parallel Coordinates",
"doi": null,
"abstractUrl": "/proceedings-article/vis/2019/08933632/1fTgJgZx0go",
"parentPublication": {
"id": "proceedings/vis/2019/4941/0",
"title": "2019 IEEE Visualization Conference (VIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "07927461",
"articleId": "13rRUxly9e2",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07930426",
"articleId": "13rRUwbs2b7",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTYesP6",
"name": "ttg201806-07911335s1.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg201806-07911335s1.zip",
"extension": "zip",
"size": "8.45 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvAiSlQ",
"title": "July-Aug.",
"year": "2019",
"issueNum": "04",
"idPrefix": "cg",
"pubType": "magazine",
"volume": "39",
"label": "July-Aug.",
"downloadables": {
"hasCover": true,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1aXM9T7Z0xq",
"doi": "10.1109/MCG.2018.2881502",
"abstract": "Brushing is at the heart of most modern visual analytics solutions and effective and efficient brushing is crucial for successful interactive data exploration and analysis. As the user plays a central role in brushing, several data-driven brushing tools have been designed that are based on predicting the user's brushing goal. All of these general brushing models learn the users' average brushing preference, which is not optimal for every single user. In this paper, we propose an innovative framework that offers the user opportunities to improve the brushing technique while using it. We realized this framework with a CNN-based brushing technique and the result shows that with additional data from a particular user, the model can be refined (better performance in terms of accuracy), eventually converging to a personalized model based on a moderate amount of retraining.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Brushing is at the heart of most modern visual analytics solutions and effective and efficient brushing is crucial for successful interactive data exploration and analysis. As the user plays a central role in brushing, several data-driven brushing tools have been designed that are based on predicting the user's brushing goal. All of these general brushing models learn the users' average brushing preference, which is not optimal for every single user. In this paper, we propose an innovative framework that offers the user opportunities to improve the brushing technique while using it. We realized this framework with a CNN-based brushing technique and the result shows that with additional data from a particular user, the model can be refined (better performance in terms of accuracy), eventually converging to a personalized model based on a moderate amount of retraining.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Brushing is at the heart of most modern visual analytics solutions and effective and efficient brushing is crucial for successful interactive data exploration and analysis. As the user plays a central role in brushing, several data-driven brushing tools have been designed that are based on predicting the user's brushing goal. All of these general brushing models learn the users' average brushing preference, which is not optimal for every single user. In this paper, we propose an innovative framework that offers the user opportunities to improve the brushing technique while using it. We realized this framework with a CNN-based brushing technique and the result shows that with additional data from a particular user, the model can be refined (better performance in terms of accuracy), eventually converging to a personalized model based on a moderate amount of retraining.",
"title": "Personalized Sketch-Based Brushing in Scatterplots",
"normalizedTitle": "Personalized Sketch-Based Brushing in Scatterplots",
"fno": "08739141",
"hasPdf": true,
"idPrefix": "cg",
"keywords": [
"Computer Graphics",
"Data Visualisation",
"Learning Artificial Intelligence",
"Personalized Sketch Based Brushing",
"Data Driven Brushing Tools",
"Visual Analytics",
"Interactive Data Exploration",
"CNN Based Brushing Technique",
"Scatterplots",
"Data Models",
"Visual Analytics",
"Adaptation Models",
"Data Visualization",
"Analytical Models",
"Deep Learning"
],
"authors": [
{
"givenName": "Chaoran",
"surname": "Fan",
"fullName": "Chaoran Fan",
"affiliation": "University of Bergen",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Helwig",
"surname": "Hauser",
"fullName": "Helwig Hauser",
"affiliation": "University of Bergen",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "2019-07-01 00:00:00",
"pubType": "mags",
"pages": "28-39",
"year": "2019",
"issn": "0272-1716",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ieee-infovis/2003/2055/0/20550023",
"title": "Compound Brushing",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-infovis/2003/20550023/12OmNBInLjT",
"parentPublication": {
"id": "proceedings/ieee-infovis/2003/2055/0",
"title": "Information Visualization, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/1995/7187/0/71870271",
"title": "High Dimensional Brushing for Interactive Exploration of Multivariate Data",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/1995/71870271/12OmNBdJ5iK",
"parentPublication": {
"id": "proceedings/ieee-vis/1995/7187/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vast/2014/6227/0/07042500",
"title": "A multidimensional brush for scatterplot data analytics",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2014/07042500/12OmNxR5UJf",
"parentPublication": {
"id": "proceedings/vast/2014/6227/0",
"title": "2014 IEEE Conference on Visual Analytics Science and Technology (VAST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/12/ttg2011122591",
"title": "Brushing Dimensions—A Dual Visual Analysis Model for High-Dimensional Data",
"doi": null,
"abstractUrl": "/journal/tg/2011/12/ttg2011122591/13rRUwInvf4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/12/ttg2011121882",
"title": "Interactive, Graph-based Visual Analysis of High-dimensional, Multi-parameter Fluorescence Microscopy Data in Toponomics",
"doi": null,
"abstractUrl": "/journal/tg/2011/12/ttg2011121882/13rRUytWF9i",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/03/08302598",
"title": "Smart Brushing for Parallel Coordinates",
"doi": null,
"abstractUrl": "/journal/tg/2019/03/08302598/17D45WaTkk4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis/2022/8812/0/881200a085",
"title": "Let's Get Personal: Exploring the Design of Personalized Visualizations",
"doi": null,
"abstractUrl": "/proceedings-article/vis/2022/881200a085/1J6hbZrS4dG",
"parentPublication": {
"id": "proceedings/vis/2022/8812/0",
"title": "2022 IEEE Visualization and Visual Analytics (VIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vast/2018/6861/0/08802508",
"title": "Contourmap: Contour Based Visualization Of Water Chemical Data",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2018/08802508/1cJ6Y0POhO0",
"parentPublication": {
"id": "proceedings/vast/2018/6861/0",
"title": "2018 IEEE Conference on Visual Analytics Science and Technology (VAST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vast/2010/9488/0/05652644",
"title": "VASTvis — Visual analytics with multiple coordinated views",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2010/05652644/1eof30OMMi4",
"parentPublication": {
"id": "proceedings/vast/2010/9488/0",
"title": "2010 IEEE Symposium on Visual Analytics Science and Technology",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2021/05/09490374",
"title": "On Sketch-Based Selections From Scatterplots Using KDE, Compared to Mahalanobis and CNN Brushing",
"doi": null,
"abstractUrl": "/magazine/cg/2021/05/09490374/1vmGVdpdhAY",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "08739140",
"articleId": "1aXM6LBwGmk",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08739137",
"articleId": "1aXM6mNkouI",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNzSQdj4",
"title": "Apr.-June",
"year": "2016",
"issueNum": "02",
"idPrefix": "mu",
"pubType": "magazine",
"volume": "23",
"label": "Apr.-June",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUyekJ2X",
"doi": "10.1109/MMUL.2015.52",
"abstract": "The authors address the problem of depth map upsampling using a corresponding high-resolution color image. The depth map is captured by low-resolution time-of-flight cameras paired with a high-resolution RGB camera. Inspired by guided image filtering, the proposed method not only uses the structure of the high-resolution color image as guidance, it also exploits local gradient information of depth images to suppress potential texture-copying artifacts. In addition, the authors introduce onion-peel-order filtering that predicts depth values from outside inward in a concentric-layer order, which avoids depth bleeding during the propagation process. Quantitative and qualitative experimental results demonstrate the effectiveness and robustness of this approach over prior depth map upsampling methods.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The authors address the problem of depth map upsampling using a corresponding high-resolution color image. The depth map is captured by low-resolution time-of-flight cameras paired with a high-resolution RGB camera. Inspired by guided image filtering, the proposed method not only uses the structure of the high-resolution color image as guidance, it also exploits local gradient information of depth images to suppress potential texture-copying artifacts. In addition, the authors introduce onion-peel-order filtering that predicts depth values from outside inward in a concentric-layer order, which avoids depth bleeding during the propagation process. Quantitative and qualitative experimental results demonstrate the effectiveness and robustness of this approach over prior depth map upsampling methods.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The authors address the problem of depth map upsampling using a corresponding high-resolution color image. The depth map is captured by low-resolution time-of-flight cameras paired with a high-resolution RGB camera. Inspired by guided image filtering, the proposed method not only uses the structure of the high-resolution color image as guidance, it also exploits local gradient information of depth images to suppress potential texture-copying artifacts. In addition, the authors introduce onion-peel-order filtering that predicts depth values from outside inward in a concentric-layer order, which avoids depth bleeding during the propagation process. Quantitative and qualitative experimental results demonstrate the effectiveness and robustness of this approach over prior depth map upsampling methods.",
"title": "Extended Guided Filtering for Depth Map Upsampling",
"normalizedTitle": "Extended Guided Filtering for Depth Map Upsampling",
"fno": "mmu2016020072",
"hasPdf": true,
"idPrefix": "mu",
"keywords": [
"Image Edge Detection",
"Color",
"Cameras",
"Kernel",
"Reliability",
"Image Color Analysis",
"Venus",
"Visualization",
"Depth Map Upsampling",
"Guided Filtering",
"To F Camera",
"Extended Guided Filtering",
"Graphics",
"Depth Sensing",
"Data Analysis"
],
"authors": [
{
"givenName": "Kai-Lung",
"surname": "Hua",
"fullName": "Kai-Lung Hua",
"affiliation": "National Taiwan University of Science and Technology",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Kai-Han",
"surname": "Lo",
"fullName": "Kai-Han Lo",
"affiliation": "National Taiwan University of Science and Technology",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yu-Chiang Frank",
"surname": "Frank Wang",
"fullName": "Yu-Chiang Frank Frank Wang",
"affiliation": "Academia Sinica",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "02",
"pubDate": "2016-04-01 00:00:00",
"pubType": "mags",
"pages": "72-83",
"year": "2016",
"issn": "1070-986X",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icme/2017/6067/0/08019366",
"title": "Minimum spanning forest with embedded edge inconsistency measurement for color-guided depth map upsampling",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2017/08019366/12OmNAlNiII",
"parentPublication": {
"id": "proceedings/icme/2017/6067/0",
"title": "2017 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2014/4308/0/4308a738",
"title": "Guided Depth Upsampling via a Cosparse Analysis Model",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2014/4308a738/12OmNAlNiQF",
"parentPublication": {
"id": "proceedings/cvprw/2014/4308/0",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2015/9403/0/9403a133",
"title": "Depth Map Super-Resolution for Cost-Effective RGB-D Camera",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2015/9403a133/12OmNApculG",
"parentPublication": {
"id": "proceedings/cw/2015/9403/0",
"title": "2015 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2014/5209/0/5209e394",
"title": "A Nonlocal Filter-Based Hybrid Strategy for Depth Map Enhancement",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/5209e394/12OmNBKW9FR",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2017/2937/0/2937a001",
"title": "Boundary-Preserving Depth Upsampling Without Texture Copying Artifacts and Holes",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2017/2937a001/12OmNCbCrSX",
"parentPublication": {
"id": "proceedings/ism/2017/2937/0",
"title": "2017 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2012/4711/0/4711a152",
"title": "Joint Example-Based Depth Map Super-Resolution",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2012/4711a152/12OmNqBbHAj",
"parentPublication": {
"id": "proceedings/icme/2012/4711/0",
"title": "2012 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2011/1101/0/06126423",
"title": "High quality depth map upsampling for 3D-TOF cameras",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2011/06126423/12OmNqI04LP",
"parentPublication": {
"id": "proceedings/iccv/2011/1101/0",
"title": "2011 IEEE International Conference on Computer Vision (ICCV 2011)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2014/4761/0/06890185",
"title": "A Stereo-Vision-Assisted model for depth map super-resolution",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2014/06890185/12OmNs59JHy",
"parentPublication": {
"id": "proceedings/icme/2014/4761/0",
"title": "2014 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460991",
"title": "Depth image up-sampling using ant colony optimization",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460991/12OmNzxPTNt",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600f687",
"title": "Discrete Cosine Transform Network for Guided Depth Map Super-Resolution",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600f687/1H1il6YC2J2",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "mmu2016020064",
"articleId": "13rRUwkfAW8",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "mmu2016020084",
"articleId": "13rRUwhHcNq",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNrFBPWH",
"title": "July-Sept.",
"year": "2018",
"issueNum": "03",
"idPrefix": "tn",
"pubType": "journal",
"volume": "5",
"label": "July-Sept.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUIJuxqo",
"doi": "10.1109/TNSE.2017.2753963",
"abstract": "We extend the concept of graph isomorphisms to multilayer networks with any number of “aspects” (i.e., types of layering). In developing this generalization, we identify multiple types of isomorphisms. For example, in multilayer networks with a single aspect, permuting vertex labels, layer labels, and both vertex labels and layer labels each yield different isomorphism relations between multilayer networks. Multilayer network isomorphisms lead naturally to defining isomorphisms in any of the numerous types of networks that can be represented as a multilayer network, and we thereby obtain isomorphisms for multiplex networks, temporal networks, networks with both of these features, and more. We reduce each of the multilayer network isomorphism problems to a graph isomorphism problem, where the size of the graph isomorphism problem grows linearly with the size of the multilayer network isomorphism problem. One can thus use software that has been developed to solve graph isomorphism problems as a practical means for solving multilayer network isomorphism problems. Our theory lays a foundation for extending many network analysis methods—including motifs, graphlets, structural roles, and network alignment—to any multilayer network.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We extend the concept of graph isomorphisms to multilayer networks with any number of “aspects” (i.e., types of layering). In developing this generalization, we identify multiple types of isomorphisms. For example, in multilayer networks with a single aspect, permuting vertex labels, layer labels, and both vertex labels and layer labels each yield different isomorphism relations between multilayer networks. Multilayer network isomorphisms lead naturally to defining isomorphisms in any of the numerous types of networks that can be represented as a multilayer network, and we thereby obtain isomorphisms for multiplex networks, temporal networks, networks with both of these features, and more. We reduce each of the multilayer network isomorphism problems to a graph isomorphism problem, where the size of the graph isomorphism problem grows linearly with the size of the multilayer network isomorphism problem. One can thus use software that has been developed to solve graph isomorphism problems as a practical means for solving multilayer network isomorphism problems. Our theory lays a foundation for extending many network analysis methods—including motifs, graphlets, structural roles, and network alignment—to any multilayer network.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We extend the concept of graph isomorphisms to multilayer networks with any number of “aspects” (i.e., types of layering). In developing this generalization, we identify multiple types of isomorphisms. For example, in multilayer networks with a single aspect, permuting vertex labels, layer labels, and both vertex labels and layer labels each yield different isomorphism relations between multilayer networks. Multilayer network isomorphisms lead naturally to defining isomorphisms in any of the numerous types of networks that can be represented as a multilayer network, and we thereby obtain isomorphisms for multiplex networks, temporal networks, networks with both of these features, and more. We reduce each of the multilayer network isomorphism problems to a graph isomorphism problem, where the size of the graph isomorphism problem grows linearly with the size of the multilayer network isomorphism problem. One can thus use software that has been developed to solve graph isomorphism problems as a practical means for solving multilayer network isomorphism problems. Our theory lays a foundation for extending many network analysis methods—including motifs, graphlets, structural roles, and network alignment—to any multilayer network.",
"title": "Isomorphisms in Multilayer Networks",
"normalizedTitle": "Isomorphisms in Multilayer Networks",
"fno": "08039503",
"hasPdf": true,
"idPrefix": "tn",
"keywords": [
"Nonhomogeneous Media",
"Tools",
"Multiplexing",
"Mathematics",
"Software",
"Terminology",
"Computer Science",
"Complex Networks"
],
"authors": [
{
"givenName": "Mikko",
"surname": "Kivelä",
"fullName": "Mikko Kivelä",
"affiliation": "Department of Computer Science, Aalto University, Espoo, Finland",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Mason A.",
"surname": "Porter",
"fullName": "Mason A. Porter",
"affiliation": "Department of Mathematics, University of California, Los Angeles, CA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "03",
"pubDate": "2018-07-01 00:00:00",
"pubType": "trans",
"pages": "198-211",
"year": "2018",
"issn": "2327-4697",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/asonam/2016/2846/0/07752422",
"title": "Local community detection in multilayer networks",
"doi": null,
"abstractUrl": "/proceedings-article/asonam/2016/07752422/12OmNxuXcAX",
"parentPublication": {
"id": "proceedings/asonam/2016/2846/0",
"title": "2016 IEEE/ACM International Conference on Advances in Social Networks Analysis and Mining (ASONAM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sitis/2015/9721/0/9721a448",
"title": "Comparison of Inter-Layer Couplings of Multilayer Networks",
"doi": null,
"abstractUrl": "/proceedings-article/sitis/2015/9721a448/12OmNxymodQ",
"parentPublication": {
"id": "proceedings/sitis/2015/9721/0",
"title": "2015 11th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/lics/2011/4412/0/4412a152",
"title": "Isomorphisms of Types in the Presence of Higher-Order References",
"doi": null,
"abstractUrl": "/proceedings-article/lics/2011/4412a152/12OmNy2Jt89",
"parentPublication": {
"id": "proceedings/lics/2011/4412/0",
"title": "Logic in Computer Science, Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/asonam/2015/3854/0/07403715",
"title": "MuNeG — The framework for multilayer network generator",
"doi": null,
"abstractUrl": "/proceedings-article/asonam/2015/07403715/12OmNywfKJj",
"parentPublication": {
"id": "proceedings/asonam/2015/3854/0",
"title": "2015 IEEE/ACM International Conference on Advances in Social Networks Analysis and Mining (ASONAM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dsc/2017/1600/0/1600a368",
"title": "Tasks for Visual Analytics in Multilayer Networks",
"doi": null,
"abstractUrl": "/proceedings-article/dsc/2017/1600a368/12OmNz61dsf",
"parentPublication": {
"id": "proceedings/dsc/2017/1600/0",
"title": "2017 IEEE Second International Conference on Data Science in Cyberspace (DSC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aiccsa/2016/4320/0/07945701",
"title": "Hybrid community detection approach in multilayer social network: Scientific collaboration recommendation case study",
"doi": null,
"abstractUrl": "/proceedings-article/aiccsa/2016/07945701/12OmNzlD9Eh",
"parentPublication": {
"id": "proceedings/aiccsa/2016/4320/0",
"title": "2016 IEEE/ACS 13th International Conference of Computer Systems and Applications (AICCSA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tn/2015/02/07093190",
"title": "Spreading Processes in Multilayer Networks",
"doi": null,
"abstractUrl": "/journal/tn/2015/02/07093190/13rRUxDIti0",
"parentPublication": {
"id": "trans/tn",
"title": "IEEE Transactions on Network Science and Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wetice/2018/6916/0/691601a142",
"title": "Discovering Stable Communities in Dynamic Multilayer Social Networks",
"doi": null,
"abstractUrl": "/proceedings-article/wetice/2018/691601a142/17D45WaTkdR",
"parentPublication": {
"id": "proceedings/wetice/2018/6916/0",
"title": "2018 IEEE 27th International Conference on Enabling Technologies: Infrastructure for Collaborative Enterprises (WETICE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2021/3902/0/09671831",
"title": "Truss Decomposition on Multilayer Graphs",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2021/09671831/1A8hpC7FG92",
"parentPublication": {
"id": "proceedings/big-data/2021/3902/0",
"title": "2021 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tb/2022/02/09514471",
"title": "Pattern Discovery in Multilayer Networks",
"doi": null,
"abstractUrl": "/journal/tb/2022/02/09514471/1w7abbNXM6Q",
"parentPublication": {
"id": "trans/tb",
"title": "IEEE/ACM Transactions on Computational Biology and Bioinformatics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "08023845",
"articleId": "13rRUwghd5E",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08019818",
"articleId": "13rRUyueghO",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNC8uRnt",
"title": "Sept.-Oct.",
"year": "2012",
"issueNum": "05",
"idPrefix": "cg",
"pubType": "magazine",
"volume": "32",
"label": "Sept.-Oct.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUNvPLcn",
"doi": "10.1109/MCG.2012.99",
"abstract": "Light fields are the multiview extension of stereo image pairs: a collection of images showing a 3D scene from slightly different perspectives. Depicting high-resolution light fields usually requires an excessively large display bandwidth; compressive light field displays are enabled by the codesign of optical elements and computational-processing algorithms. Rather than pursuing a direct “optical” solution (for example, adding one more pixel to support the emission of one additional light ray), compressive displays aim to create flexible optical systems that can synthesize a compressed target light field. In effect, each pixel emits a superposition of light rays. Through compression and tailored optical designs, fewer display pixels are necessary to emit a given light field than a direct optical solution would require.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Light fields are the multiview extension of stereo image pairs: a collection of images showing a 3D scene from slightly different perspectives. Depicting high-resolution light fields usually requires an excessively large display bandwidth; compressive light field displays are enabled by the codesign of optical elements and computational-processing algorithms. Rather than pursuing a direct “optical” solution (for example, adding one more pixel to support the emission of one additional light ray), compressive displays aim to create flexible optical systems that can synthesize a compressed target light field. In effect, each pixel emits a superposition of light rays. Through compression and tailored optical designs, fewer display pixels are necessary to emit a given light field than a direct optical solution would require.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Light fields are the multiview extension of stereo image pairs: a collection of images showing a 3D scene from slightly different perspectives. Depicting high-resolution light fields usually requires an excessively large display bandwidth; compressive light field displays are enabled by the codesign of optical elements and computational-processing algorithms. Rather than pursuing a direct “optical” solution (for example, adding one more pixel to support the emission of one additional light ray), compressive displays aim to create flexible optical systems that can synthesize a compressed target light field. In effect, each pixel emits a superposition of light rays. Through compression and tailored optical designs, fewer display pixels are necessary to emit a given light field than a direct optical solution would require.",
"title": "Compressive Light Field Displays",
"normalizedTitle": "Compressive Light Field Displays",
"fno": "mcg2012050006",
"hasPdf": true,
"idPrefix": "cg",
"keywords": [
"Three Dimensional Displays",
"Tensile Stress",
"Nonhomogeneous Media",
"Brightness",
"Multimedia Communication",
"Modulation",
"Liquid Crystal Displays",
"Polarization",
"Computer Graphics",
"Three Dimensional Displays",
"Tensile Stress",
"Nonhomogeneous Media",
"Brightness",
"Multimedia Communication",
"Modulation",
"Liquid Crystal Displays",
"Polarization",
"Multimedia",
"Light Fields",
"Compressive Light Field Displays",
"Multilayer Light Field Displays",
"3 D Displays",
"Parallax Displays",
"Multiview Displays",
"Tomographic Light Field Synthesis",
"Polarization Fields",
"LC Ds",
"Tensor Displays",
"Directional Backlighting"
],
"authors": [
{
"givenName": "G.",
"surname": "Wetzstein",
"fullName": "G. Wetzstein",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "D.",
"surname": "Lanman",
"fullName": "D. Lanman",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "M.",
"surname": "Hirsch",
"fullName": "M. Hirsch",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "W.",
"surname": "Heidrich",
"fullName": "W. Heidrich",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "R.",
"surname": "Raskar",
"fullName": "R. Raskar",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "05",
"pubDate": "2012-09-01 00:00:00",
"pubType": "mags",
"pages": "6-11",
"year": "2012",
"issn": "0272-1716",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icdh/2014/4284/0/4284a168",
"title": "Optimizing Mask and Dictionary of Compressive Light Field Photography",
"doi": null,
"abstractUrl": "/proceedings-article/icdh/2014/4284a168/12OmNqBKTUD",
"parentPublication": {
"id": "proceedings/icdh/2014/4284/0",
"title": "2014 5th International Conference on Digital Home (ICDH)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2017/0733/0/0733b277",
"title": "Compressive Light Field Reconstructions Using Deep Learning",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2017/0733b277/12OmNs0C9Cb",
"parentPublication": {
"id": "proceedings/cvprw/2017/0733/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/11/08007218",
"title": "Occlusion Leak Compensation for Optical See-Through Displays Using a Single-Layer Transmissive Spatial Light Modulator",
"doi": null,
"abstractUrl": "/journal/tg/2017/11/08007218/13rRUxcbnHi",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000c031",
"title": "Salience Guided Depth Calibration for Perceptually Optimized Compressive Light Field 3D Display",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000c031/17D45VsBTZA",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tm/2019/03/08368098",
"title": "Polarization-Based Visible Light Positioning",
"doi": null,
"abstractUrl": "/journal/tm/2019/03/08368098/17D45Xbl4ON",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/03/08304611",
"title": "Unified Mathematical Model for Multilayer-Multiframe Compressive Light Field Displays Using LCDs",
"doi": null,
"abstractUrl": "/journal/tg/2019/03/08304611/17D45XeKgnu",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08676153",
"title": "Light Attenuation Display: Subtractive See-Through Near-Eye Display via Spatial Color Filtering",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08676153/18LFbQfp6x2",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a746",
"title": "Depth Reduction in Light-Field Head-Mounted Displays by Generating Intermediate Images as Virtual Images",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a746/1CJcGN8dsS4",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2017/2636/0/263600a387",
"title": "Light Field Display: An Adaptive Weighted Dual-Layer LCD Display for Multiple Views",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2017/263600a387/1ap5x2N7jP2",
"parentPublication": {
"id": "proceedings/icvrv/2017/2636/0",
"title": "2017 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797796",
"title": "Full Parallax Table Top 3D Display Using Visually Equivalent Light Field",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797796/1cJ1cj63M3u",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "mcg2012050004",
"articleId": "13rRUynZ5qj",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "mcg2012050012",
"articleId": "13rRUyoPSRy",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNxRWI45",
"title": "April-June",
"year": "2015",
"issueNum": "02",
"idPrefix": "tn",
"pubType": "journal",
"volume": "2",
"label": "April-June",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxDIti0",
"doi": "10.1109/TNSE.2015.2425961",
"abstract": "Several systems can be modeled as sets of interconnected networks or networks with multiple types of connections, here generally called multilayer networks. Spreading processes such as information propagation among users of online social networks, or the diffusion of pathogens among individuals through their contact network, are fundamental phenomena occurring in these networks. However, while information diffusion in single networks has received considerable attention from various disciplines for over a decade, spreading processes in multilayer networks is still a young research area presenting many challenging research issues. In this paper, we review the main models, results and applications of multilayer spreading processes and discuss some promising research directions.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Several systems can be modeled as sets of interconnected networks or networks with multiple types of connections, here generally called multilayer networks. Spreading processes such as information propagation among users of online social networks, or the diffusion of pathogens among individuals through their contact network, are fundamental phenomena occurring in these networks. However, while information diffusion in single networks has received considerable attention from various disciplines for over a decade, spreading processes in multilayer networks is still a young research area presenting many challenging research issues. In this paper, we review the main models, results and applications of multilayer spreading processes and discuss some promising research directions.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Several systems can be modeled as sets of interconnected networks or networks with multiple types of connections, here generally called multilayer networks. Spreading processes such as information propagation among users of online social networks, or the diffusion of pathogens among individuals through their contact network, are fundamental phenomena occurring in these networks. However, while information diffusion in single networks has received considerable attention from various disciplines for over a decade, spreading processes in multilayer networks is still a young research area presenting many challenging research issues. In this paper, we review the main models, results and applications of multilayer spreading processes and discuss some promising research directions.",
"title": "Spreading Processes in Multilayer Networks",
"normalizedTitle": "Spreading Processes in Multilayer Networks",
"fno": "07093190",
"hasPdf": true,
"idPrefix": "tn",
"keywords": [
"Nonhomogeneous Media",
"Electronic Mail",
"Multiplexing",
"Analytical Models",
"Social Network Services",
"Communities",
"Pathogens",
"Information Diffusion",
"Multilayer Network",
"Multiplex",
"Interconnected",
"Spreading Processes"
],
"authors": [
{
"givenName": "Mostafa",
"surname": "Salehi",
"fullName": "Mostafa Salehi",
"affiliation": ", University of Bologna, Italy",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Rajesh",
"surname": "Sharma",
"fullName": "Rajesh Sharma",
"affiliation": ", University of Bologna, Italy",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Moreno",
"surname": "Marzolla",
"fullName": "Moreno Marzolla",
"affiliation": ", University of Bologna, Italy",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Matteo",
"surname": "Magnani",
"fullName": "Matteo Magnani",
"affiliation": ", Uppsala University, Sweden",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Payam",
"surname": "Siyari",
"fullName": "Payam Siyari",
"affiliation": ", Georgia Institute of Technology, Atlanta, GA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Danilo",
"surname": "Montesi",
"fullName": "Danilo Montesi",
"affiliation": ", University of Bologna, Italy",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "02",
"pubDate": "2015-04-01 00:00:00",
"pubType": "trans",
"pages": "65-83",
"year": "2015",
"issn": null,
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icdmw/2016/5910/0/07836826",
"title": "SpreadViz: Analytics and Visualization of Spreading Processes in Social Networks",
"doi": null,
"abstractUrl": "/proceedings-article/icdmw/2016/07836826/12OmNqyDjov",
"parentPublication": {
"id": "proceedings/icdmw/2016/5910/0",
"title": "2016 IEEE 16th International Conference on Data Mining Workshops (ICDMW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/compsac/2017/0367/1/0367a697",
"title": "On the Effectiveness of Link Addition for Improving Robustness of Multiplex Networks against Layer Node-Based Attack",
"doi": null,
"abstractUrl": "/proceedings-article/compsac/2017/0367a697/12OmNsbY6RA",
"parentPublication": {
"id": "proceedings/compsac/2017/0367/1",
"title": "2017 IEEE 41st Annual Computer Software and Applications Conference (COMPSAC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/compsacw/2014/3578/0/3578a240",
"title": "How Overlapping Community Structure Affects Epidemic Spreading in Complex Networks",
"doi": null,
"abstractUrl": "/proceedings-article/compsacw/2014/3578a240/12OmNx8OunN",
"parentPublication": {
"id": "proceedings/compsacw/2014/3578/0",
"title": "2014 IEEE 38th International Computer Software and Applications Conference Workshops (COMPSACW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/asonam/2016/2846/0/07752422",
"title": "Local community detection in multilayer networks",
"doi": null,
"abstractUrl": "/proceedings-article/asonam/2016/07752422/12OmNxuXcAX",
"parentPublication": {
"id": "proceedings/asonam/2016/2846/0",
"title": "2016 IEEE/ACM International Conference on Advances in Social Networks Analysis and Mining (ASONAM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dsc/2017/1600/0/1600a368",
"title": "Tasks for Visual Analytics in Multilayer Networks",
"doi": null,
"abstractUrl": "/proceedings-article/dsc/2017/1600a368/12OmNz61dsf",
"parentPublication": {
"id": "proceedings/dsc/2017/1600/0",
"title": "2017 IEEE Second International Conference on Data Science in Cyberspace (DSC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tn/2018/03/08039503",
"title": "Isomorphisms in Multilayer Networks",
"doi": null,
"abstractUrl": "/journal/tn/2018/03/08039503/13rRUIJuxqo",
"parentPublication": {
"id": "trans/tn",
"title": "IEEE Transactions on Network Science and Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/nt/2013/05/06423227",
"title": "Generalized Epidemic Mean-field Model for Spreading Processes Over Multilayer Complex Networks",
"doi": null,
"abstractUrl": "/journal/nt/2013/05/06423227/13rRUxE04qN",
"parentPublication": {
"id": "trans/nt",
"title": "IEEE/ACM Transactions on Networking",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wetice/2018/6916/0/691601a142",
"title": "Discovering Stable Communities in Dynamic Multilayer Social Networks",
"doi": null,
"abstractUrl": "/proceedings-article/wetice/2018/691601a142/17D45WaTkdR",
"parentPublication": {
"id": "proceedings/wetice/2018/6916/0",
"title": "2018 IEEE 27th International Conference on Enabling Technologies: Infrastructure for Collaborative Enterprises (WETICE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2021/3902/0/09671831",
"title": "Truss Decomposition on Multilayer Graphs",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2021/09671831/1A8hpC7FG92",
"parentPublication": {
"id": "proceedings/big-data/2021/3902/0",
"title": "2021 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ispa-bdcloud-socialcom-sustaincom/2019/4328/0/09047283",
"title": "Rumor Spreading with Cross Propagation in Multilayer Social Networks",
"doi": null,
"abstractUrl": "/proceedings-article/ispa-bdcloud-socialcom-sustaincom/2019/09047283/1iC6zT8dW4E",
"parentPublication": {
"id": "proceedings/ispa-bdcloud-socialcom-sustaincom/2019/4328/0",
"title": "2019 IEEE Intl Conf on Parallel & Distributed Processing with Applications, Big Data & Cloud Computing, Sustainable Computing & Communications, Social Computing & Networking (ISPA/BDCloud/SocialCom/SustainCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "07115154",
"articleId": "13rRUwh80vk",
"__typename": "AdjacentArticleType"
},
"next": null,
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNz5JC2z",
"title": "Nov.",
"year": "2017",
"issueNum": "11",
"idPrefix": "tg",
"pubType": "journal",
"volume": "23",
"label": "Nov.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxcbnHi",
"doi": "10.1109/TVCG.2017.2734427",
"abstract": "We propose an occlusion compensation method for optical see-through head-mounted displays (OST-HMDs) equipped with a singlelayer transmissive spatial light modulator (SLM), in particular, a liquid crystal display (LCD). Occlusion is an important depth cue for 3D perception, yet realizing it on OST-HMDs is particularly difficult due to the displays' semitransparent nature. A key component for the occlusion support is the SLM—a device that can selectively interfere with light rays passing through it. For example, an LCD is a transmissive SLM that can block or pass incoming light rays by turning pixels black or transparent. A straightforward solution places an LCD in front of an OST-HMD and drives the LCD to block light rays that could pass through rendered virtual objects at the viewpoint. This simple approach is, however, defective due to the depth mismatch between the LCD panel and the virtual objects, leading to blurred occlusion. This led existing OST-HMDs to employ dedicated hardware such as focus optics and multi-stacked SLMs. Contrary to these viable, yet complex and/or computationally expensive solutions, we return to the single-layer LCD approach for the hardware simplicity while maintaining fine occlusion—we compensate for a degraded occlusion area by overlaying a compensation image. We compute the image based on the HMD parameters and the background scene captured by a scene camera. The evaluation demonstrates that the proposed method reduced the occlusion leak error by 61.4% and the occlusion error by 85.7%.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose an occlusion compensation method for optical see-through head-mounted displays (OST-HMDs) equipped with a singlelayer transmissive spatial light modulator (SLM), in particular, a liquid crystal display (LCD). Occlusion is an important depth cue for 3D perception, yet realizing it on OST-HMDs is particularly difficult due to the displays' semitransparent nature. A key component for the occlusion support is the SLM—a device that can selectively interfere with light rays passing through it. For example, an LCD is a transmissive SLM that can block or pass incoming light rays by turning pixels black or transparent. A straightforward solution places an LCD in front of an OST-HMD and drives the LCD to block light rays that could pass through rendered virtual objects at the viewpoint. This simple approach is, however, defective due to the depth mismatch between the LCD panel and the virtual objects, leading to blurred occlusion. This led existing OST-HMDs to employ dedicated hardware such as focus optics and multi-stacked SLMs. Contrary to these viable, yet complex and/or computationally expensive solutions, we return to the single-layer LCD approach for the hardware simplicity while maintaining fine occlusion—we compensate for a degraded occlusion area by overlaying a compensation image. We compute the image based on the HMD parameters and the background scene captured by a scene camera. The evaluation demonstrates that the proposed method reduced the occlusion leak error by 61.4% and the occlusion error by 85.7%.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose an occlusion compensation method for optical see-through head-mounted displays (OST-HMDs) equipped with a singlelayer transmissive spatial light modulator (SLM), in particular, a liquid crystal display (LCD). Occlusion is an important depth cue for 3D perception, yet realizing it on OST-HMDs is particularly difficult due to the displays' semitransparent nature. A key component for the occlusion support is the SLM—a device that can selectively interfere with light rays passing through it. For example, an LCD is a transmissive SLM that can block or pass incoming light rays by turning pixels black or transparent. A straightforward solution places an LCD in front of an OST-HMD and drives the LCD to block light rays that could pass through rendered virtual objects at the viewpoint. This simple approach is, however, defective due to the depth mismatch between the LCD panel and the virtual objects, leading to blurred occlusion. This led existing OST-HMDs to employ dedicated hardware such as focus optics and multi-stacked SLMs. Contrary to these viable, yet complex and/or computationally expensive solutions, we return to the single-layer LCD approach for the hardware simplicity while maintaining fine occlusion—we compensate for a degraded occlusion area by overlaying a compensation image. We compute the image based on the HMD parameters and the background scene captured by a scene camera. The evaluation demonstrates that the proposed method reduced the occlusion leak error by 61.4% and the occlusion error by 85.7%.",
"title": "Occlusion Leak Compensation for Optical See-Through Displays Using a Single-Layer Transmissive Spatial Light Modulator",
"normalizedTitle": "Occlusion Leak Compensation for Optical See-Through Displays Using a Single-Layer Transmissive Spatial Light Modulator",
"fno": "08007218",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Liquid Crystal Displays",
"Cameras",
"Optical Imaging",
"Hardware",
"Lenses",
"Glass",
"Image Color Analysis",
"Occlusion Support",
"Optical See Through HMD",
"Occlusion Leak",
"Spatial Light Modulator",
"Depth Cue"
],
"authors": [
{
"givenName": "Yuta",
"surname": "Itoh",
"fullName": "Yuta Itoh",
"affiliation": "Keio University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Takumi",
"surname": "Hamasaki",
"fullName": "Takumi Hamasaki",
"affiliation": "Keio University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Maki",
"surname": "Sugimoto",
"fullName": "Maki Sugimoto",
"affiliation": "Keio University",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "11",
"pubDate": "2017-11-01 00:00:00",
"pubType": "trans",
"pages": "2463-2473",
"year": "2017",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2012/4660/0/06402574",
"title": "Occlusion capable optical see-through head-mounted display using freeform optics",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2012/06402574/12OmNBEpnEt",
"parentPublication": {
"id": "proceedings/ismar/2012/4660/0",
"title": "2012 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/09/08052554",
"title": "A Survey of Calibration Methods for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2018/09/08052554/13rRUILtJqY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/04/07064856",
"title": "Light-Field Correction for Spatial Calibration of Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2015/04/07064856/13rRUwjGoG5",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08676153",
"title": "Light Attenuation Display: Subtractive See-Through Near-Eye Display via Spatial Color Filtering",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08676153/18LFbQfp6x2",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08676155",
"title": "Varifocal Occlusion for Optical See-Through Head-Mounted Displays using a Slide Occlusion Mask",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08676155/18LFfGhc49i",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a800",
"title": "Add-on Occlusion: An External Module for Optical See-through Augmented Reality Displays to Support Mutual Occlusion",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a800/1CJeADcapNK",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998139",
"title": "Factored Occlusion: Single Spatial Light Modulator Occlusion-capable Optical See-through Augmented Reality Display",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998139/1hrXe0Hbv0I",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a301",
"title": "Super Wide-view Optical See-through Head Mounted Displays with Per-pixel Occlusion Capability",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a301/1pysxIK95Yc",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09429918",
"title": "The Impact of Focus and Context Visualization Techniques on Depth Perception in Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09429918/1txPs5wi56E",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09463728",
"title": "Color Contrast Enhanced Rendering for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09463728/1uFxo1ImlpK",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "08007219",
"articleId": "13rRUxC0Sw2",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08007317",
"articleId": "13rRUILc8fg",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXWRKF",
"name": "ttg201711-08007218s1.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg201711-08007218s1.zip",
"extension": "zip",
"size": "25.1 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNzA6GUv",
"title": "May",
"year": "2019",
"issueNum": "05",
"idPrefix": "tg",
"pubType": "journal",
"volume": "25",
"label": "May",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "18LFbQfp6x2",
"doi": "10.1109/TVCG.2019.2899229",
"abstract": "We present a display for optical see-through near-eye displays based on light attenuation, a new paradigm that forms images by spatially subtracting colors of light. Existing optical see-through head-mounted displays (OST-HMDs) form virtual images in an additive manner-they optically combine the light from an embedded light source such as a microdisplay into the users' field of view (FoV). Instead, our light attenuation display filters the color of the real background light pixel-wise in the users' see-through view, resulting in an image as a spatial color filter. Our image formation is complementary to existing light-additive OST-HMDs. The core optical component in our system is a phase-only spatial light modulator (PSLM), a liquid crystal module that can control the phase of the light in each pixel. By combining PSLMs with polarization optics, our system realizes a spatially programmable color filter. In this paper, we introduce our optics design, evaluate the spatial color filter, consider applications including image rendering and FoV color control, and discuss the limitations of the current prototype.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a display for optical see-through near-eye displays based on light attenuation, a new paradigm that forms images by spatially subtracting colors of light. Existing optical see-through head-mounted displays (OST-HMDs) form virtual images in an additive manner-they optically combine the light from an embedded light source such as a microdisplay into the users' field of view (FoV). Instead, our light attenuation display filters the color of the real background light pixel-wise in the users' see-through view, resulting in an image as a spatial color filter. Our image formation is complementary to existing light-additive OST-HMDs. The core optical component in our system is a phase-only spatial light modulator (PSLM), a liquid crystal module that can control the phase of the light in each pixel. By combining PSLMs with polarization optics, our system realizes a spatially programmable color filter. In this paper, we introduce our optics design, evaluate the spatial color filter, consider applications including image rendering and FoV color control, and discuss the limitations of the current prototype.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a display for optical see-through near-eye displays based on light attenuation, a new paradigm that forms images by spatially subtracting colors of light. Existing optical see-through head-mounted displays (OST-HMDs) form virtual images in an additive manner-they optically combine the light from an embedded light source such as a microdisplay into the users' field of view (FoV). Instead, our light attenuation display filters the color of the real background light pixel-wise in the users' see-through view, resulting in an image as a spatial color filter. Our image formation is complementary to existing light-additive OST-HMDs. The core optical component in our system is a phase-only spatial light modulator (PSLM), a liquid crystal module that can control the phase of the light in each pixel. By combining PSLMs with polarization optics, our system realizes a spatially programmable color filter. In this paper, we introduce our optics design, evaluate the spatial color filter, consider applications including image rendering and FoV color control, and discuss the limitations of the current prototype.",
"title": "Light Attenuation Display: Subtractive See-Through Near-Eye Display via Spatial Color Filtering",
"normalizedTitle": "Light Attenuation Display: Subtractive See-Through Near-Eye Display via Spatial Color Filtering",
"fno": "08676153",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Colour Displays",
"Helmet Mounted Displays",
"Light Attenuation",
"Light Polarisation",
"Light Sources",
"Liquid Crystal Displays",
"Microdisplays",
"Optical Filters",
"Spatial Filters",
"Spatial Light Modulators",
"Spatially Subtracting Colors",
"Embedded Light Source",
"Background Light Pixel Wise",
"Core Optical Component",
"Phase Only Spatial Light Modulator",
"Polarization Optics",
"Spatially Programmable Color Filter",
"Image Rendering",
"Fo V Color Control",
"Optical See Through Head Mounted Displays",
"Virtual Imaging",
"Subtractive See Through Near Eye Display",
"Light Additive OST HMD",
"Optical See Through Near Eye Displays",
"Microdisplay",
"Light Attenuation Display Filters",
"PSLM",
"Liquid Crystal Module",
"Image Color Analysis",
"Lighting",
"Attenuation",
"Liquid Crystal Displays",
"Optical Imaging",
"Optical Attenuators",
"Optical Polarization",
"Light Attenuation Display",
"Phase Modulation",
"See Through Display",
"Vision Augmentation",
"Augmented Reality"
],
"authors": [
{
"givenName": "Yuta",
"surname": "Itoh",
"fullName": "Yuta Itoh",
"affiliation": "Tokyo Institute of Technology",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Tobias",
"surname": "Langlotz",
"fullName": "Tobias Langlotz",
"affiliation": "University of Otago",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Daisuke",
"surname": "Iwai",
"fullName": "Daisuke Iwai",
"affiliation": "Osaka University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Kiyoshi",
"surname": "Kiyokawa",
"fullName": "Kiyoshi Kiyokawa",
"affiliation": "Nara Institute of Science and Technology",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Toshiyuki",
"surname": "Amano",
"fullName": "Toshiyuki Amano",
"affiliation": "Wakayama University",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "05",
"pubDate": "2019-05-01 00:00:00",
"pubType": "trans",
"pages": "1951-1960",
"year": "2019",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2012/4660/0/06402574",
"title": "Occlusion capable optical see-through head-mounted display using freeform optics",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2012/06402574/12OmNBEpnEt",
"parentPublication": {
"id": "proceedings/ismar/2012/4660/0",
"title": "2012 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2017/6327/0/6327a202",
"title": "[POSTER] BrightView: Increasing Perceived Brightness in Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a202/12OmNqI04YU",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2017/6327/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446441",
"title": "BrightView: Increasing Perceived Brightness of Optical See-Through Head-Mounted Displays Through Unnoticeable Incident Light Reduction",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446441/13bd1sv5NxY",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/11/07165643",
"title": "Semi-Parametric Color Reproduction Method for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2015/11/07165643/13rRUILtJzB",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/11/08007218",
"title": "Occlusion Leak Compensation for Optical See-Through Displays Using a Single-Layer Transmissive Spatial Light Modulator",
"doi": null,
"abstractUrl": "/journal/tg/2017/11/08007218/13rRUxcbnHi",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08676155",
"title": "Varifocal Occlusion for Optical See-Through Head-Mounted Displays using a Slide Occlusion Mask",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08676155/18LFfGhc49i",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a409",
"title": "Adapting Michelson Contrast for use with Optical See-Through Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a409/1J7WpecpAwU",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a389",
"title": "Objective Measurements of Background Color Shifts Caused by Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a389/1J7WuL68jAY",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09416829",
"title": "Design of a Pupil-Matched Occlusion-Capable Optical See-Through Wearable Display",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09416829/1t8VUXSYL2E",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09463728",
"title": "Color Contrast Enhanced Rendering for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09463728/1uFxo1ImlpK",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "08643583",
"articleId": "18K0hdQEpoI",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08676155",
"articleId": "18LFfGhc49i",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNzA6GUv",
"title": "May",
"year": "2019",
"issueNum": "05",
"idPrefix": "tg",
"pubType": "journal",
"volume": "25",
"label": "May",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "18LFfGhc49i",
"doi": "10.1109/TVCG.2019.2899249",
"abstract": "We propose a varifocal occlusion technique for optical see-through head-mounted displays (OST-HMDs). Occlusion in OST-HMDs is a powerful visual cue that enables depth perception in augmented reality (AR). Without occlusion, virtual objects rendered by an OST-HMD appear semi-transparent and less realistic. A common occlusion technique is to use spatial light modulators (SLMs) to block incoming light rays at each pixel on the SLM selectively. However, most of the existing methods create an occlusion mask only at a single, fixed depth-typically at infinity. With recent advances in varifocal OST-HMDs, such traditional fixed-focus occlusion causes a mismatch in depth between the occlusion mask plane and the virtual object to be occluded, leading to an uncomfortable user experience with blurred occlusion masks. In this paper, we thus propose an OST-HMD system with varifocal occlusion capability: we physically slide a transmissive liquid crystal display (LCD) to optically shift the occlusion plane along the optical path so that the mask appears sharp and aligns to a virtual image at a given depth. Our solution has several benefits over existing varifocal occlusion methods: it is computationally less demanding and, more importantly, it is optically consistent, i.e., when a user loses focus on the corresponding virtual image, the mask again gets blurred consistently as the virtual image does. In the experiment, we build a proof-of-concept varifocal occlusion system implemented with a custom retinal projection display and demonstrate that the system can shift the occlusion plane to depths ranging from 25 cm to infinity.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose a varifocal occlusion technique for optical see-through head-mounted displays (OST-HMDs). Occlusion in OST-HMDs is a powerful visual cue that enables depth perception in augmented reality (AR). Without occlusion, virtual objects rendered by an OST-HMD appear semi-transparent and less realistic. A common occlusion technique is to use spatial light modulators (SLMs) to block incoming light rays at each pixel on the SLM selectively. However, most of the existing methods create an occlusion mask only at a single, fixed depth-typically at infinity. With recent advances in varifocal OST-HMDs, such traditional fixed-focus occlusion causes a mismatch in depth between the occlusion mask plane and the virtual object to be occluded, leading to an uncomfortable user experience with blurred occlusion masks. In this paper, we thus propose an OST-HMD system with varifocal occlusion capability: we physically slide a transmissive liquid crystal display (LCD) to optically shift the occlusion plane along the optical path so that the mask appears sharp and aligns to a virtual image at a given depth. Our solution has several benefits over existing varifocal occlusion methods: it is computationally less demanding and, more importantly, it is optically consistent, i.e., when a user loses focus on the corresponding virtual image, the mask again gets blurred consistently as the virtual image does. In the experiment, we build a proof-of-concept varifocal occlusion system implemented with a custom retinal projection display and demonstrate that the system can shift the occlusion plane to depths ranging from 25 cm to infinity.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose a varifocal occlusion technique for optical see-through head-mounted displays (OST-HMDs). Occlusion in OST-HMDs is a powerful visual cue that enables depth perception in augmented reality (AR). Without occlusion, virtual objects rendered by an OST-HMD appear semi-transparent and less realistic. A common occlusion technique is to use spatial light modulators (SLMs) to block incoming light rays at each pixel on the SLM selectively. However, most of the existing methods create an occlusion mask only at a single, fixed depth-typically at infinity. With recent advances in varifocal OST-HMDs, such traditional fixed-focus occlusion causes a mismatch in depth between the occlusion mask plane and the virtual object to be occluded, leading to an uncomfortable user experience with blurred occlusion masks. In this paper, we thus propose an OST-HMD system with varifocal occlusion capability: we physically slide a transmissive liquid crystal display (LCD) to optically shift the occlusion plane along the optical path so that the mask appears sharp and aligns to a virtual image at a given depth. Our solution has several benefits over existing varifocal occlusion methods: it is computationally less demanding and, more importantly, it is optically consistent, i.e., when a user loses focus on the corresponding virtual image, the mask again gets blurred consistently as the virtual image does. In the experiment, we build a proof-of-concept varifocal occlusion system implemented with a custom retinal projection display and demonstrate that the system can shift the occlusion plane to depths ranging from 25 cm to infinity.",
"title": "Varifocal Occlusion for Optical See-Through Head-Mounted Displays using a Slide Occlusion Mask",
"normalizedTitle": "Varifocal Occlusion for Optical See-Through Head-Mounted Displays using a Slide Occlusion Mask",
"fno": "08676155",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Augmented Reality",
"Eye",
"Helmet Mounted Displays",
"Hidden Feature Removal",
"Liquid Crystal Displays",
"Rendering Computer Graphics",
"Spatial Light Modulators",
"Augmented Reality",
"LCD",
"Fixed Depth",
"Single Depth",
"Optical See Through Head Mounted Displays",
"Custom Retinal Projection Display",
"Proof Of Concept Varifocal Occlusion System",
"Corresponding Virtual Image",
"Varifocal Occlusion Methods",
"Given Depth",
"Optical Path",
"Transmissive Liquid Crystal Display",
"Varifocal Occlusion Capability",
"OST HMD System",
"Blurred Occlusion Masks",
"Occlusion Mask Plane",
"Traditional Fixed Focus Occlusion",
"Varifocal OST HM Ds",
"Incoming Light Rays",
"Spatial Light Modulators",
"Common Occlusion Technique",
"Virtual Object",
"Depth Perception",
"Powerful Visual Cue",
"Varifocal Occlusion Technique",
"Slide Occlusion Mask",
"Size 25 0 Cm",
"Optical Imaging",
"Liquid Crystal Displays",
"Cameras",
"Adaptive Optics",
"Lenses",
"Optical Distortion",
"Glass",
"Occlusion",
"Varifocal",
"See Through Display",
"Augmented Reality"
],
"authors": [
{
"givenName": "Takumi",
"surname": "Hamasaki",
"fullName": "Takumi Hamasaki",
"affiliation": "Keio University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yuta",
"surname": "Itoh",
"fullName": "Yuta Itoh",
"affiliation": "Tokyo Institute of Technology",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "05",
"pubDate": "2019-05-01 00:00:00",
"pubType": "trans",
"pages": "1961-1969",
"year": "2019",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2012/4660/0/06402574",
"title": "Occlusion capable optical see-through head-mounted display using freeform optics",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2012/06402574/12OmNBEpnEt",
"parentPublication": {
"id": "proceedings/ismar/2012/4660/0",
"title": "2012 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2017/6327/0/6327a202",
"title": "[POSTER] BrightView: Increasing Perceived Brightness in Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a202/12OmNqI04YU",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2017/6327/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/09/08052554",
"title": "A Survey of Calibration Methods for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2018/09/08052554/13rRUILtJqY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/11/07165643",
"title": "Semi-Parametric Color Reproduction Method for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2015/11/07165643/13rRUILtJzB",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/04/07064856",
"title": "Light-Field Correction for Spatial Calibration of Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2015/04/07064856/13rRUwjGoG5",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/11/08007218",
"title": "Occlusion Leak Compensation for Optical See-Through Displays Using a Single-Layer Transmissive Spatial Light Modulator",
"doi": null,
"abstractUrl": "/journal/tg/2017/11/08007218/13rRUxcbnHi",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08676153",
"title": "Light Attenuation Display: Subtractive See-Through Near-Eye Display via Spatial Color Filtering",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08676153/18LFbQfp6x2",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a237",
"title": "A Compact Photochromic Occlusion Capable See-through Display with Holographic Lenses",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a237/1MNgTZ7ZNLO",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998139",
"title": "Factored Occlusion: Single Spatial Light Modulator Occlusion-capable Optical See-through Augmented Reality Display",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998139/1hrXe0Hbv0I",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a301",
"title": "Super Wide-view Optical See-through Head Mounted Displays with Per-pixel Occlusion Capability",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a301/1pysxIK95Yc",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "08676153",
"articleId": "18LFbQfp6x2",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08658185",
"articleId": "187ZsHB2Pwk",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNqHItJe",
"title": "May-June",
"year": "2015",
"issueNum": "03",
"idPrefix": "cg",
"pubType": "magazine",
"volume": "35",
"label": "May-June",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwInv6U",
"doi": "10.1109/MCG.2015.49",
"abstract": "A long-term goal in prostate cancer research is a sound prognosis prior to surgery, and as a consequence, data-centered research is becoming increasingly important. Currently, it takes several days to define meaningful cohorts by manually selecting patients from health record systems and performing statistical hypothesis tests with cohorts. The authors developed an efficient and effective visual-interactive system for the definition and analysis of patient cohorts. The system provides an overview of large sets of patient records and allows medical researchers to interactively drill down to relevant patient cohorts. In addition, a guidance concept helps them identify interesting relations between defined cohorts and rich sets of attributes available in the patient records. The system increases the efficiency of the researchers' analytical workflow by reducing the temporal effort from days to minutes.",
"abstracts": [
{
"abstractType": "Regular",
"content": "A long-term goal in prostate cancer research is a sound prognosis prior to surgery, and as a consequence, data-centered research is becoming increasingly important. Currently, it takes several days to define meaningful cohorts by manually selecting patients from health record systems and performing statistical hypothesis tests with cohorts. The authors developed an efficient and effective visual-interactive system for the definition and analysis of patient cohorts. The system provides an overview of large sets of patient records and allows medical researchers to interactively drill down to relevant patient cohorts. In addition, a guidance concept helps them identify interesting relations between defined cohorts and rich sets of attributes available in the patient records. The system increases the efficiency of the researchers' analytical workflow by reducing the temporal effort from days to minutes.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A long-term goal in prostate cancer research is a sound prognosis prior to surgery, and as a consequence, data-centered research is becoming increasingly important. Currently, it takes several days to define meaningful cohorts by manually selecting patients from health record systems and performing statistical hypothesis tests with cohorts. The authors developed an efficient and effective visual-interactive system for the definition and analysis of patient cohorts. The system provides an overview of large sets of patient records and allows medical researchers to interactively drill down to relevant patient cohorts. In addition, a guidance concept helps them identify interesting relations between defined cohorts and rich sets of attributes available in the patient records. The system increases the efficiency of the researchers' analytical workflow by reducing the temporal effort from days to minutes.",
"title": "A Visual-Interactive System for Prostate Cancer Cohort Analysis",
"normalizedTitle": "A Visual-Interactive System for Prostate Cancer Cohort Analysis",
"fno": "mcg2015030044",
"hasPdf": true,
"idPrefix": "cg",
"keywords": [
"Cancer",
"Interactive Systems",
"Medical Computing",
"Statistical Testing",
"Visual Interactive System",
"Prostate Cancer Cohort Analysis",
"Data Centered Research",
"Health Record Systems",
"Statistical Hypothesis Test",
"Prostate Cancer",
"Biomedical Image Processing",
"Data Visualization",
"Cancer",
"Surgery",
"Computer Graphics",
"Information Visualization",
"Visual Analytics",
"Cancer Research",
"Patient Cohorts",
"Hypotheses Generation",
"Hypotheses Validation"
],
"authors": [
{
"givenName": "Jürgen",
"surname": "Bernard",
"fullName": "Jürgen Bernard",
"affiliation": "Fraunhofer Institute for Computer Graphics Research",
"__typename": "ArticleAuthorType"
},
{
"givenName": "David",
"surname": "Sessler",
"fullName": "David Sessler",
"affiliation": "Fraunhofer Institute for Computer Graphics Research",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Thorsten",
"surname": "May",
"fullName": "Thorsten May",
"affiliation": "Fraunhofer Institute for Computer Graphics Research",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Thorsten",
"surname": "Schlomm",
"fullName": "Thorsten Schlomm",
"affiliation": "University Medical Center Hamburg-Eppendorf",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Dirk",
"surname": "Pehrke",
"fullName": "Dirk Pehrke",
"affiliation": "University Medical Center Hamburg-Eppendorf",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jörn",
"surname": "Kohlhammer",
"fullName": "Jörn Kohlhammer",
"affiliation": "Fraunhofer Institute for Computer Graphics Research",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "03",
"pubDate": "2015-05-01 00:00:00",
"pubType": "mags",
"pages": "44-55",
"year": "2015",
"issn": "0272-1716",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/bibe/2004/2173/0/21730191",
"title": "Quantitation of Extra-Capsular Prostate Tissue from Reconstructed Tissue Images",
"doi": null,
"abstractUrl": "/proceedings-article/bibe/2004/21730191/12OmNvHoQpz",
"parentPublication": {
"id": "proceedings/bibe/2004/2173/0",
"title": "Fourth IEEE Symposium on Bioinformatics and Bioengineering (BIBE'04)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vahc/2017/3187/0/08387545",
"title": "Visual analytics for radiomics: Combining medical imaging with patient data for clinical research",
"doi": null,
"abstractUrl": "/proceedings-article/vahc/2017/08387545/12OmNwdL7ku",
"parentPublication": {
"id": "proceedings/vahc/2017/3187/0",
"title": "2017 IEEE Workshop on Visual Analytics in Healthcare (VAHC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/niss/2009/3687/0/3687b063",
"title": "Applying Data Mining for Prostate Cancer",
"doi": null,
"abstractUrl": "/proceedings-article/niss/2009/3687b063/12OmNzC5SNj",
"parentPublication": {
"id": "proceedings/niss/2009/3687/0",
"title": "2009 International Conference on New Trends in Information and Service Science (NISS 2009)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ichi/2018/5377/0/537701a380",
"title": "Mapping the Treatment Journey for Patients with Prostate Cancer",
"doi": null,
"abstractUrl": "/proceedings-article/ichi/2018/537701a380/12OmNzSQdji",
"parentPublication": {
"id": "proceedings/ichi/2018/5377/0",
"title": "2018 IEEE International Conference on Healthcare Informatics (ICHI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/01/07539638",
"title": "PROACT: Iterative Design of a Patient-Centered Visualization for Effective Prostate Cancer Health Risk Communication",
"doi": null,
"abstractUrl": "/journal/tg/2017/01/07539638/13rRUxYINfk",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/01/07534774",
"title": "PhenoStacks: Cross-Sectional Cohort Phenotype Comparison Visualizations",
"doi": null,
"abstractUrl": "/journal/tg/2017/01/07534774/13rRUy2YLYB",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/01/07192665",
"title": "Supporting Iterative Cohort Construction with Visual Temporal Queries",
"doi": null,
"abstractUrl": "/journal/tg/2016/01/07192665/13rRUyeCkal",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/03/08283817",
"title": "Using Dashboard Networks to Visualize Multiple Patient Histories: A Design Study on Post-Operative Prostate Cancer",
"doi": null,
"abstractUrl": "/journal/tg/2019/03/08283817/17D45XacGi3",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09241732",
"title": "Visual cohort comparison for spatial single-cell omics-data",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09241732/1oijQyHFwVa",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vahc/2020/2644/0/264400a017",
"title": "Visualization Co-Design with Prostate Cancer Survivors who have Limited Graph Literacy",
"doi": null,
"abstractUrl": "/proceedings-article/vahc/2020/264400a017/1yhFE7okzgk",
"parentPublication": {
"id": "proceedings/vahc/2020/2644/0",
"title": "2020 Workshop on Visual Analytics in Healthcare (VAHC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "mcg2015030042",
"articleId": "13rRUwInv6T",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "mcg2015030056",
"articleId": "13rRUB7a13F",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvTBB87",
"title": "Jan.-March",
"year": "2017",
"issueNum": "01",
"idPrefix": "lt",
"pubType": "journal",
"volume": "10",
"label": "Jan.-March",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwdIOWV",
"doi": "10.1109/TLT.2016.2599522",
"abstract": "This paper presents a systematic literature review of the state-of-the-art of research on learning dashboards in the fields of Learning Analytics and Educational Data Mining. Research on learning dashboards aims to identify what data is meaningful to different stakeholders and how data can be presented to support sense-making processes. Learning dashboards are becoming popular due to the increased use of educational technologies, such as Learning Management Systems (LMS) and Massive Open Online Courses (MOOCs). The initial search of five main academic databases and GScholar resulted in 346 papers out of which 55 papers were included in the final analysis. Our review distinguishes different kinds of research studies as well as various aspects of learning dashboards and their maturity regarding evaluation. As the research field is still relatively young, most studies are exploratory and proof-of-concept. The review concludes by offering a definition for learning dashboards and by outlining open issues and future lines of work in the area of learning dashboards. There is a need for longitudinal research in authentic settings and studies that systematically compare different dashboard designs.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper presents a systematic literature review of the state-of-the-art of research on learning dashboards in the fields of Learning Analytics and Educational Data Mining. Research on learning dashboards aims to identify what data is meaningful to different stakeholders and how data can be presented to support sense-making processes. Learning dashboards are becoming popular due to the increased use of educational technologies, such as Learning Management Systems (LMS) and Massive Open Online Courses (MOOCs). The initial search of five main academic databases and GScholar resulted in 346 papers out of which 55 papers were included in the final analysis. Our review distinguishes different kinds of research studies as well as various aspects of learning dashboards and their maturity regarding evaluation. As the research field is still relatively young, most studies are exploratory and proof-of-concept. The review concludes by offering a definition for learning dashboards and by outlining open issues and future lines of work in the area of learning dashboards. There is a need for longitudinal research in authentic settings and studies that systematically compare different dashboard designs.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper presents a systematic literature review of the state-of-the-art of research on learning dashboards in the fields of Learning Analytics and Educational Data Mining. Research on learning dashboards aims to identify what data is meaningful to different stakeholders and how data can be presented to support sense-making processes. Learning dashboards are becoming popular due to the increased use of educational technologies, such as Learning Management Systems (LMS) and Massive Open Online Courses (MOOCs). The initial search of five main academic databases and GScholar resulted in 346 papers out of which 55 papers were included in the final analysis. Our review distinguishes different kinds of research studies as well as various aspects of learning dashboards and their maturity regarding evaluation. As the research field is still relatively young, most studies are exploratory and proof-of-concept. The review concludes by offering a definition for learning dashboards and by outlining open issues and future lines of work in the area of learning dashboards. There is a need for longitudinal research in authentic settings and studies that systematically compare different dashboard designs.",
"title": "Perceiving Learning at a Glance: A Systematic Literature Review of Learning Dashboard Research",
"normalizedTitle": "Perceiving Learning at a Glance: A Systematic Literature Review of Learning Dashboard Research",
"fno": "07542151",
"hasPdf": true,
"idPrefix": "lt",
"keywords": [
"Data Mining",
"Data Visualization",
"Systematics",
"Proposals",
"Bibliographies",
"Databases",
"Context",
"Systematic Review",
"Learning Analytics",
"Educational Data Mining",
"Information Visualization",
"Dashboards"
],
"authors": [
{
"givenName": "Beat A.",
"surname": "Schwendimann",
"fullName": "Beat A. Schwendimann",
"affiliation": "CHILI Lab, École Polytechnique Fédérale de Lausanne, Lausanne, Switzerland",
"__typename": "ArticleAuthorType"
},
{
"givenName": "María Jesús",
"surname": "Rodríguez-Triana",
"fullName": "María Jesús Rodríguez-Triana",
"affiliation": "REACT Lab, École Polytechnique Fédérale de Lausanne, Lausanne, Switzerland",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Andrii",
"surname": "Vozniuk",
"fullName": "Andrii Vozniuk",
"affiliation": "REACT Lab, École Polytechnique Fédérale de Lausanne, Lausanne, Switzerland",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Luis P.",
"surname": "Prieto",
"fullName": "Luis P. Prieto",
"affiliation": "CHILI Lab, École Polytechnique Fédérale de Lausanne, Lausanne, Switzerland",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Mina Shirvani",
"surname": "Boroujeni",
"fullName": "Mina Shirvani Boroujeni",
"affiliation": "CHILI Lab, École Polytechnique Fédérale de Lausanne, Lausanne, Switzerland",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Adrian",
"surname": "Holzer",
"fullName": "Adrian Holzer",
"affiliation": "REACT Lab, École Polytechnique Fédérale de Lausanne, Lausanne, Switzerland",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Denis",
"surname": "Gillet",
"fullName": "Denis Gillet",
"affiliation": "REACT Lab, École Polytechnique Fédérale de Lausanne, Lausanne, Switzerland",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Pierre",
"surname": "Dillenbourg",
"fullName": "Pierre Dillenbourg",
"affiliation": "CHILI Lab, École Polytechnique Fédérale de Lausanne, Lausanne, Switzerland",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "01",
"pubDate": "2017-01-01 00:00:00",
"pubType": "trans",
"pages": "30-41",
"year": "2017",
"issn": "1939-1382",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/seaa/2016/2820/0/2820a181",
"title": "Literature Review of Empirical Research Studies within the Domain of Acceptance Testing",
"doi": null,
"abstractUrl": "/proceedings-article/seaa/2016/2820a181/12OmNAu1Fky",
"parentPublication": {
"id": "proceedings/seaa/2016/2820/0",
"title": "2016 42th Euromicro Conference on Software Engineering and Advanced Applications (SEAA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aiccsa/2017/3581/0/3581b354",
"title": "A Systematic Literature Review on Mobile Learning in Saudi Arabia",
"doi": null,
"abstractUrl": "/proceedings-article/aiccsa/2017/3581b354/12OmNwekjJ8",
"parentPublication": {
"id": "proceedings/aiccsa/2017/3581/0",
"title": "2017 IEEE/ACS 14th International Conference on Computer Systems and Applications (AICCSA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/seaa/2013/5091/0/5091a009",
"title": "Kanban in software development: A systematic literature review",
"doi": null,
"abstractUrl": "/proceedings-article/seaa/2013/5091a009/12OmNyyeWuL",
"parentPublication": {
"id": "proceedings/seaa/2013/5091/0",
"title": "2013 39th EUROMICRO Conference on Software Engineering and Advanced Applications (SEAA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/compsac/2018/2666/1/266601a781",
"title": "Systematic Literature Review Regarding Communication Support in Project-Based Learning of Software Development",
"doi": null,
"abstractUrl": "/proceedings-article/compsac/2018/266601a781/144U9iFhNu2",
"parentPublication": {
"id": "proceedings/compsac/2018/2666/2",
"title": "2018 IEEE 42nd Annual Computer Software and Applications Conference (COMPSAC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icngcis/2017/4205/0/6361a043",
"title": "Systematic Literature Review on Software Effort Estimation Using Machine Learning Approaches",
"doi": null,
"abstractUrl": "/proceedings-article/icngcis/2017/6361a043/17D45VtKiwt",
"parentPublication": {
"id": "proceedings/icngcis/2017/4205/0",
"title": "2017 International Conference on Next Generation Computing and Information Systems (ICNGCIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2018/1174/0/08659306",
"title": "Systematic Literature Review of Students’ Affective Responses to Active Learning: Overview of Results",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2018/08659306/18j9e3hUj7i",
"parentPublication": {
"id": "proceedings/fie/2018/1174/0",
"title": "2018 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/se4rai/2022/9319/0/931900a001",
"title": "Operationalizing Machine Learning Models - A Systematic Literature Review",
"doi": null,
"abstractUrl": "/proceedings-article/se4rai/2022/931900a001/1ED20Ql4AIo",
"parentPublication": {
"id": "proceedings/se4rai/2022/9319/0",
"title": "2022 IEEE/ACM 1st International Workshop on Software Engineering for Responsible Artificial Intelligence (SE4RAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/esem/2019/2968/0/08870142",
"title": "Multivocal literature reviews in software engineering: Preliminary findings from a tertiary study",
"doi": null,
"abstractUrl": "/proceedings-article/esem/2019/08870142/1ecCOtyuDT2",
"parentPublication": {
"id": "proceedings/esem/2019/2968/0",
"title": "2019 ACM/IEEE International Symposium on Empirical Software Engineering and Measurement (ESEM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csde/2020/1974/0/09411535",
"title": "Automatic prediction of learning styles in learning management systems: a literature review",
"doi": null,
"abstractUrl": "/proceedings-article/csde/2020/09411535/1taFanxpzcA",
"parentPublication": {
"id": "proceedings/csde/2020/1974/0",
"title": "2020 IEEE Asia-Pacific Conference on Computer Science and Data Engineering (CSDE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/encycris/2021/4553/0/455300a021",
"title": "A Systematic Literature Review on Malicious Use of Reinforcement Learning",
"doi": null,
"abstractUrl": "/proceedings-article/encycris/2021/455300a021/1v566uyMFFe",
"parentPublication": {
"id": "proceedings/encycris/2021/4553/0",
"title": "2021 IEEE/ACM 2nd International Workshop on Engineering and Cybersecurity of Critical Systems (EnCyCriS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "07589022",
"articleId": "13rRUwI5TZp",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07723865",
"articleId": "13rRUEgs2yi",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNxvO04Q",
"title": "Jan.",
"year": "2017",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "23",
"label": "Jan.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxYINfk",
"doi": "10.1109/TVCG.2016.2598588",
"abstract": "Prostate cancer is the most common cancer among men in the US, and yet most cases represent localized cancer for which the optimal treatment is unclear. Accumulating evidence suggests that the available treatment options, including surgery and conservative treatment, result in a similar prognosis for most men with localized prostate cancer. However, approximately 90% of patients choose surgery over conservative treatment, despite the risk of severe side effects like erectile dysfunction and incontinence. Recent medical research suggests that a key reason is the lack of patient-centered tools that can effectively communicate personalized risk information and enable them to make better health decisions. In this paper, we report the iterative design process and results of developing the PROgnosis Assessment for Conservative Treatment (PROACT) tool, a personalized health risk communication tool for localized prostate cancer patients. PROACT utilizes two published clinical prediction models to communicate the patients' personalized risk estimates and compare treatment options. In collaboration with the Maine Medical Center, we conducted two rounds of evaluations with prostate cancer survivors and urologists to identify the design elements and narrative structure that effectively facilitate patient comprehension under emotional distress. Our results indicate that visualization can be an effective means to communicate complex risk information to patients with low numeracy and visual literacy. However, the visualizations need to be carefully chosen to balance readability with ease of comprehension. In addition, due to patients' charged emotional state, an intuitive narrative structure that considers the patients' information need is critical to aid the patients' comprehension of their risk information.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Prostate cancer is the most common cancer among men in the US, and yet most cases represent localized cancer for which the optimal treatment is unclear. Accumulating evidence suggests that the available treatment options, including surgery and conservative treatment, result in a similar prognosis for most men with localized prostate cancer. However, approximately 90% of patients choose surgery over conservative treatment, despite the risk of severe side effects like erectile dysfunction and incontinence. Recent medical research suggests that a key reason is the lack of patient-centered tools that can effectively communicate personalized risk information and enable them to make better health decisions. In this paper, we report the iterative design process and results of developing the PROgnosis Assessment for Conservative Treatment (PROACT) tool, a personalized health risk communication tool for localized prostate cancer patients. PROACT utilizes two published clinical prediction models to communicate the patients' personalized risk estimates and compare treatment options. In collaboration with the Maine Medical Center, we conducted two rounds of evaluations with prostate cancer survivors and urologists to identify the design elements and narrative structure that effectively facilitate patient comprehension under emotional distress. Our results indicate that visualization can be an effective means to communicate complex risk information to patients with low numeracy and visual literacy. However, the visualizations need to be carefully chosen to balance readability with ease of comprehension. In addition, due to patients' charged emotional state, an intuitive narrative structure that considers the patients' information need is critical to aid the patients' comprehension of their risk information.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Prostate cancer is the most common cancer among men in the US, and yet most cases represent localized cancer for which the optimal treatment is unclear. Accumulating evidence suggests that the available treatment options, including surgery and conservative treatment, result in a similar prognosis for most men with localized prostate cancer. However, approximately 90% of patients choose surgery over conservative treatment, despite the risk of severe side effects like erectile dysfunction and incontinence. Recent medical research suggests that a key reason is the lack of patient-centered tools that can effectively communicate personalized risk information and enable them to make better health decisions. In this paper, we report the iterative design process and results of developing the PROgnosis Assessment for Conservative Treatment (PROACT) tool, a personalized health risk communication tool for localized prostate cancer patients. PROACT utilizes two published clinical prediction models to communicate the patients' personalized risk estimates and compare treatment options. In collaboration with the Maine Medical Center, we conducted two rounds of evaluations with prostate cancer survivors and urologists to identify the design elements and narrative structure that effectively facilitate patient comprehension under emotional distress. Our results indicate that visualization can be an effective means to communicate complex risk information to patients with low numeracy and visual literacy. However, the visualizations need to be carefully chosen to balance readability with ease of comprehension. In addition, due to patients' charged emotional state, an intuitive narrative structure that considers the patients' information need is critical to aid the patients' comprehension of their risk information.",
"title": "PROACT: Iterative Design of a Patient-Centered Visualization for Effective Prostate Cancer Health Risk Communication",
"normalizedTitle": "PROACT: Iterative Design of a Patient-Centered Visualization for Effective Prostate Cancer Health Risk Communication",
"fno": "07539638",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Cancer",
"Data Visualisation",
"Decision Making",
"Medical Information Systems",
"Patient Treatment",
"Psychology",
"Risk Analysis",
"Iterative Design",
"Patient Centered Visualization",
"Prostate Cancer Health Risk Communication",
"Optimal Treatment",
"Surgery",
"Conservative Treatment",
"Side Effects",
"Patient Centered Tools",
"Personalized Risk Information",
"Health Decision Making",
"Iterative Design Process",
"Prognosis Assessment For Conservative Treatment",
"PROACT Tool",
"Personalized Health Risk Communication Tool",
"Localized Prostate Cancer Patients",
"Clinical Prediction Models",
"Personalized Risk Estimates",
"Patient Treatment",
"Maine Medical Center",
"Emotional Distress",
"Visual Literacy",
"Prostate Cancer",
"Data Visualization",
"Visualization",
"Prognostics And Health Management",
"Medical Diagnostic Imaging",
"Design Studies",
"Task And Requirement Analysis",
"Presentation",
"Production",
"And Dissemination",
"Medical Visualization"
],
"authors": [
{
"givenName": "Anzu",
"surname": "Hakone",
"fullName": "Anzu Hakone",
"affiliation": "Tufts University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Lane",
"surname": "Harrison",
"fullName": "Lane Harrison",
"affiliation": "Worcester Polytechnic Institute",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Alvitta",
"surname": "Ottley",
"fullName": "Alvitta Ottley",
"affiliation": "Tufts University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Nathan",
"surname": "Winters",
"fullName": "Nathan Winters",
"affiliation": "Tufts University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Caitlin",
"surname": "Gutheil",
"fullName": "Caitlin Gutheil",
"affiliation": "Maine Medical Center",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Paul K. J.",
"surname": "Han",
"fullName": "Paul K. J. Han",
"affiliation": "Maine Medical Center",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Remco",
"surname": "Chang",
"fullName": "Remco Chang",
"affiliation": "Tufts University",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2017-01-01 00:00:00",
"pubType": "trans",
"pages": "601-610",
"year": "2017",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/cbms/2016/9036/0/9036a019",
"title": "Predicting Advanced Prostate Cancer Endpoints from Early Indications via Transductive Semi-Supervised Regression",
"doi": null,
"abstractUrl": "/proceedings-article/cbms/2016/9036a019/12OmNAoDhTW",
"parentPublication": {
"id": "proceedings/cbms/2016/9036/0",
"title": "2016 IEEE 29th International Symposium on Computer-Based Medical Systems (CBMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/itag/2014/6795/0/6795a020",
"title": "An Intelligent Serious Game for Supporting African and African Caribbean Men during Pre- and Post-Diagnosis of Prostate Cancer",
"doi": null,
"abstractUrl": "/proceedings-article/itag/2014/6795a020/12OmNxEBzdb",
"parentPublication": {
"id": "proceedings/itag/2014/6795/0",
"title": "2014 International Conference on Interactive Technologies and Games (iTAG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wetice/2011/4410/0/4410a151",
"title": "Comparison of Machine Learning Techniques using the WEKA Environment for Prostate Cancer Therapy Plan",
"doi": null,
"abstractUrl": "/proceedings-article/wetice/2011/4410a151/12OmNyYm2px",
"parentPublication": {
"id": "proceedings/wetice/2011/4410/0",
"title": "2011 IEEE 20th International Workshops on Enabling Technologies: Infrastructure for Collaborative Enterprises",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/niss/2009/3687/0/3687b063",
"title": "Applying Data Mining for Prostate Cancer",
"doi": null,
"abstractUrl": "/proceedings-article/niss/2009/3687b063/12OmNzC5SNj",
"parentPublication": {
"id": "proceedings/niss/2009/3687/0",
"title": "2009 International Conference on New Trends in Information and Service Science (NISS 2009)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ichi/2018/5377/0/537701a380",
"title": "Mapping the Treatment Journey for Patients with Prostate Cancer",
"doi": null,
"abstractUrl": "/proceedings-article/ichi/2018/537701a380/12OmNzSQdji",
"parentPublication": {
"id": "proceedings/ichi/2018/5377/0",
"title": "2018 IEEE International Conference on Healthcare Informatics (ICHI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ithings-greencom-cpscom-smartdata/2017/3066/0/08276897",
"title": "Analyse Lifestyle Related Prostate Cancer Risk Factors Retrieved from Literacy",
"doi": null,
"abstractUrl": "/proceedings-article/ithings-greencom-cpscom-smartdata/2017/08276897/17D45XeKgvD",
"parentPublication": {
"id": "proceedings/ithings-greencom-cpscom-smartdata/2017/3066/0",
"title": "2017 IEEE International Conference on Internet of Things (iThings) and IEEE Green Computing and Communications (GreenCom) and IEEE Cyber, Physical and Social Computing (CPSCom) and IEEE Smart Data (SmartData)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2022/6819/0/09995633",
"title": "A Multi-Omics Classifier For Prediction Of Androgen Deprivation Treatment Response In Prostate Cancer Patients",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2022/09995633/1JC3tC1Ks80",
"parentPublication": {
"id": "proceedings/bibm/2022/6819/0",
"title": "2022 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2019/0858/0/09006036",
"title": "Classification Models and Survival Analysis for Prostate Cancer Using RNA Sequencing and Clinical Data",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2019/09006036/1hJsnQmUiEE",
"parentPublication": {
"id": "proceedings/big-data/2019/0858/0",
"title": "2019 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ichi/2021/0132/0/013200a181",
"title": "Identification of Disease-Disease Network Communities in Subpopulations of Patients with Prostate Cancer",
"doi": null,
"abstractUrl": "/proceedings-article/ichi/2021/013200a181/1xIOTnI0kkU",
"parentPublication": {
"id": "proceedings/ichi/2021/0132/0",
"title": "2021 IEEE 9th International Conference on Healthcare Informatics (ICHI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vahc/2020/2644/0/264400a017",
"title": "Visualization Co-Design with Prostate Cancer Survivors who have Limited Graph Literacy",
"doi": null,
"abstractUrl": "/proceedings-article/vahc/2020/264400a017/1yhFE7okzgk",
"parentPublication": {
"id": "proceedings/vahc/2020/2644/0",
"title": "2020 Workshop on Visual Analytics in Healthcare (VAHC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "07539649",
"articleId": "13rRUNvgz4k",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07536133",
"articleId": "13rRUyuNsx1",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1LbFmG2HHnW",
"doi": "10.1109/TVCG.2023.3251344",
"abstract": "Dashboards, which comprise multiple views on a single display, help analyze and communicate multiple perspectives of data simultaneously. However, creating effective and elegant dashboards is challenging since it requires careful and logical arrangement and coordination of multiple visualizations. To solve the problem, we propose a data-driven approach for mining design rules from dashboards and automating dashboard organization. Specifically, we focus on two prominent aspects of the organization: <italic>arrangement</italic>, which describes the position, size, and layout of each view in the display space; and <italic>coordination</italic>, which indicates the interaction between pairwise views. We build a new dataset containing 854 dashboards crawled online, and develop feature engineering methods for describing the single views and view-wise relationships in terms of data, encoding, layout, and interactions. Further, we identify design rules among those features and develop a recommender for dashboard design. We demonstrate the usefulness of DMiner through an expert study and a user study. The expert study shows that our extracted design rules are reasonable and conform to the design practice of experts. Moreover, a comparative user study shows that our recommender could help automate dashboard organization and reach human-level performance. In summary, our work offers a promising starting point for design mining visualizations to build recommenders.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Dashboards, which comprise multiple views on a single display, help analyze and communicate multiple perspectives of data simultaneously. However, creating effective and elegant dashboards is challenging since it requires careful and logical arrangement and coordination of multiple visualizations. To solve the problem, we propose a data-driven approach for mining design rules from dashboards and automating dashboard organization. Specifically, we focus on two prominent aspects of the organization: <italic>arrangement</italic>, which describes the position, size, and layout of each view in the display space; and <italic>coordination</italic>, which indicates the interaction between pairwise views. We build a new dataset containing 854 dashboards crawled online, and develop feature engineering methods for describing the single views and view-wise relationships in terms of data, encoding, layout, and interactions. Further, we identify design rules among those features and develop a recommender for dashboard design. We demonstrate the usefulness of DMiner through an expert study and a user study. The expert study shows that our extracted design rules are reasonable and conform to the design practice of experts. Moreover, a comparative user study shows that our recommender could help automate dashboard organization and reach human-level performance. In summary, our work offers a promising starting point for design mining visualizations to build recommenders.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Dashboards, which comprise multiple views on a single display, help analyze and communicate multiple perspectives of data simultaneously. However, creating effective and elegant dashboards is challenging since it requires careful and logical arrangement and coordination of multiple visualizations. To solve the problem, we propose a data-driven approach for mining design rules from dashboards and automating dashboard organization. Specifically, we focus on two prominent aspects of the organization: arrangement, which describes the position, size, and layout of each view in the display space; and coordination, which indicates the interaction between pairwise views. We build a new dataset containing 854 dashboards crawled online, and develop feature engineering methods for describing the single views and view-wise relationships in terms of data, encoding, layout, and interactions. Further, we identify design rules among those features and develop a recommender for dashboard design. We demonstrate the usefulness of DMiner through an expert study and a user study. The expert study shows that our extracted design rules are reasonable and conform to the design practice of experts. Moreover, a comparative user study shows that our recommender could help automate dashboard organization and reach human-level performance. In summary, our work offers a promising starting point for design mining visualizations to build recommenders.",
"title": "Dashboard Design Mining and Recommendation",
"normalizedTitle": "Dashboard Design Mining and Recommendation",
"fno": "10057994",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualization",
"Layout",
"Encoding",
"Feature Extraction",
"Data Mining",
"Visualization",
"Software Development Management",
"Design Mining",
"Visualization Recommendation",
"Multiple View Visualization",
"Dashboards"
],
"authors": [
{
"givenName": "Yanna",
"surname": "Lin",
"fullName": "Yanna Lin",
"affiliation": "Hong Kong University of Science and Technology, Hong Kong",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Haotian",
"surname": "Li",
"fullName": "Haotian Li",
"affiliation": "Hong Kong University of Science and Technology, Hong Kong",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Aoyu",
"surname": "Wu",
"fullName": "Aoyu Wu",
"affiliation": "Hong Kong University of Science and Technology, Hong Kong",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yong",
"surname": "Wang",
"fullName": "Yong Wang",
"affiliation": "Singapore Management University, Singapore",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Huamin",
"surname": "Qu",
"fullName": "Huamin Qu",
"affiliation": "Hong Kong University of Science and Technology, Hong Kong",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-03-01 00:00:00",
"pubType": "trans",
"pages": "1-15",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/hicss/2016/5670/0/5670d483",
"title": "Insights from the Design and Evaluation of a Personal Health Dashboard",
"doi": null,
"abstractUrl": "/proceedings-article/hicss/2016/5670d483/12OmNAnMuCE",
"parentPublication": {
"id": "proceedings/hicss/2016/5670/0",
"title": "2016 49th Hawaii International Conference on System Sciences (HICSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/01/08443395",
"title": "What Do We Talk About When We Talk About Dashboards?",
"doi": null,
"abstractUrl": "/journal/tg/2019/01/08443395/17D45XDIXWb",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/03/08283817",
"title": "Using Dashboard Networks to Visualize Multiple Patient Histories: A Design Study on Post-Operative Prostate Cancer",
"doi": null,
"abstractUrl": "/journal/tg/2019/03/08283817/17D45XacGi3",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09903550",
"title": "Dashboard Design Patterns",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09903550/1GZolSVvsPu",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09906971",
"title": "DashBot: Insight-Driven Dashboard Generation Based on Deep Reinforcement Learning",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09906971/1H5EWMQX9ZK",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09911200",
"title": "MEDLEY: Intent-based Recommendations to Support Dashboard Composition<sc/>",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09911200/1Hcjm0PMkgw",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/09/09035622",
"title": "LADV: Deep Learning Assisted Authoring of Dashboard Visualizations From Images and Sketches",
"doi": null,
"abstractUrl": "/journal/tg/2021/09/09035622/1iaeAO11H6o",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/issrew/2020/7735/0/773500a215",
"title": "Declarative Dashboard Generation",
"doi": null,
"abstractUrl": "/proceedings-article/issrew/2020/773500a215/1q7jsZHI07u",
"parentPublication": {
"id": "proceedings/issrew/2020/7735/0",
"title": "2020 IEEE International Symposium on Software Reliability Engineering Workshops (ISSREW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552449",
"title": "MultiVision: Designing Analytical Dashboards with Deep Learning Based Recommendation",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552449/1xic65iQBoY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2023/01/09656613",
"title": "Finding Their Data Voice: Practices and Challenges of Dashboard Users",
"doi": null,
"abstractUrl": "/magazine/cg/2023/01/09656613/1zumu8nC20U",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10057010",
"articleId": "1La0xnHKuAM",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10057483",
"articleId": "1LbFmZlZK24",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1LdkjxiZML6",
"name": "ttg555501-010057994s1-tvcg-3251344-mm.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010057994s1-tvcg-3251344-mm.zip",
"extension": "zip",
"size": "6.27 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "1Krcdxh8rDO",
"title": "Jan.-Feb.",
"year": "2023",
"issueNum": "01",
"idPrefix": "cg",
"pubType": "magazine",
"volume": "43",
"label": "Jan.-Feb.",
"downloadables": {
"hasCover": true,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1zumu8nC20U",
"doi": "10.1109/MCG.2021.3136545",
"abstract": "Dashboards are the ubiquitous means of data communication within organizations. Yet we have limited understanding of how they factor into data practices in the workplace, particularly for data workers who do not self-identify as professional analysts. We focus on data workers who use dashboards as a primary interface to data, reporting on an interview study that characterizes their data practices and the accompanying barriers to seamless data interaction. While dashboards are typically designed for data consumption, our findings show that dashboard users have far more diverse needs. To capture these activities, we frame data workers' practices as data conversations: conversations with data capture classic analysis (asking and answering data questions), while conversations through and around data involve constructing representations and narratives for sharing and communication. Dashboard users faced substantial barriers in their data conversations: their engagement with data was often intermittent, dependent on experts, and involved an awkward assembly of tools. We challenge the visualization and analytics community to embrace dashboard users as a population and design tools that blend seamlessly into their work contexts.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Dashboards are the ubiquitous means of data communication within organizations. Yet we have limited understanding of how they factor into data practices in the workplace, particularly for data workers who do not self-identify as professional analysts. We focus on data workers who use dashboards as a primary interface to data, reporting on an interview study that characterizes their data practices and the accompanying barriers to seamless data interaction. While dashboards are typically designed for data consumption, our findings show that dashboard users have far more diverse needs. To capture these activities, we frame data workers' practices as data conversations: conversations with data capture classic analysis (asking and answering data questions), while conversations through and around data involve constructing representations and narratives for sharing and communication. Dashboard users faced substantial barriers in their data conversations: their engagement with data was often intermittent, dependent on experts, and involved an awkward assembly of tools. We challenge the visualization and analytics community to embrace dashboard users as a population and design tools that blend seamlessly into their work contexts.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Dashboards are the ubiquitous means of data communication within organizations. Yet we have limited understanding of how they factor into data practices in the workplace, particularly for data workers who do not self-identify as professional analysts. We focus on data workers who use dashboards as a primary interface to data, reporting on an interview study that characterizes their data practices and the accompanying barriers to seamless data interaction. While dashboards are typically designed for data consumption, our findings show that dashboard users have far more diverse needs. To capture these activities, we frame data workers' practices as data conversations: conversations with data capture classic analysis (asking and answering data questions), while conversations through and around data involve constructing representations and narratives for sharing and communication. Dashboard users faced substantial barriers in their data conversations: their engagement with data was often intermittent, dependent on experts, and involved an awkward assembly of tools. We challenge the visualization and analytics community to embrace dashboard users as a population and design tools that blend seamlessly into their work contexts.",
"title": "Finding Their Data Voice: Practices and Challenges of Dashboard Users",
"normalizedTitle": "Finding Their Data Voice: Practices and Challenges of Dashboard Users",
"fno": "09656613",
"hasPdf": true,
"idPrefix": "cg",
"keywords": [
"Data Privacy",
"Data Visualisation",
"Mobile Computing",
"Personnel",
"Professional Aspects",
"User Interfaces",
"Dashboard Users",
"Dashboards",
"Data Capture Classic Analysis",
"Data Communication",
"Data Consumption",
"Data Conversations",
"Data Practices",
"Data Questions",
"Data Voice",
"Frame Data Workers",
"Seamless Data Interaction",
"Oral Communication",
"Social Factors",
"Documentation",
"Data Visualization",
"Statistics",
"Design Tools",
"Data Communication"
],
"authors": [
{
"givenName": "Melanie",
"surname": "Tory",
"fullName": "Melanie Tory",
"affiliation": "Northeastern University, Portland, ME, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Lyn",
"surname": "Bartram",
"fullName": "Lyn Bartram",
"affiliation": "Simon Fraser University, Surrey, BC, Canada",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Brittany",
"surname": "Fiore-Gartland",
"fullName": "Brittany Fiore-Gartland",
"affiliation": "Tableau Software, Seattle, WA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Anamaria",
"surname": "Crisan",
"fullName": "Anamaria Crisan",
"affiliation": "Tableau Research, Seattle, WA, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "01",
"pubDate": "2023-01-01 00:00:00",
"pubType": "mags",
"pages": "22-36",
"year": "2023",
"issn": "0272-1716",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ipdpsw/2016/3682/0/3682b106",
"title": "Re-Examining HPC Energy Efficiency Dashboard Elements",
"doi": null,
"abstractUrl": "/proceedings-article/ipdpsw/2016/3682b106/12OmNwKGArR",
"parentPublication": {
"id": "proceedings/ipdpsw/2016/3682/0",
"title": "2016 IEEE International Parallel and Distributed Processing Symposium Workshops (IPDPSW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/percomw/2014/2736/0/06815180",
"title": "The mobile manufacturing dashboard",
"doi": null,
"abstractUrl": "/proceedings-article/percomw/2014/06815180/12OmNxw5B1Z",
"parentPublication": {
"id": "proceedings/percomw/2014/2736/0",
"title": "2014 IEEE International Conference on Pervasive Computing and Communication Workshops (PERCOM WORKSHOPS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/lt/2017/01/07542151",
"title": "Perceiving Learning at a Glance: A Systematic Literature Review of Learning Dashboard Research",
"doi": null,
"abstractUrl": "/journal/lt/2017/01/07542151/13rRUwdIOWV",
"parentPublication": {
"id": "trans/lt",
"title": "IEEE Transactions on Learning Technologies",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2022/9519/0/951900a152",
"title": "Examining of Learners’ Dashboard Interaction in Computer Classification Testing Environment",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2022/951900a152/1FUUfw7K8BG",
"parentPublication": {
"id": "proceedings/icalt/2022/9519/0",
"title": "2022 International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09903550",
"title": "Dashboard Design Patterns",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09903550/1GZolSVvsPu",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09904866",
"title": "Visualization Design Practices in a Crisis: Behind the Scenes with COVID-19 Dashboard Creators",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09904866/1H2llxba9ws",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09906971",
"title": "DashBot: Insight-Driven Dashboard Generation Based on Deep Reinforcement Learning",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09906971/1H5EWMQX9ZK",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09911200",
"title": "MEDLEY: Intent-based Recommendations to Support Dashboard Composition<sc/>",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09911200/1Hcjm0PMkgw",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/10057994",
"title": "Dashboard Design Mining and Recommendation",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/10057994/1LbFmG2HHnW",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/09/09035622",
"title": "LADV: Deep Learning Assisted Authoring of Dashboard Visualizations From Images and Sketches",
"doi": null,
"abstractUrl": "/journal/tg/2021/09/09035622/1iaeAO11H6o",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09708430",
"articleId": "1AR0uN8gBW0",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10035722",
"articleId": "1KrcfQxslSE",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1KrcgnuUDh6",
"name": "mcg202301-09656613s1-supp1-3136545.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/mcg202301-09656613s1-supp1-3136545.pdf",
"extension": "pdf",
"size": "97.4 kB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNyPQ4Dx",
"title": "Dec.",
"year": "2012",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "18",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwIF69i",
"doi": "10.1109/TVCG.2012.246",
"abstract": "The Editor-in-Chief introduces the December 2012 issue of the IEEE Transactions on Visualization and Computer Graphics (TVCG). It contains 96 papers, including all papers presented at the IEEE Scientific Visualization Conference (Vis) and the IEEE Information Visualization Conference (InfoVis), and the 10 best papers presented at the IEEE Conference on Visual Analytics Science and Technolgy (VAST), in Seattle, Washington, USA, from 14-19 October 2012. These papers that were recommended for acceptance by the program committee of these three conferences, after having undergone a rigorous two-round review process, are published in this issue. This special issue is the culmination of the ongoing partnership between TVCG and IEEE VisWeek. The goal of this cooperation between the IEEE Computer Society and the IEEE Visualization and Graphics Technical Committee (VGTC) is to introduce many high-quality research results from the world's top visualization conferences to TVCG’s readership, while improving the overall quality and visibility of conference publications through a rigorous journal-style review. This special issue continues to demonstrate that this objective has been achieved. With a similar motivation, the authors of 23 TVCG regular papers were invited to give an oral presentation of their recent work at IEEE VisWeek 2012. This arrangement provides a unique opportunity for the VisWeek audience to keep abreast of high-quality visualization research featured in TVCG, while encouraging more TVCG authors to attend VisWeek. Ultimately, this closely coupled relationship between TVCG and VisWeek should lead to a more timely exchange of new ideas, foster rapid dissemination of recent works via an integrated forum for both publications and presentations, and further expand and grow our community.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The Editor-in-Chief introduces the December 2012 issue of the IEEE Transactions on Visualization and Computer Graphics (TVCG). It contains 96 papers, including all papers presented at the IEEE Scientific Visualization Conference (Vis) and the IEEE Information Visualization Conference (InfoVis), and the 10 best papers presented at the IEEE Conference on Visual Analytics Science and Technolgy (VAST), in Seattle, Washington, USA, from 14-19 October 2012. These papers that were recommended for acceptance by the program committee of these three conferences, after having undergone a rigorous two-round review process, are published in this issue. This special issue is the culmination of the ongoing partnership between TVCG and IEEE VisWeek. The goal of this cooperation between the IEEE Computer Society and the IEEE Visualization and Graphics Technical Committee (VGTC) is to introduce many high-quality research results from the world's top visualization conferences to TVCG’s readership, while improving the overall quality and visibility of conference publications through a rigorous journal-style review. This special issue continues to demonstrate that this objective has been achieved. With a similar motivation, the authors of 23 TVCG regular papers were invited to give an oral presentation of their recent work at IEEE VisWeek 2012. This arrangement provides a unique opportunity for the VisWeek audience to keep abreast of high-quality visualization research featured in TVCG, while encouraging more TVCG authors to attend VisWeek. Ultimately, this closely coupled relationship between TVCG and VisWeek should lead to a more timely exchange of new ideas, foster rapid dissemination of recent works via an integrated forum for both publications and presentations, and further expand and grow our community.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The Editor-in-Chief introduces the December 2012 issue of the IEEE Transactions on Visualization and Computer Graphics (TVCG). It contains 96 papers, including all papers presented at the IEEE Scientific Visualization Conference (Vis) and the IEEE Information Visualization Conference (InfoVis), and the 10 best papers presented at the IEEE Conference on Visual Analytics Science and Technolgy (VAST), in Seattle, Washington, USA, from 14-19 October 2012. These papers that were recommended for acceptance by the program committee of these three conferences, after having undergone a rigorous two-round review process, are published in this issue. This special issue is the culmination of the ongoing partnership between TVCG and IEEE VisWeek. The goal of this cooperation between the IEEE Computer Society and the IEEE Visualization and Graphics Technical Committee (VGTC) is to introduce many high-quality research results from the world's top visualization conferences to TVCG’s readership, while improving the overall quality and visibility of conference publications through a rigorous journal-style review. This special issue continues to demonstrate that this objective has been achieved. With a similar motivation, the authors of 23 TVCG regular papers were invited to give an oral presentation of their recent work at IEEE VisWeek 2012. This arrangement provides a unique opportunity for the VisWeek audience to keep abreast of high-quality visualization research featured in TVCG, while encouraging more TVCG authors to attend VisWeek. Ultimately, this closely coupled relationship between TVCG and VisWeek should lead to a more timely exchange of new ideas, foster rapid dissemination of recent works via an integrated forum for both publications and presentations, and further expand and grow our community.",
"title": "Message from the Editor-in-Chief",
"normalizedTitle": "Message from the Editor-in-Chief",
"fno": "ttg20121200ix",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [],
"authors": [
{
"givenName": "Ming",
"surname": "Lin",
"fullName": "Ming Lin",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "12",
"pubDate": "2012-12-01 00:00:00",
"pubType": "trans",
"pages": "ix-ix",
"year": "2012",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tg/2014/01/ttg2014010001",
"title": "State of the Journal",
"doi": null,
"abstractUrl": "/journal/tg/2014/01/ttg2014010001/13rRUEgarsI",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/04/ttg2014040vi",
"title": "Message from the Paper Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2014/04/ttg2014040vi/13rRUwI5Ug9",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/12/06935055",
"title": "Message from the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2014/12/06935055/13rRUwh80He",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/11/08053887",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2017/11/08053887/13rRUxBa56a",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/11/08514109",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2018/11/08514109/14M3E12c6Eo",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09927195",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09927195/1HGJm87UJvq",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/11/08855103",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2019/11/08855103/1dNHm0Dq8lG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/12/09254193",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2020/12/09254193/1oDXLUaRaDK",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09591457",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09591457/1y2Fxh3IZDG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09663062",
"title": "Message from the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09663062/1zBaC3IZK9y",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg201212000i",
"articleId": "13rRUx0gefl",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg201212000x",
"articleId": "13rRUxYIN49",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNwpGgK8",
"title": "Dec.",
"year": "2014",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "20",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwh80He",
"doi": "10.1109/TVCG.2014.2346658",
"abstract": "I am pleased to introduce the December 2014 issue of the IEEE Transactions on Visualization and Computer Graphics (TVCG). With mixed emotions, I am writing my last editorial for IEEE VIS and ending my tenure as the EIC of IEEE TVCG. Selected from a record high number of 480 submissions, this special issue contains 111 papers presented at the 2014 IEEE VIS, including the IEEE Conference on Visual Analytics Science and Technolgy (VAST), the IEEE Information Visualization Conference (InfoVis), and the IEEE Scientific Visualization Conference (SciVis) in Paris, France from 9-14 November 2014. These papers that were recommended for acceptance by the program committees of these three conferences, after having undergone a rigorous two-round review process, are published in this issue.",
"abstracts": [
{
"abstractType": "Regular",
"content": "I am pleased to introduce the December 2014 issue of the IEEE Transactions on Visualization and Computer Graphics (TVCG). With mixed emotions, I am writing my last editorial for IEEE VIS and ending my tenure as the EIC of IEEE TVCG. Selected from a record high number of 480 submissions, this special issue contains 111 papers presented at the 2014 IEEE VIS, including the IEEE Conference on Visual Analytics Science and Technolgy (VAST), the IEEE Information Visualization Conference (InfoVis), and the IEEE Scientific Visualization Conference (SciVis) in Paris, France from 9-14 November 2014. These papers that were recommended for acceptance by the program committees of these three conferences, after having undergone a rigorous two-round review process, are published in this issue.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "I am pleased to introduce the December 2014 issue of the IEEE Transactions on Visualization and Computer Graphics (TVCG). With mixed emotions, I am writing my last editorial for IEEE VIS and ending my tenure as the EIC of IEEE TVCG. Selected from a record high number of 480 submissions, this special issue contains 111 papers presented at the 2014 IEEE VIS, including the IEEE Conference on Visual Analytics Science and Technolgy (VAST), the IEEE Information Visualization Conference (InfoVis), and the IEEE Scientific Visualization Conference (SciVis) in Paris, France from 9-14 November 2014. These papers that were recommended for acceptance by the program committees of these three conferences, after having undergone a rigorous two-round review process, are published in this issue.",
"title": "Message from the Editor-in-Chief",
"normalizedTitle": "Message from the Editor-in-Chief",
"fno": "06935055",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [],
"authors": [],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "12",
"pubDate": "2014-12-01 00:00:00",
"pubType": "trans",
"pages": "x-x",
"year": "2014",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tg/2014/04/ttg2014040vi",
"title": "Message from the Paper Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2014/04/ttg2014040vi/13rRUwI5Ug9",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/12/ttg20121200ix",
"title": "Message from the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2012/12/ttg20121200ix/13rRUwIF69i",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ec/2016/01/07423841",
"title": "Message From the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/ec/2016/01/07423841/13rRUwgQpvG",
"parentPublication": {
"id": "trans/ec",
"title": "IEEE Transactions on Emerging Topics in Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/11/08053887",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2017/11/08053887/13rRUxBa56a",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/11/08514109",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2018/11/08514109/14M3E12c6Eo",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09927195",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09927195/1HGJm87UJvq",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/11/08855103",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2019/11/08855103/1dNHm0Dq8lG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/12/09254193",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2020/12/09254193/1oDXLUaRaDK",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09591457",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09591457/1y2Fxh3IZDG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09663062",
"title": "Message from the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09663062/1zBaC3IZK9y",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "06935069",
"articleId": "13rRUyYjK5j",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06935059",
"articleId": "13rRUxBa564",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNz5JC2z",
"title": "Nov.",
"year": "2017",
"issueNum": "11",
"idPrefix": "tg",
"pubType": "journal",
"volume": "23",
"label": "Nov.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxBa56a",
"doi": "10.1109/TVCG.2017.2738998",
"abstract": "Welcome the November 2017 issue of the <italic>IEEE Transactions on Visualization and Computer Graphics (TVCG)</italic>. This issue contains selected papers accepted at the IEEE International Symposium on Mixed and Augmented Reality (ISMAR), held this year in Nantes, France, from September 9 to September 13, 2017.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Welcome the November 2017 issue of the <italic>IEEE Transactions on Visualization and Computer Graphics (TVCG)</italic>. This issue contains selected papers accepted at the IEEE International Symposium on Mixed and Augmented Reality (ISMAR), held this year in Nantes, France, from September 9 to September 13, 2017.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Welcome the November 2017 issue of the IEEE Transactions on Visualization and Computer Graphics (TVCG). This issue contains selected papers accepted at the IEEE International Symposium on Mixed and Augmented Reality (ISMAR), held this year in Nantes, France, from September 9 to September 13, 2017.",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"normalizedTitle": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"fno": "08053887",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Special Issues And Sections",
"Meetings",
"Augmented Reality"
],
"authors": [
{
"givenName": "Leila",
"surname": "De Floriani",
"fullName": "Leila De Floriani",
"affiliation": "University of Maryland, College Park, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Dieter",
"surname": "Schmalstieg",
"fullName": "Dieter Schmalstieg",
"affiliation": "Graz University of Technology, Austria",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "11",
"pubDate": "2017-11-01 00:00:00",
"pubType": "trans",
"pages": "2365-2365",
"year": "2017",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tg/2014/04/ttg2014040vi",
"title": "Message from the Paper Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2014/04/ttg2014040vi/13rRUwI5Ug9",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/05/09754286",
"title": "IEEE VR 2022 Message from the Journal Paper Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09754286/1Cpd7Bwusk8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09927176",
"title": "Message from the ISMAR 2022 Science and Technology Journal Program Chairs and TVCG Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09927176/1HGJ8mlD3S8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09927195",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09927195/1HGJm87UJvq",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/11/08855103",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2019/11/08855103/1dNHm0Dq8lG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/11/08855105",
"title": "Message from the ISMAR 2019 Science and Technology Program Chairs and <italic>TVCG</italic> Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2019/11/08855105/1dNHma690d2",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/09052628",
"title": "Introducing the IEEE Virtual Reality 2020 Special Issue",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/09052628/1iFLKo4ODvO",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/12/09254194",
"title": "Message from the ISMAR 2020 Science and Technology Program Chairs and <italic>TVCG</italic> Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2020/12/09254194/1oDXMHvn1aU",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09591492",
"title": "Message from the ISMAR 2021 Science and Technology Journal Program Chairs and TVCG Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09591492/1y2FvGMxBuM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09591457",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09591457/1y2Fxh3IZDG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": null,
"next": {
"fno": "08007327",
"articleId": "13rRUyft7D7",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvAiSp1",
"title": "Nov.",
"year": "2018",
"issueNum": "11",
"idPrefix": "tg",
"pubType": "journal",
"volume": "24",
"label": "Nov.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "14M3E12c6Eo",
"doi": "10.1109/TVCG.2018.2868403",
"abstract": "Wwelcome to the November 2018 issue of the IEEE Transactions on Visualization and Computer Graphics (TVCG). This issue contains selected papers accepted at the IEEE International Symposium on Mixed and Augmented Reality (ISMAR), held this year in Munich, Germany, from October 16 to October 20, 2018.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Wwelcome to the November 2018 issue of the IEEE Transactions on Visualization and Computer Graphics (TVCG). This issue contains selected papers accepted at the IEEE International Symposium on Mixed and Augmented Reality (ISMAR), held this year in Munich, Germany, from October 16 to October 20, 2018.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Wwelcome to the November 2018 issue of the IEEE Transactions on Visualization and Computer Graphics (TVCG). This issue contains selected papers accepted at the IEEE International Symposium on Mixed and Augmented Reality (ISMAR), held this year in Munich, Germany, from October 16 to October 20, 2018.",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"normalizedTitle": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"fno": "08514109",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [],
"authors": [
{
"givenName": "Leila",
"surname": "De Floriani",
"fullName": "Leila De Floriani",
"affiliation": "University of Maryland at College Park, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Dieter",
"surname": "Schmalstieg",
"fullName": "Dieter Schmalstieg",
"affiliation": "Graz University of Technology, Austria",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "11",
"pubDate": "2018-11-01 00:00:00",
"pubType": "trans",
"pages": "2843",
"year": "2018",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tg/2012/12/ttg20121200ix",
"title": "Message from the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2012/12/ttg20121200ix/13rRUwIF69i",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/12/06935055",
"title": "Message from the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2014/12/06935055/13rRUwh80He",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/11/08053887",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2017/11/08053887/13rRUxBa56a",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/11/08514064",
"title": "Message from the ISMAR 2018 Science and Technology Program Chairs and TVCG Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2018/11/08514064/14M3DZSFbS8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699218",
"title": "Message from the ISMAR 2018 Science and Technology Program Chairs and TVCG Guest Editors",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699218/19F1TteG3QI",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09927195",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09927195/1HGJm87UJvq",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/11/08855103",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2019/11/08855103/1dNHm0Dq8lG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/11/08855105",
"title": "Message from the ISMAR 2019 Science and Technology Program Chairs and <italic>TVCG</italic> Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2019/11/08855105/1dNHma690d2",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/12/09254193",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2020/12/09254193/1oDXLUaRaDK",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09591457",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09591457/1y2Fxh3IZDG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": null,
"next": {
"fno": "08514064",
"articleId": "14M3DZSFbS8",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "1HGJ6XQen96",
"title": "Nov.",
"year": "2022",
"issueNum": "11",
"idPrefix": "tg",
"pubType": "journal",
"volume": "28",
"label": "Nov.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1HGJm87UJvq",
"doi": "10.1109/TVCG.2022.3203810",
"abstract": "Welcome to the November 2022 issue of the IEEE Transactions on Visualization and Computer Graphics (TVCG). This issue contains selected papers accepted at the IEEE International Symposium on Mixed and Augmented Reality (ISMAR). The conference took place in Singapore from October 17-22,2022 in hybrid mode.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Welcome to the November 2022 issue of the IEEE Transactions on Visualization and Computer Graphics (TVCG). This issue contains selected papers accepted at the IEEE International Symposium on Mixed and Augmented Reality (ISMAR). The conference took place in Singapore from October 17-22,2022 in hybrid mode.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Welcome to the November 2022 issue of the IEEE Transactions on Visualization and Computer Graphics (TVCG). This issue contains selected papers accepted at the IEEE International Symposium on Mixed and Augmented Reality (ISMAR). The conference took place in Singapore from October 17-22,2022 in hybrid mode.",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"normalizedTitle": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"fno": "09927195",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [],
"authors": [
{
"givenName": "Klaus",
"surname": "Mueller",
"fullName": "Klaus Mueller",
"affiliation": "Stony Brook University (State University of New York), USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Doug",
"surname": "Bowman",
"fullName": "Doug Bowman",
"affiliation": "Virginia Tech, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "11",
"pubDate": "2022-11-01 00:00:00",
"pubType": "trans",
"pages": "v-v",
"year": "2022",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tg/2012/12/ttg20121200ix",
"title": "Message from the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2012/12/ttg20121200ix/13rRUwIF69i",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/12/06935055",
"title": "Message from the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2014/12/06935055/13rRUwh80He",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/11/08053887",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2017/11/08053887/13rRUxBa56a",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/11/08514109",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2018/11/08514109/14M3E12c6Eo",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09927176",
"title": "Message from the ISMAR 2022 Science and Technology Journal Program Chairs and TVCG Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09927176/1HGJ8mlD3S8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/11/08855103",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2019/11/08855103/1dNHm0Dq8lG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/12/09254193",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2020/12/09254193/1oDXLUaRaDK",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/12/09254194",
"title": "Message from the ISMAR 2020 Science and Technology Program Chairs and <italic>TVCG</italic> Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2020/12/09254194/1oDXMHvn1aU",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800z020",
"title": "Message from the ISMAR 2020 Science and Technology Program Chairs and TVCG Guest Editors",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800z020/1pysy7gKfLO",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09591457",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09591457/1y2Fxh3IZDG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09930682",
"articleId": "1HMP0lH8r9m",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09927176",
"articleId": "1HGJ8mlD3S8",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNy5hRch",
"title": "Nov.",
"year": "2019",
"issueNum": "11",
"idPrefix": "tg",
"pubType": "journal",
"volume": "25",
"label": "Nov.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1dNHm0Dq8lG",
"doi": "10.1109/TVCG.2019.2934698",
"abstract": "Welcome to the November 2019 issue of the <italic>IEEE Transactions on Visualization and Computer Graphics</italic> (<italic>TVCG</italic>). This issue contains selected papers accepted at the IEEE International Symposium on Mixed and Augmented Reality (ISMAR), held this year in Beijing, China from October 14 to October 18, 2019.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Welcome to the November 2019 issue of the <italic>IEEE Transactions on Visualization and Computer Graphics</italic> (<italic>TVCG</italic>). This issue contains selected papers accepted at the IEEE International Symposium on Mixed and Augmented Reality (ISMAR), held this year in Beijing, China from October 14 to October 18, 2019.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Welcome to the November 2019 issue of the IEEE Transactions on Visualization and Computer Graphics (TVCG). This issue contains selected papers accepted at the IEEE International Symposium on Mixed and Augmented Reality (ISMAR), held this year in Beijing, China from October 14 to October 18, 2019.",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"normalizedTitle": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"fno": "08855103",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Visualization",
"Augmented Reality",
"Rendering Computer Graphics",
"Haptic Interfaces",
"Special Issues And Sections",
"Production"
],
"authors": [
{
"givenName": "Klaus",
"surname": "Mueller",
"fullName": "Klaus Mueller",
"affiliation": "EIC IEEE TVCG, Stony Brook University, State University of New York, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Doug",
"surname": "Bowman",
"fullName": "Doug Bowman",
"affiliation": "EIC IEEE TVCG, Stony Brook University, State University of New York, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "11",
"pubDate": "2019-11-01 00:00:00",
"pubType": "trans",
"pages": "3049-3049",
"year": "2019",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tg/2017/11/08053887",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2017/11/08053887/13rRUxBa56a",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/11/08514109",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2018/11/08514109/14M3E12c6Eo",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/05/09754286",
"title": "IEEE VR 2022 Message from the Journal Paper Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09754286/1Cpd7Bwusk8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09927176",
"title": "Message from the ISMAR 2022 Science and Technology Journal Program Chairs and TVCG Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09927176/1HGJ8mlD3S8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09927195",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09927195/1HGJm87UJvq",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/11/08855105",
"title": "Message from the ISMAR 2019 Science and Technology Program Chairs and <italic>TVCG</italic> Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2019/11/08855105/1dNHma690d2",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/12/09254193",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2020/12/09254193/1oDXLUaRaDK",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/12/09254194",
"title": "Message from the ISMAR 2020 Science and Technology Program Chairs and <italic>TVCG</italic> Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2020/12/09254194/1oDXMHvn1aU",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09591492",
"title": "Message from the ISMAR 2021 Science and Technology Journal Program Chairs and TVCG Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09591492/1y2FvGMxBuM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09591457",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09591457/1y2Fxh3IZDG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": null,
"next": {
"fno": "08855105",
"articleId": "1dNHma690d2",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNBBhN8N",
"title": "Dec.",
"year": "2020",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "26",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1oDXLUaRaDK",
"doi": "10.1109/TVCG.2020.3021811",
"abstract": "Welcome to the December 2020 issue of the <italic>IEEE Transactions on Visualization and Computer Graphics (TVCG)</italic>. This issue contains selected papers accepted at the IEEE International Symposium on Mixed and Augmented Reality (ISMAR). The conference was scheduled to take place in Recife, Porto de Galinhas (Brazil) but was moved to a virtual event due to the global coronavirus pandemic. This virtual conference took place from November 9-13, 2020.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Welcome to the December 2020 issue of the <italic>IEEE Transactions on Visualization and Computer Graphics (TVCG)</italic>. This issue contains selected papers accepted at the IEEE International Symposium on Mixed and Augmented Reality (ISMAR). The conference was scheduled to take place in Recife, Porto de Galinhas (Brazil) but was moved to a virtual event due to the global coronavirus pandemic. This virtual conference took place from November 9-13, 2020.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Welcome to the December 2020 issue of the IEEE Transactions on Visualization and Computer Graphics (TVCG). This issue contains selected papers accepted at the IEEE International Symposium on Mixed and Augmented Reality (ISMAR). The conference was scheduled to take place in Recife, Porto de Galinhas (Brazil) but was moved to a virtual event due to the global coronavirus pandemic. This virtual conference took place from November 9-13, 2020.",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"normalizedTitle": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"fno": "09254193",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [],
"authors": [
{
"givenName": "Klaus",
"surname": "Mueller",
"fullName": "Klaus Mueller",
"affiliation": "Stony Brook University, State University of New York, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Doug",
"surname": "Bowman",
"fullName": "Doug Bowman",
"affiliation": "Virginia Tech, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "12",
"pubDate": "2020-12-01 00:00:00",
"pubType": "trans",
"pages": "3386-3386",
"year": "2020",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tg/2017/11/08053887",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2017/11/08053887/13rRUxBa56a",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "letters/ca/2013/01/lca2013010002",
"title": "A Message from the New Editor-in-Chief and Introduction of New Associate Editors",
"doi": null,
"abstractUrl": "/journal/ca/2013/01/lca2013010002/13rRUxbCbkD",
"parentPublication": {
"id": "letters/ca",
"title": "IEEE Computer Architecture Letters",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/11/08514109",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2018/11/08514109/14M3E12c6Eo",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09927195",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09927195/1HGJm87UJvq",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/11/08855103",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2019/11/08855103/1dNHm0Dq8lG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/12/09254194",
"title": "Message from the ISMAR 2020 Science and Technology Program Chairs and <italic>TVCG</italic> Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2020/12/09254194/1oDXMHvn1aU",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800z022",
"title": "Message from the ISMAR 2020 Science and Technology Program Chairs",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800z022/1pysusxMUiQ",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800z020",
"title": "Message from the ISMAR 2020 Science and Technology Program Chairs and TVCG Guest Editors",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800z020/1pysy7gKfLO",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09591457",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09591457/1y2Fxh3IZDG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09663062",
"title": "Message from the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09663062/1zBaC3IZK9y",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": null,
"next": {
"fno": "09254194",
"articleId": "1oDXMHvn1aU",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "1y2FkV9ZFKM",
"title": "Nov.",
"year": "2021",
"issueNum": "11",
"idPrefix": "tg",
"pubType": "journal",
"volume": "27",
"label": "Nov.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1y2FvGMxBuM",
"doi": "10.1109/TVCG.2021.3110544",
"abstract": "In this special issue of <italic>IEEE Transactions on Visualization and Computer Graphics (TVCG)</italic>, we are pleased to present the journal papers from the 20th IEEE International Symposium on Mixed and Augmented Reality (ISMAR 2021), which will be held as a virtual conference between October 4 and 8, 2021. ISMAR continues the over twenty year long tradition of IWAR, ISMR, and ISAR, and is the premier conference for Mixed and Augmented Reality in the world.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this special issue of <italic>IEEE Transactions on Visualization and Computer Graphics (TVCG)</italic>, we are pleased to present the journal papers from the 20th IEEE International Symposium on Mixed and Augmented Reality (ISMAR 2021), which will be held as a virtual conference between October 4 and 8, 2021. ISMAR continues the over twenty year long tradition of IWAR, ISMR, and ISAR, and is the premier conference for Mixed and Augmented Reality in the world.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this special issue of IEEE Transactions on Visualization and Computer Graphics (TVCG), we are pleased to present the journal papers from the 20th IEEE International Symposium on Mixed and Augmented Reality (ISMAR 2021), which will be held as a virtual conference between October 4 and 8, 2021. ISMAR continues the over twenty year long tradition of IWAR, ISMR, and ISAR, and is the premier conference for Mixed and Augmented Reality in the world.",
"title": "Message from the ISMAR 2021 Science and Technology Journal Program Chairs and TVCG Guest Editors",
"normalizedTitle": "Message from the ISMAR 2021 Science and Technology Journal Program Chairs and TVCG Guest Editors",
"fno": "09591492",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Special Issues And Sections",
"Augmented Reality",
"Meetings"
],
"authors": [
{
"givenName": "Daisuke",
"surname": "Iwai",
"fullName": "Daisuke Iwai",
"affiliation": "Osaka University, Japan",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Guillaume",
"surname": "Moreau",
"fullName": "Guillaume Moreau",
"affiliation": "IMT Atlantique, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Denis",
"surname": "Kalkofen",
"fullName": "Denis Kalkofen",
"affiliation": "Graz University of Technology, Austria",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Tabitha",
"surname": "Peck",
"fullName": "Tabitha Peck",
"affiliation": "Davidson College, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "11",
"pubDate": "2021-11-01 00:00:00",
"pubType": "trans",
"pages": "4086-4086",
"year": "2021",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tg/2017/11/08053887",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2017/11/08053887/13rRUxBa56a",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/11/08514064",
"title": "Message from the ISMAR 2018 Science and Technology Program Chairs and TVCG Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2018/11/08514064/14M3DZSFbS8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699218",
"title": "Message from the ISMAR 2018 Science and Technology Program Chairs and TVCG Guest Editors",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699218/19F1TteG3QI",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/05/09754286",
"title": "IEEE VR 2022 Message from the Journal Paper Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09754286/1Cpd7Bwusk8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09927176",
"title": "Message from the ISMAR 2022 Science and Technology Journal Program Chairs and TVCG Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09927176/1HGJ8mlD3S8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/11/08855105",
"title": "Message from the ISMAR 2019 Science and Technology Program Chairs and <italic>TVCG</italic> Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2019/11/08855105/1dNHma690d2",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/12/09254194",
"title": "Message from the ISMAR 2020 Science and Technology Program Chairs and <italic>TVCG</italic> Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2020/12/09254194/1oDXMHvn1aU",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800z020",
"title": "Message from the ISMAR 2020 Science and Technology Program Chairs and TVCG Guest Editors",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800z020/1pysy7gKfLO",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/05/09405571",
"title": "Introducing the IEEE Virtual Reality 2021 Special Issue",
"doi": null,
"abstractUrl": "/journal/tg/2021/05/09405571/1sP18PmVuQU",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/06/09430173",
"title": "Guest Editors' Introduction: Special Section on IEEE PacificVis 2021",
"doi": null,
"abstractUrl": "/journal/tg/2021/06/09430173/1tzuiF6azcs",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09591457",
"articleId": "1y2Fxh3IZDG",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09523830",
"articleId": "1wpqs1dtKes",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "1zBamVZHyne",
"title": "Jan.",
"year": "2022",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "28",
"label": "Jan.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1zBaC3IZK9y",
"doi": "10.1109/TVCG.2021.3114914",
"abstract": "Welcome to the January 2022 issue of the <italic>IEEE Transactions on Visualization and Computer Graphics (TVCG).</italic> This is traditionally the time of the year when we feature the IEEE VIS special issue and this year is no different. But there is one significant difference — IEEE VIS is now fully unified. There is no more IEEE VAST, IEEE InfoVis and IEEE SciVis; there is just IEEE VIS, tiled into six overarching areas: Applications (24 papers), Analytics & Decisions (19), Theoretical & Empirical (24), Representations & Interaction (19), Data Transformations (14) and Systems & Rendering (11). The conference was scheduled to take place in New Orleans (LA) but was moved to a virtual event due to the global coronavirus pandemic. This virtual conference took place from October 24-29, 2021. Contained in this special issue are the top 110 papers selected by the unified program committee from a total of 441 submissions. In addition, this issue also contains the Best Paper of the 2021 IEEE Large Scale Data Analysis and Visualization (LDAV) Symposium and the Best Paper of the 2021 IEEE Symposium on Visualization for Cyber Security (VizSec).",
"abstracts": [
{
"abstractType": "Regular",
"content": "Welcome to the January 2022 issue of the <italic>IEEE Transactions on Visualization and Computer Graphics (TVCG).</italic> This is traditionally the time of the year when we feature the IEEE VIS special issue and this year is no different. But there is one significant difference — IEEE VIS is now fully unified. There is no more IEEE VAST, IEEE InfoVis and IEEE SciVis; there is just IEEE VIS, tiled into six overarching areas: Applications (24 papers), Analytics & Decisions (19), Theoretical & Empirical (24), Representations & Interaction (19), Data Transformations (14) and Systems & Rendering (11). The conference was scheduled to take place in New Orleans (LA) but was moved to a virtual event due to the global coronavirus pandemic. This virtual conference took place from October 24-29, 2021. Contained in this special issue are the top 110 papers selected by the unified program committee from a total of 441 submissions. In addition, this issue also contains the Best Paper of the 2021 IEEE Large Scale Data Analysis and Visualization (LDAV) Symposium and the Best Paper of the 2021 IEEE Symposium on Visualization for Cyber Security (VizSec).",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Welcome to the January 2022 issue of the IEEE Transactions on Visualization and Computer Graphics (TVCG). This is traditionally the time of the year when we feature the IEEE VIS special issue and this year is no different. But there is one significant difference — IEEE VIS is now fully unified. There is no more IEEE VAST, IEEE InfoVis and IEEE SciVis; there is just IEEE VIS, tiled into six overarching areas: Applications (24 papers), Analytics & Decisions (19), Theoretical & Empirical (24), Representations & Interaction (19), Data Transformations (14) and Systems & Rendering (11). The conference was scheduled to take place in New Orleans (LA) but was moved to a virtual event due to the global coronavirus pandemic. This virtual conference took place from October 24-29, 2021. Contained in this special issue are the top 110 papers selected by the unified program committee from a total of 441 submissions. In addition, this issue also contains the Best Paper of the 2021 IEEE Large Scale Data Analysis and Visualization (LDAV) Symposium and the Best Paper of the 2021 IEEE Symposium on Visualization for Cyber Security (VizSec).",
"title": "Message from the Editor-in-Chief",
"normalizedTitle": "Message from the Editor-in-Chief",
"fno": "09663062",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [],
"authors": [
{
"givenName": "Klaus",
"surname": "Mueller",
"fullName": "Klaus Mueller",
"affiliation": "Stony Brook University, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "01",
"pubDate": "2022-01-01 00:00:00",
"pubType": "trans",
"pages": "xii-xii",
"year": "2022",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tg/2012/12/ttg20121200ix",
"title": "Message from the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2012/12/ttg20121200ix/13rRUwIF69i",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ec/2016/01/07423841",
"title": "Message From the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/ec/2016/01/07423841/13rRUwgQpvG",
"parentPublication": {
"id": "trans/ec",
"title": "IEEE Transactions on Emerging Topics in Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/12/06935055",
"title": "Message from the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2014/12/06935055/13rRUwh80He",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/11/08053887",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2017/11/08053887/13rRUxBa56a",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/11/08514109",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2018/11/08514109/14M3E12c6Eo",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09927195",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09927195/1HGJm87UJvq",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/11/08855103",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2019/11/08855103/1dNHm0Dq8lG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/12/09254193",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2020/12/09254193/1oDXLUaRaDK",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09591457",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09591457/1y2Fxh3IZDG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09663061",
"title": "Preface",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09663061/1zBb8giCGEU",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09663068",
"articleId": "1zBb6esE5tC",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09663063",
"articleId": "1zBaynLGiHe",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "1zBamVZHyne",
"title": "Jan.",
"year": "2022",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "28",
"label": "Jan.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1zBb8giCGEU",
"doi": "10.1109/TVCG.2021.3114891",
"abstract": "This February 2022 issue of the <italic>IEEE Transactions on Visualization and Computer Graphics (TVCG)</italic> contains the proceedings of IEEE VIS 2021, held online on October 24-29, 2021, with General Chairs from Tulane University and Universidade de Sao Paulo. With IEEE VIS 2021, the conference series is in its 32nd year.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This February 2022 issue of the <italic>IEEE Transactions on Visualization and Computer Graphics (TVCG)</italic> contains the proceedings of IEEE VIS 2021, held online on October 24-29, 2021, with General Chairs from Tulane University and Universidade de Sao Paulo. With IEEE VIS 2021, the conference series is in its 32nd year.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This February 2022 issue of the IEEE Transactions on Visualization and Computer Graphics (TVCG) contains the proceedings of IEEE VIS 2021, held online on October 24-29, 2021, with General Chairs from Tulane University and Universidade de Sao Paulo. With IEEE VIS 2021, the conference series is in its 32nd year.",
"title": "Preface",
"normalizedTitle": "Preface",
"fno": "09663061",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [],
"authors": [
{
"givenName": "Bongshin",
"surname": "Lee",
"fullName": "Bongshin Lee",
"affiliation": "Microsoft Research, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Silvia",
"surname": "Miksch",
"fullName": "Silvia Miksch",
"affiliation": "Vienna University of Technology (TU Wien), Austria",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Anders",
"surname": "Ynnerman",
"fullName": "Anders Ynnerman",
"affiliation": "Linköping University, Sweden",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Anastasia",
"surname": "Bezerianos",
"fullName": "Anastasia Bezerianos",
"affiliation": "Univ. Paris-Saclay, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jian",
"surname": "Chen",
"fullName": "Jian Chen",
"affiliation": "The Ohio State University, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Wei",
"surname": "Chen",
"fullName": "Wei Chen",
"affiliation": "Zhejiang University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Christopher",
"surname": "Collins",
"fullName": "Christopher Collins",
"affiliation": "Ontario Tech University, Canada",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Michael",
"surname": "Gleicher",
"fullName": "Michael Gleicher",
"affiliation": "University of Wisconsin - Madison, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Eduard",
"surname": "Gröller",
"fullName": "Eduard Gröller",
"affiliation": "TU Wien, Austria",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Alexander",
"surname": "Lex",
"fullName": "Alexander Lex",
"affiliation": "University of Utah, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Bernhard",
"surname": "Preim",
"fullName": "Bernhard Preim",
"affiliation": "Otto-von-Guericke-University of Magdeburg, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jinwook",
"surname": "Seo",
"fullName": "Jinwook Seo",
"affiliation": "Seoul National University, South Korea",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ruediger",
"surname": "Westermann",
"fullName": "Ruediger Westermann",
"affiliation": "Technical University of Munich, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jing",
"surname": "Yang",
"fullName": "Jing Yang",
"affiliation": "University of North Carolina at Charlotte, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xiaoru",
"surname": "Yuan",
"fullName": "Xiaoru Yuan",
"affiliation": "Peking University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Han-Wei",
"surname": "Shen",
"fullName": "Han-Wei Shen",
"affiliation": "The Ohio State University, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jean-Daniel",
"surname": "Fekete",
"fullName": "Jean-Daniel Fekete",
"affiliation": "Inria, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Shixia",
"surname": "Liu",
"fullName": "Shixia Liu",
"affiliation": "Tsinghua University, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "01",
"pubDate": "2022-01-01 00:00:00",
"pubType": "trans",
"pages": "xiv-xxiii",
"year": "2022",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tg/2019/01/08570935",
"title": "Preface",
"doi": null,
"abstractUrl": "/journal/tg/2019/01/08570935/17D45XzbnJm",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08930151",
"title": "Preface",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08930151/1fEi0WcYUXS",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09340112",
"title": "Preface",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09340112/1qMK1vPtB7i",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/05/09405571",
"title": "Introducing the IEEE Virtual Reality 2021 Special Issue",
"doi": null,
"abstractUrl": "/journal/tg/2021/05/09405571/1sP18PmVuQU",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09591492",
"title": "Message from the ISMAR 2021 Science and Technology Journal Program Chairs and TVCG Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09591492/1y2FvGMxBuM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09591457",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09591457/1y2Fxh3IZDG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis4good/2021/1366/0/136600z006",
"title": "VIS4Good 2021 [Workshop Description and Committees]",
"doi": null,
"abstractUrl": "/proceedings-article/vis4good/2021/136600z006/1yNiRDWGgcE",
"parentPublication": {
"id": "proceedings/vis4good/2021/1366/0",
"title": "2021 IEEE Workshop on Visualization for Social Good (VIS4Good)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09663062",
"title": "Message from the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09663062/1zBaC3IZK9y",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09663063",
"title": "Welcome",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09663063/1zBaynLGiHe",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibe/2021/4261/0/09635338",
"title": "Preface",
"doi": null,
"abstractUrl": "/proceedings-article/bibe/2021/09635338/1zmvltGBJ72",
"parentPublication": {
"id": "proceedings/bibe/2021/4261/0",
"title": "2021 IEEE 21st International Conference on Bioinformatics and Bioengineering (BIBE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09663063",
"articleId": "1zBaynLGiHe",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09663069",
"articleId": "1zBazUF67Is",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvAiSp1",
"title": "Nov.",
"year": "2018",
"issueNum": "11",
"idPrefix": "tg",
"pubType": "journal",
"volume": "24",
"label": "Nov.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "14M3DZSFbS8",
"doi": "10.1109/TVCG.2018.2870548",
"abstract": "In this special issue of IEEE Transactions on Visualization and Computer Graphics (TVCG), we are pleased to present the TVCG papers from the 17th IEEE International Symposium on Mixed and Augmented Reality (ISMAR 2018), held October 16–20 in Munich, Germany. ISMAR continues the 20-year long tradition of IWAR, ISMR, and ISAR, and is undoubtedly the premier conference for mixed and augmented reality in the world.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this special issue of IEEE Transactions on Visualization and Computer Graphics (TVCG), we are pleased to present the TVCG papers from the 17th IEEE International Symposium on Mixed and Augmented Reality (ISMAR 2018), held October 16–20 in Munich, Germany. ISMAR continues the 20-year long tradition of IWAR, ISMR, and ISAR, and is undoubtedly the premier conference for mixed and augmented reality in the world.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this special issue of IEEE Transactions on Visualization and Computer Graphics (TVCG), we are pleased to present the TVCG papers from the 17th IEEE International Symposium on Mixed and Augmented Reality (ISMAR 2018), held October 16–20 in Munich, Germany. ISMAR continues the 20-year long tradition of IWAR, ISMR, and ISAR, and is undoubtedly the premier conference for mixed and augmented reality in the world.",
"title": "Message from the ISMAR 2018 Science and Technology Program Chairs and TVCG Guest Editors",
"normalizedTitle": "Message from the ISMAR 2018 Science and Technology Program Chairs and TVCG Guest Editors",
"fno": "08514064",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [],
"authors": [
{
"givenName": "David",
"surname": "Chu",
"fullName": "David Chu",
"affiliation": "Google, US",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Joseph L.",
"surname": "Gabbard",
"fullName": "Joseph L. Gabbard",
"affiliation": "Virginia Tech, US",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jens",
"surname": "Grubert",
"fullName": "Jens Grubert",
"affiliation": "Coburg University of Applied Sciences and Arts, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Holger",
"surname": "Regenbrecht",
"fullName": "Holger Regenbrecht",
"affiliation": "University of Otago, New Zealand",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "11",
"pubDate": "2018-11-01 00:00:00",
"pubType": "trans",
"pages": "2844-2845",
"year": "2018",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2013/2869/0/06671748",
"title": "Program chairs",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2013/06671748/12OmNCmGNMp",
"parentPublication": {
"id": "proceedings/ismar/2013/2869/0",
"title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2010/9343/0/05643536",
"title": "From the Science & Technology program chairs",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2010/05643536/12OmNrHSD0H",
"parentPublication": {
"id": "proceedings/ismar/2010/9343/0",
"title": "2010 IEEE International Symposium on Mixed and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2012/4660/0/06402518",
"title": "Preface",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2012/06402518/12OmNx1qV2x",
"parentPublication": {
"id": "proceedings/ismar/2012/4660/0",
"title": "2012 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699218",
"title": "Message from the ISMAR 2018 Science and Technology Program Chairs and TVCG Guest Editors",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699218/19F1TteG3QI",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09927176",
"title": "Message from the ISMAR 2022 Science and Technology Journal Program Chairs and TVCG Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09927176/1HGJ8mlD3S8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/11/08855105",
"title": "Message from the ISMAR 2019 Science and Technology Program Chairs and <italic>TVCG</italic> Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2019/11/08855105/1dNHma690d2",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/12/09254194",
"title": "Message from the ISMAR 2020 Science and Technology Program Chairs and <italic>TVCG</italic> Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2020/12/09254194/1oDXMHvn1aU",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800z022",
"title": "Message from the ISMAR 2020 Science and Technology Program Chairs",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800z022/1pysusxMUiQ",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800z020",
"title": "Message from the ISMAR 2020 Science and Technology Program Chairs and TVCG Guest Editors",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800z020/1pysy7gKfLO",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09591492",
"title": "Message from the ISMAR 2021 Science and Technology Journal Program Chairs and TVCG Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09591492/1y2FvGMxBuM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "08514109",
"articleId": "14M3E12c6Eo",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08493594",
"articleId": "14M3DYV3qyA",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "1CpcG1DISYM",
"title": "May",
"year": "2022",
"issueNum": "05",
"idPrefix": "tg",
"pubType": "journal",
"volume": "28",
"label": "May",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1Cpd7Bwusk8",
"doi": "10.1109/TVCG.2022.3156766",
"abstract": "In this special issue of <italic>IEEE Transactions on Visualization and Computer Graphics</italic> (TVCG), we are pleased to present a subset of papers from the 29th IEEE Conference on Virtual Reality and 3D User Interfaces (IEEE VR 2022), held virtually March 12-16, 2022, in Christchurch, New Zealand.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this special issue of <italic>IEEE Transactions on Visualization and Computer Graphics</italic> (TVCG), we are pleased to present a subset of papers from the 29th IEEE Conference on Virtual Reality and 3D User Interfaces (IEEE VR 2022), held virtually March 12-16, 2022, in Christchurch, New Zealand.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this special issue of IEEE Transactions on Visualization and Computer Graphics (TVCG), we are pleased to present a subset of papers from the 29th IEEE Conference on Virtual Reality and 3D User Interfaces (IEEE VR 2022), held virtually March 12-16, 2022, in Christchurch, New Zealand.",
"title": "IEEE VR 2022 Message from the Journal Paper Chairs and Guest Editors",
"normalizedTitle": "IEEE VR 2022 Message from the Journal Paper Chairs and Guest Editors",
"fno": "09754286",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Special Issues And Sections",
"Meetings",
"Visualization",
"Computer Graphics"
],
"authors": [
{
"givenName": "Luciana",
"surname": "Nedel",
"fullName": "Luciana Nedel",
"affiliation": "Federal University of Rio Grande do Sul - UFRGS, Brazil",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ferran",
"surname": "Argelaguet",
"fullName": "Ferran Argelaguet",
"affiliation": "Inria Rennes, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Lili",
"surname": "Wang",
"fullName": "Lili Wang",
"affiliation": "Beihang University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jeanine",
"surname": "Stefannuci",
"fullName": "Jeanine Stefannuci",
"affiliation": "University of Utah, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Daisuke",
"surname": "Iwai",
"fullName": "Daisuke Iwai",
"affiliation": "Osaka University, Japan",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "05",
"pubDate": "2022-05-01 00:00:00",
"pubType": "trans",
"pages": "vii-vii",
"year": "2022",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tg/2013/04/ttg2013040000vi",
"title": "Message from the Paper Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2013/04/ttg2013040000vi/13rRUILtJma",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/07/ttg2013071076",
"title": "Guest Editors' Introduction: Special Section on the IEEE Conference on Visual Analytics Science and Technology (VAST)",
"doi": null,
"abstractUrl": "/journal/tg/2013/07/ttg2013071076/13rRUxOdD2D",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/05/09754285",
"title": "IEEE VR 2022 Introducing the Special Issue",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09754285/1CpcIar9LS8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/06/09766260",
"title": "Guest Editors' Introduction: Special Section on IEEE PacificVis 2022",
"doi": null,
"abstractUrl": "/journal/tg/2022/06/09766260/1D34QjpFGyQ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09927176",
"title": "Message from the ISMAR 2022 Science and Technology Journal Program Chairs and TVCG Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09927176/1HGJ8mlD3S8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/11/08855105",
"title": "Message from the ISMAR 2019 Science and Technology Program Chairs and <italic>TVCG</italic> Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2019/11/08855105/1dNHma690d2",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/09052628",
"title": "Introducing the IEEE Virtual Reality 2020 Special Issue",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/09052628/1iFLKo4ODvO",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/12/09254194",
"title": "Message from the ISMAR 2020 Science and Technology Program Chairs and <italic>TVCG</italic> Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2020/12/09254194/1oDXMHvn1aU",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/05/09405571",
"title": "Introducing the IEEE Virtual Reality 2021 Special Issue",
"doi": null,
"abstractUrl": "/journal/tg/2021/05/09405571/1sP18PmVuQU",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09591492",
"title": "Message from the ISMAR 2021 Science and Technology Journal Program Chairs and TVCG Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09591492/1y2FvGMxBuM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09754285",
"articleId": "1CpcIar9LS8",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09754291",
"articleId": "1Cpd19CezAI",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "1HGJ6XQen96",
"title": "Nov.",
"year": "2022",
"issueNum": "11",
"idPrefix": "tg",
"pubType": "journal",
"volume": "28",
"label": "Nov.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1HGJ8mlD3S8",
"doi": "10.1109/TVCG.2022.3203811",
"abstract": "In this special issue of IEEE Transactions on Visualization and Computer Graphics (TVCG), we are pleased to present the journal papers from the 21st IEEE International Symposium on Mixed and Augmented Reality (ISMAR 2022), which will be held as a hybrid conference between October 17 and 21, 2022 in Singapore. ISMAR continues the over twenty year long tradition of IWAR, ISMR, and ISAR, and is the premier conference for Mixed and Augmented Reality in the world.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this special issue of IEEE Transactions on Visualization and Computer Graphics (TVCG), we are pleased to present the journal papers from the 21st IEEE International Symposium on Mixed and Augmented Reality (ISMAR 2022), which will be held as a hybrid conference between October 17 and 21, 2022 in Singapore. ISMAR continues the over twenty year long tradition of IWAR, ISMR, and ISAR, and is the premier conference for Mixed and Augmented Reality in the world.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this special issue of IEEE Transactions on Visualization and Computer Graphics (TVCG), we are pleased to present the journal papers from the 21st IEEE International Symposium on Mixed and Augmented Reality (ISMAR 2022), which will be held as a hybrid conference between October 17 and 21, 2022 in Singapore. ISMAR continues the over twenty year long tradition of IWAR, ISMR, and ISAR, and is the premier conference for Mixed and Augmented Reality in the world.",
"title": "Message from the ISMAR 2022 Science and Technology Journal Program Chairs and TVCG Guest Editors",
"normalizedTitle": "Message from the ISMAR 2022 Science and Technology Journal Program Chairs and TVCG Guest Editors",
"fno": "09927176",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Special Issues And Sections",
"Meetings",
"Augmented Reality"
],
"authors": [
{
"givenName": "Daisuke",
"surname": "Iwai",
"fullName": "Daisuke Iwai",
"affiliation": "Osaka University, Japan",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Joseph L.",
"surname": "Gabbard",
"fullName": "Joseph L. Gabbard",
"affiliation": "Virginia Tech, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Guillaume",
"surname": "Moreau",
"fullName": "Guillaume Moreau",
"affiliation": "IMT Atlantique, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Lili",
"surname": "Wang",
"fullName": "Lili Wang",
"affiliation": "Beihang University, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "11",
"pubDate": "2022-11-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "2022",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tg/2015/05/07067526",
"title": "Guest Editor's Introduction to the Special Section on the International Symposium on Mixed and Augmented Reality 2013",
"doi": null,
"abstractUrl": "/journal/tg/2015/05/07067526/13rRUwwaKta",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/11/08053887",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2017/11/08053887/13rRUxBa56a",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/11/08514064",
"title": "Message from the ISMAR 2018 Science and Technology Program Chairs and TVCG Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2018/11/08514064/14M3DZSFbS8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699218",
"title": "Message from the ISMAR 2018 Science and Technology Program Chairs and TVCG Guest Editors",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699218/19F1TteG3QI",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/05/09754286",
"title": "IEEE VR 2022 Message from the Journal Paper Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09754286/1Cpd7Bwusk8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/06/09766260",
"title": "Guest Editors' Introduction: Special Section on IEEE PacificVis 2022",
"doi": null,
"abstractUrl": "/journal/tg/2022/06/09766260/1D34QjpFGyQ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/11/08855105",
"title": "Message from the ISMAR 2019 Science and Technology Program Chairs and <italic>TVCG</italic> Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2019/11/08855105/1dNHma690d2",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/12/09254194",
"title": "Message from the ISMAR 2020 Science and Technology Program Chairs and <italic>TVCG</italic> Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2020/12/09254194/1oDXMHvn1aU",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800z020",
"title": "Message from the ISMAR 2020 Science and Technology Program Chairs and TVCG Guest Editors",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800z020/1pysy7gKfLO",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09591492",
"title": "Message from the ISMAR 2021 Science and Technology Journal Program Chairs and TVCG Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09591492/1y2FvGMxBuM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09927195",
"articleId": "1HGJm87UJvq",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09930681",
"articleId": "1HMP0v71jI4",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNy5hRch",
"title": "Nov.",
"year": "2019",
"issueNum": "11",
"idPrefix": "tg",
"pubType": "journal",
"volume": "25",
"label": "Nov.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1dNHma690d2",
"doi": "10.1109/TVCG.2019.2934813",
"abstract": "In this special issue of <italic>IEEE Transactions on Visualization and Computer Graphics (TVCG)</italic>, we are pleased to present the <italic>TVCG</italic> papers from the 18th IEEE International Symposium on Mixed and Augmented Reality (ISMAR 2019), held October 14–18 in Beijing, China. ISMAR continues the over 20-year long tradition of IWAR, ISMR, and ISAR, and is undoubtedly the premier conference for mixed and augmented reality in the world.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this special issue of <italic>IEEE Transactions on Visualization and Computer Graphics (TVCG)</italic>, we are pleased to present the <italic>TVCG</italic> papers from the 18th IEEE International Symposium on Mixed and Augmented Reality (ISMAR 2019), held October 14–18 in Beijing, China. ISMAR continues the over 20-year long tradition of IWAR, ISMR, and ISAR, and is undoubtedly the premier conference for mixed and augmented reality in the world.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this special issue of IEEE Transactions on Visualization and Computer Graphics (TVCG), we are pleased to present the TVCG papers from the 18th IEEE International Symposium on Mixed and Augmented Reality (ISMAR 2019), held October 14–18 in Beijing, China. ISMAR continues the over 20-year long tradition of IWAR, ISMR, and ISAR, and is undoubtedly the premier conference for mixed and augmented reality in the world.",
"title": "Message from the ISMAR 2019 Science and Technology Program Chairs and <italic>TVCG</italic> Guest Editors",
"normalizedTitle": "Message from the ISMAR 2019 Science and Technology Program Chairs and TVCG Guest Editors",
"fno": "08855105",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Special Issues And Sections",
"Computer Graphics",
"Visualization",
"Meetings"
],
"authors": [
{
"givenName": "Joseph L.",
"surname": "Gabbard",
"fullName": "Joseph L. Gabbard",
"affiliation": "Virginia Tech, US",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jens",
"surname": "Grubert",
"fullName": "Jens Grubert",
"affiliation": "Coburg University of Applied Sciences and Arts, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Shimin",
"surname": "Hu",
"fullName": "Shimin Hu",
"affiliation": "Tsinghua University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Stefanie",
"surname": "Zollmann",
"fullName": "Stefanie Zollmann",
"affiliation": "University of Otago, New Zealand",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "11",
"pubDate": "2019-11-01 00:00:00",
"pubType": "trans",
"pages": "3050-3051",
"year": "2019",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tg/2014/04/ttg2014040vi",
"title": "Message from the Paper Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2014/04/ttg2014040vi/13rRUwI5Ug9",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/11/08053887",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2017/11/08053887/13rRUxBa56a",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/11/08514064",
"title": "Message from the ISMAR 2018 Science and Technology Program Chairs and TVCG Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2018/11/08514064/14M3DZSFbS8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699218",
"title": "Message from the ISMAR 2018 Science and Technology Program Chairs and TVCG Guest Editors",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699218/19F1TteG3QI",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/05/09754286",
"title": "IEEE VR 2022 Message from the Journal Paper Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09754286/1Cpd7Bwusk8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09927176",
"title": "Message from the ISMAR 2022 Science and Technology Journal Program Chairs and TVCG Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09927176/1HGJ8mlD3S8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/11/08855103",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2019/11/08855103/1dNHm0Dq8lG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/12/09254194",
"title": "Message from the ISMAR 2020 Science and Technology Program Chairs and <italic>TVCG</italic> Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2020/12/09254194/1oDXMHvn1aU",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800z020",
"title": "Message from the ISMAR 2020 Science and Technology Program Chairs and TVCG Guest Editors",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800z020/1pysy7gKfLO",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09591492",
"title": "Message from the ISMAR 2021 Science and Technology Journal Program Chairs and TVCG Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09591492/1y2FvGMxBuM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "08855103",
"articleId": "1dNHm0Dq8lG",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08794595",
"articleId": "1dNHomd5th6",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNBBhN8N",
"title": "Dec.",
"year": "2020",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "26",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1oDXMHvn1aU",
"doi": "10.1109/TVCG.2020.3021812",
"abstract": "In this special issue of <italic>IEEE Transactions on Visualization and Computer Graphics (TVCG)</italic>, we are pleased to present the <italic>TVCG</italic> papers from the 19th IEEE International Symposium on Mixed and Augmented Reality (ISMAR 2020), which had been originally planned to hold in Recife/Porto de Galinhas, Brazil. In order to preserve the safety and well-being of all participants under the global pandemic of COVID-19, ISMAR 2020 will be held as a virtual conference between November 9 and 13, 2020. ISMAR continues the over twenty year long tradition of IWAR, ISMR, and ISAR, and is undoubtedly the premier conference for Mixed and Augmented Reality in the world.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this special issue of <italic>IEEE Transactions on Visualization and Computer Graphics (TVCG)</italic>, we are pleased to present the <italic>TVCG</italic> papers from the 19th IEEE International Symposium on Mixed and Augmented Reality (ISMAR 2020), which had been originally planned to hold in Recife/Porto de Galinhas, Brazil. In order to preserve the safety and well-being of all participants under the global pandemic of COVID-19, ISMAR 2020 will be held as a virtual conference between November 9 and 13, 2020. ISMAR continues the over twenty year long tradition of IWAR, ISMR, and ISAR, and is undoubtedly the premier conference for Mixed and Augmented Reality in the world.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this special issue of IEEE Transactions on Visualization and Computer Graphics (TVCG), we are pleased to present the TVCG papers from the 19th IEEE International Symposium on Mixed and Augmented Reality (ISMAR 2020), which had been originally planned to hold in Recife/Porto de Galinhas, Brazil. In order to preserve the safety and well-being of all participants under the global pandemic of COVID-19, ISMAR 2020 will be held as a virtual conference between November 9 and 13, 2020. ISMAR continues the over twenty year long tradition of IWAR, ISMR, and ISAR, and is undoubtedly the premier conference for Mixed and Augmented Reality in the world.",
"title": "Message from the ISMAR 2020 Science and Technology Program Chairs and <italic>TVCG</italic> Guest Editors",
"normalizedTitle": "Message from the ISMAR 2020 Science and Technology Program Chairs and TVCG Guest Editors",
"fno": "09254194",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Special Issues And Sections",
"Augmented Reality",
"Meetings",
"Virtual Reality"
],
"authors": [
{
"givenName": "Shi-Min",
"surname": "Hu",
"fullName": "Shi-Min Hu",
"affiliation": "Tsinghua University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Denis",
"surname": "Kalkofen",
"fullName": "Denis Kalkofen",
"affiliation": "Graz University of Technology, Austria",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jonathan",
"surname": "Ventura",
"fullName": "Jonathan Ventura",
"affiliation": "California Polytechnic State University, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Stefanie",
"surname": "Zollmann",
"fullName": "Stefanie Zollmann",
"affiliation": "University of Otago, New Zealand",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "12",
"pubDate": "2020-12-01 00:00:00",
"pubType": "trans",
"pages": "3387-3388",
"year": "2020",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tg/2017/11/08053887",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2017/11/08053887/13rRUxBa56a",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/05/09754286",
"title": "IEEE VR 2022 Message from the Journal Paper Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09754286/1Cpd7Bwusk8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09927176",
"title": "Message from the ISMAR 2022 Science and Technology Journal Program Chairs and TVCG Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09927176/1HGJ8mlD3S8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/11/08855103",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2019/11/08855103/1dNHm0Dq8lG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/11/08855105",
"title": "Message from the ISMAR 2019 Science and Technology Program Chairs and <italic>TVCG</italic> Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2019/11/08855105/1dNHma690d2",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/09052628",
"title": "Introducing the IEEE Virtual Reality 2020 Special Issue",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/09052628/1iFLKo4ODvO",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/09052630",
"title": "Preface",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/09052630/1iFLLHpsBfW",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/12/09254193",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2020/12/09254193/1oDXLUaRaDK",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800z020",
"title": "Message from the ISMAR 2020 Science and Technology Program Chairs and TVCG Guest Editors",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800z020/1pysy7gKfLO",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09591492",
"title": "Message from the ISMAR 2021 Science and Technology Journal Program Chairs and TVCG Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09591492/1y2FvGMxBuM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09254193",
"articleId": "1oDXLUaRaDK",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09199570",
"articleId": "1ncgoC1SEMw",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "1sP18ke9Y64",
"title": "May",
"year": "2021",
"issueNum": "05",
"idPrefix": "tg",
"pubType": "journal",
"volume": "27",
"label": "May",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1sP18PmVuQU",
"doi": "10.1109/TVCG.2021.3067811",
"abstract": "Welcome to the 10<sup>th</sup> <italic>IEEE Transactions on Visualization and Computer Graphics (TVCG)</italic> special issue on IEEE Virtual Reality and 3D User Interfaces. This volume contains a total of 25 full papers selected for and presented at the IEEE Conference on Virtual Reality and 3D User Interfaces (IEEE VR 2021), held fully virtual from March 27 to April 3, 2021.</p> <p>Founded in 1993, IEEE VR has a long tradition as the premier venue where new research results in the field of Virtual Reality (VR) are presented. With the emergence of VR as a major technology in a diverse set of fields, such as entertainment, education, data analytics, artificial intelligence, medicine, construction, training, and many others, the papers presented at IEEE VR and published in the <italic>IEEE TVCG</italic> VR special issue mark a major highlight of the year.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Welcome to the 10<sup>th</sup> <italic>IEEE Transactions on Visualization and Computer Graphics (TVCG)</italic> special issue on IEEE Virtual Reality and 3D User Interfaces. This volume contains a total of 25 full papers selected for and presented at the IEEE Conference on Virtual Reality and 3D User Interfaces (IEEE VR 2021), held fully virtual from March 27 to April 3, 2021.</p> <p>Founded in 1993, IEEE VR has a long tradition as the premier venue where new research results in the field of Virtual Reality (VR) are presented. With the emergence of VR as a major technology in a diverse set of fields, such as entertainment, education, data analytics, artificial intelligence, medicine, construction, training, and many others, the papers presented at IEEE VR and published in the <italic>IEEE TVCG</italic> VR special issue mark a major highlight of the year.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Welcome to the 10th IEEE Transactions on Visualization and Computer Graphics (TVCG) special issue on IEEE Virtual Reality and 3D User Interfaces. This volume contains a total of 25 full papers selected for and presented at the IEEE Conference on Virtual Reality and 3D User Interfaces (IEEE VR 2021), held fully virtual from March 27 to April 3, 2021. Founded in 1993, IEEE VR has a long tradition as the premier venue where new research results in the field of Virtual Reality (VR) are presented. With the emergence of VR as a major technology in a diverse set of fields, such as entertainment, education, data analytics, artificial intelligence, medicine, construction, training, and many others, the papers presented at IEEE VR and published in the IEEE TVCG VR special issue mark a major highlight of the year.",
"title": "Introducing the IEEE Virtual Reality 2021 Special Issue",
"normalizedTitle": "Introducing the IEEE Virtual Reality 2021 Special Issue",
"fno": "09405571",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Special Issues And Sections",
"Virtual Realty",
"Meetings"
],
"authors": [
{
"givenName": "Klaus",
"surname": "Mueller",
"fullName": "Klaus Mueller",
"affiliation": "Stony Brook University, USA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Doug",
"surname": "Bowman",
"fullName": "Doug Bowman",
"affiliation": "Virginia Tech, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "05",
"pubDate": "2021-05-01 00:00:00",
"pubType": "trans",
"pages": "iv-iv",
"year": "2021",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tg/2014/04/ttg2014040vi",
"title": "Message from the Paper Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2014/04/ttg2014040vi/13rRUwI5Ug9",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/01/ttg2011010001",
"title": "Guest Editor's Introduction Special Section on the Virtual Reality Conference (VR)",
"doi": null,
"abstractUrl": "/journal/tg/2011/01/ttg2011010001/13rRUwIF6l4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08676185",
"title": "Introducing the IEEE Virtual Reality 2019 Special Issue",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08676185/18NkgxdV8sM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/05/09754285",
"title": "IEEE VR 2022 Introducing the Special Issue",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09754285/1CpcIar9LS8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/05/09754286",
"title": "IEEE VR 2022 Message from the Journal Paper Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09754286/1Cpd7Bwusk8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/11/08855105",
"title": "Message from the ISMAR 2019 Science and Technology Program Chairs and <italic>TVCG</italic> Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2019/11/08855105/1dNHma690d2",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/09052628",
"title": "Introducing the IEEE Virtual Reality 2020 Special Issue",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/09052628/1iFLKo4ODvO",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/09052630",
"title": "Preface",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/09052630/1iFLLHpsBfW",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/12/09254194",
"title": "Message from the ISMAR 2020 Science and Technology Program Chairs and <italic>TVCG</italic> Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2020/12/09254194/1oDXMHvn1aU",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09591492",
"title": "Message from the ISMAR 2021 Science and Technology Journal Program Chairs and TVCG Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09591492/1y2FvGMxBuM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09405522",
"articleId": "1sP1bNOvg64",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09405530",
"articleId": "1sP1eDRuGMU",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNwCsdFw",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tk",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1JrMyxvllsY",
"doi": "10.1109/TKDE.2022.3232482",
"abstract": "Multi-view partial multi-label learning (MVPML) aims to learn a multi-label predictive model from the training examples, each of which is presented by multiple feature vectors while associated with a set of candidate labels where only a subset is correct. Generally, existing techniques work simply by identifying the ground-truth label via aggregating the features from all views to train a final classifier, but ignore the cause of the incorrect labels in the candidate label sets, i.e., the diverse property of the representation from different views leads to the incorrect labels which form the candidate labels alone with the essential supervision. In this paper, a novel MVPML approach is proposed to learn the predictive model and the incorrect-labeling model jointly by incorporating the graph-fusion-based topological structure of the feature space. Specifically, the latent label distribution and the incorrect labels are identified simultaneously in a unified framework under the supervision of candidate labels. In addition, a common topological structure of the feature space from all views is learned via the graph fusion for further capturing the latent label distribution. Experimental results on the real-world datasets clearly validate the effectiveness of the proposed approach for solving multi-view partial multi-label learning problems.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Multi-view partial multi-label learning (MVPML) aims to learn a multi-label predictive model from the training examples, each of which is presented by multiple feature vectors while associated with a set of candidate labels where only a subset is correct. Generally, existing techniques work simply by identifying the ground-truth label via aggregating the features from all views to train a final classifier, but ignore the cause of the incorrect labels in the candidate label sets, i.e., the diverse property of the representation from different views leads to the incorrect labels which form the candidate labels alone with the essential supervision. In this paper, a novel MVPML approach is proposed to learn the predictive model and the incorrect-labeling model jointly by incorporating the graph-fusion-based topological structure of the feature space. Specifically, the latent label distribution and the incorrect labels are identified simultaneously in a unified framework under the supervision of candidate labels. In addition, a common topological structure of the feature space from all views is learned via the graph fusion for further capturing the latent label distribution. Experimental results on the real-world datasets clearly validate the effectiveness of the proposed approach for solving multi-view partial multi-label learning problems.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Multi-view partial multi-label learning (MVPML) aims to learn a multi-label predictive model from the training examples, each of which is presented by multiple feature vectors while associated with a set of candidate labels where only a subset is correct. Generally, existing techniques work simply by identifying the ground-truth label via aggregating the features from all views to train a final classifier, but ignore the cause of the incorrect labels in the candidate label sets, i.e., the diverse property of the representation from different views leads to the incorrect labels which form the candidate labels alone with the essential supervision. In this paper, a novel MVPML approach is proposed to learn the predictive model and the incorrect-labeling model jointly by incorporating the graph-fusion-based topological structure of the feature space. Specifically, the latent label distribution and the incorrect labels are identified simultaneously in a unified framework under the supervision of candidate labels. In addition, a common topological structure of the feature space from all views is learned via the graph fusion for further capturing the latent label distribution. Experimental results on the real-world datasets clearly validate the effectiveness of the proposed approach for solving multi-view partial multi-label learning problems.",
"title": "Multi-View Partial Multi-Label Learning via Graph-Fusion-Based Label Enhancement",
"normalizedTitle": "Multi-View Partial Multi-Label Learning via Graph-Fusion-Based Label Enhancement",
"fno": "09999508",
"hasPdf": true,
"idPrefix": "tk",
"keywords": [
"Predictive Models",
"Training",
"Noise Measurement",
"Task Analysis",
"Aggregates",
"Manifolds",
"Labeling",
"Label Enhancement",
"Label Distribution",
"Partial Multi Label Learning",
"Multi Label Learning",
"Label Ambiguity"
],
"authors": [
{
"givenName": "Ning",
"surname": "Xu",
"fullName": "Ning Xu",
"affiliation": "School of Computer Science and Engineering, Southeast University, Nanjing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yong-Di",
"surname": "Wu",
"fullName": "Yong-Di Wu",
"affiliation": "School of Computer Science and Engineering, Southeast University, Nanjing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Congyu",
"surname": "Qiao",
"fullName": "Congyu Qiao",
"affiliation": "School of Computer Science and Engineering, Southeast University, Nanjing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yi",
"surname": "Ren",
"fullName": "Yi Ren",
"affiliation": "Research Center for Healthcare Data Science, Zhejiang Laboratory, Hangzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Minxue",
"surname": "Zhang",
"fullName": "Minxue Zhang",
"affiliation": "School of Computer Science and Engineering, Southeast University, Nanjing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xin",
"surname": "Geng",
"fullName": "Xin Geng",
"affiliation": "School of Computer Science and Engineering, Southeast University, Nanjing, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-12-01 00:00:00",
"pubType": "trans",
"pages": "1-13",
"year": "5555",
"issn": "1041-4347",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tk/2017/10/07964762",
"title": "Disambiguation-Free Partial Label Learning",
"doi": null,
"abstractUrl": "/journal/tk/2017/10/07964762/13rRUIJcWlN",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdm/2018/9159/0/08595001",
"title": "Feature-Induced Partial Multi-label Learning",
"doi": null,
"abstractUrl": "/proceedings-article/icdm/2018/08595001/17D45WHONq8",
"parentPublication": {
"id": "proceedings/icdm/2018/9159/0",
"title": "2018 IEEE International Conference on Data Mining (ICDM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdm/2021/2398/0/239800a926",
"title": "Few-Shot Partial Multi-Label Learning",
"doi": null,
"abstractUrl": "/proceedings-article/icdm/2021/239800a926/1Aqx1YNNFU4",
"parentPublication": {
"id": "proceedings/icdm/2021/2398/0",
"title": "2021 IEEE International Conference on Data Mining (ICDM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdm/2022/5099/0/509900a478",
"title": "Few-shot Partial Multi-label Learning with Data Augmentation",
"doi": null,
"abstractUrl": "/proceedings-article/icdm/2022/509900a478/1KpCrNUGKHe",
"parentPublication": {
"id": "proceedings/icdm/2022/5099/0",
"title": "2022 IEEE International Conference on Data Mining (ICDM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2021/02/08792183",
"title": "GM-PLL: Graph Matching Based Partial Label Learning",
"doi": null,
"abstractUrl": "/journal/tk/2021/02/08792183/1ckpmarL2RG",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2021/10/09057438",
"title": "Partial Multi-Label Learning via Credible Label Elicitation",
"doi": null,
"abstractUrl": "/journal/tp/2021/10/09057438/1iQafwEcU9i",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdm/2020/8316/0/831600a691",
"title": "Semi-Supervised Partial Multi-label Learning",
"doi": null,
"abstractUrl": "/proceedings-article/icdm/2020/831600a691/1r54C5I9DJC",
"parentPublication": {
"id": "proceedings/icdm/2020/8316/0",
"title": "2020 IEEE International Conference on Data Mining (ICDM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/07/09354590",
"title": "Partial Multi-Label Learning With Noisy Label Identification",
"doi": null,
"abstractUrl": "/journal/tp/2022/07/09354590/1reXib2cwWk",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2021/3864/0/09428103",
"title": "A Generative Model for Partial Label Learning",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2021/09428103/1uilNaDG2ys",
"parentPublication": {
"id": "proceedings/icme/2021/3864/0",
"title": "2021 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/12/09573413",
"title": "Adaptive Graph Guided Disambiguation for Partial Label Learning",
"doi": null,
"abstractUrl": "/journal/tp/2022/12/09573413/1xH5E3Yjgek",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09999339",
"articleId": "1JqCjx4RA0U",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09999496",
"articleId": "1JrMyKEQUak",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1JDoUOUHcg8",
"name": "ttk555501-09999508s1-supp1-3232482.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttk555501-09999508s1-supp1-3232482.pdf",
"extension": "pdf",
"size": "215 kB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "1AH3vTZdct2",
"title": "March",
"year": "2022",
"issueNum": "03",
"idPrefix": "tp",
"pubType": "journal",
"volume": "44",
"label": "March",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1mq8fINsglW",
"doi": "10.1109/TPAMI.2020.3017456",
"abstract": "With increasing data volumes, the bottleneck in obtaining data for training a given learning task is the cost of manually labeling instances within the data. To alleviate this issue, various reduced label settings have been considered including semi-supervised learning, partial- or incomplete-label learning, multiple-instance learning, and active learning. Here, we focus on multiple-instance multiple-label learning with missing bag labels. Little research has been done for this challenging yet potentially powerful variant of incomplete supervision learning. We introduce a novel discriminative probabilistic model for missing labels in multiple-instance multiple-label learning. To address inference challenges, we introduce an efficient implementation of the EM algorithm for the model. Additionally, we consider an alternative inference approach that relies on maximizing the label-wise marginal likelihood of the proposed model instead of the joint likelihood. Numerical experiments on benchmark datasets illustrate the robustness of the proposed approach. In particular, comparison to state-of-the-art methods shows that our approach introduces a significantly smaller decrease in performance when the proportion of missing labels is increased.",
"abstracts": [
{
"abstractType": "Regular",
"content": "With increasing data volumes, the bottleneck in obtaining data for training a given learning task is the cost of manually labeling instances within the data. To alleviate this issue, various reduced label settings have been considered including semi-supervised learning, partial- or incomplete-label learning, multiple-instance learning, and active learning. Here, we focus on multiple-instance multiple-label learning with missing bag labels. Little research has been done for this challenging yet potentially powerful variant of incomplete supervision learning. We introduce a novel discriminative probabilistic model for missing labels in multiple-instance multiple-label learning. To address inference challenges, we introduce an efficient implementation of the EM algorithm for the model. Additionally, we consider an alternative inference approach that relies on maximizing the label-wise marginal likelihood of the proposed model instead of the joint likelihood. Numerical experiments on benchmark datasets illustrate the robustness of the proposed approach. In particular, comparison to state-of-the-art methods shows that our approach introduces a significantly smaller decrease in performance when the proportion of missing labels is increased.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "With increasing data volumes, the bottleneck in obtaining data for training a given learning task is the cost of manually labeling instances within the data. To alleviate this issue, various reduced label settings have been considered including semi-supervised learning, partial- or incomplete-label learning, multiple-instance learning, and active learning. Here, we focus on multiple-instance multiple-label learning with missing bag labels. Little research has been done for this challenging yet potentially powerful variant of incomplete supervision learning. We introduce a novel discriminative probabilistic model for missing labels in multiple-instance multiple-label learning. To address inference challenges, we introduce an efficient implementation of the EM algorithm for the model. Additionally, we consider an alternative inference approach that relies on maximizing the label-wise marginal likelihood of the proposed model instead of the joint likelihood. Numerical experiments on benchmark datasets illustrate the robustness of the proposed approach. In particular, comparison to state-of-the-art methods shows that our approach introduces a significantly smaller decrease in performance when the proportion of missing labels is increased.",
"title": "Incomplete Label Multiple Instance Multiple Label Learning",
"normalizedTitle": "Incomplete Label Multiple Instance Multiple Label Learning",
"fno": "09171331",
"hasPdf": true,
"idPrefix": "tp",
"keywords": [
"Expectation Maximisation Algorithm",
"Image Classification",
"Probability",
"Supervised Learning",
"Incomplete Label Multiple Instance Multiple Label Learning",
"Reduced Label Settings",
"Semisupervised Learning",
"Active Learning",
"Bag Labels",
"Incomplete Supervision Learning",
"Label Wise Marginal Likelihood",
"Discriminative Probabilistic Model",
"EM Algorithm",
"Alternative Inference Approach",
"Labeling",
"Training",
"Phase Locked Loops",
"Birds",
"Numerical Models",
"Graphical Models",
"Standards",
"Incomplete Label Learning",
"Learning With Missing Labels",
"Multiple Instance Multiple Label Learning",
"Multi Instance Multi Label Learning",
"Maximum Likelihood",
"Marginal Maximum Likelihood",
"EM Algorithm",
"Graphical Models",
"Probabilistic Models"
],
"authors": [
{
"givenName": "Tam",
"surname": "Nguyen",
"fullName": "Tam Nguyen",
"affiliation": "School of Electrical Engineering and Computer Science, Oregon State University, Corvallis, OR, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Raviv",
"surname": "Raich",
"fullName": "Raviv Raich",
"affiliation": "School of Electrical Engineering and Computer Science, Oregon State University, Corvallis, OR, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "03",
"pubDate": "2022-03-01 00:00:00",
"pubType": "trans",
"pages": "1320-1337",
"year": "2022",
"issn": "0162-8828",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icpr/2014/5209/0/5209b964",
"title": "Multi-label Learning with Missing Labels",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/5209b964/12OmNrGb2hX",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdm/2014/4302/0/4302b067",
"title": "Learning Low-Rank Label Correlations for Multi-label Classification with Missing Labels",
"doi": null,
"abstractUrl": "/proceedings-article/icdm/2014/4302b067/12OmNvAAtKJ",
"parentPublication": {
"id": "proceedings/icdm/2014/4302/0",
"title": "2014 IEEE International Conference on Data Mining (ICDM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2008/2570/0/04607686",
"title": "Graph-based semi-supervised learning with multi-label",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2008/04607686/12OmNvUaNrW",
"parentPublication": {
"id": "proceedings/icme/2008/2570/0",
"title": "2008 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2019/11/08423669",
"title": "Fast Multi-Instance Multi-Label Learning",
"doi": null,
"abstractUrl": "/journal/tp/2019/11/08423669/13rRUx0xPjt",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2017/12/07807308",
"title": "Dynamic Programming for Instance Annotation in Multi-Instance Multi-Label Learning",
"doi": null,
"abstractUrl": "/journal/tp/2017/12/07807308/13rRUyuegil",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2018/3788/0/08545329",
"title": "Learning with Latent Label Hierarchy from Incomplete Multi-Label Data",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2018/08545329/17D45Wuc33q",
"parentPublication": {
"id": "proceedings/icpr/2018/3788/0",
"title": "2018 24th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ai/2022/03/09699063",
"title": "Distributed Semisupervised Partial Label Learning Over Networks",
"doi": null,
"abstractUrl": "/journal/ai/2022/03/09699063/1ADJiwwfjtC",
"parentPublication": {
"id": "trans/ai",
"title": "IEEE Transactions on Artificial Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2021/02/08792183",
"title": "GM-PLL: Graph Matching Based Partial Label Learning",
"doi": null,
"abstractUrl": "/journal/tk/2021/02/08792183/1ckpmarL2RG",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdmw/2019/4896/0/489600a689",
"title": "Improved Multi-view Multi-label Learning with Incomplete Views and Labels",
"doi": null,
"abstractUrl": "/proceedings-article/icdmw/2019/489600a689/1gAwVDR6lG0",
"parentPublication": {
"id": "proceedings/icdmw/2019/4896/0",
"title": "2019 International Conference on Data Mining Workshops (ICDMW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/07/09354590",
"title": "Partial Multi-Label Learning With Noisy Label Identification",
"doi": null,
"abstractUrl": "/journal/tp/2022/07/09354590/1reXib2cwWk",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09197621",
"articleId": "1n8WHJLfVqU",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09194079",
"articleId": "1n0E5w0gYOQ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1AH3z2M0OxW",
"name": "ttp202203-09171331s1-supp1-3017456.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttp202203-09171331s1-supp1-3017456.pdf",
"extension": "pdf",
"size": "819 kB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "1DU9C1cnFPq",
"title": "July",
"year": "2022",
"issueNum": "07",
"idPrefix": "tp",
"pubType": "journal",
"volume": "44",
"label": "July",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1reXib2cwWk",
"doi": "10.1109/TPAMI.2021.3059290",
"abstract": "Partial multi-label learning (PML) deals with problems where each instance is assigned with a candidate label set, which contains multiple relevant labels and some noisy labels. Recent studies usually solve PML problems with the disambiguation strategy, which recovers ground-truth labels from the candidate label set by simply assuming that the noisy labels are generated randomly. In real applications, however, noisy labels are usually caused by some ambiguous contents of the example. Based on this observation, we propose a partial multi-label learning approach to simultaneously recover the ground-truth information and identify the noisy labels. The two objectives are formalized in a unified framework with trace norm and <inline-formula><tex-math notation=\"LaTeX\">Z_$\\ell _1$_Z</tex-math></inline-formula> norm regularizers. Under the supervision of the observed noise-corrupted label matrix, the multi-label classifier and noisy label identifier are jointly optimized by incorporating the label correlation exploitation and feature-induced noise model. Furthermore, by mapping each bag to a feature vector, we extend PML-NI method into multi-instance multi-label learning by identifying noisy labels based on ambiguous instances. A theoretical analysis of generalization bound and extensive experiments on multiple data sets from various real-world tasks demonstrate the effectiveness of the proposed approach.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Partial multi-label learning (PML) deals with problems where each instance is assigned with a candidate label set, which contains multiple relevant labels and some noisy labels. Recent studies usually solve PML problems with the disambiguation strategy, which recovers ground-truth labels from the candidate label set by simply assuming that the noisy labels are generated randomly. In real applications, however, noisy labels are usually caused by some ambiguous contents of the example. Based on this observation, we propose a partial multi-label learning approach to simultaneously recover the ground-truth information and identify the noisy labels. The two objectives are formalized in a unified framework with trace norm and <inline-formula><tex-math notation=\"LaTeX\">$\\ell _1$</tex-math><alternatives><mml:math><mml:msub><mml:mi>ℓ</mml:mi><mml:mn>1</mml:mn></mml:msub></mml:math><inline-graphic xlink:href=\"xie-ieq1-3059290.gif\"/></alternatives></inline-formula> norm regularizers. Under the supervision of the observed noise-corrupted label matrix, the multi-label classifier and noisy label identifier are jointly optimized by incorporating the label correlation exploitation and feature-induced noise model. Furthermore, by mapping each bag to a feature vector, we extend PML-NI method into multi-instance multi-label learning by identifying noisy labels based on ambiguous instances. A theoretical analysis of generalization bound and extensive experiments on multiple data sets from various real-world tasks demonstrate the effectiveness of the proposed approach.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Partial multi-label learning (PML) deals with problems where each instance is assigned with a candidate label set, which contains multiple relevant labels and some noisy labels. Recent studies usually solve PML problems with the disambiguation strategy, which recovers ground-truth labels from the candidate label set by simply assuming that the noisy labels are generated randomly. In real applications, however, noisy labels are usually caused by some ambiguous contents of the example. Based on this observation, we propose a partial multi-label learning approach to simultaneously recover the ground-truth information and identify the noisy labels. The two objectives are formalized in a unified framework with trace norm and - norm regularizers. Under the supervision of the observed noise-corrupted label matrix, the multi-label classifier and noisy label identifier are jointly optimized by incorporating the label correlation exploitation and feature-induced noise model. Furthermore, by mapping each bag to a feature vector, we extend PML-NI method into multi-instance multi-label learning by identifying noisy labels based on ambiguous instances. A theoretical analysis of generalization bound and extensive experiments on multiple data sets from various real-world tasks demonstrate the effectiveness of the proposed approach.",
"title": "Partial Multi-Label Learning With Noisy Label Identification",
"normalizedTitle": "Partial Multi-Label Learning With Noisy Label Identification",
"fno": "09354590",
"hasPdf": true,
"idPrefix": "tp",
"keywords": [
"Learning Artificial Intelligence",
"Matrix Algebra",
"Pattern Classification",
"Ground Truth Labels",
"Candidate Label",
"Partial Multilabel Learning Approach",
"Multilabel Classifier",
"Noisy Label Identifier",
"Label Correlation Exploitation",
"Multiinstance Multilabel Learning",
"Noisy Label Identification",
"Noise Corrupted Label Matrix",
"Ground Truth Information",
"PML NI Method",
"Noise Measurement",
"Task Analysis",
"Training",
"Labeling",
"Crowdsourcing",
"Correlation",
"Phase Locked Loops",
"Multi Lable Learning",
"Partial Multi Label Learning",
"Candidate Label Set",
"Noisy Label Identification",
"Multi Instance Multi Label Learning"
],
"authors": [
{
"givenName": "Ming-Kun",
"surname": "Xie",
"fullName": "Ming-Kun Xie",
"affiliation": "MIIT Key Laboratory of Pattern Analysis and Machine Intelligence, Collaborative Innovation Center of Novel Software Technology and Industrialization, Nanjing University of Aeronautics and Astronautics, Nanjing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Sheng-Jun",
"surname": "Huang",
"fullName": "Sheng-Jun Huang",
"affiliation": "MIIT Key Laboratory of Pattern Analysis and Machine Intelligence, Collaborative Innovation Center of Novel Software Technology and Industrialization, Nanjing University of Aeronautics and Astronautics, Nanjing, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "07",
"pubDate": "2022-07-01 00:00:00",
"pubType": "trans",
"pages": "3676-3687",
"year": "2022",
"issn": "0162-8828",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icdm/2018/9159/0/08595001",
"title": "Feature-Induced Partial Multi-label Learning",
"doi": null,
"abstractUrl": "/proceedings-article/icdm/2018/08595001/17D45WHONq8",
"parentPublication": {
"id": "proceedings/icdm/2018/9159/0",
"title": "2018 IEEE International Conference on Data Mining (ICDM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ai/2022/03/09699063",
"title": "Distributed Semisupervised Partial Label Learning Over Networks",
"doi": null,
"abstractUrl": "/journal/ai/2022/03/09699063/1ADJiwwfjtC",
"parentPublication": {
"id": "trans/ai",
"title": "IEEE Transactions on Artificial Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdm/2021/2398/0/239800a926",
"title": "Few-Shot Partial Multi-Label Learning",
"doi": null,
"abstractUrl": "/proceedings-article/icdm/2021/239800a926/1Aqx1YNNFU4",
"parentPublication": {
"id": "proceedings/icdm/2021/2398/0",
"title": "2021 IEEE International Conference on Data Mining (ICDM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/5555/01/09983986",
"title": "A Unifying Probabilistic Framework for Partially Labeled Data Learning",
"doi": null,
"abstractUrl": "/journal/tp/5555/01/09983986/1J4xVf3mQfK",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdm/2022/5099/0/509900a478",
"title": "Few-shot Partial Multi-label Learning with Data Augmentation",
"doi": null,
"abstractUrl": "/proceedings-article/icdm/2022/509900a478/1KpCrNUGKHe",
"parentPublication": {
"id": "proceedings/icdm/2022/5099/0",
"title": "2022 IEEE International Conference on Data Mining (ICDM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2021/02/08792183",
"title": "GM-PLL: Graph Matching Based Partial Label Learning",
"doi": null,
"abstractUrl": "/journal/tk/2021/02/08792183/1ckpmarL2RG",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2021/10/09057438",
"title": "Partial Multi-Label Learning via Credible Label Elicitation",
"doi": null,
"abstractUrl": "/journal/tp/2021/10/09057438/1iQafwEcU9i",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/03/09171331",
"title": "Incomplete Label Multiple Instance Multiple Label Learning",
"doi": null,
"abstractUrl": "/journal/tp/2022/03/09171331/1mq8fINsglW",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdm/2020/8316/0/831600a691",
"title": "Semi-Supervised Partial Multi-label Learning",
"doi": null,
"abstractUrl": "/proceedings-article/icdm/2020/831600a691/1r54C5I9DJC",
"parentPublication": {
"id": "proceedings/icdm/2020/8316/0",
"title": "2020 IEEE International Conference on Data Mining (ICDM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdm/2020/8316/0/831600a761",
"title": "Partial Multi-label Learning using Label Compression",
"doi": null,
"abstractUrl": "/proceedings-article/icdm/2020/831600a761/1r54ErJVgxq",
"parentPublication": {
"id": "proceedings/icdm/2020/8316/0",
"title": "2020 IEEE International Conference on Data Mining (ICDM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09351628",
"articleId": "1r50mR8TOve",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09334445",
"articleId": "1qB7sxCjYwE",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "1JP1e1gAvYY",
"title": "Feb.",
"year": "2023",
"issueNum": "02",
"idPrefix": "tk",
"pubType": "journal",
"volume": "35",
"label": "Feb.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1uIR9VwB7Xy",
"doi": "10.1109/TKDE.2021.3092406",
"abstract": "Label Distribution Learning (LDL) has attracted increasing research attentions due to its potential to address the label ambiguity problem in machine learning and success in many real-world applications. In LDL, it is usually expensive to obtain the ground-truth label distributions of data, but it is relatively easy to obtain the logical labels of data. How to use training instances only with logical labels to learn an effective LDL model is a challenging problem. In this paper, we propose a two-step framework to address this problem. Specifically, we first design an efficient recovery model to recover the latent label distributions of training instances, named <italic>Fast Label Enhancement</italic> (FLE). Our idea is to use non-negative matrix factorization (NMF) to mine the label distribution information from the feature space. Moreover, we take the instance-class similarities into consideration to discover the importance of each label to training instances, which is useful for learning precise label distributions. Then, we train a predictive model for testing instances based on generated label distributions of training instances and an existing LDL method (e.g., SA-BFGS). Experimental results on fifteen benchmark datasets show the effectiveness of the proposed two-step framework and verify the superiority of FLE over several state-of-the-art approaches.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Label Distribution Learning (LDL) has attracted increasing research attentions due to its potential to address the label ambiguity problem in machine learning and success in many real-world applications. In LDL, it is usually expensive to obtain the ground-truth label distributions of data, but it is relatively easy to obtain the logical labels of data. How to use training instances only with logical labels to learn an effective LDL model is a challenging problem. In this paper, we propose a two-step framework to address this problem. Specifically, we first design an efficient recovery model to recover the latent label distributions of training instances, named <italic>Fast Label Enhancement</italic> (FLE). Our idea is to use non-negative matrix factorization (NMF) to mine the label distribution information from the feature space. Moreover, we take the instance-class similarities into consideration to discover the importance of each label to training instances, which is useful for learning precise label distributions. Then, we train a predictive model for testing instances based on generated label distributions of training instances and an existing LDL method (e.g., SA-BFGS). Experimental results on fifteen benchmark datasets show the effectiveness of the proposed two-step framework and verify the superiority of FLE over several state-of-the-art approaches.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Label Distribution Learning (LDL) has attracted increasing research attentions due to its potential to address the label ambiguity problem in machine learning and success in many real-world applications. In LDL, it is usually expensive to obtain the ground-truth label distributions of data, but it is relatively easy to obtain the logical labels of data. How to use training instances only with logical labels to learn an effective LDL model is a challenging problem. In this paper, we propose a two-step framework to address this problem. Specifically, we first design an efficient recovery model to recover the latent label distributions of training instances, named Fast Label Enhancement (FLE). Our idea is to use non-negative matrix factorization (NMF) to mine the label distribution information from the feature space. Moreover, we take the instance-class similarities into consideration to discover the importance of each label to training instances, which is useful for learning precise label distributions. Then, we train a predictive model for testing instances based on generated label distributions of training instances and an existing LDL method (e.g., SA-BFGS). Experimental results on fifteen benchmark datasets show the effectiveness of the proposed two-step framework and verify the superiority of FLE over several state-of-the-art approaches.",
"title": "Fast Label Enhancement for Label Distribution Learning",
"normalizedTitle": "Fast Label Enhancement for Label Distribution Learning",
"fno": "09465741",
"hasPdf": true,
"idPrefix": "tk",
"keywords": [
"Learning Artificial Intelligence",
"Matrix Decomposition",
"Effective LDL Model",
"Fast Label Enhancement",
"FLE",
"Label Ambiguity Problem",
"Label Distribution Information",
"Label Distribution Learning",
"Latent Label Distributions",
"Logical Labels",
"Machine Learning",
"NMF",
"Nonnegative Matrix Factorization",
"Precise Label Distributions",
"SA BFGS",
"Training",
"Annotations",
"Correlation",
"Transforms",
"Machine Learning",
"Linear Programming",
"Time Complexity",
"Non Negative Matrix Factorization",
"Label Importance",
"Label Enhancement",
"Label Distribution Learning"
],
"authors": [
{
"givenName": "Ke",
"surname": "Wang",
"fullName": "Ke Wang",
"affiliation": "School of Computer Science and Engineering, and the Key Lab of Computer Network and Information Integration (Ministry of Education), Southeast University, Nanjing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ning",
"surname": "Xu",
"fullName": "Ning Xu",
"affiliation": "School of Computer Science and Engineering, and the Key Lab of Computer Network and Information Integration (Ministry of Education), Southeast University, Nanjing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Miaogen",
"surname": "Ling",
"fullName": "Miaogen Ling",
"affiliation": "School of Computer and Software, Engineering Research Center of Digital Forensics, Ministry of Education, Nanjing University of Information Science and Technology, Nanjing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xin",
"surname": "Geng",
"fullName": "Xin Geng",
"affiliation": "School of Computer Science and Engineering, and the Key Lab of Computer Network and Information Integration (Ministry of Education), Southeast University, Nanjing, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "02",
"pubDate": "2023-02-01 00:00:00",
"pubType": "trans",
"pages": "1502-1514",
"year": "2023",
"issn": "1041-4347",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icdmw/2013/3142/0/3143a377",
"title": "Label Distribution Learning",
"doi": null,
"abstractUrl": "/proceedings-article/icdmw/2013/3143a377/12OmNzlD9rX",
"parentPublication": {
"id": "proceedings/icdmw/2013/3142/0",
"title": "2013 IEEE 13th International Conference on Data Mining Workshops (ICDMW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2023/05/09875104",
"title": "Variational Label Enhancement",
"doi": null,
"abstractUrl": "/journal/tp/2023/05/09875104/1GlbUwSqmLS",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbd/2022/0971/0/097100a116",
"title": "Label Enhancement with Sample Correlation via Sparse Representation",
"doi": null,
"abstractUrl": "/proceedings-article/cbd/2022/097100a116/1KdZhjYjh0Q",
"parentPublication": {
"id": "proceedings/cbd/2022/0971/0",
"title": "2022 Tenth International Conference on Advanced Cloud and Big Data (CBD)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2021/04/08847453",
"title": "Label Distribution Learning with Label Correlations on Local Samples",
"doi": null,
"abstractUrl": "/journal/tk/2021/04/08847453/1dApRY59TMY",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2021/04/08868206",
"title": "Label Enhancement for Label Distribution Learning",
"doi": null,
"abstractUrl": "/journal/tk/2021/04/08868206/1e7BW64F2nK",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2022/11/09340009",
"title": "A Novel Probabilistic Label Enhancement Algorithm for Multi-Label Distribution Learning",
"doi": null,
"abstractUrl": "/journal/tk/2022/11/09340009/1qL4SZMuLKM",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2023/01/09404874",
"title": "Generalized Label Enhancement With Sample Correlations",
"doi": null,
"abstractUrl": "/journal/tk/2023/01/09404874/1sNm45kvOU0",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbd/2020/2313/0/231300a096",
"title": "Label Enhancement Manifold Learning Algorithm for Multi-label Image Classification",
"doi": null,
"abstractUrl": "/proceedings-article/cbd/2020/231300a096/1sZ38nVv3A4",
"parentPublication": {
"id": "proceedings/cbd/2020/2313/0",
"title": "2020 Eighth International Conference on Advanced Cloud and Big Data (CBD)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2023/02/09468338",
"title": "Label Enhancement by Maintaining Positive and Negative Label Relation",
"doi": null,
"abstractUrl": "/journal/tk/2023/02/09468338/1uPuMklY24g",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2023/02/09495131",
"title": "Label Distribution Learning by Maintaining Label Ranking Relation",
"doi": null,
"abstractUrl": "/journal/tk/2023/02/09495131/1vyjeYBvCiQ",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09452804",
"articleId": "1ulCuxyZJfi",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09495158",
"articleId": "1vyjgdBlrRS",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "1wznUTxaKsw",
"title": "Oct.",
"year": "2021",
"issueNum": "10",
"idPrefix": "tg",
"pubType": "journal",
"volume": "27",
"label": "Oct.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1kBgVhAEmeA",
"doi": "10.1109/TVCG.2020.3001917",
"abstract": "Corresponding lighting and reflectance between real and virtual objects is important for spatial presence in augmented and mixed reality (AR and MR) applications. We present a method to reconstruct real-world environmental lighting, encoded as a reflection map (RM), from a conventional photograph. To achieve this, we propose a stacked convolutional neural network (SCNN) that predicts high dynamic range (HDR) 360° RMs with varying roughness from a limited field of view, low dynamic range photograph. The SCNN is progressively trained from high to low roughness to predict RMs at varying roughness levels, where each roughness level corresponds to a virtual object's roughness (from diffuse to glossy) for rendering. The predicted RM provides high-fidelity rendering of virtual objects to match with the background photograph. We illustrate the use of our method with indoor and outdoor scenes trained on separate indoor/outdoor SCNNs showing plausible rendering and composition of virtual objects in AR/MR. We show that our method has improved quality over previous methods with a comparative user study and error metrics.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Corresponding lighting and reflectance between real and virtual objects is important for spatial presence in augmented and mixed reality (AR and MR) applications. We present a method to reconstruct real-world environmental lighting, encoded as a reflection map (RM), from a conventional photograph. To achieve this, we propose a stacked convolutional neural network (SCNN) that predicts high dynamic range (HDR) 360° RMs with varying roughness from a limited field of view, low dynamic range photograph. The SCNN is progressively trained from high to low roughness to predict RMs at varying roughness levels, where each roughness level corresponds to a virtual object's roughness (from diffuse to glossy) for rendering. The predicted RM provides high-fidelity rendering of virtual objects to match with the background photograph. We illustrate the use of our method with indoor and outdoor scenes trained on separate indoor/outdoor SCNNs showing plausible rendering and composition of virtual objects in AR/MR. We show that our method has improved quality over previous methods with a comparative user study and error metrics.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Corresponding lighting and reflectance between real and virtual objects is important for spatial presence in augmented and mixed reality (AR and MR) applications. We present a method to reconstruct real-world environmental lighting, encoded as a reflection map (RM), from a conventional photograph. To achieve this, we propose a stacked convolutional neural network (SCNN) that predicts high dynamic range (HDR) 360° RMs with varying roughness from a limited field of view, low dynamic range photograph. The SCNN is progressively trained from high to low roughness to predict RMs at varying roughness levels, where each roughness level corresponds to a virtual object's roughness (from diffuse to glossy) for rendering. The predicted RM provides high-fidelity rendering of virtual objects to match with the background photograph. We illustrate the use of our method with indoor and outdoor scenes trained on separate indoor/outdoor SCNNs showing plausible rendering and composition of virtual objects in AR/MR. We show that our method has improved quality over previous methods with a comparative user study and error metrics.",
"title": "Reconstructing Reflection Maps Using a Stacked-CNN for Mixed Reality Rendering",
"normalizedTitle": "Reconstructing Reflection Maps Using a Stacked-CNN for Mixed Reality Rendering",
"fno": "09115833",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Augmented Reality",
"Convolutional Neural Nets",
"Image Reconstruction",
"Learning Artificial Intelligence",
"Lighting",
"Rendering Computer Graphics",
"Stacked CNN",
"Mixed Reality Rendering",
"Reflectance",
"Virtual Objects",
"Spatial Presence",
"Augmented Reality",
"Environmental Lighting",
"Stacked Convolutional Neural Network",
"SCNN",
"Low Dynamic Range Photograph",
"Varying Roughness Levels",
"Predicted RM",
"High Fidelity Rendering",
"Background Photograph",
"Plausible Rendering",
"Reflection Map Reconstruction",
"High Dynamic Range",
"Lighting",
"Rendering Computer Graphics",
"Environmental Management",
"Dynamic Range",
"Real Time Systems",
"Convolutional Neural Networks",
"Light Estimation",
"Reflection Map",
"Environment Map",
"Image Based Lighting",
"Deep Learning",
"Mixed Reality"
],
"authors": [
{
"givenName": "Andrew",
"surname": "Chalmers",
"fullName": "Andrew Chalmers",
"affiliation": "Computational Media Innovation Centre (CMIC), Victoria University of Wellington, Wellington, New Zealand",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Junhong",
"surname": "Zhao",
"fullName": "Junhong Zhao",
"affiliation": "Computational Media Innovation Centre (CMIC), Victoria University of Wellington, Wellington, New Zealand",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Daniel",
"surname": "Medeiros",
"fullName": "Daniel Medeiros",
"affiliation": "Computational Media Innovation Centre (CMIC), Victoria University of Wellington, Wellington, New Zealand",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Taehyun",
"surname": "Rhee",
"fullName": "Taehyun Rhee",
"affiliation": "Computational Media Innovation Centre (CMIC), Victoria University of Wellington, Wellington, New Zealand",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "10",
"pubDate": "2021-10-01 00:00:00",
"pubType": "trans",
"pages": "4073-4084",
"year": "2021",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/pacificvis/2017/5738/0/08031605",
"title": "Implicit Sphere Shadow Maps",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2017/08031605/12OmNxcMShN",
"parentPublication": {
"id": "proceedings/pacificvis/2017/5738/0",
"title": "2017 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2017/2943/0/2943a072",
"title": "Synthesis of Environment Maps for Mixed Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2017/2943a072/12OmNyfdON8",
"parentPublication": {
"id": "proceedings/ismar/2017/2943/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2013/2869/0/06671804",
"title": "Interactive exploration of augmented aerial scenes with free-viewpoint image generation from pre-rendered images",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2013/06671804/12OmNzsJ7ks",
"parentPublication": {
"id": "proceedings/ismar/2013/2869/0",
"title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446391",
"title": "Visual Perception of Real World Depth Map Resolution for Mixed Reality Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446391/13bd1eSlyst",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/12/08226853",
"title": "Multi-Material Volume Rendering with a Physically-Based Surface Reflection Model",
"doi": null,
"abstractUrl": "/journal/tg/2018/12/08226853/14H4WMQegms",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000g674",
"title": "CNN Based Learning Using Reflection and Retinex Models for Intrinsic Image Decomposition",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000g674/17D45WGGoLO",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09887904",
"title": "Fast and Accurate Illumination Estimation Using LDR Panoramic Images for Realistic Rendering",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09887904/1GBRnHyZ1bW",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600c812",
"title": "IRISformer: Dense Vision Transformers for Single-Image Inverse Rendering in Indoor Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600c812/1H0OiAWLYsw",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vs-games/2019/4540/0/08864523",
"title": "Interactive Cloud-based Global Illumination for Shared Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vs-games/2019/08864523/1e5ZtHuwxdm",
"parentPublication": {
"id": "proceedings/vs-games/2019/4540/0",
"title": "2019 11th International Conference on Virtual Worlds and Games for Serious Applications (VS-Games)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ssiai/2020/5745/0/09094615",
"title": "Optical Quality Control for Adaptive Polishing Processes",
"doi": null,
"abstractUrl": "/proceedings-article/ssiai/2020/09094615/1jVQDKlDMBO",
"parentPublication": {
"id": "proceedings/ssiai/2020/5745/0",
"title": "2020 IEEE Southwest Symposium on Image Analysis and Interpretation (SSIAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09115285",
"articleId": "1kzC0PMrQXu",
"__typename": "AdjacentArticleType"
},
"next": null,
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1wzrNBCwclW",
"name": "ttg202110-09115833s1-supp1-3001917.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg202110-09115833s1-supp1-3001917.pdf",
"extension": "pdf",
"size": "372 kB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNASILLc",
"title": "September/October",
"year": "2002",
"issueNum": "05",
"idPrefix": "cg",
"pubType": "magazine",
"volume": "22",
"label": "September/October",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxBJhoQ",
"doi": "10.1109/MCG.2002.1028726",
"abstract": "Archeoguide offers personalized augmented reality tours of archaeological sites. It uses outdoor tracking, mobile computing, 3D visualization, and augmented reality techniques to enhance information presentation, reconstruct ruined sites, and simulate ancient life.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Archeoguide offers personalized augmented reality tours of archaeological sites. It uses outdoor tracking, mobile computing, 3D visualization, and augmented reality techniques to enhance information presentation, reconstruct ruined sites, and simulate ancient life.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Archeoguide offers personalized augmented reality tours of archaeological sites. It uses outdoor tracking, mobile computing, 3D visualization, and augmented reality techniques to enhance information presentation, reconstruct ruined sites, and simulate ancient life.",
"title": "Archeoguide: An Augmented Reality Guide for Archaeological Sites",
"normalizedTitle": "Archeoguide: An Augmented Reality Guide for Archaeological Sites",
"fno": "mcg2002050052",
"hasPdf": true,
"idPrefix": "cg",
"keywords": [],
"authors": [
{
"givenName": "Vassilios",
"surname": "Vlahakis",
"fullName": "Vassilios Vlahakis",
"affiliation": "Intracom S.A.",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Nikolaos",
"surname": "Ioannidis",
"fullName": "Nikolaos Ioannidis",
"affiliation": "Intracom S.A.",
"__typename": "ArticleAuthorType"
},
{
"givenName": "John",
"surname": "Karigiannis",
"fullName": "John Karigiannis",
"affiliation": "Intracom S.A.",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Manolis",
"surname": "Tsotros",
"fullName": "Manolis Tsotros",
"affiliation": "Intracom S.A.",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Michael",
"surname": "Gounaris",
"fullName": "Michael Gounaris",
"affiliation": "Intracom S.A.",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Didier",
"surname": "Stricker",
"fullName": "Didier Stricker",
"affiliation": "Fraunhofer Institute for Computer Graphics",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Tim",
"surname": "Gleue",
"fullName": "Tim Gleue",
"affiliation": "Center for Computer Graphics",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Patrick",
"surname": "Daehne",
"fullName": "Patrick Daehne",
"affiliation": "Center for Computer Graphics",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Luís",
"surname": "Almeida",
"fullName": "Luís Almeida",
"affiliation": "Center for Computer Graphics",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": false,
"isOpenAccess": false,
"issueNum": "05",
"pubDate": "2002-09-01 00:00:00",
"pubType": "mags",
"pages": "52-60",
"year": "2002",
"issn": "0272-1716",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "mcg2002050038",
"articleId": "13rRUNvPLch",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "mcg2002050061",
"articleId": "13rRUEgarpX",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "1HGJ6XQen96",
"title": "Nov.",
"year": "2022",
"issueNum": "11",
"idPrefix": "tg",
"pubType": "journal",
"volume": "28",
"label": "Nov.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1GjwGcGrRmg",
"doi": "10.1109/TVCG.2022.3203109",
"abstract": "Avatar-mediated symmetric Augmented Reality (AR) telepresence has emerged with the ability to empower users located in different remote spaces to interact with each other in 3D through avatars. However, different spaces have heterogeneous structures and features, which bring difficulties in synchronizing avatar motions with real user motions and adapting avatar motions to local scenes. To overcome these issues, existing methods generate mutual movable spaces or retarget the placement of avatars. However, these methods limit the telepresence experience in a small sub-area space, fix the positions of users and avatars, or adjust the beginning/ending positions of avatars without presenting smooth transitions. Moreover, the delay between the avatar retargeting and users' real transitions can break the semantic synchronization between users' verbal conversation and perceived avatar motion. In this paper, we first examine the impact of the aforementioned transition delay and explore the preferred transition style with the existence of such delay through user studies. With the results showing a significant negative effect of avatar transition delay and providing the design choice of the transition style, we propose a Predict-and-Drive controller to diminish the delay and present the smooth transition of the telepresence avatar. We also introduce a grouping component as an upgrade to immediately calculate a coarse virtual target once the user initiates a transition, which could further eliminate the avatar transition delay. Once having the coarse virtual target or an exactly predicted target, we find the corresponding target for the avatar according to the pre-constructed mapping of objects of interest between two spaces. The avatar control component maintains an artificial potential field of the space and drives the avatar towards the target while respecting the obstacles in the physical environment. We further conduct ablation studies to evaluate the effectiveness of our proposed components.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Avatar-mediated symmetric Augmented Reality (AR) telepresence has emerged with the ability to empower users located in different remote spaces to interact with each other in 3D through avatars. However, different spaces have heterogeneous structures and features, which bring difficulties in synchronizing avatar motions with real user motions and adapting avatar motions to local scenes. To overcome these issues, existing methods generate mutual movable spaces or retarget the placement of avatars. However, these methods limit the telepresence experience in a small sub-area space, fix the positions of users and avatars, or adjust the beginning/ending positions of avatars without presenting smooth transitions. Moreover, the delay between the avatar retargeting and users' real transitions can break the semantic synchronization between users' verbal conversation and perceived avatar motion. In this paper, we first examine the impact of the aforementioned transition delay and explore the preferred transition style with the existence of such delay through user studies. With the results showing a significant negative effect of avatar transition delay and providing the design choice of the transition style, we propose a Predict-and-Drive controller to diminish the delay and present the smooth transition of the telepresence avatar. We also introduce a grouping component as an upgrade to immediately calculate a coarse virtual target once the user initiates a transition, which could further eliminate the avatar transition delay. Once having the coarse virtual target or an exactly predicted target, we find the corresponding target for the avatar according to the pre-constructed mapping of objects of interest between two spaces. The avatar control component maintains an artificial potential field of the space and drives the avatar towards the target while respecting the obstacles in the physical environment. We further conduct ablation studies to evaluate the effectiveness of our proposed components.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Avatar-mediated symmetric Augmented Reality (AR) telepresence has emerged with the ability to empower users located in different remote spaces to interact with each other in 3D through avatars. However, different spaces have heterogeneous structures and features, which bring difficulties in synchronizing avatar motions with real user motions and adapting avatar motions to local scenes. To overcome these issues, existing methods generate mutual movable spaces or retarget the placement of avatars. However, these methods limit the telepresence experience in a small sub-area space, fix the positions of users and avatars, or adjust the beginning/ending positions of avatars without presenting smooth transitions. Moreover, the delay between the avatar retargeting and users' real transitions can break the semantic synchronization between users' verbal conversation and perceived avatar motion. In this paper, we first examine the impact of the aforementioned transition delay and explore the preferred transition style with the existence of such delay through user studies. With the results showing a significant negative effect of avatar transition delay and providing the design choice of the transition style, we propose a Predict-and-Drive controller to diminish the delay and present the smooth transition of the telepresence avatar. We also introduce a grouping component as an upgrade to immediately calculate a coarse virtual target once the user initiates a transition, which could further eliminate the avatar transition delay. Once having the coarse virtual target or an exactly predicted target, we find the corresponding target for the avatar according to the pre-constructed mapping of objects of interest between two spaces. The avatar control component maintains an artificial potential field of the space and drives the avatar towards the target while respecting the obstacles in the physical environment. We further conduct ablation studies to evaluate the effectiveness of our proposed components.",
"title": "Predict-and-Drive: Avatar Motion Adaption in Room-Scale Augmented Reality Telepresence with Heterogeneous Spaces",
"normalizedTitle": "Predict-and-Drive: Avatar Motion Adaption in Room-Scale Augmented Reality Telepresence with Heterogeneous Spaces",
"fno": "09873991",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Augmented Reality",
"Avatars",
"Telerobotics",
"Virtual Reality",
"Adapting Avatar Motions",
"Aforementioned Transition Delay",
"Avatar Control Component",
"Avatar Motion Adaption",
"Avatar Retargeting",
"Avatar Transition Delay",
"Avatar Mediated Symmetric Augmented Reality Telepresence",
"Different Remote Spaces",
"Different Spaces",
"Heterogeneous Spaces",
"Mutual Movable Spaces",
"Room Scale Augmented Reality Telepresence",
"Smooth Transition",
"Telepresence Avatar",
"User Motions",
"Users",
"Avatars",
"Telepresence",
"Delays",
"Aerospace Electronics",
"Semantics",
"Real Time Systems",
"Oral Communication",
"AR Telepresence",
"Avatar Motion Adaption",
"Heterogeneous Spaces",
"Redirected Walking"
],
"authors": [
{
"givenName": "Xuanyu",
"surname": "Wang",
"fullName": "Xuanyu Wang",
"affiliation": "MOEKLINNS Lab, School of Computer Science and Technology, Xi'an Jiaotong University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hui",
"surname": "Ye",
"fullName": "Hui Ye",
"affiliation": "School of Creative Media, City University of Hong Kong, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Christian",
"surname": "Sandor",
"fullName": "Christian Sandor",
"affiliation": "Laboratoire Interdisciplinaire des Sciences du Numérique (LISN), Université Paris-Saclay / CNRS, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Weizhan",
"surname": "Zhang",
"fullName": "Weizhan Zhang",
"affiliation": "MOEKLINNS Lab, School of Computer Science and Technology, Xi'an Jiaotong University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hongbo",
"surname": "Fu",
"fullName": "Hongbo Fu",
"affiliation": "School of Creative Media, City University of Hong Kong, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "11",
"pubDate": "2022-11-01 00:00:00",
"pubType": "trans",
"pages": "3705-3714",
"year": "2022",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2016/0836/0/07504684",
"title": "MMSpace: Kinetically-augmented telepresence for small group-to-group conversations",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504684/12OmNvlg8fs",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a352",
"title": "Effects of Optical See-Through Displays on Self-Avatar Appearance in Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a352/1J7WodvTPzy",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a072",
"title": "Volumetric Avatar Reconstruction with Spatio-Temporally Offset RGBD Cameras",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a072/1MNgmRWwNUI",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797819",
"title": "Localizing Teleoperator Gaze in 360° Hosted Telepresence",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797819/1cJ1d3MdShi",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797719",
"title": "The Effect of Avatar Appearance on Social Presence in an Augmented Reality Remote Collaboration",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797719/1cJ1dVsXQDS",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798152",
"title": "The Influence of Size in Augmented Reality Telepresence Avatars",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798152/1cJ1djEUmv6",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089599",
"title": "An Optical Design for Avatar-User Co-axial Viewpoint Telepresence",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089599/1jIx8SwZIuQ",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089654",
"title": "Effects of Locomotion Style and Body Visibility of a Telepresence Avatar",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089654/1jIxd00PzX2",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/07/09257094",
"title": "Output-Sensitive Avatar Representations for Immersive Telepresence",
"doi": null,
"abstractUrl": "/journal/tg/2022/07/09257094/1oFCABrJUmA",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09523831",
"title": "Avatars for Teleconsultation: Effects of Avatar Embodiment Techniques on User Perception in 3D Asymmetric Telepresence",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09523831/1wpqru2GjIY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09873973",
"articleId": "1GjwGs0MSQg",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09873974",
"articleId": "1GjwIr0uAfu",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1HGJ7eCyeRy",
"name": "ttg202211-09873991s1-supp1-3203109.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg202211-09873991s1-supp1-3203109.mp4",
"extension": "mp4",
"size": "198 MB",
"__typename": "WebExtraType"
},
{
"id": "1HGJ76ex6zm",
"name": "ttg202211-09873991s1-supp2-3203109.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg202211-09873991s1-supp2-3203109.pdf",
"extension": "pdf",
"size": "190 kB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNyq0zFI",
"title": "May",
"year": "2020",
"issueNum": "05",
"idPrefix": "tg",
"pubType": "journal",
"volume": "26",
"label": "May",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1hpPBuW1ahy",
"doi": "10.1109/TVCG.2020.2973077",
"abstract": "In Virtual Reality, a number of studies have been conducted to assess the influence of avatar appearance, avatar control and user point of view on the Sense of Embodiment (SoE) towards a virtual avatar. However, such studies tend to explore each factor in isolation. This paper aims to better understand the inter-relations among these three factors by conducting a subjective matching experiment. In the presented experiment (n=40), participants had to match a given “optimal” SoE avatar configuration (realistic avatar, full-body motion capture, first-person point of view), starting by a “minimal” SoE configuration (minimal avatar, no control, third-person point of view), by iteratively increasing the level of each factor. The choices of the participants provide insights about their preferences and perception over the three factors considered. Moreover, the subjective matching procedure was conducted in the context of four different interaction tasks with the goal of covering a wide range of actions an avatar can do in a VE. The paper also describes a baseline experiment (n=20) which was used to define the number and order of the different levels for each factor, prior to the subjective matching experiment (e.g. different degrees of realism ranging from abstract to personalised avatars for the visual appearance). The results of the subjective matching experiment show that point of view and control levels were consistently increased by users before appearance levels when it comes to enhancing the SoE. Second, several configurations were identified with equivalent SoE as the one felt in the optimal configuration, but vary between the tasks. Taken together, our results provide valuable insights about which factors to prioritize in order to enhance the SoE towards an avatar in different tasks, and about configurations which lead to fulfilling SoE in VE.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In Virtual Reality, a number of studies have been conducted to assess the influence of avatar appearance, avatar control and user point of view on the Sense of Embodiment (SoE) towards a virtual avatar. However, such studies tend to explore each factor in isolation. This paper aims to better understand the inter-relations among these three factors by conducting a subjective matching experiment. In the presented experiment (n=40), participants had to match a given “optimal” SoE avatar configuration (realistic avatar, full-body motion capture, first-person point of view), starting by a “minimal” SoE configuration (minimal avatar, no control, third-person point of view), by iteratively increasing the level of each factor. The choices of the participants provide insights about their preferences and perception over the three factors considered. Moreover, the subjective matching procedure was conducted in the context of four different interaction tasks with the goal of covering a wide range of actions an avatar can do in a VE. The paper also describes a baseline experiment (n=20) which was used to define the number and order of the different levels for each factor, prior to the subjective matching experiment (e.g. different degrees of realism ranging from abstract to personalised avatars for the visual appearance). The results of the subjective matching experiment show that point of view and control levels were consistently increased by users before appearance levels when it comes to enhancing the SoE. Second, several configurations were identified with equivalent SoE as the one felt in the optimal configuration, but vary between the tasks. Taken together, our results provide valuable insights about which factors to prioritize in order to enhance the SoE towards an avatar in different tasks, and about configurations which lead to fulfilling SoE in VE.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In Virtual Reality, a number of studies have been conducted to assess the influence of avatar appearance, avatar control and user point of view on the Sense of Embodiment (SoE) towards a virtual avatar. However, such studies tend to explore each factor in isolation. This paper aims to better understand the inter-relations among these three factors by conducting a subjective matching experiment. In the presented experiment (n=40), participants had to match a given “optimal” SoE avatar configuration (realistic avatar, full-body motion capture, first-person point of view), starting by a “minimal” SoE configuration (minimal avatar, no control, third-person point of view), by iteratively increasing the level of each factor. The choices of the participants provide insights about their preferences and perception over the three factors considered. Moreover, the subjective matching procedure was conducted in the context of four different interaction tasks with the goal of covering a wide range of actions an avatar can do in a VE. The paper also describes a baseline experiment (n=20) which was used to define the number and order of the different levels for each factor, prior to the subjective matching experiment (e.g. different degrees of realism ranging from abstract to personalised avatars for the visual appearance). The results of the subjective matching experiment show that point of view and control levels were consistently increased by users before appearance levels when it comes to enhancing the SoE. Second, several configurations were identified with equivalent SoE as the one felt in the optimal configuration, but vary between the tasks. Taken together, our results provide valuable insights about which factors to prioritize in order to enhance the SoE towards an avatar in different tasks, and about configurations which lead to fulfilling SoE in VE.",
"title": "Avatar and Sense of Embodiment: Studying the Relative Preference Between Appearance, Control and Point of View",
"normalizedTitle": "Avatar and Sense of Embodiment: Studying the Relative Preference Between Appearance, Control and Point of View",
"fno": "08998305",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Avatars",
"Virtual Reality",
"Visual Appearance",
"Subjective Matching Experiment",
"Control Levels",
"Appearance Levels",
"Equivalent So E",
"Optimal Configuration",
"Relative Preference",
"Virtual Reality",
"Avatar Appearance",
"Avatar Control",
"User Point",
"Virtual Avatar",
"Realistic Avatar",
"Minimal So E Configuration",
"Minimal Avatar",
"Subjective Matching Procedure",
"Interaction Tasks",
"Baseline Experiment",
"Personalised Avatars",
"Optimal So E Avatar Configuration",
"Avatars",
"Task Analysis",
"Animation",
"Visualization",
"Three Dimensional Displays",
"Legged Locomotion",
"Avatar",
"Sense Of Embodiment",
"Immersive Virtual Reality",
"Psychophysics",
"Subjective Matching Technique"
],
"authors": [
{
"givenName": "Rebecca",
"surname": "Fribourg",
"fullName": "Rebecca Fribourg",
"affiliation": "Inria, Univ Rennes, CNRS, IRISA, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ferran",
"surname": "Argelaguet",
"fullName": "Ferran Argelaguet",
"affiliation": "Inria, Univ Rennes, CNRS, IRISA, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Anatole",
"surname": "Lécuyer",
"fullName": "Anatole Lécuyer",
"affiliation": "Inria, Univ Rennes, CNRS, IRISA, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ludovic",
"surname": "Hoyet",
"fullName": "Ludovic Hoyet",
"affiliation": "Inria, Univ Rennes, CNRS, IRISA, France",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "05",
"pubDate": "2020-05-01 00:00:00",
"pubType": "trans",
"pages": "2062-2072",
"year": "2020",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2015/1727/0/07223406",
"title": "Self-characterstics and sound in immersive virtual reality — Estimating avatar weight from footstep sounds",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223406/12OmNAlvHUH",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08448293",
"title": "Studying the Sense of Embodiment in VR Shared Experiences",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08448293/13bd1AIBM1S",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/04/ttg2013040591",
"title": "An Evaluation of Self-Avatar Eye Movement for Virtual Embodiment",
"doi": null,
"abstractUrl": "/journal/tg/2013/04/ttg2013040591/13rRUyYBlgz",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09911682",
"title": "Effect of Vibrations on Impression of Walking and Embodiment With First- and Third-Person Avatar",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09911682/1HeiWQWKlTG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a503",
"title": "Studying “Avatar Transitions” in Augmented Reality: Influence on Sense of Embodiment and Physiological Activity",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a503/1J7W9twFolO",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a083",
"title": "I'm Transforming! Effects of Visual Transitions to Change of Avatar on the Sense of Embodiment in AR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a083/1MNgRmjl6Zq",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798263",
"title": "EEG Can Be Used to Measure Embodiment When Controlling a Walking Self-Avatar",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798263/1cJ1gj5NtQc",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089654",
"title": "Effects of Locomotion Style and Body Visibility of a Telepresence Avatar",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089654/1jIxd00PzX2",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090634",
"title": "Rhythmic proprioceptive stimulation improves embodiment in a walking avatar when added to visual stimulation",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090634/1jIxkrgIlEY",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a452",
"title": "Studying the Inter-Relation Between Locomotion Techniques and Embodiment in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a452/1pysvNRUnD2",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "08999805",
"articleId": "1hpPCtKIAaA",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08998379",
"articleId": "1hrXhy1IFpu",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1iEfNeDRTKU",
"name": "ttg202005-08998305s1-supp1-2973077.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg202005-08998305s1-supp1-2973077.mp4",
"extension": "mp4",
"size": "15.2 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
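Each row in this dump is a single JSON object shaped like the record above (issue, article, recommendedArticles, adjacentArticles, webExtras, articleVideos). Because the auto-generated article.keywords lists occasionally contain split acronyms (e.g. "So E") and exact duplicates, a small cleanup pass is useful before reusing them. The following is a minimal sketch only: the file name tvcg_records.jsonl and the helper clean_keywords are introduced here for illustration and are not part of the dataset.

```python
import json

def clean_keywords(record: dict) -> list[str]:
    """Return article keywords with split acronyms re-joined and exact
    duplicates removed, preserving the original order."""
    seen, cleaned = set(), []
    for kw in record["article"].get("keywords", []):
        kw = kw.replace("So E", "SoE")  # repair a tokenisation artifact seen in this dump
        key = kw.casefold()
        if key not in seen:
            seen.add(key)
            cleaned.append(kw)
    return cleaned

if __name__ == "__main__":
    # Hypothetical path: one JSON object per line, shaped like the records in this dump.
    with open("tvcg_records.jsonl", encoding="utf-8") as fh:
        for line in fh:
            record = json.loads(line)
            print(record["article"]["title"], "->", clean_keywords(record))
```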
{
"issue": {
"id": "1Ax5KStiZmU",
"title": "March",
"year": "2022",
"issueNum": "03",
"idPrefix": "tg",
"pubType": "journal",
"volume": "28",
"label": "March",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1mtsbpUceNG",
"doi": "10.1109/TVCG.2020.3018458",
"abstract": "Rapidly developing technologies are realizing a 3D telepresence, in which geographically separated users can interact with each other through their virtual avatars. In this article, we present novel methods to determine the avatar’s position in an indoor space to preserve the semantics of the user’s position in a dissimilar indoor space with different space configurations and furniture layouts. To this end, we first perform a user survey on the preferred avatar placements for various indoor configurations and user placements, and identify a set of related attributes, including interpersonal relation, visual attention, pose, and spatial characteristics, and quantify these attributes with a set of features. By using the obtained dataset and identified features, we train a neural network that predicts the similarity between two placements. Next, we develop an avatar placement method that preserves the semantics of the placement of the remote user in a different space as much as possible. We show the effectiveness of our methods by implementing a prototype AR-based telepresence system and user evaluations.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Rapidly developing technologies are realizing a 3D telepresence, in which geographically separated users can interact with each other through their virtual avatars. In this article, we present novel methods to determine the avatar’s position in an indoor space to preserve the semantics of the user’s position in a dissimilar indoor space with different space configurations and furniture layouts. To this end, we first perform a user survey on the preferred avatar placements for various indoor configurations and user placements, and identify a set of related attributes, including interpersonal relation, visual attention, pose, and spatial characteristics, and quantify these attributes with a set of features. By using the obtained dataset and identified features, we train a neural network that predicts the similarity between two placements. Next, we develop an avatar placement method that preserves the semantics of the placement of the remote user in a different space as much as possible. We show the effectiveness of our methods by implementing a prototype AR-based telepresence system and user evaluations.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Rapidly developing technologies are realizing a 3D telepresence, in which geographically separated users can interact with each other through their virtual avatars. In this article, we present novel methods to determine the avatar’s position in an indoor space to preserve the semantics of the user’s position in a dissimilar indoor space with different space configurations and furniture layouts. To this end, we first perform a user survey on the preferred avatar placements for various indoor configurations and user placements, and identify a set of related attributes, including interpersonal relation, visual attention, pose, and spatial characteristics, and quantify these attributes with a set of features. By using the obtained dataset and identified features, we train a neural network that predicts the similarity between two placements. Next, we develop an avatar placement method that preserves the semantics of the placement of the remote user in a different space as much as possible. We show the effectiveness of our methods by implementing a prototype AR-based telepresence system and user evaluations.",
"title": "Placement Retargeting of Virtual Avatars to Dissimilar Indoor Environments",
"normalizedTitle": "Placement Retargeting of Virtual Avatars to Dissimilar Indoor Environments",
"fno": "09173828",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Avatars",
"Furniture",
"Radiowave Propagation",
"Virtual Reality",
"Related Attributes",
"Interpersonal Relation",
"Avatar Placement Method",
"Semantics",
"Remote User",
"User Evaluations",
"Placement Retargeting",
"Virtual Avatars",
"Dissimilar Indoor Environments",
"Geographically Separated Users",
"Dissimilar Indoor Space",
"Different Space Configurations",
"Furniture Layouts",
"User Survey",
"Preferred Avatar Placements",
"Indoor Configurations",
"User Placements",
"Avatars",
"Telepresence",
"Semantics",
"Three Dimensional Displays",
"Layout",
"Prototypes",
"Indoor Environment",
"Telepresence",
"Avatar",
"Augmented Reality",
"Similarity Learning"
],
"authors": [
{
"givenName": "Leonard",
"surname": "Yoon",
"fullName": "Leonard Yoon",
"affiliation": "Korea Advanced Institute of Science and Technology (KAIST), Daejeon, South Korea",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Dongseok",
"surname": "Yang",
"fullName": "Dongseok Yang",
"affiliation": "Korea Advanced Institute of Science and Technology (KAIST), Daejeon, South Korea",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jaehyun",
"surname": "Kim",
"fullName": "Jaehyun Kim",
"affiliation": "Korea Advanced Institute of Science and Technology (KAIST), Daejeon, South Korea",
"__typename": "ArticleAuthorType"
},
{
"givenName": "ChoongHo",
"surname": "Chung",
"fullName": "ChoongHo Chung",
"affiliation": "Korea Advanced Institute of Science and Technology (KAIST), Daejeon, South Korea",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Sung-Hee",
"surname": "Lee",
"fullName": "Sung-Hee Lee",
"affiliation": "Korea Advanced Institute of Science and Technology (KAIST), Daejeon, South Korea",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "03",
"pubDate": "2022-03-01 00:00:00",
"pubType": "trans",
"pages": "1619-1633",
"year": "2022",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/dcoss/2012/4707/0/4707a209",
"title": "Optimal Relay Placement for Indoor Sensor Networks",
"doi": null,
"abstractUrl": "/proceedings-article/dcoss/2012/4707a209/12OmNzXFoHZ",
"parentPublication": {
"id": "proceedings/dcoss/2012/4707/0",
"title": "2012 IEEE 8th International Conference on Distributed Computing in Sensor Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/11/07523447",
"title": "Retargeting Human-Object Interaction to Virtual Avatars",
"doi": null,
"abstractUrl": "/journal/tg/2016/11/07523447/13rRUzp02ot",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09873991",
"title": "Predict-and-Drive: Avatar Motion Adaption in Room-Scale Augmented Reality Telepresence with Heterogeneous Spaces",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09873991/1GjwGcGrRmg",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a352",
"title": "Effects of Optical See-Through Displays on Self-Avatar Appearance in Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a352/1J7WodvTPzy",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797819",
"title": "Localizing Teleoperator Gaze in 360° Hosted Telepresence",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797819/1cJ1d3MdShi",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797719",
"title": "The Effect of Avatar Appearance on Social Presence in an Augmented Reality Remote Collaboration",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797719/1cJ1dVsXQDS",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798152",
"title": "The Influence of Size in Augmented Reality Telepresence Avatars",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798152/1cJ1djEUmv6",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089654",
"title": "Effects of Locomotion Style and Body Visibility of a Telepresence Avatar",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089654/1jIxd00PzX2",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089612",
"title": "Effects of volumetric capture avatars on social presence in immersive virtual environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089612/1jIxdAmCCJi",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09523831",
"title": "Avatars for Teleconsultation: Effects of Avatar Embodiment Techniques on User Perception in 3D Asymmetric Telepresence",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09523831/1wpqru2GjIY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09186170",
"articleId": "1mP2AYgyLQY",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09382912",
"articleId": "1saZsRW0LYY",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1Ax5XJ7Xx5e",
"name": "ttg202203-09173828s1-supp1-3018458.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg202203-09173828s1-supp1-3018458.mp4",
"extension": "mp4",
"size": "30.6 MB",
"__typename": "WebExtraType"
},
{
"id": "1Ax5Y7u3x4s",
"name": "ttg202203-09173828s1-supp2-3018458.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg202203-09173828s1-supp2-3018458.mp4",
"extension": "mp4",
"size": "9.25 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
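The webExtras entries in these records report supplemental-file sizes as human-readable strings ("30.6 MB", "9.25 MB", "240 MB"). If those sizes need to be summed or compared, they first have to be converted to bytes. A minimal sketch, assuming the sizes always follow the "<number> <unit>" pattern seen in this dump and using decimal units; parse_size and total_supplement_bytes are names introduced here, not dataset fields.

```python
_UNITS = {"KB": 10**3, "MB": 10**6, "GB": 10**9}

def parse_size(size: str) -> int:
    """Convert a human-readable size such as '9.25 MB' to bytes (decimal units)."""
    value, unit = size.split()
    return int(float(value) * _UNITS[unit.upper()])

def total_supplement_bytes(record: dict) -> int:
    """Sum the sizes of all webExtras attached to one record."""
    return sum(parse_size(extra["size"]) for extra in record.get("webExtras", []))

# e.g. parse_size("30.6 MB") == 30_600_000
```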
{
"issue": {
"id": "1DGRZtSiOdy",
"title": "July",
"year": "2022",
"issueNum": "07",
"idPrefix": "tg",
"pubType": "journal",
"volume": "28",
"label": "July",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1oFCABrJUmA",
"doi": "10.1109/TVCG.2020.3037360",
"abstract": "In this article, we propose a system design and implementation for output-sensitive reconstruction, transmission and rendering of 3D video avatars in distributed virtual environments. In our immersive telepresence system, users are captured by multiple RGBD sensors connected to a server that performs geometry reconstruction based on viewing feedback from remote telepresence parties. This feedback and reconstruction loop enables visibility-aware level-of-detail reconstruction of video avatars regarding geometry and texture data, and considers individual and groups of collocated users. Our evaluation reveals that our approach leads to a significant reduction of reconstruction times, network bandwidth requirements and round-trip times as well as rendering costs in many situations.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this article, we propose a system design and implementation for output-sensitive reconstruction, transmission and rendering of 3D video avatars in distributed virtual environments. In our immersive telepresence system, users are captured by multiple RGBD sensors connected to a server that performs geometry reconstruction based on viewing feedback from remote telepresence parties. This feedback and reconstruction loop enables visibility-aware level-of-detail reconstruction of video avatars regarding geometry and texture data, and considers individual and groups of collocated users. Our evaluation reveals that our approach leads to a significant reduction of reconstruction times, network bandwidth requirements and round-trip times as well as rendering costs in many situations.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this article, we propose a system design and implementation for output-sensitive reconstruction, transmission and rendering of 3D video avatars in distributed virtual environments. In our immersive telepresence system, users are captured by multiple RGBD sensors connected to a server that performs geometry reconstruction based on viewing feedback from remote telepresence parties. This feedback and reconstruction loop enables visibility-aware level-of-detail reconstruction of video avatars regarding geometry and texture data, and considers individual and groups of collocated users. Our evaluation reveals that our approach leads to a significant reduction of reconstruction times, network bandwidth requirements and round-trip times as well as rendering costs in many situations.",
"title": "Output-Sensitive Avatar Representations for Immersive Telepresence",
"normalizedTitle": "Output-Sensitive Avatar Representations for Immersive Telepresence",
"fno": "09257094",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Avatars",
"Image Reconstruction",
"Image Sensors",
"Rendering Computer Graphics",
"Virtual Reality",
"Rendering",
"Video Avatars",
"Distributed Virtual Environments",
"Immersive Telepresence System",
"Multiple RGBD Sensors",
"Geometry Reconstruction",
"Viewing Feedback",
"Remote Telepresence Parties",
"Reconstruction Loop",
"Visibility Aware Level Of Detail Reconstruction",
"Texture Data",
"Collocated Users",
"Reconstruction Times",
"Output Sensitive Avatar Representations",
"System Design",
"Three Dimensional Displays",
"Avatars",
"Telepresence",
"Rendering Computer Graphics",
"Streaming Media",
"Geometry",
"Image Reconstruction",
"Immersive Telepresence",
"Avatars",
"Output Sensitive Rendering",
"Distributed Virtual Environments"
],
"authors": [
{
"givenName": "Adrian",
"surname": "Kreskowski",
"fullName": "Adrian Kreskowski",
"affiliation": "Virtual Reality and Visualization Research Group, Bauhaus-Universität Weimar, Weimar, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Stephan",
"surname": "Beck",
"fullName": "Stephan Beck",
"affiliation": "Virtual Reality and Visualization Research Group, Bauhaus-Universität Weimar, Weimar, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Bernd",
"surname": "Froehlich",
"fullName": "Bernd Froehlich",
"affiliation": "Virtual Reality and Visualization Research Group, Bauhaus-Universität Weimar, Weimar, Germany",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "07",
"pubDate": "2022-07-01 00:00:00",
"pubType": "trans",
"pages": "2697-2709",
"year": "2022",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tg/2022/05/09714051",
"title": "Augmenting Immersive Telepresence Experience with a Virtual Body",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09714051/1B0Y0I5xWyk",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09873991",
"title": "Predict-and-Drive: Avatar Motion Adaption in Room-Scale Augmented Reality Telepresence with Heterogeneous Spaces",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09873991/1GjwGcGrRmg",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a072",
"title": "Volumetric Avatar Reconstruction with Spatio-Temporally Offset RGBD Cameras",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a072/1MNgmRWwNUI",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798152",
"title": "The Influence of Size in Augmented Reality Telepresence Avatars",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798152/1cJ1djEUmv6",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089599",
"title": "An Optical Design for Avatar-User Co-axial Viewpoint Telepresence",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089599/1jIx8SwZIuQ",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089654",
"title": "Effects of Locomotion Style and Body Visibility of a Telepresence Avatar",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089654/1jIxd00PzX2",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090433",
"title": "Virtual Tour: An Immersive Low Cost Telepresence System",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090433/1jIxrSY8cZa",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09523831",
"title": "Avatars for Teleconsultation: Effects of Avatar Embodiment Techniques on User Perception in 3D Asymmetric Telepresence",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09523831/1wpqru2GjIY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900i645",
"title": "Dynamic Neural Radiance Fields for Monocular 4D Facial Avatar Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900i645/1yeHVNYk40M",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a451",
"title": "The Owl: Immersive Telepresence Communication for Hybrid Conferences",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a451/1yeQG4fi6Dm",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09262073",
"articleId": "1oPzXkgPH0I",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09266764",
"articleId": "1oZxEim72LK",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1DGS05PJ3Es",
"name": "ttg202207-09257094s1-supp1-3037360.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg202207-09257094s1-supp1-3037360.mp4",
"extension": "mp4",
"size": "240 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
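The recommendedArticles entries store site-relative abstractUrl paths, whereas the webExtras locations are absolute computer.org/csdl URLs. Assuming the same https://www.computer.org/csdl prefix applies to abstract pages (an assumption based on those locations, not something stated in the dump), the paths can be resolved as below; collect_recommendations is a name introduced here for illustration.

```python
CSDL_BASE = "https://www.computer.org/csdl"  # assumed prefix, mirrors the webExtras locations

def collect_recommendations(record: dict) -> list[tuple[str, str]]:
    """Pair each recommended article's title with an absolute URL built from its abstractUrl."""
    return [
        (rec["title"], CSDL_BASE + rec["abstractUrl"])
        for rec in record.get("recommendedArticles", [])
        if rec.get("abstractUrl")
    ]
```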
{
"issue": {
"id": "12OmNwoxSiH",
"title": "January-March",
"year": "1999",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "5",
"label": "January-March",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxBa5nf",
"doi": "10.1109/2945.764872",
"abstract": "Abstract—In this paper, we describe a new method for surgery simulation including a volumetric model built from medical images and an elastic modeling of the deformations. The physical model is based on elasticity theory which suitably links the shape of deformable bodies and the forces associated with the deformation. A real-time computation of the deformation is possible thanks to a preprocessing of elementary deformations derived from a finite element method. This method has been implemented in a system including a force feedback device and a collision detection algorithm. The simulator works in real-time with a high resolution liver model.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Abstract—In this paper, we describe a new method for surgery simulation including a volumetric model built from medical images and an elastic modeling of the deformations. The physical model is based on elasticity theory which suitably links the shape of deformable bodies and the forces associated with the deformation. A real-time computation of the deformation is possible thanks to a preprocessing of elementary deformations derived from a finite element method. This method has been implemented in a system including a force feedback device and a collision detection algorithm. The simulator works in real-time with a high resolution liver model.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Abstract—In this paper, we describe a new method for surgery simulation including a volumetric model built from medical images and an elastic modeling of the deformations. The physical model is based on elasticity theory which suitably links the shape of deformable bodies and the forces associated with the deformation. A real-time computation of the deformation is possible thanks to a preprocessing of elementary deformations derived from a finite element method. This method has been implemented in a system including a force feedback device and a collision detection algorithm. The simulator works in real-time with a high resolution liver model.",
"title": "Real-Time Elastic Deformations of Soft Tissues for Surgery Simulation",
"normalizedTitle": "Real-Time Elastic Deformations of Soft Tissues for Surgery Simulation",
"fno": "v0062",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Surgery Simulation",
"Deformable Models",
"Real Time",
"Force Feedback",
"Soft Tissue Modeling",
"Finite Element"
],
"authors": [
{
"givenName": "Stéphane",
"surname": "Cotin",
"fullName": "Stéphane Cotin",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hervé",
"surname": "Delingette",
"fullName": "Hervé Delingette",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Nicholas",
"surname": "Ayache",
"fullName": "Nicholas Ayache",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": false,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "1999-01-01 00:00:00",
"pubType": "trans",
"pages": "62-73",
"year": "1999",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "v0047",
"articleId": "13rRUILLkve",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "v0074",
"articleId": "13rRUynHuiT",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNzV70s0",
"title": "May",
"year": "2015",
"issueNum": "05",
"idPrefix": "tg",
"pubType": "journal",
"volume": "21",
"label": "May",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUyuegp8",
"doi": "10.1109/TVCG.2014.2377772",
"abstract": "This paper presents a method for real-time augmented reality of internal liver structures during minimally invasive hepatic surgery. Vessels and tumors computed from pre-operative CT scans can be overlaid onto the laparoscopic view for surgery guidance. Compared to current methods, our method is able to locate the in-depth positions of the tumors based on partial three-dimensional liver tissue motion using a real-time biomechanical model. This model permits to properly handle the motion of internal structures even in the case of anisotropic or heterogeneous tissues, as it is the case for the liver and many anatomical structures. Experimentations conducted on phantom liver permits to measure the accuracy of the augmentation while real-time augmentation on in vivo human liver during real surgery shows the benefits of such an approach for minimally invasive surgery.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper presents a method for real-time augmented reality of internal liver structures during minimally invasive hepatic surgery. Vessels and tumors computed from pre-operative CT scans can be overlaid onto the laparoscopic view for surgery guidance. Compared to current methods, our method is able to locate the in-depth positions of the tumors based on partial three-dimensional liver tissue motion using a real-time biomechanical model. This model permits to properly handle the motion of internal structures even in the case of anisotropic or heterogeneous tissues, as it is the case for the liver and many anatomical structures. Experimentations conducted on phantom liver permits to measure the accuracy of the augmentation while real-time augmentation on in vivo human liver during real surgery shows the benefits of such an approach for minimally invasive surgery.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper presents a method for real-time augmented reality of internal liver structures during minimally invasive hepatic surgery. Vessels and tumors computed from pre-operative CT scans can be overlaid onto the laparoscopic view for surgery guidance. Compared to current methods, our method is able to locate the in-depth positions of the tumors based on partial three-dimensional liver tissue motion using a real-time biomechanical model. This model permits to properly handle the motion of internal structures even in the case of anisotropic or heterogeneous tissues, as it is the case for the liver and many anatomical structures. Experimentations conducted on phantom liver permits to measure the accuracy of the augmentation while real-time augmentation on in vivo human liver during real surgery shows the benefits of such an approach for minimally invasive surgery.",
"title": "Impact of Soft Tissue Heterogeneity on Augmented Reality for Liver Surgery",
"normalizedTitle": "Impact of Soft Tissue Heterogeneity on Augmented Reality for Liver Surgery",
"fno": "06987340",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Surgery",
"Computational Modeling",
"Liver",
"Biological System Modeling",
"Three Dimensional Displays",
"Deformable Models",
"Biomechanics",
"Computer Assisted Surgery",
"Image Guided Simulation",
"Biomechanical Modeling",
"Real Time Augmented Reality",
"Computer Assisted Surgery",
"Image Guided Simulation",
"Biomechanical Modeling",
"Real Time Augmented Reality"
],
"authors": [
{
"givenName": "Nazim",
"surname": "Haouchine",
"fullName": "Nazim Haouchine",
"affiliation": ", INRIA and Lille University, Villeneuve-d’Ascq, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Stephane",
"surname": "Cotin",
"fullName": "Stephane Cotin",
"affiliation": ", INRIA and Lille University, Villeneuve-d’Ascq, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Igor",
"surname": "Peterlik",
"fullName": "Igor Peterlik",
"affiliation": "Institut Hospitalier Universitaire (IHU) de Strasbourg, Masaryk University, Strasbourg, France and is also with the CERIT-SC, ICS",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jeremie",
"surname": "Dequidt",
"fullName": "Jeremie Dequidt",
"affiliation": ", INRIA and Lille University, Villeneuve-d’Ascq, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Mario Sanz",
"surname": "Lopez",
"fullName": "Mario Sanz Lopez",
"affiliation": ", INRIA and Lille University, Villeneuve-d’Ascq, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Erwan",
"surname": "Kerrien",
"fullName": "Erwan Kerrien",
"affiliation": ", INRIA and Lorraine University, Nancy, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Marie-Odile",
"surname": "Berger",
"fullName": "Marie-Odile Berger",
"affiliation": ", INRIA and Lorraine University, Nancy, France",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "05",
"pubDate": "2015-05-01 00:00:00",
"pubType": "trans",
"pages": "584-597",
"year": "2015",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iciibms/2017/6664/0/08279688",
"title": "Verification of accuracy of knife tip position estimation in liver surgery support system",
"doi": null,
"abstractUrl": "/proceedings-article/iciibms/2017/08279688/12OmNAk5HQk",
"parentPublication": {
"id": "proceedings/iciibms/2017/6664/0",
"title": "2017 International Conference on Intelligent Informatics and Biomedical Sciences (ICIIBMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2013/2869/0/06671780",
"title": "Image-guided simulation of heterogeneous tissue deformation for augmented reality during hepatic surgery",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2013/06671780/12OmNAtK4hi",
"parentPublication": {
"id": "proceedings/ismar/2013/2869/0",
"title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/robio/2006/0570/0/04142051",
"title": "Research on Control System of Radio Frequency Ablation Surgical Robot",
"doi": null,
"abstractUrl": "/proceedings-article/robio/2006/04142051/12OmNB8CiXM",
"parentPublication": {
"id": "proceedings/robio/2006/0570/0",
"title": "IEEE International Conference on Robotics and Biomimetics - ROBIO2006",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2008/2174/0/04761741",
"title": "Real-time update of 3D deformable models for computer aided liver surgery",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04761741/12OmNrMHOmG",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/haptics/2006/0226/0/02260039",
"title": "Microfabricated Instruments for Fetal Cardiac Surgery: Experiments on Haptic Tissue Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2006/02260039/12OmNvCRgkA",
"parentPublication": {
"id": "proceedings/haptics/2006/0226/0",
"title": "2006 14th Symposium on Haptic Interfaces for Virtual Environment and Teleoperator Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2015/7673/0/7673a264",
"title": "The Simulation of Delineation and Splitting in Virtual Liver Surgery",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2015/7673a264/12OmNyPQ4SW",
"parentPublication": {
"id": "proceedings/icvrv/2015/7673/0",
"title": "2015 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2006/06/mcg2006060036",
"title": "Liver Surgery Planning Using Virtual Reality",
"doi": null,
"abstractUrl": "/magazine/cg/2006/06/mcg2006060036/13rRUx0xPCD",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2021/0126/0/09669823",
"title": "CC-DenseUNet: Densely Connected U-Net with Criss-Cross Attention for Liver and Tumor Segmentation in CT Volumes",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2021/09669823/1A9WrYktM2I",
"parentPublication": {
"id": "proceedings/bibm/2021/0126/0",
"title": "2021 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icitbe/2021/0099/0/009900a304",
"title": "Design and 3D Printing of Liver Surgical Guide Template Based on Mimics Liver Model Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/icitbe/2021/009900a304/1AH7MiebmZq",
"parentPublication": {
"id": "proceedings/icitbe/2021/0099/0",
"title": "2021 International Conference on Information Technology and Biomedical Engineering (ICITBE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a768",
"title": "Collaborative VR for Liver Surgery Planning using Wearable Data Gloves: An Interactive Demonstration",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a768/1tnXmv1kxNe",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "06912003",
"articleId": "13rRUwInvsU",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07000596",
"articleId": "13rRUwbJD4M",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNrFBPWq",
"title": "September-October",
"year": "2006",
"issueNum": "05",
"idPrefix": "tg",
"pubType": "journal",
"volume": "12",
"label": "September-October",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwInvJ9",
"doi": "10.1109/TVCG.2006.121",
"abstract": "Centralized techniques have been used until now when automatically calibrating (both geometrically and photometrically) large high-resolution displays created by tiling multiple projectors in a 2D array. A centralized server managed all the projectors and also the camera(s) used to calibrate the display. In this paper, we propose an asynchronous distributed calibration methodology via a display unit called the plug-and-play projector (PPP). The PPP consists of a projector, camera, computation and communication unit, thus creating a self-sufficient module that enables an asynchronous distributed architecture for multi-projector displays. We present a single-program-multiple-data (SPMD) calibration algorithm that runs on each PPP and achieves a truly scalable and reconfigurable display without any input from the user. It instruments novel capabilities like adding/removing PPPs from the display dynamically, detecting faults, and reshaping the display to a reasonable rectangular shape to react to the addition/removal/faults. To the best of our knowledge, this is the first attempt to realize a completely asynchronous and distributed calibration architecture and methodology for multi-projector displays.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Centralized techniques have been used until now when automatically calibrating (both geometrically and photometrically) large high-resolution displays created by tiling multiple projectors in a 2D array. A centralized server managed all the projectors and also the camera(s) used to calibrate the display. In this paper, we propose an asynchronous distributed calibration methodology via a display unit called the plug-and-play projector (PPP). The PPP consists of a projector, camera, computation and communication unit, thus creating a self-sufficient module that enables an asynchronous distributed architecture for multi-projector displays. We present a single-program-multiple-data (SPMD) calibration algorithm that runs on each PPP and achieves a truly scalable and reconfigurable display without any input from the user. It instruments novel capabilities like adding/removing PPPs from the display dynamically, detecting faults, and reshaping the display to a reasonable rectangular shape to react to the addition/removal/faults. To the best of our knowledge, this is the first attempt to realize a completely asynchronous and distributed calibration architecture and methodology for multi-projector displays.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Centralized techniques have been used until now when automatically calibrating (both geometrically and photometrically) large high-resolution displays created by tiling multiple projectors in a 2D array. A centralized server managed all the projectors and also the camera(s) used to calibrate the display. In this paper, we propose an asynchronous distributed calibration methodology via a display unit called the plug-and-play projector (PPP). The PPP consists of a projector, camera, computation and communication unit, thus creating a self-sufficient module that enables an asynchronous distributed architecture for multi-projector displays. We present a single-program-multiple-data (SPMD) calibration algorithm that runs on each PPP and achieves a truly scalable and reconfigurable display without any input from the user. It instruments novel capabilities like adding/removing PPPs from the display dynamically, detecting faults, and reshaping the display to a reasonable rectangular shape to react to the addition/removal/faults. To the best of our knowledge, this is the first attempt to realize a completely asynchronous and distributed calibration architecture and methodology for multi-projector displays.",
"title": "Asynchronous Distributed Calibration for Scalable and Reconfigurable Multi-Projector Displays",
"normalizedTitle": "Asynchronous Distributed Calibration for Scalable and Reconfigurable Multi-Projector Displays",
"fno": "v1101",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Calibration",
"Cameras",
"Client Server Systems",
"Computer Displays",
"Distributed Algorithms",
"Asynchronous Distributed Calibration",
"Reconfigurable Multiprojector Displays",
"Centralized Technique",
"High Resolution Displays",
"Camera",
"Plug And Play Projector",
"Single Program Multiple Data Calibration Algorithm",
"Calibration",
"Photometry",
"Two Dimensional Displays",
"Project Management",
"Cameras",
"Computer Displays",
"Distributed Computing",
"Computer Architecture",
"Instruments",
"Fault Detection",
"Multi Projector Displays",
"Projector Camera Systems",
"Geometric And Color Calibration",
"Distributed Algorithms"
],
"authors": [
{
"givenName": "Ezekiel S.",
"surname": "Bhasker",
"fullName": "Ezekiel S. Bhasker",
"affiliation": "IEEE",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Pinaki",
"surname": "Sinha",
"fullName": "Pinaki Sinha",
"affiliation": "IEEE",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Aditi",
"surname": "Majumder",
"fullName": "Aditi Majumder",
"affiliation": "IEEE",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "05",
"pubDate": "2006-09-01 00:00:00",
"pubType": "trans",
"pages": "1101-1108",
"year": "2006",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icig/2004/2244/0/01410480",
"title": "A survey of multi-projector tiled display wall construction",
"doi": null,
"abstractUrl": "/proceedings-article/icig/2004/01410480/12OmNAWH9up",
"parentPublication": {
"id": "proceedings/icig/2004/2244/0",
"title": "Proceedings. Third International Conference on Image and Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2017/4822/0/07926707",
"title": "Automatic Calibration of a Multiple-Projector Spherical Fish Tank VR Display",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2017/07926707/12OmNAoDhTe",
"parentPublication": {
"id": "proceedings/wacv/2017/4822/0",
"title": "2017 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2016/3641/0/3641a063",
"title": "Practical and Precise Projector-Camera Calibration",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2016/3641a063/12OmNB7cjhR",
"parentPublication": {
"id": "proceedings/ismar/2016/3641/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2009/3994/0/05204317",
"title": "Geometric video projector auto-calibration",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2009/05204317/12OmNCxtyKC",
"parentPublication": {
"id": "proceedings/cvprw/2009/3994/0",
"title": "2009 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2000/6478/0/64780017",
"title": "Achieving Color Uniformity Across Multi-Projector Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2000/64780017/12OmNwlHSVv",
"parentPublication": {
"id": "proceedings/ieee-vis/2000/6478/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2009/3994/0/05204322",
"title": "Color calibration of multi-projector displays through automatic optimization of hardware settings",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2009/05204322/12OmNy6Zs2G",
"parentPublication": {
"id": "proceedings/cvprw/2009/3994/0",
"title": "2009 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2014/4308/0/4308a449",
"title": "Projection Center Calibration for a Co-located Projector Camera System",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2014/4308a449/12OmNypIYA4",
"parentPublication": {
"id": "proceedings/cvprw/2014/4308/0",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2004/2158/2/01315159",
"title": "A flexible projector-camera system for multi-planar displays",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2004/01315159/12OmNzBwGyN",
"parentPublication": {
"id": "proceedings/cvpr/2004/2158/2",
"title": "Proceedings of the 2004 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2004. CVPR 2004.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2007/06/v1368",
"title": "Registration Techniques for Using Imperfect and Par tially Calibrated Devices in Planar Multi-Projector Displays",
"doi": null,
"abstractUrl": "/journal/tg/2007/06/v1368/13rRUwInvyp",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2009/06/ttg2009061317",
"title": "Color Seamlessness in Multi-Projector Displays Using Constrained Gamut Morphing",
"doi": null,
"abstractUrl": "/journal/tg/2009/06/ttg2009061317/13rRUwgQpqH",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "v1093",
"articleId": "13rRUx0xPTJ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "v1109",
"articleId": "13rRUwcAqq6",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
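Since every record carries both issue metadata (volume, issueNum) and an article pubDate in "YYYY-MM-DD HH:MM:SS" form, the rows can be regrouped into issues for browsing. A minimal sketch under those assumptions; index_by_issue is a name introduced here, and records is any iterable of row dicts shaped like those in this dump.

```python
from collections import defaultdict
from datetime import datetime

def index_by_issue(records) -> dict[tuple[str, str], list[str]]:
    """Group article titles by (volume, issueNum), ordered by publication date."""
    index = defaultdict(list)
    for rec in records:
        art, issue = rec["article"], rec["issue"]
        when = datetime.strptime(art["pubDate"], "%Y-%m-%d %H:%M:%S")
        index[(issue["volume"], issue["issueNum"])].append((when, art["title"]))
    return {key: [title for _, title in sorted(pairs)] for key, pairs in index.items()}
```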
{
"issue": {
"id": "12OmNxvO04Q",
"title": "Jan.",
"year": "2017",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "23",
"label": "Jan.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxZ0o1E",
"doi": "10.1109/TVCG.2016.2598430",
"abstract": "We present a method for interactive global illumination of both static and time-varying volumetric data based on reduction of the overhead associated with re-computation of photon maps. Our method uses the identification of photon traces invariant to changes of visual parameters such as the transfer function (TF), or data changes between time-steps in a 4D volume. This lets us operate on a variant subset of the entire photon distribution. The amount of computation required in the two stages of the photon mapping process, namely tracing and gathering, can thus be reduced to the subset that are affected by a data or visual parameter change. We rely on two different types of information from the original data to identify the regions that have changed. A low resolution uniform grid containing the minimum and maximum data values of the original data is derived for each time step. Similarly, for two consecutive time-steps, a low resolution grid containing the difference between the overlapping data is used. We show that this compact metadata can be combined with the transfer function to identify the regions that have changed. Each photon traverses the low-resolution grid to identify if it can be directly transferred to the next photon distribution state or if it needs to be recomputed. An efficient representation of the photon distribution is presented leading to an order of magnitude improved performance of the raycasting step. The utility of the method is demonstrated in several examples that show visual fidelity, as well as performance. The examples show that visual quality can be retained when the fraction of retraced photons is as low as 40%–50%.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a method for interactive global illumination of both static and time-varying volumetric data based on reduction of the overhead associated with re-computation of photon maps. Our method uses the identification of photon traces invariant to changes of visual parameters such as the transfer function (TF), or data changes between time-steps in a 4D volume. This lets us operate on a variant subset of the entire photon distribution. The amount of computation required in the two stages of the photon mapping process, namely tracing and gathering, can thus be reduced to the subset that are affected by a data or visual parameter change. We rely on two different types of information from the original data to identify the regions that have changed. A low resolution uniform grid containing the minimum and maximum data values of the original data is derived for each time step. Similarly, for two consecutive time-steps, a low resolution grid containing the difference between the overlapping data is used. We show that this compact metadata can be combined with the transfer function to identify the regions that have changed. Each photon traverses the low-resolution grid to identify if it can be directly transferred to the next photon distribution state or if it needs to be recomputed. An efficient representation of the photon distribution is presented leading to an order of magnitude improved performance of the raycasting step. The utility of the method is demonstrated in several examples that show visual fidelity, as well as performance. The examples show that visual quality can be retained when the fraction of retraced photons is as low as 40%–50%.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a method for interactive global illumination of both static and time-varying volumetric data based on reduction of the overhead associated with re-computation of photon maps. Our method uses the identification of photon traces invariant to changes of visual parameters such as the transfer function (TF), or data changes between time-steps in a 4D volume. This lets us operate on a variant subset of the entire photon distribution. The amount of computation required in the two stages of the photon mapping process, namely tracing and gathering, can thus be reduced to the subset that are affected by a data or visual parameter change. We rely on two different types of information from the original data to identify the regions that have changed. A low resolution uniform grid containing the minimum and maximum data values of the original data is derived for each time step. Similarly, for two consecutive time-steps, a low resolution grid containing the difference between the overlapping data is used. We show that this compact metadata can be combined with the transfer function to identify the regions that have changed. Each photon traverses the low-resolution grid to identify if it can be directly transferred to the next photon distribution state or if it needs to be recomputed. An efficient representation of the photon distribution is presented leading to an order of magnitude improved performance of the raycasting step. The utility of the method is demonstrated in several examples that show visual fidelity, as well as performance. The examples show that visual quality can be retained when the fraction of retraced photons is as low as 40%–50%.",
"title": "Correlated Photon Mapping for Interactive Global Illumination of Time-Varying Volumetric Data",
"normalizedTitle": "Correlated Photon Mapping for Interactive Global Illumination of Time-Varying Volumetric Data",
"fno": "07534852",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Photonics",
"Lighting",
"Visualization",
"Light Sources",
"Rendering Computer Graphics",
"Media",
"Correlation",
"Participating Media",
"Volume Rendering",
"Photon Mapping",
"Global Illumination"
],
"authors": [
{
"givenName": "Daniel",
"surname": "Jönsson",
"fullName": "Daniel Jönsson",
"affiliation": "Linköping University, Nörrköping, Sweden",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Anders",
"surname": "Ynnerman",
"fullName": "Anders Ynnerman",
"affiliation": "Linköping University, Nörrköping, Sweden",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2017-01-01 00:00:00",
"pubType": "trans",
"pages": "901-910",
"year": "2017",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/sibgrapi/2015/7962/0/7962a033",
"title": "A Comparison of Global Illumination Methods Using Perceptual Quality Metrics",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2015/7962a033/12OmNBhHt9Z",
"parentPublication": {
"id": "proceedings/sibgrapi/2015/7962/0",
"title": "2015 28th SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iiki/2016/5952/0/5952a022",
"title": "Distributed Global Illumination Method Based on Photon Mapping",
"doi": null,
"abstractUrl": "/proceedings-article/iiki/2016/5952a022/12OmNBubOQf",
"parentPublication": {
"id": "proceedings/iiki/2016/5952/0",
"title": "2016 International Conference on Identification, Information and Knowledge in the Internet of Things (IIKI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2015/6879/0/07156382",
"title": "Efficient volume illumination with multiple light sources through selective light updates",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2015/07156382/12OmNvDZF6A",
"parentPublication": {
"id": "proceedings/pacificvis/2015/6879/0",
"title": "2015 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/crv/2014/4337/0/4337a283",
"title": "Photon Detection and Color Perception at Low Light Levels",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2014/4337a283/12OmNyRxFxW",
"parentPublication": {
"id": "proceedings/crv/2014/4337/0",
"title": "2014 Canadian Conference on Computer and Robot Vision (CRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/10/06781612",
"title": "Overestimation and Underestimation Biases in Photon Mapping with Non-Constant Kernels",
"doi": null,
"abstractUrl": "/journal/tg/2014/10/06781612/13rRUwjXZSg",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/06/06671591",
"title": "Importance Driven Environment Map Sampling",
"doi": null,
"abstractUrl": "/journal/tg/2014/06/06671591/13rRUxlgxTj",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/12/ttg2012122364",
"title": "Historygrams: Enabling Interactive Global Illumination in Direct Volume Rendering using Photon Mapping",
"doi": null,
"abstractUrl": "/journal/tg/2012/12/ttg2012122364/13rRUyYjK5h",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/07/08600345",
"title": "Precomputed Multiple Scattering for Rapid Light Simulation in Participating Media",
"doi": null,
"abstractUrl": "/journal/tg/2020/07/08600345/17D45Xh13tH",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/10077440",
"title": "NeRC: Rendering Planar Caustics by Learning Implicit Neural Representations",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/10077440/1LFQ6PMpeik",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09523836",
"title": "Foveated Photon Mapping",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09523836/1wpquR1qr1S",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "07539643",
"articleId": "13rRUwInvJk",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07539577",
"articleId": "13rRUwwaKtc",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNyPQ4Dx",
"title": "Dec.",
"year": "2012",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "18",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUyYjK5h",
"doi": "10.1109/TVCG.2012.232",
"abstract": "In this paper, we enable interactive volumetric global illumination by extending photon mapping techniques to handle interactive transfer function (TF) and material editing in the context of volume rendering. We propose novel algorithms and data structures for finding and evaluating parts of a scene affected by these parameter changes, and thus support efficient updates of the photon map. In direct volume rendering (DVR) the ability to explore volume data using parameter changes, such as editable TFs, is of key importance. Advanced global illumination techniques are in most cases computationally too expensive, as they prevent the desired interactivity. Our technique decreases the amount of computation caused by parameter changes, by introducing Historygrams which allow us to efficiently reuse previously computed photon media interactions. Along the viewing rays, we utilize properties of the light transport equations to subdivide a view-ray into segments and independently update them when invalid. Unlike segments of a view-ray, photon scattering events within the volumetric medium needs to be sequentially updated. Using our Historygram approach, we can identify the first invalid photon interaction caused by a property change, and thus reuse all valid photon interactions. Combining these two novel concepts, supports interactive editing of parameters when using volumetric photon mapping in the context of DVR. As a consequence, we can handle arbitrarily shaped and positioned light sources, arbitrary phase functions, bidirectional reflectance distribution functions and multiple scattering which has previously not been possible in interactive DVR.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, we enable interactive volumetric global illumination by extending photon mapping techniques to handle interactive transfer function (TF) and material editing in the context of volume rendering. We propose novel algorithms and data structures for finding and evaluating parts of a scene affected by these parameter changes, and thus support efficient updates of the photon map. In direct volume rendering (DVR) the ability to explore volume data using parameter changes, such as editable TFs, is of key importance. Advanced global illumination techniques are in most cases computationally too expensive, as they prevent the desired interactivity. Our technique decreases the amount of computation caused by parameter changes, by introducing Historygrams which allow us to efficiently reuse previously computed photon media interactions. Along the viewing rays, we utilize properties of the light transport equations to subdivide a view-ray into segments and independently update them when invalid. Unlike segments of a view-ray, photon scattering events within the volumetric medium needs to be sequentially updated. Using our Historygram approach, we can identify the first invalid photon interaction caused by a property change, and thus reuse all valid photon interactions. Combining these two novel concepts, supports interactive editing of parameters when using volumetric photon mapping in the context of DVR. As a consequence, we can handle arbitrarily shaped and positioned light sources, arbitrary phase functions, bidirectional reflectance distribution functions and multiple scattering which has previously not been possible in interactive DVR.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, we enable interactive volumetric global illumination by extending photon mapping techniques to handle interactive transfer function (TF) and material editing in the context of volume rendering. We propose novel algorithms and data structures for finding and evaluating parts of a scene affected by these parameter changes, and thus support efficient updates of the photon map. In direct volume rendering (DVR) the ability to explore volume data using parameter changes, such as editable TFs, is of key importance. Advanced global illumination techniques are in most cases computationally too expensive, as they prevent the desired interactivity. Our technique decreases the amount of computation caused by parameter changes, by introducing Historygrams which allow us to efficiently reuse previously computed photon media interactions. Along the viewing rays, we utilize properties of the light transport equations to subdivide a view-ray into segments and independently update them when invalid. Unlike segments of a view-ray, photon scattering events within the volumetric medium needs to be sequentially updated. Using our Historygram approach, we can identify the first invalid photon interaction caused by a property change, and thus reuse all valid photon interactions. Combining these two novel concepts, supports interactive editing of parameters when using volumetric photon mapping in the context of DVR. As a consequence, we can handle arbitrarily shaped and positioned light sources, arbitrary phase functions, bidirectional reflectance distribution functions and multiple scattering which has previously not been possible in interactive DVR.",
"title": "Historygrams: Enabling Interactive Global Illumination in Direct Volume Rendering using Photon Mapping",
"normalizedTitle": "Historygrams: Enabling Interactive Global Illumination in Direct Volume Rendering using Photon Mapping",
"fno": "ttg2012122364",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Rendering Computer Graphics",
"Interactive Systems",
"Lighting",
"Bidirectional Reflectance Distribution Functions",
"Historygram Approach",
"Direct Volume Rendering",
"Photon Mapping Techniques",
"Interactive Volumetric Global Illumination",
"Interactive Transfer Function",
"TF",
"Material Editing",
"DVR",
"Advanced Global Illumination Techniques",
"Parameter Change",
"Photon Media Interactions",
"Light Transport Equations",
"View Ray",
"Photon Scattering Events",
"Property Change",
"Light Sources",
"Arbitrary Phase Functions",
"Photonics",
"Lighting",
"Rendering Computer Graphics",
"Scattering",
"Volume Measurement",
"Participating Media",
"Volume Rendering",
"Photon Mapping",
"Global Illumination"
],
"authors": [
{
"givenName": "D.",
"surname": "Jonsson",
"fullName": "D. Jonsson",
"affiliation": "Linkoping Univ., Linkoping, Sweden",
"__typename": "ArticleAuthorType"
},
{
"givenName": "J.",
"surname": "Kronander",
"fullName": "J. Kronander",
"affiliation": "Linkoping Univ., Linkoping, Sweden",
"__typename": "ArticleAuthorType"
},
{
"givenName": "T.",
"surname": "Ropinski",
"fullName": "T. Ropinski",
"affiliation": "Linkoping Univ., Linkoping, Sweden",
"__typename": "ArticleAuthorType"
},
{
"givenName": "A.",
"surname": "Ynnerman",
"fullName": "A. Ynnerman",
"affiliation": "Linkoping Univ., Linkoping, Sweden",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "12",
"pubDate": "2012-12-01 00:00:00",
"pubType": "trans",
"pages": "2364-2371",
"year": "2012",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icvrv/2011/4602/0/4602a275",
"title": "Direct Volume Rendering and Clipping Technology of Radar Beams",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2011/4602a275/12OmNApLGRp",
"parentPublication": {
"id": "proceedings/icvrv/2011/4602/0",
"title": "2011 International Conference on Virtual Reality and Visualization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2002/7498/0/7498bergner",
"title": "Interactive Spectral Volume Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2002/7498bergner/12OmNqEjhXN",
"parentPublication": {
"id": "proceedings/ieee-vis/2002/7498/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dmdcm/2011/4413/0/4413a181",
"title": "Progressive Point-Based Global Illumination",
"doi": null,
"abstractUrl": "/proceedings-article/dmdcm/2011/4413a181/12OmNxXCGN1",
"parentPublication": {
"id": "proceedings/dmdcm/2011/4413/0",
"title": "Digital Media and Digital Content Management, Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2000/6478/0/64780039",
"title": "Two-Level Volume Rendering-Fusing MIP and DVR",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2000/64780039/12OmNxzMnWP",
"parentPublication": {
"id": "proceedings/ieee-vis/2000/6478/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cadgraphics/2011/4497/0/4497a071",
"title": "Real-Time Volume Caustics with Image-Based Photon Tracing",
"doi": null,
"abstractUrl": "/proceedings-article/cadgraphics/2011/4497a071/12OmNy7Qfu0",
"parentPublication": {
"id": "proceedings/cadgraphics/2011/4497/0",
"title": "Computer-Aided Design and Computer Graphics, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/12/ttg2011121922",
"title": "About the Influence of Illumination Models on Image Comprehension in Direct Volume Rendering",
"doi": null,
"abstractUrl": "/journal/tg/2011/12/ttg2011121922/13rRUILtJzv",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/12/ttg2011122144",
"title": "An Efficient Direct Volume Rendering Approach for Dichromats",
"doi": null,
"abstractUrl": "/journal/tg/2011/12/ttg2011122144/13rRUNvgz9H",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/03/ttg2012030447",
"title": "Efficient Visibility Encoding for Dynamic Illumination in Direct Volume Rendering",
"doi": null,
"abstractUrl": "/journal/tg/2012/03/ttg2012030447/13rRUxAATgu",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/12/ttg2011122125",
"title": "Image Plane Sweep Volume Illumination",
"doi": null,
"abstractUrl": "/journal/tg/2011/12/ttg2011122125/13rRUxjQyve",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/08/ttg2013081317",
"title": "Real-Time Volume Rendering in Dynamic Lighting Environments Using Precomputed Photon Mapping",
"doi": null,
"abstractUrl": "/journal/tg/2013/08/ttg2013081317/13rRUynHuja",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2012122355",
"articleId": "13rRUwjoNx2",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2012122372",
"articleId": "13rRUwghd97",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTYesSz",
"name": "ttg2012122364s1.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2012122364s1.zip",
"extension": "zip",
"size": "1.91 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNC36tSf",
"title": "Aug.",
"year": "2013",
"issueNum": "08",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "Aug.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUynHuja",
"doi": "10.1109/TVCG.2013.17",
"abstract": "We present a framework for precomputed volume radiance transfer that achieves real-time rendering of global illumination effects for volume data sets such as multiple scattering, volumetric shadows, and so on. Our approach incorporates the volumetric photon mapping method into the classical precomputed radiance transfer pipeline. We contribute several techniques for light approximation, radiance transfer precomputation, and real-time radiance estimation, which are essential to make the approach practical and to achieve high frame rates. For light approximation, we propose a new discrete spherical function that has better performance for construction and evaluation when compared with existing rotational invariant spherical functions such as spherical harmonics and spherical radial basis functions. In addition, we present a fast splatting-based radiance transfer precomputation method and an early evaluation technique for real-time radiance estimation in the clustered principal component analysis space. Our techniques are validated through comprehensive evaluations and rendering tests. We also apply our rendering approach to volume visualization.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a framework for precomputed volume radiance transfer that achieves real-time rendering of global illumination effects for volume data sets such as multiple scattering, volumetric shadows, and so on. Our approach incorporates the volumetric photon mapping method into the classical precomputed radiance transfer pipeline. We contribute several techniques for light approximation, radiance transfer precomputation, and real-time radiance estimation, which are essential to make the approach practical and to achieve high frame rates. For light approximation, we propose a new discrete spherical function that has better performance for construction and evaluation when compared with existing rotational invariant spherical functions such as spherical harmonics and spherical radial basis functions. In addition, we present a fast splatting-based radiance transfer precomputation method and an early evaluation technique for real-time radiance estimation in the clustered principal component analysis space. Our techniques are validated through comprehensive evaluations and rendering tests. We also apply our rendering approach to volume visualization.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a framework for precomputed volume radiance transfer that achieves real-time rendering of global illumination effects for volume data sets such as multiple scattering, volumetric shadows, and so on. Our approach incorporates the volumetric photon mapping method into the classical precomputed radiance transfer pipeline. We contribute several techniques for light approximation, radiance transfer precomputation, and real-time radiance estimation, which are essential to make the approach practical and to achieve high frame rates. For light approximation, we propose a new discrete spherical function that has better performance for construction and evaluation when compared with existing rotational invariant spherical functions such as spherical harmonics and spherical radial basis functions. In addition, we present a fast splatting-based radiance transfer precomputation method and an early evaluation technique for real-time radiance estimation in the clustered principal component analysis space. Our techniques are validated through comprehensive evaluations and rendering tests. We also apply our rendering approach to volume visualization.",
"title": "Real-Time Volume Rendering in Dynamic Lighting Environments Using Precomputed Photon Mapping",
"normalizedTitle": "Real-Time Volume Rendering in Dynamic Lighting Environments Using Precomputed Photon Mapping",
"fno": "ttg2013081317",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Lighting",
"Rendering Computer Graphics",
"Photonics",
"Scattering",
"Real Time Systems",
"Approximation Methods",
"Media",
"Volume Shadow",
"Volume Rendering",
"Precomputed Radiance Transfer",
"Volume Ray Casting",
"Multiple Scattering"
],
"authors": [
{
"givenName": null,
"surname": "Yubo Zhang",
"fullName": "Yubo Zhang",
"affiliation": "Dept. of Comput. Sci., Univ. of California, Davis, Davis, CA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": null,
"surname": "Zhao Dong",
"fullName": "Zhao Dong",
"affiliation": "Program of Comput. Graphics, Cornell Univ., Ithaca, NY, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": null,
"surname": "Kwan-Liu Ma",
"fullName": "Kwan-Liu Ma",
"affiliation": "Dept. of Comput. Sci., Univ. of California, Davis, Davis, CA, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "08",
"pubDate": "2013-08-01 00:00:00",
"pubType": "trans",
"pages": "1317-1330",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vv/2002/7641/0/76410131",
"title": "Shading for Fourier Volume Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/vv/2002/76410131/12OmNCwlajb",
"parentPublication": {
"id": "proceedings/vv/2002/7641/0",
"title": "Volume Visualization and Graphics, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacific-graphics/2010/4205/0/4205a024",
"title": "Fast Height-Field Rendering under Image-Based Lighting",
"doi": null,
"abstractUrl": "/proceedings-article/pacific-graphics/2010/4205a024/12OmNs0C9Uf",
"parentPublication": {
"id": "proceedings/pacific-graphics/2010/4205/0",
"title": "Pacific Conference on Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2005/2766/0/27660038",
"title": "Scale-Invariant Volume Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2005/27660038/12OmNxb5hu0",
"parentPublication": {
"id": "proceedings/ieee-vis/2005/2766/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/12/ttg2013122936",
"title": "Ambient Volume Scattering",
"doi": null,
"abstractUrl": "/journal/tg/2013/12/ttg2013122936/13rRUwcAqqh",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/12/ttg2013122946",
"title": "Lighting Design for Globally Illuminated Volume Rendering",
"doi": null,
"abstractUrl": "/journal/tg/2013/12/ttg2013122946/13rRUwvBy8U",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2008/02/ttg2008020454",
"title": "Spherical Piecewise Constant Basis Functions for All-Frequency Precomputed Radiance Transfer",
"doi": null,
"abstractUrl": "/journal/tg/2008/02/ttg2008020454/13rRUwvT9gl",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/03/ttg2012030447",
"title": "Efficient Visibility Encoding for Dynamic Illumination in Direct Volume Rendering",
"doi": null,
"abstractUrl": "/journal/tg/2012/03/ttg2012030447/13rRUxAATgu",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2010/04/ttg2010040560",
"title": "Per-Pixel Opacity Modulation for Feature Enhancement in Volume Rendering",
"doi": null,
"abstractUrl": "/journal/tg/2010/04/ttg2010040560/13rRUxOdD8f",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2001/03/v0253",
"title": "Volume Illustration: Nonphotorealistic Rendering of Volume Models",
"doi": null,
"abstractUrl": "/journal/tg/2001/03/v0253/13rRUxbTMyH",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2010/01/ttg2010010043",
"title": "All-Frequency Lighting with Multiscale Spherical Radial Basis Functions",
"doi": null,
"abstractUrl": "/journal/tg/2010/01/ttg2010010043/13rRUxjQybO",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013081307",
"articleId": "13rRUxd2aZ0",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013081331",
"articleId": "13rRUxly9dT",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTYesY0",
"name": "ttg2013081317s.mov",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2013081317s.mov",
"extension": "mov",
"size": "31.3 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1DSumaVNxG8",
"doi": "10.1109/TVCG.2022.3179766",
"abstract": "As one of the facial expression recognition techniques for Head-Mounted Display (HMD) users, embedded photo-reflective sensors have been used. In this paper, we investigate how gaze and face directions affect facial expression recognition using the embedded photo-reflective sensors. First, we collected a dataset of five facial expressions (Neutral, Happy, Angry, Sad, Surprised) while looking in diverse directions by moving 1) the eyes and 2) the head. Using the dataset, we analyzed the effect of gaze and face directions by constructing facial expression classifiers in five ways and evaluating the classification accuracy of each classifier. The results revealed that the single classifier that learned the data for all gaze points achieved the highest classification performance. Then, we investigated which facial part was affected by the gaze and face direction. The results showed that the gaze directions affected the upper facial parts, while the face directions affected the lower facial parts. In addition, by removing the bias of facial expression reproducibility, we investigated the pure effect of gaze and face directions in three conditions. The results showed that, in terms of gaze direction, building classifiers for each direction significantly improved the classification accuracy. However, in terms of face directions, there were slight differences between the classifier conditions. Our experimental results implied that multiple classifiers corresponding to multiple gaze and face directions improved facial expression recognition accuracy, but collecting the data of the vertical movement of gaze and face is a practical solution to improving facial expression recognition accuracy.",
"abstracts": [
{
"abstractType": "Regular",
"content": "As one of the facial expression recognition techniques for Head-Mounted Display (HMD) users, embedded photo-reflective sensors have been used. In this paper, we investigate how gaze and face directions affect facial expression recognition using the embedded photo-reflective sensors. First, we collected a dataset of five facial expressions (Neutral, Happy, Angry, Sad, Surprised) while looking in diverse directions by moving 1) the eyes and 2) the head. Using the dataset, we analyzed the effect of gaze and face directions by constructing facial expression classifiers in five ways and evaluating the classification accuracy of each classifier. The results revealed that the single classifier that learned the data for all gaze points achieved the highest classification performance. Then, we investigated which facial part was affected by the gaze and face direction. The results showed that the gaze directions affected the upper facial parts, while the face directions affected the lower facial parts. In addition, by removing the bias of facial expression reproducibility, we investigated the pure effect of gaze and face directions in three conditions. The results showed that, in terms of gaze direction, building classifiers for each direction significantly improved the classification accuracy. However, in terms of face directions, there were slight differences between the classifier conditions. Our experimental results implied that multiple classifiers corresponding to multiple gaze and face directions improved facial expression recognition accuracy, but collecting the data of the vertical movement of gaze and face is a practical solution to improving facial expression recognition accuracy.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "As one of the facial expression recognition techniques for Head-Mounted Display (HMD) users, embedded photo-reflective sensors have been used. In this paper, we investigate how gaze and face directions affect facial expression recognition using the embedded photo-reflective sensors. First, we collected a dataset of five facial expressions (Neutral, Happy, Angry, Sad, Surprised) while looking in diverse directions by moving 1) the eyes and 2) the head. Using the dataset, we analyzed the effect of gaze and face directions by constructing facial expression classifiers in five ways and evaluating the classification accuracy of each classifier. The results revealed that the single classifier that learned the data for all gaze points achieved the highest classification performance. Then, we investigated which facial part was affected by the gaze and face direction. The results showed that the gaze directions affected the upper facial parts, while the face directions affected the lower facial parts. In addition, by removing the bias of facial expression reproducibility, we investigated the pure effect of gaze and face directions in three conditions. The results showed that, in terms of gaze direction, building classifiers for each direction significantly improved the classification accuracy. However, in terms of face directions, there were slight differences between the classifier conditions. Our experimental results implied that multiple classifiers corresponding to multiple gaze and face directions improved facial expression recognition accuracy, but collecting the data of the vertical movement of gaze and face is a practical solution to improving facial expression recognition accuracy.",
"title": "Analyzing the Effect of Diverse Gaze and Head Direction on Facial Expression Recognition with Photo-Reflective Sensors Embedded in a Head-Mounted Display",
"normalizedTitle": "Analyzing the Effect of Diverse Gaze and Head Direction on Facial Expression Recognition with Photo-Reflective Sensors Embedded in a Head-Mounted Display",
"fno": "09786815",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Sensors",
"Face Recognition",
"Resists",
"Cameras",
"Avatars",
"Gravity",
"Optical Sensors",
"Facial Expression Recognition",
"Head Mounted Display",
"Embedded Photo Reflective Sensor",
"Gaze Direction",
"Face Direction"
],
"authors": [
{
"givenName": "Fumihiko",
"surname": "Nakamura",
"fullName": "Fumihiko Nakamura",
"affiliation": "Faculty of Science and Technology, Keio University, Kanagawa, Japan. K. Masai is with NTT Communication Science Laboratories and the Faculty of Science and Technology, Keio University, Kanagawa, Japan",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Masaaki",
"surname": "Murakami",
"fullName": "Masaaki Murakami",
"affiliation": "Faculty of Science and Technology, Keio University, Kanagawa, Japan. K. Masai is with NTT Communication Science Laboratories and the Faculty of Science and Technology, Keio University, Kanagawa, Japan",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Katsuhiro",
"surname": "Suzuki",
"fullName": "Katsuhiro Suzuki",
"affiliation": "Faculty of Science and Technology, Keio University, Kanagawa, Japan. K. Masai is with NTT Communication Science Laboratories and the Faculty of Science and Technology, Keio University, Kanagawa, Japan",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Masaaki",
"surname": "Fukuoka",
"fullName": "Masaaki Fukuoka",
"affiliation": "Faculty of Science and Technology, Keio University, Kanagawa, Japan. K. Masai is with NTT Communication Science Laboratories and the Faculty of Science and Technology, Keio University, Kanagawa, Japan",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Katsutoshi",
"surname": "Masai",
"fullName": "Katsutoshi Masai",
"affiliation": "Faculty of Science and Technology, Keio University, Kanagawa, Japan. K. Masai is with NTT Communication Science Laboratories and the Faculty of Science and Technology, Keio University, Kanagawa, Japan",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Maki",
"surname": "Sugimoto",
"fullName": "Maki Sugimoto",
"affiliation": "Faculty of Science and Technology, Keio University, Kanagawa, Japan. K. Masai is with NTT Communication Science Laboratories and the Faculty of Science and Technology, Keio University, Kanagawa, Japan",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-06-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/fg/2008/2153/0/04813466",
"title": "A fast and robust 3D head pose and gaze estimation system",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2008/04813466/12OmNBqv2dy",
"parentPublication": {
"id": "proceedings/fg/2008/2153/0",
"title": "2008 8th IEEE International Conference on Automatic Face & Gesture Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892245",
"title": "Recognition and mapping of facial expressions to avatar by embedded photo reflective sensors in head mounted display",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892245/12OmNwkR5tU",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icis/2018/5892/0/08466462",
"title": "Image-based Attention Level Estimation of Interaction Scene by Head Pose and Gaze Information",
"doi": null,
"abstractUrl": "/proceedings-article/icis/2018/08466462/13Jkr9SfNnG",
"parentPublication": {
"id": "proceedings/icis/2018/5892/0",
"title": "2018 IEEE/ACIS 17th International Conference on Computer and Information Science (ICIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2020/02/08319988",
"title": "Emotion Recognition in Simulated Social Interactions",
"doi": null,
"abstractUrl": "/journal/ta/2020/02/08319988/13rRUB7a1ea",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600c182",
"title": "Dynamic 3D Gaze from Afar: Deep Gaze Estimation from Temporal Eye-Head-Body Coordination",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600c182/1H1mDm1L85i",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a082",
"title": "Real-time Gaze Tracking with Head-eye Coordination for Head-mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a082/1JrQQ8dsLKM",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2023/9346/0/934600d453",
"title": "Fine Gaze Redirection Learning with Gaze Hardness-aware Transformation",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2023/934600d453/1L8qk4xmpvW",
"parentPublication": {
"id": "proceedings/wacv/2023/9346/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797925",
"title": "Mask-off: Synthesizing Face Images in the Presence of Head-mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797925/1cJ0J09XMdy",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797852",
"title": "Perception of Volumetric Characters' Eye-Gaze Direction in Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797852/1cJ0UskDCRa",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a101",
"title": "Digital Full-Face Mask Display with Expression Recognition using Embedded Photo Reflective Sensor Arrays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a101/1pystZgPICk",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09785918",
"articleId": "1DPaEdHg6KQ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09793626",
"articleId": "1E5LEepCqTC",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1Fz4SPLVTMY",
"doi": "10.1109/TVCG.2022.3196606",
"abstract": "Distances are commonly underperceived in virtual reality (VR), and this finding has been documented repeatedly over more than two decades of research. Yet, there is evidence that perceived distance is more accurate in modern compared to older head-mounted displays (HMDs). This meta-analysis of 131 studies describes egocentric distance perception across 20 HMDs, and also examines the relationship between perceived distance and technical HMD characteristics. Judged distance was positively associated with HMD field of view (FOV), positively associated with HMD resolution, and negatively associated with HMD weight. The effects of FOV and resolution were more pronounced among heavier HMDs. These findings suggest that future improvements in these technical characteristics may be central to resolving the problem of distance underperception in VR.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Distances are commonly underperceived in virtual reality (VR), and this finding has been documented repeatedly over more than two decades of research. Yet, there is evidence that perceived distance is more accurate in modern compared to older head-mounted displays (HMDs). This meta-analysis of 131 studies describes egocentric distance perception across 20 HMDs, and also examines the relationship between perceived distance and technical HMD characteristics. Judged distance was positively associated with HMD field of view (FOV), positively associated with HMD resolution, and negatively associated with HMD weight. The effects of FOV and resolution were more pronounced among heavier HMDs. These findings suggest that future improvements in these technical characteristics may be central to resolving the problem of distance underperception in VR.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Distances are commonly underperceived in virtual reality (VR), and this finding has been documented repeatedly over more than two decades of research. Yet, there is evidence that perceived distance is more accurate in modern compared to older head-mounted displays (HMDs). This meta-analysis of 131 studies describes egocentric distance perception across 20 HMDs, and also examines the relationship between perceived distance and technical HMD characteristics. Judged distance was positively associated with HMD field of view (FOV), positively associated with HMD resolution, and negatively associated with HMD weight. The effects of FOV and resolution were more pronounced among heavier HMDs. These findings suggest that future improvements in these technical characteristics may be central to resolving the problem of distance underperception in VR.",
"title": "Distance Perception in Virtual Reality: A Meta-Analysis of the Effect of Head-Mounted Display Characteristics",
"normalizedTitle": "Distance Perception in Virtual Reality: A Meta-Analysis of the Effect of Head-Mounted Display Characteristics",
"fno": "09850416",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Resists",
"Image Resolution",
"Head",
"Virtual Environments",
"Head Mounted Displays",
"Task Analysis",
"Surface Texture",
"Distance Perception",
"Egocentric Distance",
"Field Of View",
"Head Mounted Display",
"Meta Analysis",
"Resolution",
"Virtual Environment",
"Virtual Reality",
"Weight"
],
"authors": [
{
"givenName": "Jonathan W.",
"surname": "Kelly",
"fullName": "Jonathan W. Kelly",
"affiliation": "Department of Psychology, Iowa State Universty, Ames, IA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-08-01 00:00:00",
"pubType": "trans",
"pages": "1-13",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2012/4660/0/06402574",
"title": "Occlusion capable optical see-through head-mounted display using freeform optics",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2012/06402574/12OmNBEpnEt",
"parentPublication": {
"id": "proceedings/ismar/2012/4660/0",
"title": "2012 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iswc/2009/3779/0/3779a047",
"title": "Clinical Implementation of a Head-Mounted Display of Patient Vital Signs",
"doi": null,
"abstractUrl": "/proceedings-article/iswc/2009/3779a047/12OmNzlUKPY",
"parentPublication": {
"id": "proceedings/iswc/2009/3779/0",
"title": "2009 International Symposium on Wearable Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446345",
"title": "Investigating a Sparse Peripheral Display in a Head-Mounted Display for VR Locomotion",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446345/13bd1fZBGbI",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/07/ttg2011070888",
"title": "Natural Perspective Projections for Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2011/07/ttg2011070888/13rRUwInvJd",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a746",
"title": "Depth Reduction in Light-Field Head-Mounted Displays by Generating Intermediate Images as Virtual Images",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a746/1CJcGN8dsS4",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a105",
"title": "Real-Time Recognition of In-Place Body Actions and Head Gestures using Only a Head-Mounted Display",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a105/1MNgCnmbXyM",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797736",
"title": "Emotion Recognition in Gamers Wearing Head-mounted Display",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797736/1cJ0JubbA6A",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a542",
"title": "Field of View Effect on Distance Perception in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a542/1tnXQ9aew80",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a109",
"title": "Generative RGB-D Face Completion for Head-Mounted Display Removal",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a109/1tnXncnHsIg",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09523894",
"title": "Head-Mounted Display with Increased Downward Field of View Improves Presence and Sense of Self-Location",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09523894/1wpqkPb7CSY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09850404",
"articleId": "1Fz4SEQnoiY",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09852325",
"articleId": "1FFHd5L2SxG",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "1zKXryr0JDG",
"title": "Feb.",
"year": "2022",
"issueNum": "02",
"idPrefix": "tg",
"pubType": "journal",
"volume": "28",
"label": "Feb.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1lHjPSqVrpK",
"doi": "10.1109/TVCG.2020.3011468",
"abstract": "A commercial head-mounted display (HMD) for virtual reality (VR) presents three-dimensional imagery with a fixed focal distance. The VR HMD with a fixed focus can cause visual discomfort to an observer. In this article, we propose a novel design of a compact VR HMD supporting near-correct focus cues over a wide depth of field (from 18 cm to optical infinity). The proposed HMD consists of a low-resolution binary backlight, a liquid crystal display panel, and focus-tunable lenses. In the proposed system, the backlight locally illuminates the display panel that is floated by the focus-tunable lens at a specific distance. The illumination moment and the focus-tunable lens’ focal power are synchronized to generate focal blocks at the desired distances. The distance of each focal block is determined by depth information of three-dimensional imagery to provide near-correct focus cues. We evaluate the focus cue fidelity of the proposed system considering the fill factor and resolution of the backlight. Finally, we verify the display performance with experimental results.",
"abstracts": [
{
"abstractType": "Regular",
"content": "A commercial head-mounted display (HMD) for virtual reality (VR) presents three-dimensional imagery with a fixed focal distance. The VR HMD with a fixed focus can cause visual discomfort to an observer. In this article, we propose a novel design of a compact VR HMD supporting near-correct focus cues over a wide depth of field (from 18 cm to optical infinity). The proposed HMD consists of a low-resolution binary backlight, a liquid crystal display panel, and focus-tunable lenses. In the proposed system, the backlight locally illuminates the display panel that is floated by the focus-tunable lens at a specific distance. The illumination moment and the focus-tunable lens’ focal power are synchronized to generate focal blocks at the desired distances. The distance of each focal block is determined by depth information of three-dimensional imagery to provide near-correct focus cues. We evaluate the focus cue fidelity of the proposed system considering the fill factor and resolution of the backlight. Finally, we verify the display performance with experimental results.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A commercial head-mounted display (HMD) for virtual reality (VR) presents three-dimensional imagery with a fixed focal distance. The VR HMD with a fixed focus can cause visual discomfort to an observer. In this article, we propose a novel design of a compact VR HMD supporting near-correct focus cues over a wide depth of field (from 18 cm to optical infinity). The proposed HMD consists of a low-resolution binary backlight, a liquid crystal display panel, and focus-tunable lenses. In the proposed system, the backlight locally illuminates the display panel that is floated by the focus-tunable lens at a specific distance. The illumination moment and the focus-tunable lens’ focal power are synchronized to generate focal blocks at the desired distances. The distance of each focal block is determined by depth information of three-dimensional imagery to provide near-correct focus cues. We evaluate the focus cue fidelity of the proposed system considering the fill factor and resolution of the backlight. Finally, we verify the display performance with experimental results.",
"title": "Volumetric Head-Mounted Display With Locally Adaptive Focal Blocks",
"normalizedTitle": "Volumetric Head-Mounted Display With Locally Adaptive Focal Blocks",
"fno": "09146716",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Helmet Mounted Displays",
"Holographic Optical Elements",
"Lenses",
"Liquid Crystal Displays",
"Optical Focusing",
"Virtual Reality",
"Volumetric Head Mounted Display",
"Locally Adaptive Focal Blocks",
"Commercial Head Mounted Display",
"Virtual Reality",
"Three Dimensional Imagery",
"Fixed Focal Distance",
"Fixed Focus",
"Visual Discomfort",
"Compact VR HMD",
"Near Correct Focus Cues",
"Wide Depth",
"Optical Infinity",
"Low Resolution Binary Backlight",
"Liquid Crystal Display Panel",
"Focus Tunable Lens",
"Specific Distance",
"Illumination Moment",
"Focal Block",
"Desired Distances",
"Depth Information",
"Focus Cue Fidelity",
"Display Performance",
"Size 18 0 Cm",
"Resists",
"Light Emitting Diodes",
"Visualization",
"Image Reconstruction",
"Liquid Crystal Displays",
"Retina",
"Optical Imaging",
"Virtual Reality",
"Head Mounted Display",
"Three Dimensional Display",
"Multifocal Display"
],
"authors": [
{
"givenName": "Dongheon",
"surname": "Yoo",
"fullName": "Dongheon Yoo",
"affiliation": "School of Electrical and Computer Engineering, Seoul National University, Seoul, South Korea",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Seungjae",
"surname": "Lee",
"fullName": "Seungjae Lee",
"affiliation": "School of Electrical and Computer Engineering, Seoul National University, Seoul, South Korea",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Youngjin",
"surname": "Jo",
"fullName": "Youngjin Jo",
"affiliation": "School of Electrical and Computer Engineering, Seoul National University, Seoul, South Korea",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jaebum",
"surname": "Cho",
"fullName": "Jaebum Cho",
"affiliation": "School of Electrical and Computer Engineering, Seoul National University, Seoul, South Korea",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Suyeon",
"surname": "Choi",
"fullName": "Suyeon Choi",
"affiliation": "Stanford University, Stanford, CA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Byoungho",
"surname": "Lee",
"fullName": "Byoungho Lee",
"affiliation": "School of Electrical and Computer Engineering, Seoul National University, Seoul, South Korea",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "02",
"pubDate": "2022-02-01 00:00:00",
"pubType": "trans",
"pages": "1415-1427",
"year": "2022",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2016/0836/0/07504749",
"title": "SharpView: Improved clarity of defocussed content on optical see-through head-mounted displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504749/12OmNBBhN9g",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccce/2016/2427/0/2427a126",
"title": "Halal Kit Identifier Using Radio Frequency Identification Technology",
"doi": null,
"abstractUrl": "/proceedings-article/iccce/2016/2427a126/12OmNrJAdPc",
"parentPublication": {
"id": "proceedings/iccce/2016/2427/0",
"title": "2016 International Conference on Computer and Communication Engineering (ICCCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2008/2840/0/04637321",
"title": "An optical see-through head mounted display with addressable focal planes",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2008/04637321/12OmNwe2IAw",
"parentPublication": {
"id": "proceedings/ismar/2008/2840/0",
"title": "2008 7th IEEE/ACM International Symposium on Mixed and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbms/2016/9036/0/9036a355",
"title": "Applicability of LED-Based Light Sources for Diabetic Retinopathy Detection in Retinal Imaging",
"doi": null,
"abstractUrl": "/proceedings-article/cbms/2016/9036a355/12OmNxu6pbL",
"parentPublication": {
"id": "proceedings/cbms/2016/9036/0",
"title": "2016 IEEE 29th International Symposium on Computer-Based Medical Systems (CBMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/si/2006/10/01715347",
"title": "HVS-aware dynamic backlight scaling in TFT-LCDs",
"doi": null,
"abstractUrl": "/journal/si/2006/10/01715347/13rRUxC0StC",
"parentPublication": {
"id": "trans/si",
"title": "IEEE Transactions on Very Large Scale Integration (VLSI) Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2010/03/ttg2010030381",
"title": "A Novel Prototype for an Optical See-Through Head-Mounted Display with Addressable Focus Cues",
"doi": null,
"abstractUrl": "/journal/tg/2010/03/ttg2010030381/13rRUyYSWsN",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iisa/2018/8161/0/08633628",
"title": "A Novel and Robust GreenSoul-ed Lighting Controller",
"doi": null,
"abstractUrl": "/proceedings-article/iisa/2018/08633628/17D45WrVgdE",
"parentPublication": {
"id": "proceedings/iisa/2018/8161/0",
"title": "2018 9th International Conference on Information, Intelligence, Systems and Applications (IISA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciev-&-icivpr/2018/5163/0/08641067",
"title": "IoT Enabled Smart Bicycle Safety System",
"doi": null,
"abstractUrl": "/proceedings-article/iciev-&-icivpr/2018/08641067/17PYEmtOUYp",
"parentPublication": {
"id": "proceedings/iciev-&-icivpr/2018/5163/0",
"title": "2018 Joint 7th International Conference on Informatics, Electronics & Vision (ICIEV) and 2018 2nd International Conference on Imaging, Vision & Pattern Recognition (icIVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08676155",
"title": "Varifocal Occlusion for Optical See-Through Head-Mounted Displays using a Slide Occlusion Mask",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08676155/18LFfGhc49i",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08999805",
"title": "Illuminated Focus: Vision Augmentation using Spatial Defocusing via Focal Sweep Eyeglasses and High-Speed Projector",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08999805/1hpPCtKIAaA",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09152170",
"articleId": "1lRhwZDVlpm",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09143472",
"articleId": "1lxmwwX05lC",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNwMob9C",
"title": "April",
"year": "2018",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "24",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUyft7D9",
"doi": "10.1109/TVCG.2018.2794222",
"abstract": "We present MRTouch, a novel multitouch input solution for head-mounted mixed reality systems. Our system enables users to reach out and directly manipulate virtual interfaces affixed to surfaces in their environment, as though they were touchscreens. Touch input offers precise, tactile and comfortable user input, and naturally complements existing popular modalities, such as voice and hand gesture. Our research prototype combines both depth and infrared camera streams together with real-time detection and tracking of surface planes to enable robust finger-tracking even when both the hand and head are in motion. Our technique is implemented on a commercial Microsoft HoloLens without requiring any additional hardware nor any user or environmental calibration. Through our performance evaluation, we demonstrate high input accuracy with an average positional error of 5.4 mm and 95% button size of 16 mm, across 17 participants, 2 surface orientations and 4 surface materials. Finally, we demonstrate the potential of our technique to enable on-world touch interactions through 5 example applications.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present MRTouch, a novel multitouch input solution for head-mounted mixed reality systems. Our system enables users to reach out and directly manipulate virtual interfaces affixed to surfaces in their environment, as though they were touchscreens. Touch input offers precise, tactile and comfortable user input, and naturally complements existing popular modalities, such as voice and hand gesture. Our research prototype combines both depth and infrared camera streams together with real-time detection and tracking of surface planes to enable robust finger-tracking even when both the hand and head are in motion. Our technique is implemented on a commercial Microsoft HoloLens without requiring any additional hardware nor any user or environmental calibration. Through our performance evaluation, we demonstrate high input accuracy with an average positional error of 5.4 mm and 95% button size of 16 mm, across 17 participants, 2 surface orientations and 4 surface materials. Finally, we demonstrate the potential of our technique to enable on-world touch interactions through 5 example applications.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present MRTouch, a novel multitouch input solution for head-mounted mixed reality systems. Our system enables users to reach out and directly manipulate virtual interfaces affixed to surfaces in their environment, as though they were touchscreens. Touch input offers precise, tactile and comfortable user input, and naturally complements existing popular modalities, such as voice and hand gesture. Our research prototype combines both depth and infrared camera streams together with real-time detection and tracking of surface planes to enable robust finger-tracking even when both the hand and head are in motion. Our technique is implemented on a commercial Microsoft HoloLens without requiring any additional hardware nor any user or environmental calibration. Through our performance evaluation, we demonstrate high input accuracy with an average positional error of 5.4 mm and 95% button size of 16 mm, across 17 participants, 2 surface orientations and 4 surface materials. Finally, we demonstrate the potential of our technique to enable on-world touch interactions through 5 example applications.",
"title": "MRTouch: Adding Touch Input to Head-Mounted Mixed Reality",
"normalizedTitle": "MRTouch: Adding Touch Input to Head-Mounted Mixed Reality",
"fno": "08263123",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Augmented Reality",
"Haptic Interfaces",
"Helmet Mounted Displays",
"Human Computer Interaction",
"Touch Sensitive Screens",
"Tracking",
"User Interfaces",
"MR Touch",
"Touch Input",
"Head Mounted Mixed Reality",
"Mixed Reality Systems",
"Virtual Interfaces",
"Infrared Camera Streams",
"Robust Finger Tracking",
"Commercial Microsoft Holo Lens",
"High Input Accuracy",
"On World Touch Interactions",
"Multitouch Input Solution",
"Microsoft Holo Lens",
"Virtual Reality",
"Thumb",
"Cameras",
"Tracking",
"Engines",
"Sensors",
"Augmented Reality",
"Touch Interaction",
"Depth Sensing",
"Sensor Fusion",
"On World Interaction"
],
"authors": [
{
"givenName": "Robert",
"surname": "Xiao",
"fullName": "Robert Xiao",
"affiliation": "Microsoft Research",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Julia",
"surname": "Schwarz",
"fullName": "Julia Schwarz",
"affiliation": "Microsoft",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Nick",
"surname": "Throm",
"fullName": "Nick Throm",
"affiliation": "Microsoft",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Andrew D.",
"surname": "Wilson",
"fullName": "Andrew D. Wilson",
"affiliation": "Microsoft Research",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hrvoje",
"surname": "Benko",
"fullName": "Hrvoje Benko",
"affiliation": "Microsoft Research",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "2018-04-01 00:00:00",
"pubType": "trans",
"pages": "1653-1660",
"year": "2018",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/3dui/2013/6097/0/06550188",
"title": "ForceExtension: Extending isotonic position-controlled multi-touch gestures with rate-controlled force sensing for 3D manipulation",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2013/06550188/12OmNAsk4A7",
"parentPublication": {
"id": "proceedings/3dui/2013/6097/0",
"title": "2013 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2015/1727/0/07223324",
"title": "3DTouch: A wearable 3D input device for 3D applications",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223324/12OmNCvLXZ7",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpads/2014/7615/0/07097812",
"title": "Virtual keyboard for head mounted display-based wearable devices",
"doi": null,
"abstractUrl": "/proceedings-article/icpads/2014/07097812/12OmNqzu6VX",
"parentPublication": {
"id": "proceedings/icpads/2014/7615/0",
"title": "2014 20th IEEE International Conference on Parallel and Distributed Systems (ICPADS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acit-csi/2016/4871/0/07916962",
"title": "Integration of Hand Gesture and Multi Touch Gesture with Glove Type Device",
"doi": null,
"abstractUrl": "/proceedings-article/acit-csi/2016/07916962/12OmNrGsDoo",
"parentPublication": {
"id": "proceedings/acit-csi/2016/4871/0",
"title": "2016 4th Intl. Conf. on Applied Computing and Information Technology (ACIT), 3rd Intl. Conf. on Computational Science/Intelligence and Applied Informatics (CSII), and 1st Intl. Conf. on Big Data, Cloud Computing, Data Science & Engineering (BCD)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismarw/2016/3740/0/07836518",
"title": "AR Tabletop Interface using a Head-Mounted Projector",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2016/07836518/12OmNyoiYW4",
"parentPublication": {
"id": "proceedings/ismarw/2016/3740/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2012/1204/0/06184206",
"title": "Poster: Head gesture 3D interface using a head mounted camera",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2012/06184206/12OmNzayNwg",
"parentPublication": {
"id": "proceedings/3dui/2012/1204/0",
"title": "2012 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a690",
"title": "Lightweight Wearable AR System using Head-mounted Projector for Work Support",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a690/1J7Wqal3Fkc",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798255",
"title": "HapticSphere: Physical Support To Enable Precision Touch Interaction in Mobile Mixed-Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798255/1cJ0Uje3t8Q",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798362",
"title": "Match the Cube: Investigation of the Head-coupled Input with a Spherical Fish Tank Virtual Reality Display",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798362/1cJ16r0nRSM",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a528",
"title": "VXSlate: Combining Head Movement and Mobile Touch for Large Virtual Display Interaction",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a528/1tnXg447e7e",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "08263407",
"articleId": "13rRUILtJqW",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08260971",
"articleId": "13rRUwcAqqn",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1lxvp4KjCAE",
"name": "ttg201804-08263123s1-supp1-2794222.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg201804-08263123s1-supp1-2794222.mp4",
"extension": "mp4",
"size": "108 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
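
The MRTouch record above describes fusing depth and infrared streams with surface-plane tracking so that touches on ordinary surfaces can be detected. As a rough illustration of the plane-relative depth test such a pipeline relies on, here is a short Python sketch; it is not the authors' implementation, and every function name, threshold, and the synthetic point cloud are assumptions made for illustration.

```python
# A minimal sketch of the plane-relative depth test behind surface-touch
# detection (NOT the MRTouch implementation; every name, threshold, and the
# synthetic point cloud below are illustrative assumptions).
import numpy as np

def fit_plane(points):
    """Total-least-squares plane fit; returns (unit normal, a point on the plane)."""
    centroid = points.mean(axis=0)
    _, _, vt = np.linalg.svd(points - centroid)
    normal = vt[-1]                                  # direction of least variance
    return normal / np.linalg.norm(normal), centroid

def touch_candidates(points, normal, origin, contact_mm=3.0, hover_mm=8.0):
    """Keep points lying in a thin band just above the fitted surface plane."""
    heights = np.abs((points - origin) @ normal)     # unsigned distance to plane (mm)
    return points[(heights > contact_mm) & (heights < hover_mm)]

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    # Synthetic tabletop (z ~ 0, mild sensor noise) plus one "fingertip" 5 mm above it.
    table = np.c_[rng.uniform(0, 300, (500, 2)), rng.normal(0, 0.5, 500)]
    finger = np.array([[150.0, 150.0, 5.0]])
    cloud = np.vstack([table, finger])

    normal, origin = fit_plane(table)
    print("candidate contacts:\n", touch_candidates(cloud, normal, origin))
```

A real pipeline would additionally track the plane over time, fuse the infrared stream, and cluster candidate points into fingertips before reporting touch events.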
{
"issue": {
"id": "12OmNxvO04X",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tp",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1LUpwXZtAe4",
"doi": "10.1109/TPAMI.2023.3262817",
"abstract": "Visual-LiDAR odometry and mapping (V-LOAM), which fuses complementary information of a camera and a LiDAR, is an attractive solution for accurate and robust pose estimation and mapping. However, existing systems could suffer nontrivial tracking errors arising from 1) association between 3D LiDAR points and sparse 2D features (i.e. 3D-2D depth association) and 2) obvious drifts in the vertical direction in the 6-degree of freedom (DOF) sweep-to-map optimization. In this paper, we present SDV-LOAM which incorporates a semi-direct visual odometry and an adaptive sweep-to-map LiDAR odometry to effectively avoid the above-mentioned errors and in turn achieve high tracking accuracy. The visual module of our SDV-LOAM directly extracts high-gradient pixels where 3D LiDAR points project on for tracking. To avoid the problem of large scale difference between matching frames in the VO, we design a novel point matching with propagation method to propagate points of a host frame to an intermediate keyframe which is closer to the current frame to reduce scale differences. To reduce the pose estimation drifts in the vertical direction, our LiDAR module employs an adaptive sweep-to-map optimization method which automatically choose to optimize 3 horizontal DOF or 6 full DOF pose according to the richness of geometric constraints in the vertical direction. In addition, we propose a novel sweep reconstruction method which can increase the input frequency of LiDAR point clouds to the same frequency as the camera images, and in turn yield a high frequency output of the LiDAR odometry in theory. Experimental results demonstrate that our SDV-LOAM ranks 8th on the KITTI odometry benchmark which outperforms most LiDAR/visual-LiDAR odometry systems. In addition, our visual module outperforms state-of-the-art visual odometry and our adaptive sweep-to-map optimization can improve the performance of several existing open-sourced LiDAR odometry systems. Moreover, we demonstrate our SDV-LOAM on a custom-built hardware platform in large-scale environments which achieves both a high accuracy and output frequency. We have released the source code of our SDV-LOAM for the development of the community.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Visual-LiDAR odometry and mapping (V-LOAM), which fuses complementary information of a camera and a LiDAR, is an attractive solution for accurate and robust pose estimation and mapping. However, existing systems could suffer nontrivial tracking errors arising from 1) association between 3D LiDAR points and sparse 2D features (i.e. 3D-2D depth association) and 2) obvious drifts in the vertical direction in the 6-degree of freedom (DOF) sweep-to-map optimization. In this paper, we present SDV-LOAM which incorporates a semi-direct visual odometry and an adaptive sweep-to-map LiDAR odometry to effectively avoid the above-mentioned errors and in turn achieve high tracking accuracy. The visual module of our SDV-LOAM directly extracts high-gradient pixels where 3D LiDAR points project on for tracking. To avoid the problem of large scale difference between matching frames in the VO, we design a novel point matching with propagation method to propagate points of a host frame to an intermediate keyframe which is closer to the current frame to reduce scale differences. To reduce the pose estimation drifts in the vertical direction, our LiDAR module employs an adaptive sweep-to-map optimization method which automatically choose to optimize 3 horizontal DOF or 6 full DOF pose according to the richness of geometric constraints in the vertical direction. In addition, we propose a novel sweep reconstruction method which can increase the input frequency of LiDAR point clouds to the same frequency as the camera images, and in turn yield a high frequency output of the LiDAR odometry in theory. Experimental results demonstrate that our SDV-LOAM ranks 8th on the KITTI odometry benchmark which outperforms most LiDAR/visual-LiDAR odometry systems. In addition, our visual module outperforms state-of-the-art visual odometry and our adaptive sweep-to-map optimization can improve the performance of several existing open-sourced LiDAR odometry systems. Moreover, we demonstrate our SDV-LOAM on a custom-built hardware platform in large-scale environments which achieves both a high accuracy and output frequency. We have released the source code of our SDV-LOAM for the development of the community.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Visual-LiDAR odometry and mapping (V-LOAM), which fuses complementary information of a camera and a LiDAR, is an attractive solution for accurate and robust pose estimation and mapping. However, existing systems could suffer nontrivial tracking errors arising from 1) association between 3D LiDAR points and sparse 2D features (i.e. 3D-2D depth association) and 2) obvious drifts in the vertical direction in the 6-degree of freedom (DOF) sweep-to-map optimization. In this paper, we present SDV-LOAM which incorporates a semi-direct visual odometry and an adaptive sweep-to-map LiDAR odometry to effectively avoid the above-mentioned errors and in turn achieve high tracking accuracy. The visual module of our SDV-LOAM directly extracts high-gradient pixels where 3D LiDAR points project on for tracking. To avoid the problem of large scale difference between matching frames in the VO, we design a novel point matching with propagation method to propagate points of a host frame to an intermediate keyframe which is closer to the current frame to reduce scale differences. To reduce the pose estimation drifts in the vertical direction, our LiDAR module employs an adaptive sweep-to-map optimization method which automatically choose to optimize 3 horizontal DOF or 6 full DOF pose according to the richness of geometric constraints in the vertical direction. In addition, we propose a novel sweep reconstruction method which can increase the input frequency of LiDAR point clouds to the same frequency as the camera images, and in turn yield a high frequency output of the LiDAR odometry in theory. Experimental results demonstrate that our SDV-LOAM ranks 8th on the KITTI odometry benchmark which outperforms most LiDAR/visual-LiDAR odometry systems. In addition, our visual module outperforms state-of-the-art visual odometry and our adaptive sweep-to-map optimization can improve the performance of several existing open-sourced LiDAR odometry systems. Moreover, we demonstrate our SDV-LOAM on a custom-built hardware platform in large-scale environments which achieves both a high accuracy and output frequency. We have released the source code of our SDV-LOAM for the development of the community.",
"title": "SDV-LOAM: Semi-Direct Visual-LiDAR Odometry and Mapping",
"normalizedTitle": "SDV-LOAM: Semi-Direct Visual-LiDAR Odometry and Mapping",
"fno": "10086694",
"hasPdf": true,
"idPrefix": "tp",
"keywords": [
"Laser Radar",
"Visualization",
"Pose Estimation",
"Three Dimensional Displays",
"Point Cloud Compression",
"Visual Odometry",
"Cameras",
"Mapping",
"Pose Estimation",
"Visual Li DAR Sensor System"
],
"authors": [
{
"givenName": "Zikang",
"surname": "Yuan",
"fullName": "Zikang Yuan",
"affiliation": "Institute of Artificial Intelligence, Huazhong University of Science and Technology, Wuhan, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Qingjie",
"surname": "Wang",
"fullName": "Qingjie Wang",
"affiliation": "Electronic Information and Communications, Huazhong University of Science and Technology, Wuhan, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ken",
"surname": "Cheng",
"fullName": "Ken Cheng",
"affiliation": "Electronic Information and Communications, Huazhong University of Science and Technology, Wuhan, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Tianyu",
"surname": "Hao",
"fullName": "Tianyu Hao",
"affiliation": "Electronic Information and Communications, Huazhong University of Science and Technology, Wuhan, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xin",
"surname": "Yang",
"fullName": "Xin Yang",
"affiliation": "Electronic Information and Communications, Huazhong University of Science and Technology, Wuhan, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-03-01 00:00:00",
"pubType": "trans",
"pages": "1-18",
"year": "5555",
"issn": "0162-8828",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/crv/2011/4362/0/4362a086",
"title": "Visual Odometry Using 3-Dimensional Video Input",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2011/4362a086/12OmNA0vnQM",
"parentPublication": {
"id": "proceedings/crv/2011/4362/0",
"title": "2011 Canadian Conference on Computer and Robot Vision",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/crv/2018/6481/0/648101a166",
"title": "Learning a Bias Correction for Lidar-Only Motion Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2018/648101a166/17D45XwUALp",
"parentPublication": {
"id": "proceedings/crv/2018/6481/0",
"title": "2018 15th Conference on Computer and Robot Vision (CRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2023/05/09893384",
"title": "Efficient 3D Deep LiDAR Odometry",
"doi": null,
"abstractUrl": "/journal/tp/2023/05/09893384/1GGLsotG8IU",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mlccim/2022/9858/0/985800a447",
"title": "Optimization Method of SLAM Mapping Based on Plane Fitting and Intensity Information",
"doi": null,
"abstractUrl": "/proceedings-article/mlccim/2022/985800a447/1IAKhwnFTEc",
"parentPublication": {
"id": "proceedings/mlccim/2022/9858/0",
"title": "2022 International Conference on Machine Learning, Cloud Computing and Intelligent Mining (MLCCIM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/irc/2022/7260/0/726000a072",
"title": "Experimental Assessment of Feature-based Lidar Odometry and Mapping",
"doi": null,
"abstractUrl": "/proceedings-article/irc/2022/726000a072/1Kckj73Kc4U",
"parentPublication": {
"id": "proceedings/irc/2022/7260/0",
"title": "2022 Sixth IEEE International Conference on Robotic Computing (IRC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2019/3293/0/329300i465",
"title": "LO-Net: Deep Real-Time Lidar Odometry",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2019/329300i465/1gyrab0i1os",
"parentPublication": {
"id": "proceedings/cvpr/2019/3293/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wcmeim/2019/5045/0/504500a483",
"title": "AGV Localization Based on Odometry and LiDAR",
"doi": null,
"abstractUrl": "/proceedings-article/wcmeim/2019/504500a483/1hHLuZFKwco",
"parentPublication": {
"id": "proceedings/wcmeim/2019/5045/0",
"title": "2019 2nd World Conference on Mechanical Engineering and Intelligent Manufacturing (WCMEIM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmu/2019/41/0/09006646",
"title": "Edge Computing for Mobile Robots: Multi-Robot Feature-Based Lidar Odometry with FPGAs",
"doi": null,
"abstractUrl": "/proceedings-article/icmu/2019/09006646/1hJtvzQZBOo",
"parentPublication": {
"id": "proceedings/icmu/2019/41/0",
"title": "2019 Twelfth International Conference on Mobile Computing and Ubiquitous Network (ICMU)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2021/0477/0/047700d843",
"title": "Self-supervised Visual-LiDAR Odometry with Flip Consistency",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2021/047700d843/1uqGj8rD1bq",
"parentPublication": {
"id": "proceedings/wacv/2021/0477/0",
"title": "2021 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900p5905",
"title": "PWCLO-Net: Deep LiDAR Odometry in 3D Point Clouds Using Hierarchical Embedding Mask Optimization",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900p5905/1yeKrD5i31m",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10086616",
"articleId": "1LUpwPhS7Xq",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10086692",
"articleId": "1LUpxbhMHXq",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
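
The SDV-LOAM record above hinges on an adaptive sweep-to-map step that switches between a 3-DOF (x, y, yaw) and a full 6-DOF pose update depending on how richly the scene constrains the vertical direction. The Python sketch below illustrates only that gating idea; the scoring rule, the 0.7 normal test, and the 0.2 threshold are assumptions, not the paper's criterion.

```python
# A toy illustration of gating a sweep-to-map pose update between 3 DOF
# (x, y, yaw) and the full 6 DOF, driven by how many matched planar features
# constrain the vertical direction.  This is NOT SDV-LOAM's criterion; the
# scoring rule and both thresholds are assumptions.
import numpy as np

def vertical_constraint_score(plane_normals):
    """Fraction of matched planar features whose normal points mostly along z."""
    normals = np.asarray(plane_normals, dtype=float)
    normals /= np.linalg.norm(normals, axis=1, keepdims=True)
    return float(np.mean(np.abs(normals[:, 2]) > 0.7))

def choose_dof(plane_normals, min_score=0.2):
    """Return 6 when ground-like constraints are rich enough, otherwise 3."""
    return 6 if vertical_constraint_score(plane_normals) >= min_score else 3

if __name__ == "__main__":
    walls_only = [[1, 0, 0], [0, 1, 0], [0.7, 0.7, 0.05]]    # weak vertical constraint
    with_ground = walls_only + [[0, 0, 1], [0.05, 0.05, 1]]  # ground planes added
    print(choose_dof(walls_only))    # -> 3: optimize x, y, yaw only
    print(choose_dof(with_ground))   # -> 6: full pose update
```

In a full system the chosen DOF set would parameterize the actual sweep-to-map optimization; the sketch stops at the selection step.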
{
"issue": {
"id": "12OmNAXPyfp",
"title": "Nov.-Dec.",
"year": "2011",
"issueNum": "06",
"idPrefix": "cg",
"pubType": "magazine",
"volume": "31",
"label": "Nov.-Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxly9gf",
"doi": "10.1109/MCG.2011.32",
"abstract": "Researchers have used depth images to approximate scene geometry in a variety of interactive 3D graphics applications. Previous researchers constructed images using orthographic or perspective projection, which limits the approximation quality to what's visible along a single view direction or from a single viewpoint. Images constructed with nonpinhole cameras can improve approximation quality at little additional cost, if the camera offers fast projection. For such a camera, the fundamental operation of ray-and-depth-image intersection proceeds efficiently by searching along the 1D projection of the ray onto the image. A proposed method extends epipolar geometry constraints to nonpinhole cameras for two-camera configurations. Researchers have demonstrated nonpinhole depth images' advantages in the context of reflections, refractions, relief texture mapping, and ambient occlusion. The Web extra is a video that shows how nonpinhole depth images provide advantages regarding reflection, refraction, relief texture mapping, and ambient occlusion.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Researchers have used depth images to approximate scene geometry in a variety of interactive 3D graphics applications. Previous researchers constructed images using orthographic or perspective projection, which limits the approximation quality to what's visible along a single view direction or from a single viewpoint. Images constructed with nonpinhole cameras can improve approximation quality at little additional cost, if the camera offers fast projection. For such a camera, the fundamental operation of ray-and-depth-image intersection proceeds efficiently by searching along the 1D projection of the ray onto the image. A proposed method extends epipolar geometry constraints to nonpinhole cameras for two-camera configurations. Researchers have demonstrated nonpinhole depth images' advantages in the context of reflections, refractions, relief texture mapping, and ambient occlusion. The Web extra is a video that shows how nonpinhole depth images provide advantages regarding reflection, refraction, relief texture mapping, and ambient occlusion.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Researchers have used depth images to approximate scene geometry in a variety of interactive 3D graphics applications. Previous researchers constructed images using orthographic or perspective projection, which limits the approximation quality to what's visible along a single view direction or from a single viewpoint. Images constructed with nonpinhole cameras can improve approximation quality at little additional cost, if the camera offers fast projection. For such a camera, the fundamental operation of ray-and-depth-image intersection proceeds efficiently by searching along the 1D projection of the ray onto the image. A proposed method extends epipolar geometry constraints to nonpinhole cameras for two-camera configurations. Researchers have demonstrated nonpinhole depth images' advantages in the context of reflections, refractions, relief texture mapping, and ambient occlusion. The Web extra is a video that shows how nonpinhole depth images provide advantages regarding reflection, refraction, relief texture mapping, and ambient occlusion.",
"title": "Nonpinhole Approximations for Interactive Rendering",
"normalizedTitle": "Nonpinhole Approximations for Interactive Rendering",
"fno": "mcg2011060068",
"hasPdf": true,
"idPrefix": "cg",
"keywords": [
"Cameras",
"Geometry",
"Approximation Methods",
"Rendering Computer Graphics",
"Image Segmentation",
"Graphics And Multimedia",
"Nonpinhole Camera",
"Single Pole Occlusion Camera",
"Graph Camera",
"Depth Image",
"Impostor",
"Epipolar Constraints",
"Reflection",
"Refraction",
"Relief Texture Mapping",
"Ambient Occlusion",
"Interactive 3 D Graphics",
"Computer Graphics"
],
"authors": [
{
"givenName": "C.",
"surname": "Wyman",
"fullName": "C. Wyman",
"affiliation": "Univ. of Iowa, Iowa City, IA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "K.",
"surname": "Hayward",
"fullName": "K. Hayward",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "V.",
"surname": "Popescu",
"fullName": "V. Popescu",
"affiliation": "Purdue Univ., West Lafayette, IN, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "P.",
"surname": "Rosen",
"fullName": "P. Rosen",
"affiliation": "Univ. of Utah, Salt Lake City, UT, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "06",
"pubDate": "2011-11-01 00:00:00",
"pubType": "mags",
"pages": "68-83",
"year": "2011",
"issn": "0272-1716",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/cvprw/2012/1611/0/06239344",
"title": "Geometry-corrected light field rendering for creating a holographic stereogram",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2012/06239344/12OmNBh8gW6",
"parentPublication": {
"id": "proceedings/cvprw/2012/1611/0",
"title": "2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/1995/7187/0/71870011",
"title": "Interactive Maximum Projection Volume Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/1995/71870011/12OmNzZmZv2",
"parentPublication": {
"id": "proceedings/ieee-vis/1995/7187/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/09/06803934",
"title": "Second-Order Feed-Forward Renderingfor Specular and Glossy Reflections",
"doi": null,
"abstractUrl": "/journal/tg/2014/09/06803934/13rRUwInvyA",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2007/05/v0966",
"title": "RTcams: A New Perspective on Nonphotorealistic Rendering from Photographs",
"doi": null,
"abstractUrl": "/journal/tg/2007/05/v0966/13rRUyuvRxi",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000g393",
"title": "Aperture Supervision for Monocular Depth Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000g393/17D45WIXbNB",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2018/3788/0/08546330",
"title": "Dynamic Projected Segmentation Networks For Hand Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2018/08546330/17D45X7VTgX",
"parentPublication": {
"id": "proceedings/icpr/2018/3788/0",
"title": "2018 24th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/09008541",
"title": "Monocular Neural Image Based Rendering With Continuous View Control",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/09008541/1hVlbVEAL3a",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2019/5023/0/502300d417",
"title": "Depth-Guided Dense Dynamic Filtering Network for Bokeh Effect Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2019/502300d417/1i5mB3Uf3gI",
"parentPublication": {
"id": "proceedings/iccvw/2019/5023/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800h827",
"title": "Neural Point Cloud Rendering via Multi-Plane Projection",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800h827/1m3nmJqUK8o",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2021/4899/0/489900c398",
"title": "Stacked Deep Multi-Scale Hierarchical Network for Fast Bokeh Effect Rendering from a Single Image",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2021/489900c398/1yJYmavkLUA",
"parentPublication": {
"id": "proceedings/cvprw/2021/4899/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "mcg2011060056",
"articleId": "13rRUxASujY",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "mcg2011060084",
"articleId": "13rRUyv53HP",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTYesOG",
"name": "mcg2011060068s.mov",
"location": "https://www.computer.org/csdl/api/v1/extra/mcg2011060068s.mov",
"extension": "mov",
"size": "100 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
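
The nonpinhole-rendering record above notes that ray-and-depth-image intersection can be found by searching along the 1D projection of the ray onto the image. The sketch below shows that search for the simplest case, an orthographic depth image treated as a height field; the stepping scheme, grid, and synthetic surface are illustrative assumptions rather than the paper's nonpinhole camera models.

```python
# A minimal height-field version of ray/depth-image intersection: step along
# the ray's projection onto the image and stop where the ray drops below the
# stored depth.  Illustrative only; the orthographic setup, step size, and
# synthetic surface are assumptions.
import numpy as np

def ray_heightfield_hit(height, origin, direction, steps=256):
    """Return the first sample point (x, y, z) at which the ray falls below the height field."""
    h, w = height.shape
    d = np.asarray(direction, float)
    d /= np.linalg.norm(d)
    p = np.asarray(origin, float)
    for _ in range(steps):
        p = p + d                               # march one texel-sized step along the ray
        ix, iy = int(round(p[0])), int(round(p[1]))
        if not (0 <= ix < w and 0 <= iy < h):
            return None                         # ray left the image footprint
        if p[2] <= height[iy, ix]:              # ray dipped below the surface sample
            return p
    return None

if __name__ == "__main__":
    yy, xx = np.mgrid[0:64, 0:64]
    bumpy = 5.0 + 3.0 * np.sin(xx / 6.0) * np.cos(yy / 6.0)   # synthetic height field
    print("hit:", ray_heightfield_hit(bumpy, origin=(2.0, 2.0, 20.0),
                                      direction=(1.0, 1.0, -0.5)))
```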
{
"issue": {
"id": "12OmNyq0zFI",
"title": "May",
"year": "2020",
"issueNum": "05",
"idPrefix": "tg",
"pubType": "journal",
"volume": "26",
"label": "May",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1hrXe0Hbv0I",
"doi": "10.1109/TVCG.2020.2973443",
"abstract": "Occlusion is a powerful visual cue that is crucial for depth perception and realism in optical see-through augmented reality (OST-AR). However, existing OST-AR systems additively overlay physical and digital content with beam combiners - an approach that does not easily support mutual occlusion, resulting in virtual objects that appear semi-transparent and unrealistic. In this work, we propose a new type of occlusion-capable OST-AR system. Rather than additively combining the real and virtual worlds, we employ a single digital micromirror device (DMD) to merge the respective light paths in a multiplicative manner. This unique approach allows us to simultaneously block light incident from the physical scene on a pixel-by-pixel basis while also modulating the light emitted by a light-emitting diode (LED) to display digital content. Our technique builds on mixed binary/continuous factorization algorithms to optimize time-multiplexed binary DMD patterns and their corresponding LED colors to approximate a target augmented reality (AR) scene. In simulations and with a prototype benchtop display, we demonstrate hard-edge occlusions, plausible shadows, and also gaze-contingent optimization of this novel display mode, which only requires a single spatial light modulator.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Occlusion is a powerful visual cue that is crucial for depth perception and realism in optical see-through augmented reality (OST-AR). However, existing OST-AR systems additively overlay physical and digital content with beam combiners - an approach that does not easily support mutual occlusion, resulting in virtual objects that appear semi-transparent and unrealistic. In this work, we propose a new type of occlusion-capable OST-AR system. Rather than additively combining the real and virtual worlds, we employ a single digital micromirror device (DMD) to merge the respective light paths in a multiplicative manner. This unique approach allows us to simultaneously block light incident from the physical scene on a pixel-by-pixel basis while also modulating the light emitted by a light-emitting diode (LED) to display digital content. Our technique builds on mixed binary/continuous factorization algorithms to optimize time-multiplexed binary DMD patterns and their corresponding LED colors to approximate a target augmented reality (AR) scene. In simulations and with a prototype benchtop display, we demonstrate hard-edge occlusions, plausible shadows, and also gaze-contingent optimization of this novel display mode, which only requires a single spatial light modulator.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Occlusion is a powerful visual cue that is crucial for depth perception and realism in optical see-through augmented reality (OST-AR). However, existing OST-AR systems additively overlay physical and digital content with beam combiners - an approach that does not easily support mutual occlusion, resulting in virtual objects that appear semi-transparent and unrealistic. In this work, we propose a new type of occlusion-capable OST-AR system. Rather than additively combining the real and virtual worlds, we employ a single digital micromirror device (DMD) to merge the respective light paths in a multiplicative manner. This unique approach allows us to simultaneously block light incident from the physical scene on a pixel-by-pixel basis while also modulating the light emitted by a light-emitting diode (LED) to display digital content. Our technique builds on mixed binary/continuous factorization algorithms to optimize time-multiplexed binary DMD patterns and their corresponding LED colors to approximate a target augmented reality (AR) scene. In simulations and with a prototype benchtop display, we demonstrate hard-edge occlusions, plausible shadows, and also gaze-contingent optimization of this novel display mode, which only requires a single spatial light modulator.",
"title": "Factored Occlusion: Single Spatial Light Modulator Occlusion-capable Optical See-through Augmented Reality Display",
"normalizedTitle": "Factored Occlusion: Single Spatial Light Modulator Occlusion-capable Optical See-through Augmented Reality Display",
"fno": "08998139",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Augmented Reality",
"LED Displays",
"Micromirrors",
"Spatial Light Modulators",
"Gaze Contingent Optimization",
"LED Colors",
"Mixed Binary Continuous Factorization Algorithms",
"Optical See Through Augmented Reality Display",
"Single Spatial Light Modulator Occlusion",
"Factored Occlusion",
"Display Mode",
"Hard Edge Occlusions",
"Prototype Benchtop Display",
"Augmented Reality Scene",
"Time Multiplexed Binary DMD Patterns",
"Digital Content",
"Light Emitting Diode",
"Pixel By Pixel Basis",
"Physical Scene",
"Light Incident",
"Light Paths",
"Single Digital Micromirror Device",
"Virtual Worlds",
"Real Worlds",
"Occlusion Capable OST AR System",
"Virtual Objects",
"Mutual Occlusion",
"Beam Combiners",
"Depth Perception",
"Image Color Analysis",
"Optical Diffraction",
"Mirrors",
"Light Emitting Diodes",
"Optical Imaging",
"Augmented Reality",
"Modulation",
"Augmented Reality",
"Computational Displays",
"Mutual Occlusion"
],
"authors": [
{
"givenName": "Brooke",
"surname": "Krajancich",
"fullName": "Brooke Krajancich",
"affiliation": "Stanford University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Nitish",
"surname": "Padmanaban",
"fullName": "Nitish Padmanaban",
"affiliation": "Stanford University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Gordon",
"surname": "Wetzstein",
"fullName": "Gordon Wetzstein",
"affiliation": "Stanford University",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "05",
"pubDate": "2020-05-01 00:00:00",
"pubType": "trans",
"pages": "1871-1879",
"year": "2020",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2012/4660/0/06402574",
"title": "Occlusion capable optical see-through head-mounted display using freeform optics",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2012/06402574/12OmNBEpnEt",
"parentPublication": {
"id": "proceedings/ismar/2012/4660/0",
"title": "2012 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/11/08007218",
"title": "Occlusion Leak Compensation for Optical See-Through Displays Using a Single-Layer Transmissive Spatial Light Modulator",
"doi": null,
"abstractUrl": "/journal/tg/2017/11/08007218/13rRUxcbnHi",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08676155",
"title": "Varifocal Occlusion for Optical See-Through Head-Mounted Displays using a Slide Occlusion Mask",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08676155/18LFfGhc49i",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a800",
"title": "Add-on Occlusion: An External Module for Optical See-through Augmented Reality Displays to Support Mutual Occlusion",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a800/1CJeADcapNK",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a389",
"title": "Objective Measurements of Background Color Shifts Caused by Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a389/1J7WuL68jAY",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10050791",
"title": "Add-on Occlusion: Turning Off-the-Shelf Optical See-through Head-mounted Displays Occlusion-capable",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10050791/1L039oS5wDm",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a237",
"title": "A Compact Photochromic Occlusion Capable See-through Display with Holographic Lenses",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a237/1MNgTZ7ZNLO",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/11/08827571",
"title": "Varifocal Occlusion-Capable Optical See-through Augmented Reality Display based on Focus-tunable Optics",
"doi": null,
"abstractUrl": "/journal/tg/2019/11/08827571/1dgvaPxmhbi",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09416829",
"title": "Design of a Pupil-Matched Occlusion-Capable Optical See-Through Wearable Display",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09416829/1t8VUXSYL2E",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2021/0158/0/015800a422",
"title": "Blending Shadows: Casting Shadows in Virtual and Real using Occlusion-Capable Augmented Reality Near-Eye Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2021/015800a422/1yeD2Kh0vxS",
"parentPublication": {
"id": "proceedings/ismar/2021/0158/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "08998307",
"articleId": "1hpPBi8EjJe",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08998133",
"articleId": "1hrXcnyAOzu",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
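
The Factored Occlusion record above optimizes time-multiplexed binary DMD patterns together with continuous LED intensities. As a toy stand-in for that mixed binary/continuous factorization, the sketch below alternates a greedy binary mask update with a least-squares intensity update on a grayscale target; the update rules, frame count, and target image are assumptions and do not reproduce the paper's solver or its display model.

```python
# A toy stand-in for mixed binary/continuous factorization: approximate a
# grayscale target as a sum of K binary patterns, each scaled by a nonnegative
# intensity, alternating a greedy binary update with a least-squares intensity
# update.  The update rules and sizes are assumptions.
import numpy as np

def factor_binary(target, k=4, iters=20, seed=1):
    """Return binary masks B (k,H,W) and intensities c (k,) with sum_i c[i]*B[i] ~ target."""
    rng = np.random.default_rng(seed)
    masks = rng.random((k, *target.shape)) > 0.5
    intens = np.full(k, target.mean() / k)
    for _ in range(iters):
        for i in range(k):
            # Residual that frame i should explain (target minus the other frames).
            residual = target - np.tensordot(intens, masks, axes=1) + intens[i] * masks[i]
            masks[i] = residual > intens[i] / 2.0        # turn a pixel on only if it reduces error
            if masks[i].any():
                intens[i] = max(residual[masks[i]].mean(), 0.0)   # least-squares intensity
    return masks, intens

if __name__ == "__main__":
    yy, xx = np.mgrid[0:32, 0:32]
    target = (np.sin(xx / 5.0) * np.cos(yy / 7.0) + 1.0) / 2.0
    masks, intens = factor_binary(target)
    approx = np.tensordot(intens, masks, axes=1)
    print("RMS error:", float(np.sqrt(np.mean((approx - target) ** 2))))
```

The alternation mirrors the general structure of such factorizations: the binary variables are updated greedily per frame, and the continuous intensity for each frame is the closed-form least-squares fit over its active pixels.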
{
"issue": {
"id": "12OmNvTBB89",
"title": "Feb.",
"year": "2018",
"issueNum": "02",
"idPrefix": "tg",
"pubType": "journal",
"volume": "24",
"label": "Feb.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUx0gezW",
"doi": "10.1109/TVCG.2017.2657766",
"abstract": "Handheld scanning using commodity depth cameras provides a flexible and low-cost manner to get 3D models. The existing methods scan a target by densely fusing all the captured depth images, yet most frames are redundant. The jittering frames inevitably embedded in handheld scanning process will cause feature blurring on the reconstructed model and even trigger the scan failure (i.e., camera tracking losing). To address these problems, in this paper, we propose a novel sparse-sequence fusion (SSF) algorithm for handheld scanning using commodity depth cameras. It first extracts related measurements for analyzing camera motion. Then based on these measurements, we progressively construct a supporting subset for the captured depth image sequence to decrease the data redundancy and the interference from jittering frames. Since SSF will reveal the intrinsic heavy noise of the original depth images, our method introduces a refinement process to eliminate the raw noise and recover geometric features for the depth images selected into the supporting subset. We finally obtain the fused result by integrating the refined depth images into the truncated signed distance field (TSDF) of the target. Multiple comparison experiments are conducted and the results verify the feasibility and validity of SSF for handheld scanning with a commodity depth camera.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Handheld scanning using commodity depth cameras provides a flexible and low-cost manner to get 3D models. The existing methods scan a target by densely fusing all the captured depth images, yet most frames are redundant. The jittering frames inevitably embedded in handheld scanning process will cause feature blurring on the reconstructed model and even trigger the scan failure (i.e., camera tracking losing). To address these problems, in this paper, we propose a novel sparse-sequence fusion (SSF) algorithm for handheld scanning using commodity depth cameras. It first extracts related measurements for analyzing camera motion. Then based on these measurements, we progressively construct a supporting subset for the captured depth image sequence to decrease the data redundancy and the interference from jittering frames. Since SSF will reveal the intrinsic heavy noise of the original depth images, our method introduces a refinement process to eliminate the raw noise and recover geometric features for the depth images selected into the supporting subset. We finally obtain the fused result by integrating the refined depth images into the truncated signed distance field (TSDF) of the target. Multiple comparison experiments are conducted and the results verify the feasibility and validity of SSF for handheld scanning with a commodity depth camera.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Handheld scanning using commodity depth cameras provides a flexible and low-cost manner to get 3D models. The existing methods scan a target by densely fusing all the captured depth images, yet most frames are redundant. The jittering frames inevitably embedded in handheld scanning process will cause feature blurring on the reconstructed model and even trigger the scan failure (i.e., camera tracking losing). To address these problems, in this paper, we propose a novel sparse-sequence fusion (SSF) algorithm for handheld scanning using commodity depth cameras. It first extracts related measurements for analyzing camera motion. Then based on these measurements, we progressively construct a supporting subset for the captured depth image sequence to decrease the data redundancy and the interference from jittering frames. Since SSF will reveal the intrinsic heavy noise of the original depth images, our method introduces a refinement process to eliminate the raw noise and recover geometric features for the depth images selected into the supporting subset. We finally obtain the fused result by integrating the refined depth images into the truncated signed distance field (TSDF) of the target. Multiple comparison experiments are conducted and the results verify the feasibility and validity of SSF for handheld scanning with a commodity depth camera.",
"title": "Surface Reconstruction via Fusing Sparse-Sequence of Depth Images",
"normalizedTitle": "Surface Reconstruction via Fusing Sparse-Sequence of Depth Images",
"fno": "07833201",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Cameras",
"Image Reconstruction",
"Surface Reconstruction",
"Three Dimensional Displays",
"Image Sequences",
"Image Segmentation",
"Solid Modeling",
"Depth Image Refinement",
"Handheld Scanning",
"Sparse Sequence Fusion",
"Surface Reconstruction",
"Supporting Subset"
],
"authors": [
{
"givenName": "Long",
"surname": "Yang",
"fullName": "Long Yang",
"affiliation": "Computer School, Wuhan University, Wuhan, Hubei, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Qingan",
"surname": "Yan",
"fullName": "Qingan Yan",
"affiliation": "State Key Lab of Software Engineering, Computer School, Wuhan University, Wuhan, Hubei, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yanping",
"surname": "Fu",
"fullName": "Yanping Fu",
"affiliation": "State Key Lab of Software Engineering, Computer School, Wuhan University, Wuhan, Hubei, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Chunxia",
"surname": "Xiao",
"fullName": "Chunxia Xiao",
"affiliation": "State Key Lab of Software Engineering, Computer School, Wuhan University, Wuhan, Hubei, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "02",
"pubDate": "2018-02-01 00:00:00",
"pubType": "trans",
"pages": "1190-1203",
"year": "2018",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/msn/2013/5159/0/06726381",
"title": "Depth Mapping Using the Hierarchical Reconstruction of Multiple Sequence",
"doi": null,
"abstractUrl": "/proceedings-article/msn/2013/06726381/12OmNClQ0Bv",
"parentPublication": {
"id": "proceedings/msn/2013/5159/0",
"title": "2013 Ninth International Conference on Mobile Ad-hoc and Sensor Networks (MSN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/smartcomp/2014/5711/0/07043853",
"title": "Enabling 3D online shopping with affordable depth scanned models",
"doi": null,
"abstractUrl": "/proceedings-article/smartcomp/2014/07043853/12OmNwHz07o",
"parentPublication": {
"id": "proceedings/smartcomp/2014/5711/0",
"title": "2014 International Conference on Smart Computing (SMARTCOMP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2017/2610/0/261001a057",
"title": "OctNetFusion: Learning Depth Fusion from Data",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2017/261001a057/12OmNxFJXuz",
"parentPublication": {
"id": "proceedings/3dv/2017/2610/0",
"title": "2017 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2014/4311/0/4311a055",
"title": "When Specular Object Meets RGB-D Camera 3D Scanning: Color Image Plus Fragmented Depth Map",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2014/4311a055/12OmNyUWR8A",
"parentPublication": {
"id": "proceedings/ism/2014/4311/0",
"title": "2014 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dcabes/2015/6593/0/6593a352",
"title": "Quick Capture and Reconstruction for 3D Head",
"doi": null,
"abstractUrl": "/proceedings-article/dcabes/2015/6593a352/12OmNyUnEKB",
"parentPublication": {
"id": "proceedings/dcabes/2015/6593/0",
"title": "2015 14th International Symposium on Distributed Computing and Applications for Business Engineering and Science (DCABES)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/1993/3880/0/00341144",
"title": "Toward global surface reconstruction by purposive viewpoint adjustment",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/1993/00341144/12OmNyo1o7g",
"parentPublication": {
"id": "proceedings/cvpr/1993/3880/0",
"title": "Proceedings of IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2017/1032/0/1032a910",
"title": "BodyFusion: Real-Time Capture of Human Motion and Surface Geometry Using a Single Depth Camera",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032a910/12OmNzT7Otl",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2013/05/ttp2013051039",
"title": "Algorithms for 3D Shape Scanning with a Depth Camera",
"doi": null,
"abstractUrl": "/journal/tp/2013/05/ttp2013051039/13rRUxNW1UZ",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2018/8425/0/842500a012",
"title": "Surface Light Field Fusion",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2018/842500a012/17D45WODasr",
"parentPublication": {
"id": "proceedings/3dv/2018/8425/0",
"title": "2018 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/02/09184024",
"title": "GeoNet++: Iterative Geometric Neural Network with Edge-Aware Refinement for Joint Depth and Surface Normal Estimation",
"doi": null,
"abstractUrl": "/journal/tp/2022/02/09184024/1mLHVYnhWko",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "07792706",
"articleId": "13rRUxDqS8m",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07862917",
"articleId": "13rRUwInvsY",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXFgIN",
"name": "ttg201802-07833201s1.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg201802-07833201s1.zip",
"extension": "zip",
"size": "14.9 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
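The record above describes fusing selected depth images into a truncated signed distance field (TSDF). As a point of reference, the sketch below shows the generic KinectFusion-style TSDF integration step (a weighted running average along each viewing ray) that such pipelines build on; it is not the paper's sparse-sequence selection or depth refinement, and the camera intrinsics, volume layout, and flat-wall depth map are illustrative assumptions.

```python
import numpy as np

def integrate_depth(tsdf, weight, depth, K, voxel_size, origin, trunc=0.04):
    """Fuse one depth image (meters) into the (tsdf, weight) volumes in place."""
    res = tsdf.shape
    # Voxel centers in camera coordinates (camera pose assumed to be identity).
    ii, jj, kk = np.meshgrid(*[np.arange(r) for r in res], indexing="ij")
    x = ii * voxel_size + origin[0]
    y = jj * voxel_size + origin[1]
    z = kk * voxel_size + origin[2]
    valid = z > 0
    # Pinhole projection of voxel centers into the depth image.
    u = np.round(K[0, 0] * x / np.where(valid, z, 1.0) + K[0, 2]).astype(int)
    v = np.round(K[1, 1] * y / np.where(valid, z, 1.0) + K[1, 2]).astype(int)
    valid &= (u >= 0) & (u < depth.shape[1]) & (v >= 0) & (v < depth.shape[0])
    d = np.zeros(res)
    d[valid] = depth[v[valid], u[valid]]
    valid &= d > 0
    # Truncated signed distance along the ray (positive in front of the surface).
    sdf = np.clip(d - z, -trunc, trunc)
    update = valid & (d - z > -trunc)
    # Weighted running average with a per-frame weight of 1.
    tsdf[update] = (tsdf[update] * weight[update] + sdf[update]) / (weight[update] + 1)
    weight[update] += 1

# Toy usage: a flat wall 0.5 m in front of a 64x64-pixel depth camera.
K = np.array([[60.0, 0.0, 32.0], [0.0, 60.0, 32.0], [0.0, 0.0, 1.0]])
depth = np.full((64, 64), 0.5)
tsdf = np.zeros((48, 48, 48))
weight = np.zeros((48, 48, 48))
integrate_depth(tsdf, weight, depth, K, voxel_size=0.02,
                origin=np.array([-0.48, -0.48, 0.02]))
print("voxels updated:", int((weight > 0).sum()))
```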
{
"issue": {
"id": "1uSOz8w9kGc",
"title": "Aug.",
"year": "2021",
"issueNum": "08",
"idPrefix": "tp",
"pubType": "journal",
"volume": "43",
"label": "Aug.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1hGqrsQbjPO",
"doi": "10.1109/TPAMI.2020.2976065",
"abstract": "We present a framework for real-time 3D reconstruction of non-rigidly moving surfaces captured with a single RGB-D camera. Based on the variational level set method, it warps a given truncated signed distance field (TSDF) to a target TSDF via gradient flow without explicit correspondence search. We optimize an energy that contains a data term which steers towards voxel-wise alignment. To ensure geometrically consistent reconstructions, we develop and compare different strategies, namely an approximately Killing vector field regularizer, gradient flow in Sobolev space and newly devised accelerated optimization. The underlying TSDF evolution makes our approach capable of capturing rapid motions, topological changes and interacting agents, but entails loss of data association. To recover correspondences, we propose to utilize the lowest-frequency Laplacian eigenfunctions of the TSDFs, which encode inherent deformation patterns. For moderate motions we are able to obtain implicit associations via a term that imposes voxel-wise eigenfunction alignment. This is not sufficient for larger motions, so we explicitly estimate voxel correspondences via signature matching of lower-dimensional eigenfunction embeddings. We carry out qualitative and quantitative evaluation of our geometric reconstruction fidelity and voxel correspondence accuracy, demonstrating advantages over related techniques in handling topological changes and fast motions.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a framework for real-time 3D reconstruction of non-rigidly moving surfaces captured with a single RGB-D camera. Based on the variational level set method, it warps a given truncated signed distance field (TSDF) to a target TSDF via gradient flow without explicit correspondence search. We optimize an energy that contains a data term which steers towards voxel-wise alignment. To ensure geometrically consistent reconstructions, we develop and compare different strategies, namely an approximately Killing vector field regularizer, gradient flow in Sobolev space and newly devised accelerated optimization. The underlying TSDF evolution makes our approach capable of capturing rapid motions, topological changes and interacting agents, but entails loss of data association. To recover correspondences, we propose to utilize the lowest-frequency Laplacian eigenfunctions of the TSDFs, which encode inherent deformation patterns. For moderate motions we are able to obtain implicit associations via a term that imposes voxel-wise eigenfunction alignment. This is not sufficient for larger motions, so we explicitly estimate voxel correspondences via signature matching of lower-dimensional eigenfunction embeddings. We carry out qualitative and quantitative evaluation of our geometric reconstruction fidelity and voxel correspondence accuracy, demonstrating advantages over related techniques in handling topological changes and fast motions.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a framework for real-time 3D reconstruction of non-rigidly moving surfaces captured with a single RGB-D camera. Based on the variational level set method, it warps a given truncated signed distance field (TSDF) to a target TSDF via gradient flow without explicit correspondence search. We optimize an energy that contains a data term which steers towards voxel-wise alignment. To ensure geometrically consistent reconstructions, we develop and compare different strategies, namely an approximately Killing vector field regularizer, gradient flow in Sobolev space and newly devised accelerated optimization. The underlying TSDF evolution makes our approach capable of capturing rapid motions, topological changes and interacting agents, but entails loss of data association. To recover correspondences, we propose to utilize the lowest-frequency Laplacian eigenfunctions of the TSDFs, which encode inherent deformation patterns. For moderate motions we are able to obtain implicit associations via a term that imposes voxel-wise eigenfunction alignment. This is not sufficient for larger motions, so we explicitly estimate voxel correspondences via signature matching of lower-dimensional eigenfunction embeddings. We carry out qualitative and quantitative evaluation of our geometric reconstruction fidelity and voxel correspondence accuracy, demonstrating advantages over related techniques in handling topological changes and fast motions.",
"title": "Variational Level Set Evolution for Non-Rigid 3D Reconstruction From a Single Depth Camera",
"normalizedTitle": "Variational Level Set Evolution for Non-Rigid 3D Reconstruction From a Single Depth Camera",
"fno": "09007740",
"hasPdf": true,
"idPrefix": "tp",
"keywords": [
"Computational Geometry",
"Eigenvalues And Eigenfunctions",
"Gradient Methods",
"Image Colour Analysis",
"Image Motion Analysis",
"Image Reconstruction",
"Optimisation",
"Vectors",
"Variational Level Set Evolution",
"Nonrigid 3 D Reconstruction",
"Single Depth Camera",
"Real Time 3 D Reconstruction",
"RGB D Camera",
"Distance Field",
"Gradient Flow",
"Voxel Wise Alignment",
"Geometrically Consistent Reconstructions",
"Sobolev Space",
"Topological Changes",
"Interacting Agents",
"Data Association",
"Lowest Frequency Laplacian Eigenfunctions",
"Inherent Deformation Patterns",
"Implicit Associations",
"Voxel Wise Eigenfunction Alignment",
"Voxel Correspondences",
"Geometric Reconstruction Fidelity",
"TSDF Evolution",
"Killing Vector Field Regularizer",
"Nonrigidly Moving Surfaces",
"Truncated Signed Distance Field",
"Accelerated Optimization",
"Signature Matching",
"Lower Dimensional Eigenfunction Embeddings",
"Qualitative Evaluation",
"Quantitative Evaluation",
"Three Dimensional Displays",
"Eigenvalues And Eigenfunctions",
"Level Set",
"Cameras",
"Image Reconstruction",
"Laplace Equations",
"Surface Reconstruction",
"Non Rigid 3 D Reconstruction",
"Signed Distance Field Evolution",
"Laplacian Eigenfunctions"
],
"authors": [
{
"givenName": "Miroslava",
"surname": "Slavcheva",
"fullName": "Miroslava Slavcheva",
"affiliation": "TUM CAMP, Garching, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Maximilian",
"surname": "Baust",
"fullName": "Maximilian Baust",
"affiliation": "TUM CAMP, NVIDIA, Munich, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Slobodan",
"surname": "Ilic",
"fullName": "Slobodan Ilic",
"affiliation": "TUM CAMP, Siemens CT, Munich, Germany",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "08",
"pubDate": "2021-08-01 00:00:00",
"pubType": "trans",
"pages": "2838-2850",
"year": "2021",
"issn": "0162-8828",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iccvw/2017/1034/0/1034a833",
"title": "Towards Implicit Correspondence in Signed Distance Field Evolution",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2017/1034a833/12OmNsd6vmm",
"parentPublication": {
"id": "proceedings/iccvw/2017/1034/0",
"title": "2017 IEEE International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2015/8332/0/8332a371",
"title": "Repeatable Local Coordinate Frames for 3D Human Motion Tracking: From Rigid to Non-rigid",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2015/8332a371/12OmNvoWV1f",
"parentPublication": {
"id": "proceedings/3dv/2015/8332/0",
"title": "2015 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2011/0529/0/05981743",
"title": "Laplace-Beltrami eigenfunction metrics and geodesic shape distance features for shape matching in synthetic aperture sonar",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2011/05981743/12OmNwkhTgo",
"parentPublication": {
"id": "proceedings/cvprw/2011/0529/0",
"title": "CVPR 2011 WORKSHOPS",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2008/2242/0/04587538",
"title": "Articulated shape matching using Laplacian eigenfunctions and unsupervised point registration",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2008/04587538/12OmNyQph7m",
"parentPublication": {
"id": "proceedings/cvpr/2008/2242/0",
"title": "2008 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2014/5118/0/5118a446",
"title": "Efficient Computation of Relative Pose for Multi-camera Systems",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2014/5118a446/12OmNzBwGy2",
"parentPublication": {
"id": "proceedings/cvpr/2014/5118/0",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2018/8425/0/842500a042",
"title": "Patch-Based Non-rigid 3D Reconstruction from a Single Depth Stream",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2018/842500a042/17D45WGGoME",
"parentPublication": {
"id": "proceedings/3dv/2018/8425/0",
"title": "2018 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000c646",
"title": "SobolevFusion: 3D Reconstruction of Scenes Undergoing Free Non-rigid Motion",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000c646/17D45WKWnIc",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2018/8425/0/842500a596",
"title": "TwinFusion: High Framerate Non-rigid Fusion through Fast Correspondence Tracking",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2018/842500a596/17D45WXIkHD",
"parentPublication": {
"id": "proceedings/3dv/2018/8425/0",
"title": "2018 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09770416",
"title": "Learning-based Intrinsic Reflectional Symmetry Detection",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09770416/1D9G4zI0NIQ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2016/4847/0/07900223",
"title": "Non-rigid dense bijective maps",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2016/07900223/1fw1GJciVeE",
"parentPublication": {
"id": "proceedings/icpr/2016/4847/0",
"title": "2016 23rd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09020130",
"articleId": "1hS2KQzaeEU",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09257100",
"articleId": "1oFCybN9YxG",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1uSOCo1muzK",
"name": "ttp202108-09007740s1-supp2-2976065.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttp202108-09007740s1-supp2-2976065.mp4",
"extension": "mp4",
"size": "46.9 MB",
"__typename": "WebExtraType"
},
{
"id": "1uSOCGFd6es",
"name": "ttp202108-09007740s1-supp1-2976065.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttp202108-09007740s1-supp1-2976065.pdf",
"extension": "pdf",
"size": "1.45 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
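The record above aligns a source TSDF to a target TSDF by gradient flow on a voxel-wise data term. The sketch below is a deliberately simplified 2D analogue of that data term only: a per-cell displacement field is updated by gradient descent so that the warped source SDF matches the target SDF. It omits the paper's Killing/Sobolev regularizers and eigenfunction correspondences, uses 2D circle SDFs instead of fused TSDFs, approximates the chain rule with the gradient of the warped field, and its function names and step sizes are illustrative assumptions.

```python
import numpy as np
from scipy.ndimage import map_coordinates

def sdf_circle(shape, center, radius):
    """Signed distance to a circle, standing in for one slice of a TSDF."""
    ys, xs = np.mgrid[0:shape[0], 0:shape[1]]
    return np.sqrt((ys - center[0]) ** 2 + (xs - center[1]) ** 2) - radius

def warp(phi, u):
    """Sample phi at x + u(x); u has shape (2, H, W)."""
    ys, xs = np.mgrid[0:phi.shape[0], 0:phi.shape[1]].astype(float)
    return map_coordinates(phi, [ys + u[0], xs + u[1]], order=1, mode="nearest")

def align(phi_src, phi_tgt, steps=300, tau=0.2):
    """Gradient descent on the data term E(u) = 0.5 * sum (phi_src(x+u) - phi_tgt)^2."""
    u = np.zeros((2,) + phi_src.shape)
    for _ in range(steps):
        warped = warp(phi_src, u)
        residual = warped - phi_tgt          # voxel-wise data-term residual
        gy, gx = np.gradient(warped)         # approximates grad(phi_src) at x + u
        u[0] -= tau * residual * gy
        u[1] -= tau * residual * gx
    return u, warp(phi_src, u)

phi_src = sdf_circle((64, 64), center=(30.0, 28.0), radius=12.0)
phi_tgt = sdf_circle((64, 64), center=(34.0, 36.0), radius=12.0)
u, phi_aligned = align(phi_src, phi_tgt)
print("mean |phi_src - phi_tgt| :", float(np.abs(phi_src - phi_tgt).mean()))
print("mean |aligned - phi_tgt|:", float(np.abs(phi_aligned - phi_tgt).mean()))
```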
{
"issue": {
"id": "12OmNBBhN8N",
"title": "Dec.",
"year": "2020",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "26",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1niUpdweh2g",
"doi": "10.1109/TVCG.2020.3023634",
"abstract": "We present a real-time monocular 3D reconstruction system on a mobile phone, called Mobile3DRecon. Using an embedded monocular camera, our system provides an online mesh generation capability on back end together with real-time 6DoF pose tracking on front end for users to achieve realistic AR effects and interactions on mobile phones. Unlike most existing state-of-the-art systems which produce only point cloud based 3D models online or surface mesh offline, we propose a novel online incremental mesh generation approach to achieve fast online dense surface mesh reconstruction to satisfy the demand of real-time AR applications. For each keyframe of 6DoF tracking, we perform a robust monocular depth estimation, with a multi-view semi-global matching method followed by a depth refinement post-processing. The proposed mesh generation module incrementally fuses each estimated keyframe depth map to an online dense surface mesh, which is useful for achieving realistic AR effects such as occlusions and collisions. We verify our real-time reconstruction results on two mid-range mobile platforms. The experiments with quantitative and qualitative evaluation demonstrate the effectiveness of the proposed monocular 3D reconstruction system, which can handle the occlusions and collisions between virtual objects and real scenes to achieve realistic AR effects.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a real-time monocular 3D reconstruction system on a mobile phone, called Mobile3DRecon. Using an embedded monocular camera, our system provides an online mesh generation capability on back end together with real-time 6DoF pose tracking on front end for users to achieve realistic AR effects and interactions on mobile phones. Unlike most existing state-of-the-art systems which produce only point cloud based 3D models online or surface mesh offline, we propose a novel online incremental mesh generation approach to achieve fast online dense surface mesh reconstruction to satisfy the demand of real-time AR applications. For each keyframe of 6DoF tracking, we perform a robust monocular depth estimation, with a multi-view semi-global matching method followed by a depth refinement post-processing. The proposed mesh generation module incrementally fuses each estimated keyframe depth map to an online dense surface mesh, which is useful for achieving realistic AR effects such as occlusions and collisions. We verify our real-time reconstruction results on two mid-range mobile platforms. The experiments with quantitative and qualitative evaluation demonstrate the effectiveness of the proposed monocular 3D reconstruction system, which can handle the occlusions and collisions between virtual objects and real scenes to achieve realistic AR effects.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a real-time monocular 3D reconstruction system on a mobile phone, called Mobile3DRecon. Using an embedded monocular camera, our system provides an online mesh generation capability on back end together with real-time 6DoF pose tracking on front end for users to achieve realistic AR effects and interactions on mobile phones. Unlike most existing state-of-the-art systems which produce only point cloud based 3D models online or surface mesh offline, we propose a novel online incremental mesh generation approach to achieve fast online dense surface mesh reconstruction to satisfy the demand of real-time AR applications. For each keyframe of 6DoF tracking, we perform a robust monocular depth estimation, with a multi-view semi-global matching method followed by a depth refinement post-processing. The proposed mesh generation module incrementally fuses each estimated keyframe depth map to an online dense surface mesh, which is useful for achieving realistic AR effects such as occlusions and collisions. We verify our real-time reconstruction results on two mid-range mobile platforms. The experiments with quantitative and qualitative evaluation demonstrate the effectiveness of the proposed monocular 3D reconstruction system, which can handle the occlusions and collisions between virtual objects and real scenes to achieve realistic AR effects.",
"title": "Mobile3DRecon: Real-time Monocular 3D Reconstruction on a Mobile Phone",
"normalizedTitle": "Mobile3DRecon: Real-time Monocular 3D Reconstruction on a Mobile Phone",
"fno": "09201064",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Cameras",
"Image Matching",
"Image Reconstruction",
"Mesh Generation",
"Pose Estimation",
"Robot Vision",
"Stereo Image Processing",
"Mid Range Mobile Platforms",
"Real Time Reconstruction Results",
"Online Dense Surface Mesh",
"Estimated Keyframe Depth Map",
"Mesh Generation Module",
"Depth Refinement Post Processing",
"Multiview Semiglobal Matching Method",
"Robust Monocular Depth Estimation",
"6 Do F Tracking",
"Real Time AR Applications",
"Dense Surface Mesh Reconstruction",
"Mesh Generation Approach",
"3 D Models Online",
"Existing State Of The Art Systems",
"Realistic AR Effects",
"Real Time 6 Do F",
"Online Mesh Generation Capability",
"Embedded Monocular Camera",
"Called Mobile 3 D Recon",
"Real Time Monocular 3 D Reconstruction System",
"Mobile Phone",
"Real Time Systems",
"Three Dimensional Displays",
"Mobile Handsets",
"Cameras",
"Surface Reconstruction",
"Estimation",
"Mesh Generation",
"Real Time Reconstruction",
"Monocular Depth Estimation",
"Incremental Mesh Generation"
],
"authors": [
{
"givenName": "Xingbin",
"surname": "Yang",
"fullName": "Xingbin Yang",
"affiliation": "Sensetime Research",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Liyang",
"surname": "Zhou",
"fullName": "Liyang Zhou",
"affiliation": "Sensetime Research",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hanqing",
"surname": "Jiang",
"fullName": "Hanqing Jiang",
"affiliation": "Sensetime Research",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Zhongliang",
"surname": "Tang",
"fullName": "Zhongliang Tang",
"affiliation": "Sensetime Research",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yuanbo",
"surname": "Wang",
"fullName": "Yuanbo Wang",
"affiliation": "Sensetime Research",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hujun",
"surname": "Bao",
"fullName": "Hujun Bao",
"affiliation": "State Key Lab of CAD&CGZhejiang University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Guofeng",
"surname": "Zhang",
"fullName": "Guofeng Zhang",
"affiliation": "State Key Lab of CAD&CGZhejiang University",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "12",
"pubDate": "2020-12-01 00:00:00",
"pubType": "trans",
"pages": "3446-3456",
"year": "2020",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iccvw/2017/1034/0/1034a912",
"title": "3D Scene Mesh from CNN Depth Predictions and Sparse Monocular SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2017/1034a912/12OmNvD8RuE",
"parentPublication": {
"id": "proceedings/iccvw/2017/1034/0",
"title": "2017 IEEE International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2016/5407/0/5407a037",
"title": "Monocular, Real-Time Surface Reconstruction Using Dynamic Level of Detail",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2016/5407a037/12OmNxdm4uL",
"parentPublication": {
"id": "proceedings/3dv/2016/5407/0",
"title": "2016 Fourth International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2017/1032/0/1032e659",
"title": "Monocular Dense 3D Reconstruction of a Complex Dynamic Scene from Two Perspective Frames",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032e659/12OmNzmclLj",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hpcc-smartcity-dss/2017/2588/0/08291962",
"title": "Integrated Quality Mesh Generation for Poisson Surface Reconstruction in HPC Applications",
"doi": null,
"abstractUrl": "/proceedings-article/hpcc-smartcity-dss/2017/08291962/17D45VsBTYE",
"parentPublication": {
"id": "proceedings/hpcc-smartcity-dss/2017/2588/0",
"title": "2017 IEEE 19th International Conference on High Performance Computing and Communications; IEEE 15th International Conference on Smart City; IEEE 3rd International Conference on Data Science and Systems (HPCC/SmartCity/DSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699273",
"title": "CNN-MonoFusion: Online Monocular Dense Reconstruction Using Learned Depth from Single View",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699273/19F1QKV77QQ",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600f595",
"title": "SelfRecon: Self Reconstruction Your Digital Avatar from Monocular Video",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600f595/1H0NDIYvq0g",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isctt/2020/8575/0/857500a158",
"title": "Monocular Instance Level 3D Object Reconstruction based on Mesh R-CNN",
"doi": null,
"abstractUrl": "/proceedings-article/isctt/2020/857500a158/1rHeLb3yn4I",
"parentPublication": {
"id": "proceedings/isctt/2020/8575/0",
"title": "2020 5th International Conference on Information Science, Computer Technology and Transportation (ISCTT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2021/3864/0/09428299",
"title": "Capturing Implicit Spatial Cues for Monocular 3d Hand Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2021/09428299/1uimbE1R1Ze",
"parentPublication": {
"id": "proceedings/icme/2021/3864/0",
"title": "2021 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900b737",
"title": "Learning monocular 3D reconstruction of articulated categories from motion",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900b737/1yeHRUzCkF2",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900p5593",
"title": "NeuralRecon: Real-Time Coherent 3D Reconstruction from Monocular Video",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900p5593/1yeKZKrc88w",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09199573",
"articleId": "1ncgrpZIBi0",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09199560",
"articleId": "1ncguu1AZdS",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
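The record above estimates keyframe depth with a multi-view semi-global matching step. The toy example below runs OpenCV's off-the-shelf two-view StereoSGBM matcher on a synthetically shifted texture, purely to illustrate the semi-global matching building block; it is not the paper's multi-view pipeline, and the texture, true disparity, and matcher parameters are arbitrary demo choices.

```python
import cv2
import numpy as np

# Build a synthetic rectified pair: the "right" view is the texture shifted left
# by a constant disparity, so the ground-truth disparity of the left view is known.
rng = np.random.default_rng(0)
texture = (rng.random((240, 320)) * 255).astype(np.uint8)
texture = cv2.GaussianBlur(texture, (0, 0), 2)   # smooth so block matching is stable
true_disparity = 16
left = texture
right = np.roll(texture, -true_disparity, axis=1)

# Semi-global matching, the per-keyframe depth-estimation building block.
sgbm = cv2.StereoSGBM_create(minDisparity=0, numDisparities=64, blockSize=7)
disparity = sgbm.compute(left, right).astype(np.float32) / 16.0  # fixed-point output
valid = disparity > 0
print("median estimated disparity (expected ~16):",
      float(np.median(disparity[valid])))
# Depth would then follow as depth = f * baseline / disparity for a calibrated rig.
```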
{
"issue": {
"id": "12OmNBhpS2B",
"title": "April",
"year": "2014",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "20",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwjoNx4",
"doi": "10.1109/TVCG.2014.34",
"abstract": "Redirected walking algorithms imperceptibly rotate a virtual scene about users of immersive virtual environment systems in order to guide them away from tracking area boundaries. Ideally, these distortions permit users to explore large unbounded virtual worlds while walking naturally within a physically limited space. Many potential virtual worlds are composed of corridors, passageways, or aisles. Assuming users are not expected to walk through walls or other objects within the virtual world, these constrained worlds limit the directions of travel and as well as the number of opportunities to change direction. The resulting differences in user movement characteristics within the physical world have an impact on redirected walking algorithm performance. This work presents a comparison of generalized RDW algorithm performance within a constrained virtual world. In contrast to previous studies involving unconstrained virtual worlds, experimental results indicate that the steer-to-orbit keeps users in a smaller area than the steer-to-center algorithm. Moreover, in comparison to steer-to-center, steer-to-orbit is shown to reduce potential wall contacts by over 29%.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Redirected walking algorithms imperceptibly rotate a virtual scene about users of immersive virtual environment systems in order to guide them away from tracking area boundaries. Ideally, these distortions permit users to explore large unbounded virtual worlds while walking naturally within a physically limited space. Many potential virtual worlds are composed of corridors, passageways, or aisles. Assuming users are not expected to walk through walls or other objects within the virtual world, these constrained worlds limit the directions of travel and as well as the number of opportunities to change direction. The resulting differences in user movement characteristics within the physical world have an impact on redirected walking algorithm performance. This work presents a comparison of generalized RDW algorithm performance within a constrained virtual world. In contrast to previous studies involving unconstrained virtual worlds, experimental results indicate that the steer-to-orbit keeps users in a smaller area than the steer-to-center algorithm. Moreover, in comparison to steer-to-center, steer-to-orbit is shown to reduce potential wall contacts by over 29%.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Redirected walking algorithms imperceptibly rotate a virtual scene about users of immersive virtual environment systems in order to guide them away from tracking area boundaries. Ideally, these distortions permit users to explore large unbounded virtual worlds while walking naturally within a physically limited space. Many potential virtual worlds are composed of corridors, passageways, or aisles. Assuming users are not expected to walk through walls or other objects within the virtual world, these constrained worlds limit the directions of travel and as well as the number of opportunities to change direction. The resulting differences in user movement characteristics within the physical world have an impact on redirected walking algorithm performance. This work presents a comparison of generalized RDW algorithm performance within a constrained virtual world. In contrast to previous studies involving unconstrained virtual worlds, experimental results indicate that the steer-to-orbit keeps users in a smaller area than the steer-to-center algorithm. Moreover, in comparison to steer-to-center, steer-to-orbit is shown to reduce potential wall contacts by over 29%.",
"title": "Performance of Redirected Walking Algorithms in a Constrained Virtual World",
"normalizedTitle": "Performance of Redirected Walking Algorithms in a Constrained Virtual World",
"fno": "ttg201404579",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Legged Locomotion",
"Navigation",
"Orbits",
"Rendering Computer Graphics",
"Tracking",
"Extraterrestrial Measurements",
"Virtual Environments",
"Virtual Environments Redirected Walking Navigation Locomotion Interface Algorithm Comparison"
],
"authors": [
{
"givenName": "Eric",
"surname": "Hodgson",
"fullName": "Eric Hodgson",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Eric",
"surname": "Bachmann",
"fullName": "Eric Bachmann",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Tyler",
"surname": "Thrash",
"fullName": "Tyler Thrash",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "2014-04-01 00:00:00",
"pubType": "trans",
"pages": "579-587",
"year": "2014",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2013/4795/0/06549395",
"title": "Flexible and general redirected walking for head-mounted displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2013/06549395/12OmNxFJXN3",
"parentPublication": {
"id": "proceedings/vr/2013/4795/0",
"title": "2013 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pive/2012/1218/0/06229795",
"title": "Adaptive redirected walking in a virtual world",
"doi": null,
"abstractUrl": "/proceedings-article/pive/2012/06229795/12OmNzUxOk4",
"parentPublication": {
"id": "proceedings/pive/2012/1218/0",
"title": "2012 IEEE VR Workshop on Perceptual Illusions in Virtual Environments",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08448288",
"title": "Experiencing an Invisible World War I Battlefield Through Narrative-Driven Redirected Walking in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08448288/13bd1fZBGdu",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/11/ttg2013111872",
"title": "Optimizing Constrained-Environment Redirected Walking Instructions Using Search Techniques",
"doi": null,
"abstractUrl": "/journal/tg/2013/11/ttg2013111872/13rRUIM2VBH",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/04/ttg2013040634",
"title": "Comparing Four Approaches to Generalized Redirected Walking: Simulation and Live User Data",
"doi": null,
"abstractUrl": "/journal/tg/2013/04/ttg2013040634/13rRUx0Pqpx",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/04/07036075",
"title": "Cognitive Resource Demands of Redirected Walking",
"doi": null,
"abstractUrl": "/journal/tg/2015/04/07036075/13rRUxcKzVm",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08645818",
"title": "Multi-User Redirected Walking and Resetting Using Artificial Potential Fields",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08645818/17PYEiVyc2v",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09961901",
"title": "Transferable Virtual-Physical Environmental Alignment with Redirected Walking",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09961901/1IxvZ4KZbri",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798121",
"title": "Real-time Optimal Planning for Redirected Walking Using Deep Q-Learning",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798121/1cJ17Y60ruM",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09523832",
"title": "Redirected Walking in Static and Dynamic Scenes Using Visibility Polygons",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09523832/1wpqjiNuSqY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg201404569",
"articleId": "13rRUxAASTb",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg201404588",
"articleId": "13rRUyuegh9",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvvc5OL",
"title": "April",
"year": "2013",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUx0Pqpx",
"doi": "10.1109/TVCG.2013.28",
"abstract": "Redirected walking algorithms imperceptibly rotate a virtual scene and scale movements to guide users of immersive virtual environment systems away from tracking area boundaries. These distortions ideally permit users to explore large and potentially unbounded virtual worlds while walking naturally through a physically limited space. Estimates of the physical space required to perform effective redirected walking have been based largely on the ability of humans to perceive the distortions introduced by redirected walking and have not examined the impact the overall steering strategy used. This work compares four generalized redirected walking algorithms, including Steer-to-Center, Steer-to-Orbit, Steer-to-Multiple-Targets and Steer-to-Multiple+Center. Two experiments are presented based on simulated navigation as well as live-user navigation carried out in a large immersive virtual environment facility. Simulations were conducted with both synthetic paths and previously-logged user data. Primary comparison metrics include mean and maximum distances from the tracking area center for each algorithm, number of wall contacts, and mean rates of redirection. Results indicated that Steer-to-Center out-performed all other algorithms relative to these metrics. Steer-to-Orbit also performed well in some circumstances.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Redirected walking algorithms imperceptibly rotate a virtual scene and scale movements to guide users of immersive virtual environment systems away from tracking area boundaries. These distortions ideally permit users to explore large and potentially unbounded virtual worlds while walking naturally through a physically limited space. Estimates of the physical space required to perform effective redirected walking have been based largely on the ability of humans to perceive the distortions introduced by redirected walking and have not examined the impact the overall steering strategy used. This work compares four generalized redirected walking algorithms, including Steer-to-Center, Steer-to-Orbit, Steer-to-Multiple-Targets and Steer-to-Multiple+Center. Two experiments are presented based on simulated navigation as well as live-user navigation carried out in a large immersive virtual environment facility. Simulations were conducted with both synthetic paths and previously-logged user data. Primary comparison metrics include mean and maximum distances from the tracking area center for each algorithm, number of wall contacts, and mean rates of redirection. Results indicated that Steer-to-Center out-performed all other algorithms relative to these metrics. Steer-to-Orbit also performed well in some circumstances.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Redirected walking algorithms imperceptibly rotate a virtual scene and scale movements to guide users of immersive virtual environment systems away from tracking area boundaries. These distortions ideally permit users to explore large and potentially unbounded virtual worlds while walking naturally through a physically limited space. Estimates of the physical space required to perform effective redirected walking have been based largely on the ability of humans to perceive the distortions introduced by redirected walking and have not examined the impact the overall steering strategy used. This work compares four generalized redirected walking algorithms, including Steer-to-Center, Steer-to-Orbit, Steer-to-Multiple-Targets and Steer-to-Multiple+Center. Two experiments are presented based on simulated navigation as well as live-user navigation carried out in a large immersive virtual environment facility. Simulations were conducted with both synthetic paths and previously-logged user data. Primary comparison metrics include mean and maximum distances from the tracking area center for each algorithm, number of wall contacts, and mean rates of redirection. Results indicated that Steer-to-Center out-performed all other algorithms relative to these metrics. Steer-to-Orbit also performed well in some circumstances.",
"title": "Comparing Four Approaches to Generalized Redirected Walking: Simulation and Live User Data",
"normalizedTitle": "Comparing Four Approaches to Generalized Redirected Walking: Simulation and Live User Data",
"fno": "ttg2013040634",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Legged Locomotion",
"Orbits",
"Navigation",
"Algorithm Design And Analysis",
"Space Vehicles",
"Visualization",
"Tracking",
"Simulation",
"Redirected Walking",
"Virtual Environments",
"Navigation",
"Human Computer Interaction",
"Live Users"
],
"authors": [
{
"givenName": "E.",
"surname": "Hodgson",
"fullName": "E. Hodgson",
"affiliation": "Smale Interactive Visualization Center, Miami Univ., Miami, OH, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "E.",
"surname": "Bachmann",
"fullName": "E. Bachmann",
"affiliation": "Comput. Sci. & Software Eng., Miami Univ., Miami, OH, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "2013-04-01 00:00:00",
"pubType": "trans",
"pages": "634-643",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2014/2871/0/06802053",
"title": "An enhanced steering algorithm for redirected walking in virtual environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2014/06802053/12OmNCbU2Wt",
"parentPublication": {
"id": "proceedings/vr/2014/2871/0",
"title": "2014 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/04/ttg201404579",
"title": "Performance of Redirected Walking Algorithms in a Constrained Virtual World",
"doi": null,
"abstractUrl": "/journal/tg/2014/04/ttg201404579/13rRUwjoNx4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08645818",
"title": "Multi-User Redirected Walking and Resetting Using Artificial Potential Fields",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08645818/17PYEiVyc2v",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09961901",
"title": "Transferable Virtual-Physical Environmental Alignment with Redirected Walking",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09961901/1IxvZ4KZbri",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798121",
"title": "Real-time Optimal Planning for Redirected Walking Using Deep Q-Learning",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798121/1cJ17Y60ruM",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798319",
"title": "Simulation and Evaluation of Three-User Redirected Walking Algorithm in Shared Physical Spaces",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798319/1cJ1aPwr8l2",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797818",
"title": "Effects of Tracking Area Shape and Size on Artificial Potential Field Redirected Walking",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797818/1cJ1htJ7ArK",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wevr/2016/0840/0/07859537",
"title": "The redirected walking toolkit: a unified development platform for exploring large virtual environments",
"doi": null,
"abstractUrl": "/proceedings-article/wevr/2016/07859537/1h0Jm3Gvypy",
"parentPublication": {
"id": "proceedings/wevr/2016/0840/0",
"title": "2016 IEEE 2nd Workshop on Everyday Virtual Reality (WEVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998570",
"title": "A Steering Algorithm for Redirected Walking Using Reinforcement Learning",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998570/1hx2DxYanDy",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09523832",
"title": "Redirected Walking in Static and Dynamic Scenes Using Visibility Polygons",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09523832/1wpqjiNuSqY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013040626",
"articleId": "13rRUwd9CG2",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013040644",
"articleId": "13rRUxC0SEg",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
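The two redirected-walking records above compare steering strategies by simulation, using metrics such as the maximum distance from the tracking-area center. The sketch below is a heavily simplified illustration of how such a simulation can be set up for a virtually straight walk: a curvature bounded by an assumed 22 m detectability radius bends the physical path toward a steering target, and the maximum excursion is reported. The target definitions (center vs. a fixed 5 m orbit), speed, time step, and step count are illustrative assumptions, not the papers' experimental conditions.

```python
import numpy as np

def simulate(steering_target, steps=30000, dt=0.01, speed=1.0, min_radius=22.0):
    """Physical path of a virtually straight walk under curvature redirection."""
    pos = np.array([0.0, 0.0])          # tracking-space position (center at origin)
    heading = 0.0                       # physical heading in radians
    kappa = 1.0 / min_radius            # maximum curvature assumed to be imperceptible
    path = np.empty((steps, 2))
    for i in range(steps):
        target = steering_target(pos)
        desired = np.arctan2(target[1] - pos[1], target[0] - pos[0])
        err = (desired - heading + np.pi) % (2.0 * np.pi) - np.pi
        heading += np.sign(err) * kappa * speed * dt   # bend toward the target
        pos = pos + speed * dt * np.array([np.cos(heading), np.sin(heading)])
        path[i] = pos
    return path

def steer_to_center(pos):
    return np.array([0.0, 0.0])

def steer_to_orbit(pos, r=5.0):
    # Aim at a point on a circle of radius r, a quarter turn ahead of the user
    # (a crude stand-in for the tangent-point targeting used in the literature).
    ang = np.arctan2(pos[1], pos[0]) + np.pi / 2.0
    return r * np.array([np.cos(ang), np.sin(ang)])

for name, fn in [("steer-to-center", steer_to_center),
                 ("steer-to-orbit", steer_to_orbit)]:
    path = simulate(fn)
    print(f"{name}: max distance from center = "
          f"{np.linalg.norm(path, axis=1).max():.1f} m")
```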
{
"issue": {
"id": "12OmNz5apx8",
"title": "April",
"year": "2015",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "21",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxcKzVm",
"doi": "10.1109/TVCG.2015.2391864",
"abstract": "Redirected walking allows users to walk through a large-scale immersive virtual environment (IVE) while physically remaining in a reasonably small workspace. Therefore, manipulations are applied to virtual camera motions so that the user's self-motion in the virtual world differs from movements in the real world. Previous work found that the human perceptual system tolerates a certain amount of inconsistency between proprioceptive, vestibular and visual sensation in IVEs, and even compensates for slight discrepancies with recalibrated motor commands. Experiments showed that users are not able to detect an inconsistency if their physical path is bent with a radius of at least 22 meters during virtual straightforward movements. If redirected walking is applied in a smaller workspace, manipulations become noticeable, but users are still able to move through a potentially infinitely large virtual world by walking. For this semi-natural form of locomotion, the question arises if such manipulations impose cognitive demands on the user, which may compete with other tasks in IVEs for finite cognitive resources. In this article we present an experiment in which we analyze the mutual influence between redirected walking and verbal as well as spatial working memory tasks using a dual-tasking method. The results show an influence of redirected walking on verbal as well as spatial working memory tasks, and we also found an effect of cognitive tasks on walking behavior. We discuss the implications and provide guidelines for using redirected walking in virtual reality laboratories.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Redirected walking allows users to walk through a large-scale immersive virtual environment (IVE) while physically remaining in a reasonably small workspace. Therefore, manipulations are applied to virtual camera motions so that the user's self-motion in the virtual world differs from movements in the real world. Previous work found that the human perceptual system tolerates a certain amount of inconsistency between proprioceptive, vestibular and visual sensation in IVEs, and even compensates for slight discrepancies with recalibrated motor commands. Experiments showed that users are not able to detect an inconsistency if their physical path is bent with a radius of at least 22 meters during virtual straightforward movements. If redirected walking is applied in a smaller workspace, manipulations become noticeable, but users are still able to move through a potentially infinitely large virtual world by walking. For this semi-natural form of locomotion, the question arises if such manipulations impose cognitive demands on the user, which may compete with other tasks in IVEs for finite cognitive resources. In this article we present an experiment in which we analyze the mutual influence between redirected walking and verbal as well as spatial working memory tasks using a dual-tasking method. The results show an influence of redirected walking on verbal as well as spatial working memory tasks, and we also found an effect of cognitive tasks on walking behavior. We discuss the implications and provide guidelines for using redirected walking in virtual reality laboratories.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Redirected walking allows users to walk through a large-scale immersive virtual environment (IVE) while physically remaining in a reasonably small workspace. Therefore, manipulations are applied to virtual camera motions so that the user's self-motion in the virtual world differs from movements in the real world. Previous work found that the human perceptual system tolerates a certain amount of inconsistency between proprioceptive, vestibular and visual sensation in IVEs, and even compensates for slight discrepancies with recalibrated motor commands. Experiments showed that users are not able to detect an inconsistency if their physical path is bent with a radius of at least 22 meters during virtual straightforward movements. If redirected walking is applied in a smaller workspace, manipulations become noticeable, but users are still able to move through a potentially infinitely large virtual world by walking. For this semi-natural form of locomotion, the question arises if such manipulations impose cognitive demands on the user, which may compete with other tasks in IVEs for finite cognitive resources. In this article we present an experiment in which we analyze the mutual influence between redirected walking and verbal as well as spatial working memory tasks using a dual-tasking method. The results show an influence of redirected walking on verbal as well as spatial working memory tasks, and we also found an effect of cognitive tasks on walking behavior. We discuss the implications and provide guidelines for using redirected walking in virtual reality laboratories.",
"title": "Cognitive Resource Demands of Redirected Walking",
"normalizedTitle": "Cognitive Resource Demands of Redirected Walking",
"fno": "07036075",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Cognitive Systems",
"Virtual Reality",
"Cognitive Resource Demands",
"Redirected Walking",
"Large Scale Immersive Virtual Environment",
"Large Scale IVE",
"Virtual Camera Motions",
"Human Perceptual System",
"Proprioceptive Sensation",
"Vestibular Sensation",
"Visual Sensation",
"Virtual Straightforward Movements",
"Finite Cognitive Resources",
"Verbal Working Memory Tasks",
"Spatial Working Memory Tasks",
"Dual Tasking Method",
"Virtual Reality Laboratories",
"Legged Locomotion",
"Laboratories",
"Standards",
"Visualization",
"Virtual Environments",
"Cameras",
"Wireless Sensor Networks",
"Redirected Walking",
"Cognitive Demands",
"Locomotion",
"Virtual Environments",
"Redirected Walking",
"Cognitive Demands",
"Locomotion",
"Virtual Environments"
],
"authors": [
{
"givenName": "Gerd",
"surname": "Bruder",
"fullName": "Gerd Bruder",
"affiliation": "Department of Computer ScienceHuman-Computer Interaction Group, University of Hamburg, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Paul",
"surname": "Lubas",
"fullName": "Paul Lubas",
"affiliation": "Department of Computer ScienceHuman-Computer Interaction Group, University of Hamburg, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Frank",
"surname": "Steinicke",
"fullName": "Frank Steinicke",
"affiliation": "Department of Computer ScienceHuman-Computer Interaction Group, University of Hamburg, Germany",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "2015-04-01 00:00:00",
"pubType": "trans",
"pages": "539-544",
"year": "2015",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2013/4795/0/06549395",
"title": "Flexible and general redirected walking for head-mounted displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2013/06549395/12OmNxFJXN3",
"parentPublication": {
"id": "proceedings/vr/2013/4795/0",
"title": "2013 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892373",
"title": "Application of redirected walking in room-scale VR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892373/12OmNxG1ySA",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446579",
"title": "Leveraging Configuration Spaces and Navigation Functions for Redirected Walking",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446579/13bd1fdV4lq",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446263",
"title": "Mobius Walker: Pitch and Roll Redirected Walking",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446263/13bd1gJ1v07",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09961901",
"title": "Transferable Virtual-Physical Environmental Alignment with Redirected Walking",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09961901/1IxvZ4KZbri",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049730",
"title": "Monte-Carlo Redirected Walking: Gain Selection Through Simulated Walks",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049730/1KYowitu5OM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798319",
"title": "Simulation and Evaluation of Three-User Redirected Walking Algorithm in Shared Physical Spaces",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798319/1cJ1aPwr8l2",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wevr/2016/0840/0/07859537",
"title": "The redirected walking toolkit: a unified development platform for exploring large virtual environments",
"doi": null,
"abstractUrl": "/proceedings-article/wevr/2016/07859537/1h0Jm3Gvypy",
"parentPublication": {
"id": "proceedings/wevr/2016/0840/0",
"title": "2016 IEEE 2nd Workshop on Everyday Virtual Reality (WEVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090595",
"title": "Reactive Alignment of Virtual and Physical Environments Using Redirected Walking",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090595/1jIxm1j8B2w",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ec/2022/02/09364750",
"title": "Multi-Technique Redirected Walking Method",
"doi": null,
"abstractUrl": "/journal/ec/2022/02/09364750/1rxdpzgvsxG",
"parentPublication": {
"id": "trans/ec",
"title": "IEEE Transactions on Emerging Topics in Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "07014255",
"articleId": "13rRUwdIOUP",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07010955",
"articleId": "13rRUNvyatk",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1BENJyPkx5S",
"doi": "10.1109/TVCG.2022.3158609",
"abstract": "In this work, we proposed a new out-of-place resetting strategy that guides users to optimal physical locations with the most potential for free movement and a smaller amount of resetting required for their further movements. For this purpose, we calculate a heat map of the walking area according to the average walking distance using a simulation of the used RDW algorithm. Based on this heat map we identify the most suitable position for a one-step reset within a predefined searching range and use the one as the reset point. The results show that our method increases the average moving distance within one cycle of resetting. Furthermore, our resetting method can be applied to any physical area with obstacles. That means that RDW methods that were not suitable for such environments (e.g. Steer to Center) combined with our resetting can also be extended to such complex walking areas. In addition, we also present a resetting user interface to instruct users to move the nearby point, by using light spots to bring user a sense of relative displacement while the virtual scenario is still.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this work, we proposed a new out-of-place resetting strategy that guides users to optimal physical locations with the most potential for free movement and a smaller amount of resetting required for their further movements. For this purpose, we calculate a heat map of the walking area according to the average walking distance using a simulation of the used RDW algorithm. Based on this heat map we identify the most suitable position for a one-step reset within a predefined searching range and use the one as the reset point. The results show that our method increases the average moving distance within one cycle of resetting. Furthermore, our resetting method can be applied to any physical area with obstacles. That means that RDW methods that were not suitable for such environments (e.g. Steer to Center) combined with our resetting can also be extended to such complex walking areas. In addition, we also present a resetting user interface to instruct users to move the nearby point, by using light spots to bring user a sense of relative displacement while the virtual scenario is still.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this work, we proposed a new out-of-place resetting strategy that guides users to optimal physical locations with the most potential for free movement and a smaller amount of resetting required for their further movements. For this purpose, we calculate a heat map of the walking area according to the average walking distance using a simulation of the used RDW algorithm. Based on this heat map we identify the most suitable position for a one-step reset within a predefined searching range and use the one as the reset point. The results show that our method increases the average moving distance within one cycle of resetting. Furthermore, our resetting method can be applied to any physical area with obstacles. That means that RDW methods that were not suitable for such environments (e.g. Steer to Center) combined with our resetting can also be extended to such complex walking areas. In addition, we also present a resetting user interface to instruct users to move the nearby point, by using light spots to bring user a sense of relative displacement while the virtual scenario is still.",
"title": "One-step out-of-place resetting for redirected walking in VR",
"normalizedTitle": "One-step out-of-place resetting for redirected walking in VR",
"fno": "09733261",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Legged Locomotion",
"Virtual Environments",
"Aerospace Electronics",
"User Interfaces",
"Tracking",
"Teleportation",
"Reinforcement Learning",
"Redirected Walking",
"Out Of Place Resetting And Two Arrows Indicator"
],
"authors": [
{
"givenName": "Song-Hai",
"surname": "Zhang",
"fullName": "Song-Hai Zhang",
"affiliation": "Computer Science and Technology, Tsinghua University, Beijing, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Chia-Hao",
"surname": "Chen",
"fullName": "Chia-Hao Chen",
"affiliation": "Computer Science and Technology, Tsinghua University, 12442 Beijing, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Stefanie",
"surname": "Zollmann",
"fullName": "Stefanie Zollmann",
"affiliation": "Computer Science, University of Otago, 2495 Dunedin, Otago, New Zealand, 9054",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-03-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2017/6647/0/07892373",
"title": "Application of redirected walking in room-scale VR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892373/12OmNxG1ySA",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446263",
"title": "Mobius Walker: Pitch and Roll Redirected Walking",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446263/13bd1gJ1v07",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2018/7459/0/745900a115",
"title": "Rethinking Redirected Walking: On the Use of Curvature Gains Beyond Perceptual Limitations and Revisiting Bending Gains",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2018/745900a115/17D45WK5AlG",
"parentPublication": {
"id": "proceedings/ismar/2018/7459/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08645818",
"title": "Multi-User Redirected Walking and Resetting Using Artificial Potential Fields",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08645818/17PYEiVyc2v",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a655",
"title": "Optimal Pose Guided Redirected Walking with Pose Score Precomputation",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a655/1CJbHdnVzd6",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/10049511",
"title": "Redirected Walking On Omnidirectional Treadmill",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/10049511/1KYoAYFd0m4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/10058042",
"title": "Multi-User Redirected Walking in Separate Physical Spaces for Online VR Scenarios",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/10058042/1LbFn8YmYjC",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a053",
"title": "Redirected Walking Based on Historical User Walking Data",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a053/1MNgUnNG7Ju",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2020/7675/0/767500a201",
"title": "Evaluate Optimal Redirected Walking Planning Using Reinforcement Learning",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a201/1pBMkbxS3F6",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2020/7675/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/04/09669131",
"title": "Adaptive Optimization Algorithm for Resetting Techniques in Obstacle-Ridden Environments",
"doi": null,
"abstractUrl": "/journal/tg/2023/04/09669131/1zTg06F4VTq",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09732236",
"articleId": "1BBtNDKgNFe",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09733942",
"articleId": "1BJIbG1OGqc",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
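The record above describes a heat-map-driven, one-step out-of-place reset: simulate the RDW algorithm to estimate how far a user can walk from each physical location, then move the reset point to the highest-scoring cell within a predefined search range. The Python sketch below only illustrates that pipeline; the grid resolution, the random-walk stand-in for the actual RDW simulation, and all function and parameter names (`build_heat_map`, `pick_reset_point`, `search_range`) are assumptions for illustration, not the paper's implementation.

```python
# Hedged sketch of heat-map-based one-step out-of-place resetting.
# The random walk below is only a placeholder for simulating a real RDW
# controller; grid size, episode counts, and names are illustrative.
import numpy as np

def simulate_walk_distance(start, tracked_size, n_episodes=25, step=0.5, rng=None):
    """Average distance walked from `start` before leaving the tracked area."""
    if rng is None:
        rng = np.random.default_rng(0)
    total = 0.0
    for _ in range(n_episodes):
        pos = np.array(start, dtype=float)
        walked = 0.0
        for _ in range(200):
            pos += step * rng.normal(size=2) / np.sqrt(2)  # stand-in for RDW steering
            if np.any(pos < 0) or np.any(pos > tracked_size):
                break
            walked += step
        total += walked
    return total / n_episodes

def build_heat_map(tracked_size=(8.0, 8.0), resolution=0.5):
    """Score every grid cell by the expected walkable distance from it."""
    xs = np.arange(0.0, tracked_size[0], resolution)
    ys = np.arange(0.0, tracked_size[1], resolution)
    heat = np.zeros((len(xs), len(ys)))
    for i, x in enumerate(xs):
        for j, y in enumerate(ys):
            heat[i, j] = simulate_walk_distance((x, y), tracked_size)
    return xs, ys, heat

def pick_reset_point(user_pos, xs, ys, heat, search_range=1.5):
    """One-step reset: best-scoring cell within `search_range` of the user."""
    best, best_score = user_pos, -np.inf
    for i, x in enumerate(xs):
        for j, y in enumerate(ys):
            if np.hypot(x - user_pos[0], y - user_pos[1]) <= search_range:
                if heat[i, j] > best_score:
                    best, best_score = (x, y), heat[i, j]
    return best

xs, ys, heat = build_heat_map()
print(pick_reset_point((1.0, 1.0), xs, ys, heat))
```

In practice the simulation step would replay the concrete RDW controller in use (e.g., Steer to Center) rather than a random walk, but the selection logic stays the same: score positions by expected walkable distance and reset to the best reachable one.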
{
"issue": {
"id": "12OmNzA6GUv",
"title": "May",
"year": "2019",
"issueNum": "05",
"idPrefix": "tg",
"pubType": "journal",
"volume": "25",
"label": "May",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "17PYElBjW00",
"doi": "10.1109/TVCG.2019.2899228",
"abstract": "Real walking is the most natural way to locomote in virtual reality (VR), but a confined physical walking space limits its applicability. Redirected walking (RDW) is a collection of techniques to solve this problem. One of these techniques aims to imperceptibly rotate the user's view of the virtual scene in order to steer her along a confined path whilst giving the impression of walking in a straight line in a large virtual space. Measurement of perceptual thresholds for the detection of such a modified curvature gain have indicated a radius that is still larger than most room sizes. Since the brain is an adaptive system and thresholds usually depend on previous stimulations, we tested if prolonged exposure to an immersive virtual environment (IVE) with increased curvature gain produces adaptation to that gain and modifies thresholds such that, over time, larger curvature gains can be applied for RDW. Therefore, participants first completed a measurement of their perceptual threshold for curvature gain. In a second session, the same participants were exposed to an IVE with a constant curvature gain in which they walked between two targets for about 20 minutes. Afterwards, their perceptual thresholds were measured again. The results show that the psychometric curves shifted after the exposure session and perceptual thresholds for increased curvature gain further increased. The increase of the detection threshold suggests that participants adapt to the manipulation and stronger curvature gains can be applied in RDW, and therefore improves its applicability in such situations.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Real walking is the most natural way to locomote in virtual reality (VR), but a confined physical walking space limits its applicability. Redirected walking (RDW) is a collection of techniques to solve this problem. One of these techniques aims to imperceptibly rotate the user's view of the virtual scene in order to steer her along a confined path whilst giving the impression of walking in a straight line in a large virtual space. Measurement of perceptual thresholds for the detection of such a modified curvature gain have indicated a radius that is still larger than most room sizes. Since the brain is an adaptive system and thresholds usually depend on previous stimulations, we tested if prolonged exposure to an immersive virtual environment (IVE) with increased curvature gain produces adaptation to that gain and modifies thresholds such that, over time, larger curvature gains can be applied for RDW. Therefore, participants first completed a measurement of their perceptual threshold for curvature gain. In a second session, the same participants were exposed to an IVE with a constant curvature gain in which they walked between two targets for about 20 minutes. Afterwards, their perceptual thresholds were measured again. The results show that the psychometric curves shifted after the exposure session and perceptual thresholds for increased curvature gain further increased. The increase of the detection threshold suggests that participants adapt to the manipulation and stronger curvature gains can be applied in RDW, and therefore improves its applicability in such situations.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Real walking is the most natural way to locomote in virtual reality (VR), but a confined physical walking space limits its applicability. Redirected walking (RDW) is a collection of techniques to solve this problem. One of these techniques aims to imperceptibly rotate the user's view of the virtual scene in order to steer her along a confined path whilst giving the impression of walking in a straight line in a large virtual space. Measurement of perceptual thresholds for the detection of such a modified curvature gain have indicated a radius that is still larger than most room sizes. Since the brain is an adaptive system and thresholds usually depend on previous stimulations, we tested if prolonged exposure to an immersive virtual environment (IVE) with increased curvature gain produces adaptation to that gain and modifies thresholds such that, over time, larger curvature gains can be applied for RDW. Therefore, participants first completed a measurement of their perceptual threshold for curvature gain. In a second session, the same participants were exposed to an IVE with a constant curvature gain in which they walked between two targets for about 20 minutes. Afterwards, their perceptual thresholds were measured again. The results show that the psychometric curves shifted after the exposure session and perceptual thresholds for increased curvature gain further increased. The increase of the detection threshold suggests that participants adapt to the manipulation and stronger curvature gains can be applied in RDW, and therefore improves its applicability in such situations.",
"title": "Shrinking Circles: Adaptation to Increased Curvature Gain in Redirected Walking",
"normalizedTitle": "Shrinking Circles: Adaptation to Increased Curvature Gain in Redirected Walking",
"fno": "08645699",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Gait Analysis",
"Psychometric Testing",
"Virtual Reality",
"Redirected Walking",
"Virtual Reality",
"RDW",
"Virtual Scene",
"Virtual Space",
"Immersive Virtual Environment",
"Curvature Gains",
"Shrinking Circles",
"Physical Walking Space",
"IVE",
"Psychometric Curves",
"Legged Locomotion",
"Resists",
"Tracking",
"Gain Measurement",
"Visualization",
"Glass",
"Atmospheric Measurements",
"Virtual Reality",
"Locomotion",
"Redirected Walking",
"Psychophysical Experiments"
],
"authors": [
{
"givenName": "Luke",
"surname": "Bölling",
"fullName": "Luke Bölling",
"affiliation": "University of Muenster",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Niklas",
"surname": "Stein",
"fullName": "Niklas Stein",
"affiliation": "University of Muenster",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Frank",
"surname": "Steinicke",
"fullName": "Frank Steinicke",
"affiliation": "University of Muenster",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Markus",
"surname": "Lappe",
"fullName": "Markus Lappe",
"affiliation": "University of Muenster",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "05",
"pubDate": "2019-05-01 00:00:00",
"pubType": "trans",
"pages": "2032-2039",
"year": "2019",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2017/6647/0/07892279",
"title": "Curvature gains in redirected walking: A closer look",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892279/12OmNBEGYJE",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2011/0039/0/05759454",
"title": "Velocity-dependent dynamic curvature gain for redirected walking",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2011/05759454/12OmNC8MsBR",
"parentPublication": {
"id": "proceedings/vr/2011/0039/0",
"title": "2011 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446479",
"title": "Adopting the Roll Manipulation for Redirected Walking",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446479/13bd1eSlys4",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446225",
"title": "Effect of Environment Size on Curvature Redirected Walking Thresholds",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446225/13bd1sx4Zt8",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2018/7459/0/745900a115",
"title": "Rethinking Redirected Walking: On the Use of Curvature Gains Beyond Perceptual Limitations and Revisiting Bending Gains",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2018/745900a115/17D45WK5AlG",
"parentPublication": {
"id": "proceedings/ismar/2018/7459/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/10049511",
"title": "Redirected Walking On Omnidirectional Treadmill",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/10049511/1KYoAYFd0m4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049692",
"title": "FREE-RDW: A Multi-user Redirected Walking Method for Supporting Non-forward Steps",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049692/1KYopXwY5Vu",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798231",
"title": "The Effect of Hanger Reflex on Virtual Reality Redirected Walking",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798231/1cJ0KBrAUYE",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090671",
"title": "The Influence of Full-Body Representation on Translation and Curvature Gain",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090671/1jIxqcIwi64",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09523890",
"title": "Redirected Walking using Continuous Curvature Manipulation",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09523890/1wpqBpgOKUE",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "08645818",
"articleId": "17PYEiVyc2v",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08642347",
"articleId": "17PYEjbrJk7",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
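The threshold measurements summarized in the record above are typically obtained by fitting a psychometric function to detection responses across a range of curvature gains and reading off the gain at a fixed criterion. The short sketch below shows one hedged way to do that with a logistic model and SciPy; the 75% criterion, the model choice, and the pre/post-exposure numbers are illustrative assumptions, not the study's actual analysis.

```python
# Hedged sketch: estimating a curvature-gain detection threshold from a
# fitted logistic psychometric function. All data values are invented.
import numpy as np
from scipy.optimize import curve_fit

def logistic(gain, mu, sigma):
    # Probability of detecting the manipulation at a given curvature gain.
    return 1.0 / (1.0 + np.exp(-(gain - mu) / sigma))

def detection_threshold(gains, p_detected, criterion=0.75):
    """Fit the logistic curve and invert it at the chosen criterion."""
    (mu, sigma), _ = curve_fit(logistic, gains, p_detected,
                               p0=(np.mean(gains), 0.05))
    return mu + sigma * np.log(criterion / (1.0 - criterion))

# Hypothetical pre/post-exposure data: tested gains and detection rates.
gains = np.array([0.00, 0.05, 0.10, 0.15, 0.20, 0.25])
pre   = np.array([0.05, 0.15, 0.45, 0.80, 0.95, 1.00])
post  = np.array([0.02, 0.08, 0.25, 0.55, 0.85, 0.98])

print("pre-exposure threshold :", detection_threshold(gains, pre))
print("post-exposure threshold:", detection_threshold(gains, post))
```

A rightward shift of the fitted curve after exposure (a larger threshold) corresponds to the adaptation effect reported in the abstract: stronger gains remain undetected.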
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1KYoAYFd0m4",
"doi": "10.1109/TVCG.2023.3244359",
"abstract": "Redirected walking (RDW) and omnidirectional treadmill (ODT) are two effective solutions to the natural locomotion interface in virtual reality. ODT fully compresses the physical space and can be used as the integration carrier of all kinds of devices. However, the user experience varies in different directions of ODT, and the premise of interaction between users and integrated devices is a good match between virtual and real objects. RDW technology uses visual cues to guide the user's location in physical space. Based on this principle, combining RDW technology with ODT to guide the user's walking direction through visual cues can effectively improve user experience on ODT and make full use of various devices integrated on ODT. This paper explores the novel prospects of combining RDW technology with ODT and formally puts forward the concept of O-RDW (ODT-based RDW). Two baseline algorithms, i.e., OS2MD (ODT-based steer to multi-direction), and OS2MT (ODT-based steer to multi-target), are proposed to combine the merits of both RDW and ODT. With the help of the simulation environment, this paper quantitatively analyzes the applicable scenarios of the two algorithms and the influence of several main factors on the performance. Based on the conclusions of the simulation experiments, the two O-RDW algorithms are successfully applied in the practical application case of multi-target haptic feedback. Combined with the user study, the practicability and effectiveness of O-RDW technology in practical use are further verified.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Redirected walking (RDW) and omnidirectional treadmill (ODT) are two effective solutions to the natural locomotion interface in virtual reality. ODT fully compresses the physical space and can be used as the integration carrier of all kinds of devices. However, the user experience varies in different directions of ODT, and the premise of interaction between users and integrated devices is a good match between virtual and real objects. RDW technology uses visual cues to guide the user's location in physical space. Based on this principle, combining RDW technology with ODT to guide the user's walking direction through visual cues can effectively improve user experience on ODT and make full use of various devices integrated on ODT. This paper explores the novel prospects of combining RDW technology with ODT and formally puts forward the concept of O-RDW (ODT-based RDW). Two baseline algorithms, i.e., OS2MD (ODT-based steer to multi-direction), and OS2MT (ODT-based steer to multi-target), are proposed to combine the merits of both RDW and ODT. With the help of the simulation environment, this paper quantitatively analyzes the applicable scenarios of the two algorithms and the influence of several main factors on the performance. Based on the conclusions of the simulation experiments, the two O-RDW algorithms are successfully applied in the practical application case of multi-target haptic feedback. Combined with the user study, the practicability and effectiveness of O-RDW technology in practical use are further verified.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Redirected walking (RDW) and omnidirectional treadmill (ODT) are two effective solutions to the natural locomotion interface in virtual reality. ODT fully compresses the physical space and can be used as the integration carrier of all kinds of devices. However, the user experience varies in different directions of ODT, and the premise of interaction between users and integrated devices is a good match between virtual and real objects. RDW technology uses visual cues to guide the user's location in physical space. Based on this principle, combining RDW technology with ODT to guide the user's walking direction through visual cues can effectively improve user experience on ODT and make full use of various devices integrated on ODT. This paper explores the novel prospects of combining RDW technology with ODT and formally puts forward the concept of O-RDW (ODT-based RDW). Two baseline algorithms, i.e., OS2MD (ODT-based steer to multi-direction), and OS2MT (ODT-based steer to multi-target), are proposed to combine the merits of both RDW and ODT. With the help of the simulation environment, this paper quantitatively analyzes the applicable scenarios of the two algorithms and the influence of several main factors on the performance. Based on the conclusions of the simulation experiments, the two O-RDW algorithms are successfully applied in the practical application case of multi-target haptic feedback. Combined with the user study, the practicability and effectiveness of O-RDW technology in practical use are further verified.",
"title": "Redirected Walking On Omnidirectional Treadmill",
"normalizedTitle": "Redirected Walking On Omnidirectional Treadmill",
"fno": "10049511",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Legged Locomotion",
"Haptic Interfaces",
"Visualization",
"User Experience",
"Resists",
"Thermal Stability",
"Stability Analysis",
"Redirected Walking",
"Omnidirectional Treadmill",
"Haptic Feedback",
"Device Integration",
"Locomotion Interfaces"
],
"authors": [
{
"givenName": "Ziyao",
"surname": "Wang",
"fullName": "Ziyao Wang",
"affiliation": "Southeast University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yiye",
"surname": "Wang",
"fullName": "Yiye Wang",
"affiliation": "Southeast University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Shiqi",
"surname": "Yan",
"fullName": "Shiqi Yan",
"affiliation": "Southeast University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Zhongzheng",
"surname": "Zhu",
"fullName": "Zhongzheng Zhu",
"affiliation": "Southeast University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "KanJian",
"surname": "Zhang",
"fullName": "KanJian Zhang",
"affiliation": "Southeast University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Haikun",
"surname": "Wei",
"fullName": "Haikun Wei",
"affiliation": "Southeast University, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-02-01 00:00:00",
"pubType": "trans",
"pages": "1-14",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tg/2014/04/ttg201404579",
"title": "Performance of Redirected Walking Algorithms in a Constrained Virtual World",
"doi": null,
"abstractUrl": "/journal/tg/2014/04/ttg201404579/13rRUwjoNx4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08645699",
"title": "Shrinking Circles: Adaptation to Increased Curvature Gain in Redirected Walking",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08645699/17PYElBjW00",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a922",
"title": "Robust Redirected Walking in the Wild",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a922/1CJfaCP53nq",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a053",
"title": "Redirected Walking Based on Historical User Walking Data",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a053/1MNgUnNG7Ju",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798231",
"title": "The Effect of Hanger Reflex on Virtual Reality Redirected Walking",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798231/1cJ0KBrAUYE",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797818",
"title": "Effects of Tracking Area Shape and Size on Artificial Potential Field Redirected Walking",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797818/1cJ1htJ7ArK",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998570",
"title": "A Steering Algorithm for Redirected Walking Using Reinforcement Learning",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998570/1hx2DxYanDy",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089532",
"title": "Optimal Planning for Redirected Walking Based on Reinforcement Learning in Multi-user Environment with Irregularly Shaped Physical Space",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089532/1jIx7m6wYKc",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2020/7675/0/767500a201",
"title": "Evaluate Optimal Redirected Walking Planning Using Reinforcement Learning",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a201/1pBMkbxS3F6",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2020/7675/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2021/0158/0/015800a184",
"title": "A Reinforcement Learning Approach to Redirected Walking with Passive Haptic Feedback",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2021/015800a184/1yeCXhKVTXy",
"parentPublication": {
"id": "proceedings/ismar/2021/0158/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10049688",
"articleId": "1KYoraK6mLm",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10052758",
"articleId": "1L1HY1xpNvi",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1L03bKknib6",
"name": "ttg555501-010049511s1-supp4-3244359.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010049511s1-supp4-3244359.mp4",
"extension": "mp4",
"size": "71.2 MB",
"__typename": "WebExtraType"
},
{
"id": "1L03cJBKccE",
"name": "ttg555501-010049511s1-supp5-3244359.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010049511s1-supp5-3244359.mp4",
"extension": "mp4",
"size": "67.3 MB",
"__typename": "WebExtraType"
},
{
"id": "1L03akJ8tmE",
"name": "ttg555501-010049511s1-supp1-3244359.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010049511s1-supp1-3244359.mp4",
"extension": "mp4",
"size": "8.45 MB",
"__typename": "WebExtraType"
},
{
"id": "1L03fapzrFK",
"name": "ttg555501-010049511s1-supp2-3244359.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010049511s1-supp2-3244359.mp4",
"extension": "mp4",
"size": "117 MB",
"__typename": "WebExtraType"
},
{
"id": "1L03aVPc8lq",
"name": "ttg555501-010049511s1-supp3-3244359.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010049511s1-supp3-3244359.mp4",
"extension": "mp4",
"size": "69.4 MB",
"__typename": "WebExtraType"
},
{
"id": "1L03e7qhshW",
"name": "ttg555501-010049511s1-supp6-3244359.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010049511s1-supp6-3244359.mp4",
"extension": "mp4",
"size": "73.8 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
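OS2MT, as sketched in the abstract above, steers the user's physical heading toward one of several physical targets (for example, haptic devices mounted around the treadmill) by applying a bounded rotation gain while the virtual path stays straight. The snippet below is a minimal, assumption-laden illustration of such a steering rule; the target layout, the gain bound, and the function names are hypothetical and not the paper's algorithm.

```python
# Illustrative "steer to multi-target" rule on an omnidirectional treadmill:
# drift the physical heading toward the nearest target with a bounded gain.
import math

MAX_ROTATION_GAIN_RAD_PER_M = math.radians(8.0)  # assumed imperceptible bound

def angle_to(target_bearing, heading):
    """Smallest signed angle from `heading` to `target_bearing` (radians)."""
    return (target_bearing - heading + math.pi) % (2.0 * math.pi) - math.pi

def steer_to_multi_target(heading, targets, step_length=0.7):
    """Physical heading correction for one walking step.

    heading: user's current physical heading (radians).
    targets: bearings (radians) of device locations around the ODT.
    """
    nearest = min(targets, key=lambda t: abs(angle_to(t, heading)))
    error = angle_to(nearest, heading)
    limit = MAX_ROTATION_GAIN_RAD_PER_M * step_length
    return max(-limit, min(limit, error))

# Example: haptic devices mounted at 0, 120, and 240 degrees around the treadmill.
targets = [math.radians(a) for a in (0.0, 120.0, 240.0)]
print(steer_to_multi_target(math.radians(100.0), targets))
```

OS2MD would work analogously, except that the steering target is a preferred walking direction on the treadmill rather than a device location.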
{
"issue": {
"id": "12OmNxvwoNX",
"title": "July/August",
"year": "2010",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "16",
"label": "July/August",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUIM2VGZ",
"doi": "10.1109/TVCG.2009.93",
"abstract": "We report a series of experiments conducted to investigate the effects of travel technique on information gathering and cognition in complex virtual environments. In the first experiment, participants completed a non-branching multilevel 3D maze at their own pace using either real walking or one of two virtual travel techniques. In the second experiment, we constructed a real-world maze with branching pathways and modeled an identical virtual environment. Participants explored either the real or virtual maze for a predetermined amount of time using real walking or a virtual travel technique. Our results across experiments suggest that for complex environments requiring a large number of turns, virtual travel is an acceptable substitute for real walking if the goal of the application involves learning or reasoning based on information presented in the virtual world. However, for applications that require fast, efficient navigation or travel that closely resembles real-world behavior, real walking has advantages over common joystick-based virtual travel techniques.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We report a series of experiments conducted to investigate the effects of travel technique on information gathering and cognition in complex virtual environments. In the first experiment, participants completed a non-branching multilevel 3D maze at their own pace using either real walking or one of two virtual travel techniques. In the second experiment, we constructed a real-world maze with branching pathways and modeled an identical virtual environment. Participants explored either the real or virtual maze for a predetermined amount of time using real walking or a virtual travel technique. Our results across experiments suggest that for complex environments requiring a large number of turns, virtual travel is an acceptable substitute for real walking if the goal of the application involves learning or reasoning based on information presented in the virtual world. However, for applications that require fast, efficient navigation or travel that closely resembles real-world behavior, real walking has advantages over common joystick-based virtual travel techniques.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We report a series of experiments conducted to investigate the effects of travel technique on information gathering and cognition in complex virtual environments. In the first experiment, participants completed a non-branching multilevel 3D maze at their own pace using either real walking or one of two virtual travel techniques. In the second experiment, we constructed a real-world maze with branching pathways and modeled an identical virtual environment. Participants explored either the real or virtual maze for a predetermined amount of time using real walking or a virtual travel technique. Our results across experiments suggest that for complex environments requiring a large number of turns, virtual travel is an acceptable substitute for real walking if the goal of the application involves learning or reasoning based on information presented in the virtual world. However, for applications that require fast, efficient navigation or travel that closely resembles real-world behavior, real walking has advantages over common joystick-based virtual travel techniques.",
"title": "Evaluation of the Cognitive Effects of Travel Technique in Complex Real and Virtual Environments",
"normalizedTitle": "Evaluation of the Cognitive Effects of Travel Technique in Complex Real and Virtual Environments",
"fno": "ttg2010040690",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Learning Artificial Intelligence",
"Virtual Reality",
"Travel Technique",
"Information Gathering",
"Learning",
"Cognitive Effects",
"Multilevel 3 D Maze",
"Virtual Travel Techniques",
"Virtual Environment",
"Legged Locomotion",
"Navigation",
"Space Technology",
"Space Exploration",
"Cognition",
"User Interfaces",
"Tracking",
"Electromagnetic Devices",
"Application Software",
"Virtual Reality",
"Travel Techniques",
"Navigation",
"Real Walking",
"User Study"
],
"authors": [
{
"givenName": "Evan",
"surname": "Suma",
"fullName": "Evan Suma",
"affiliation": "University of North Carolina at Charlotte, Charlotte",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Samantha",
"surname": "Finkelstein",
"fullName": "Samantha Finkelstein",
"affiliation": "University of North Carolina at Charlotte, Charlotte",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Myra",
"surname": "Reid",
"fullName": "Myra Reid",
"affiliation": "University of North Carolina at Charlotte, Charlotte",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Sabarish",
"surname": "Babu",
"fullName": "Sabarish Babu",
"affiliation": "Clemson University, Clemson",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Amy",
"surname": "Ulinski",
"fullName": "Amy Ulinski",
"affiliation": "Clemson University, Clemson",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Larry F.",
"surname": "Hodges",
"fullName": "Larry F. Hodges",
"affiliation": "Clemson University, Clemson",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "2010-07-01 00:00:00",
"pubType": "trans",
"pages": "690-702",
"year": "2010",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2009/3943/0/04811037",
"title": "Real Walking Increases Simulator Sickness in Navigationally Complex Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2009/04811037/12OmNAoDilQ",
"parentPublication": {
"id": "proceedings/vr/2009/3943/0",
"title": "2009 IEEE Virtual Reality Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2007/0907/0/04142859",
"title": "Comparison of Travel Techniques in a Complex, Multi-Level 3D Environment",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2007/04142859/12OmNqGA55e",
"parentPublication": {
"id": "proceedings/3dui/2007/0907/0",
"title": "2007 IEEE Symposium on 3D User Interfaces",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2012/1204/0/06184180",
"title": "From virtual to actual mobility: Assessing the benefits of active locomotion through an immersive virtual environment using a motorized wheelchair",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2012/06184180/12OmNxdDFLw",
"parentPublication": {
"id": "proceedings/3dui/2012/1204/0",
"title": "2012 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2004/8415/0/84150149",
"title": "Effects of travel technique on cognition in virtual environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2004/84150149/12OmNzVXNOx",
"parentPublication": {
"id": "proceedings/vr/2004/8415/0",
"title": "Virtual Reality Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2015/6886/0/07131766",
"title": "A multi-touch finger gesture based low-fatigue VR travel framework",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2015/07131766/12OmNzayNeN",
"parentPublication": {
"id": "proceedings/3dui/2015/6886/0",
"title": "2015 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2005/06/v0694",
"title": "Comparison of path visualizations and cognitive measures relative to travel technique in a virtual environment",
"doi": null,
"abstractUrl": "/journal/tg/2005/06/v0694/13rRUxYrbUt",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2017/2715/0/08258065",
"title": "Personalized travel mode detection with smartphone sensors",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2017/08258065/17D45Vw15uz",
"parentPublication": {
"id": "proceedings/big-data/2017/2715/0",
"title": "2017 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09785918",
"title": "Redirected Walking for Exploring Immersive Virtual Spaces with HMD: A Comprehensive Review and Recent Advances",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09785918/1DPaEdHg6KQ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09893374",
"title": "A Segmented Redirection Mapping Method for Roadmaps of Large Constrained Virtual Environments",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09893374/1GGLIh8KmSA",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a627",
"title": "The Cognitive Load and Usability of Three Walking Metaphors for Consumer Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a627/1pysyecdlzq",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2010040676",
"articleId": "13rRUwcS1CR",
"__typename": "AdjacentArticleType"
},
"next": null,
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNCbCrUN",
"title": "Dec.",
"year": "2013",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwI5TQZ",
"doi": "10.1109/TVCG.2013.205",
"abstract": "Spatial organization has been proposed as a compelling approach to externalizing the sensemaking process. However, there are two ways in which space can be provided to the user: by creating a physical workspace that the user can interact with directly, such as can be provided by a large, high-resolution display, or through the use of a virtual workspace that the user navigates using virtual navigation techniques such as zoom and pan. In this study we explicitly examined the use of spatial sensemaking techniques within these two environments. The results demonstrate that these two approaches to providing sensemaking space are not equivalent, and that the greater embodiment afforded by the physical workspace changes how the space is perceived and used, leading to increased externalization of the sensemaking process.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Spatial organization has been proposed as a compelling approach to externalizing the sensemaking process. However, there are two ways in which space can be provided to the user: by creating a physical workspace that the user can interact with directly, such as can be provided by a large, high-resolution display, or through the use of a virtual workspace that the user navigates using virtual navigation techniques such as zoom and pan. In this study we explicitly examined the use of spatial sensemaking techniques within these two environments. The results demonstrate that these two approaches to providing sensemaking space are not equivalent, and that the greater embodiment afforded by the physical workspace changes how the space is perceived and used, leading to increased externalization of the sensemaking process.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Spatial organization has been proposed as a compelling approach to externalizing the sensemaking process. However, there are two ways in which space can be provided to the user: by creating a physical workspace that the user can interact with directly, such as can be provided by a large, high-resolution display, or through the use of a virtual workspace that the user navigates using virtual navigation techniques such as zoom and pan. In this study we explicitly examined the use of spatial sensemaking techniques within these two environments. The results demonstrate that these two approaches to providing sensemaking space are not equivalent, and that the greater embodiment afforded by the physical workspace changes how the space is perceived and used, leading to increased externalization of the sensemaking process.",
"title": "The Impact of Physical Navigation on Spatial Organization for Sensemaking",
"normalizedTitle": "The Impact of Physical Navigation on Spatial Organization for Sensemaking",
"fno": "ttg2013122207",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Navigation",
"Visual Analytics",
"Browsers",
"Image Color Analysis",
"Embodiment Large",
"Navigation",
"Visual Analytics",
"Browsers",
"Image Color Analysis",
"High Resolution Displays",
"Sensemaking",
"Visual Analytics",
"Physical Navigation"
],
"authors": [
{
"givenName": "Christopher",
"surname": "Andrews",
"fullName": "Christopher Andrews",
"affiliation": "Middlebury Coll., USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Chris",
"surname": "North",
"fullName": "Chris North",
"affiliation": "Virginia Tech, Blacksburg, VA, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "12",
"pubDate": "2013-12-01 00:00:00",
"pubType": "trans",
"pages": "2207-2216",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vast/2012/4752/0/06400559",
"title": "Analyst's Workspace: An embodied sensemaking environment for large, high-resolution displays",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2012/06400559/12OmNwF0BJt",
"parentPublication": {
"id": "proceedings/vast/2012/4752/0",
"title": "2012 IEEE Conference on Visual Analytics Science and Technology (VAST 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vast/2012/4752/0/06400558",
"title": "SocialNetSense: Supporting sensemaking of social and structural features in networks with interactive visualization",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2012/06400558/12OmNxdm4ya",
"parentPublication": {
"id": "proceedings/vast/2012/4752/0",
"title": "2012 IEEE Conference on Visual Analytics Science and Technology (VAST 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/eisic/2015/8657/0/8657a177",
"title": "Guidelines for Sensemaking in Intelligence Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/eisic/2015/8657a177/12OmNzTYBR1",
"parentPublication": {
"id": "proceedings/eisic/2015/8657/0",
"title": "2015 European Intelligence and Security Informatics Conference (EISIC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/01/07194834",
"title": "SensePath: Understanding the Sensemaking Process Through Analytic Provenance",
"doi": null,
"abstractUrl": "/journal/tg/2016/01/07194834/13rRUEgarnM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/12/ttg2012122879",
"title": "Semantic Interaction for Sensemaking: Inferring Analytical Reasoning for Model Steering",
"doi": null,
"abstractUrl": "/journal/tg/2012/12/ttg2012122879/13rRUwdIOUL",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vast/2017/3163/0/08585484",
"title": "CRICTO: Supporting Sensemaking through Crowdsourced Information Schematization",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2017/08585484/17D45Wc1ILV",
"parentPublication": {
"id": "proceedings/vast/2017/3163/0",
"title": "2017 IEEE Conference on Visual Analytics Science and Technology (VAST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09894094",
"title": "Exploring the Evolution of Sensemaking Strategies in Immersive Space to Think",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09894094/1GIqpC6j7na",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vast/2018/6861/0/08802424",
"title": "The Effect of Semantic Interaction on Foraging in Text Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2018/08802424/1cJ6XAJz7gc",
"parentPublication": {
"id": "proceedings/vast/2018/6861/0",
"title": "2018 IEEE Conference on Visual Analytics Science and Technology (VAST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2019/06/08889811",
"title": "Provenance Analysis for Sensemaking",
"doi": null,
"abstractUrl": "/magazine/cg/2019/06/08889811/1eBul1FAEIE",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis/2021/3335/0/333500a181",
"title": "Narrative Sensemaking: Strategies for Narrative Maps Construction",
"doi": null,
"abstractUrl": "/proceedings-article/vis/2021/333500a181/1yXuj3PJXRm",
"parentPublication": {
"id": "proceedings/vis/2021/3335/0",
"title": "2021 IEEE Visualization Conference (VIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013122198",
"articleId": "13rRUwInvsQ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013122217",
"articleId": "13rRUxlgy3I",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXFgGi",
"name": "ttg2013122207s.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2013122207s.zip",
"extension": "zip",
"size": "416 kB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "1LUpyYLBfeo",
"title": "May",
"year": "2023",
"issueNum": "05",
"idPrefix": "tg",
"pubType": "journal",
"volume": "29",
"label": "May",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1L1HXLrXmqA",
"doi": "10.1109/TVCG.2023.3247462",
"abstract": "360° videos provide an immersive experience, especially when watched in virtual reality (VR). Yet, even though the video data is inherently three-dimensional, interfaces to access datasets of such videos in VR almost always use two-dimensional thumbnails shown in a grid on a flat or curved plane. We claim that using spherical and cube-shaped 3D thumbnails may provide a better user experience and be more effective at conveying the high-level subject matter of a video or when searching for a specific item in it. A comparative study against the most used existing representation, that is, 2D equirectangular projections, showed that the spherical 3D thumbnails did indeed provide the best user experience, whereas traditional 2D equirectangular projections still performed better for high-level classification tasks. Yet, they were outperformed by spherical thumbnails when participants had to search for details within the videos. Our results thus confirm a potential benefit of 3D thumbnail representations for 360-degree videos in VR, especially with respect to user experience and detailed content search and suggest a mixed interface design providing both options to the users. Supplemental materials about the user study and used data are available at https://osf.io/5vk49/.",
"abstracts": [
{
"abstractType": "Regular",
"content": "360° videos provide an immersive experience, especially when watched in virtual reality (VR). Yet, even though the video data is inherently three-dimensional, interfaces to access datasets of such videos in VR almost always use two-dimensional thumbnails shown in a grid on a flat or curved plane. We claim that using spherical and cube-shaped 3D thumbnails may provide a better user experience and be more effective at conveying the high-level subject matter of a video or when searching for a specific item in it. A comparative study against the most used existing representation, that is, 2D equirectangular projections, showed that the spherical 3D thumbnails did indeed provide the best user experience, whereas traditional 2D equirectangular projections still performed better for high-level classification tasks. Yet, they were outperformed by spherical thumbnails when participants had to search for details within the videos. Our results thus confirm a potential benefit of 3D thumbnail representations for 360-degree videos in VR, especially with respect to user experience and detailed content search and suggest a mixed interface design providing both options to the users. Supplemental materials about the user study and used data are available at https://osf.io/5vk49/.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "360° videos provide an immersive experience, especially when watched in virtual reality (VR). Yet, even though the video data is inherently three-dimensional, interfaces to access datasets of such videos in VR almost always use two-dimensional thumbnails shown in a grid on a flat or curved plane. We claim that using spherical and cube-shaped 3D thumbnails may provide a better user experience and be more effective at conveying the high-level subject matter of a video or when searching for a specific item in it. A comparative study against the most used existing representation, that is, 2D equirectangular projections, showed that the spherical 3D thumbnails did indeed provide the best user experience, whereas traditional 2D equirectangular projections still performed better for high-level classification tasks. Yet, they were outperformed by spherical thumbnails when participants had to search for details within the videos. Our results thus confirm a potential benefit of 3D thumbnail representations for 360-degree videos in VR, especially with respect to user experience and detailed content search and suggest a mixed interface design providing both options to the users. Supplemental materials about the user study and used data are available at https://osf.io/5vk49/.",
"title": "Introducing 3D Thumbnails to Access 360-Degree Videos in Virtual Reality",
"normalizedTitle": "Introducing 3D Thumbnails to Access 360-Degree Videos in Virtual Reality",
"fno": "10053631",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Image Classification",
"Image Representation",
"User Experience",
"Video Signal Processing",
"Virtual Reality",
"2 D Equirectangular Projections",
"360 Degree Videos",
"3 D Thumbnail Representations",
"Curved Plane",
"Flat Plane",
"High Level Classification Tasks",
"Immersive Experience",
"Mixed Interface Design",
"Spherical 3 D Thumbnails",
"Two Dimensional Thumbnails",
"Used Data",
"User Experience",
"Video Data",
"Virtual Reality",
"VR",
"Videos",
"Three Dimensional Displays",
"Navigation",
"Distortion",
"Visualization",
"User Experience",
"Task Analysis",
"360 Degree Video",
"Video Search",
"360 Degree Video Interaction",
"Interfaces For Video Collections"
],
"authors": [
{
"givenName": "Alissa",
"surname": "Vermast",
"fullName": "Alissa Vermast",
"affiliation": "Utrecht University, Netherlands",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Wolfgang",
"surname": "Hürst",
"fullName": "Wolfgang Hürst",
"affiliation": "Utrecht University, Netherlands",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "05",
"pubDate": "2023-05-01 00:00:00",
"pubType": "trans",
"pages": "2547-2556",
"year": "2023",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/wacv/2018/4886/0/488601b405",
"title": "Stabilizing First Person 360 Degree Videos",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2018/488601b405/12OmNAWpyow",
"parentPublication": {
"id": "proceedings/wacv/2018/4886/0",
"title": "2018 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdcs/2018/6871/0/687101b557",
"title": "Low Latency Edge Rendering Scheme for Interactive 360 Degree Virtual Reality Gaming",
"doi": null,
"abstractUrl": "/proceedings-article/icdcs/2018/687101b557/12OmNBTJIKk",
"parentPublication": {
"id": "proceedings/icdcs/2018/6871/0",
"title": "2018 IEEE 38th International Conference on Distributed Computing Systems (ICDCS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2017/6067/0/08019492",
"title": "Spherical domain rate-distortion optimization for 360-degree video coding",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2017/08019492/12OmNzEmFHn",
"parentPublication": {
"id": "proceedings/icme/2017/6067/0",
"title": "2017 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446523",
"title": "COP: A New Continuous Packing Layout for 360 VR Videos",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446523/13bd1fKQxs3",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2018/1737/0/08486537",
"title": "A Subjective Study of Viewer Navigation Behaviors When Watching 360-Degree Videos on Computers",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2018/08486537/14jQfTvagGm",
"parentPublication": {
"id": "proceedings/icme/2018/1737/0",
"title": "2018 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/lcn/2018/4413/0/08638092",
"title": "Plato: Learning-based Adaptive Streaming of 360-Degree Videos",
"doi": null,
"abstractUrl": "/proceedings-article/lcn/2018/08638092/18rqIpj1b3i",
"parentPublication": {
"id": "proceedings/lcn/2018/4413/0",
"title": "2018 IEEE 43rd Conference on Local Computer Networks (LCN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/mu/5555/01/09920013",
"title": "Could Head Motions Affect Quality When Viewing 360-Degree Videos?",
"doi": null,
"abstractUrl": "/magazine/mu/5555/01/09920013/1HxSle7FJHW",
"parentPublication": {
"id": "mags/mu",
"title": "IEEE MultiMedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798261",
"title": "Hybrid Projection For Encoding 360 VR Videos",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798261/1cJ0Wb1xK4E",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2019/2838/0/283800a081",
"title": "User Experience Study of 360° Music Videos on Computer Monitor and Virtual Reality Goggles",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2019/283800a081/1cMFaY4kg6I",
"parentPublication": {
"id": "proceedings/iv/2019/2838/0",
"title": "2019 23rd International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/10/09497715",
"title": "Spherical DNNs and Their Applications in 360<inline-formula><tex-math notation=\"LaTeX\">Z_$^\\circ$_Z</tex-math></inline-formula> Images and Videos",
"doi": null,
"abstractUrl": "/journal/tp/2022/10/09497715/1vzY9kuYnwA",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10049652",
"articleId": "1KYoxzkht3W",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10049707",
"articleId": "1KYoumHTB72",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNqJZgI1",
"title": "April",
"year": "1987",
"issueNum": "04",
"idPrefix": "co",
"pubType": "magazine",
"volume": "20",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUB7a149",
"doi": "10.1109/MC.1987.1663536",
"abstract": "Provides a listing of upcoming conference events of interest to practitioners and researchers.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Provides a listing of upcoming conference events of interest to practitioners and researchers.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Provides a listing of upcoming conference events of interest to practitioners and researchers.",
"title": "Eurographics'87",
"normalizedTitle": "Eurographics'87",
"fno": "01663536",
"hasPdf": true,
"idPrefix": "co",
"keywords": [],
"authors": [],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": false,
"isOpenAccess": true,
"issueNum": "04",
"pubDate": "1987-04-01 00:00:00",
"pubType": "mags",
"pages": "60",
"year": "1987",
"issn": "0018-9162",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "01663530",
"articleId": "13rRUygT7i3",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "01663526",
"articleId": "13rRUwx1xKq",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |