diff --git a/_data/pub.json b/_data/pub.json index 0609bf0f..859de8c3 100644 --- a/_data/pub.json +++ b/_data/pub.json @@ -1,7 +1,7 @@ [ { "key": "4R7ATE42", - "version": 30399, + "version": 30807, "library": { "type": "group", "id": 10058, @@ -41,14 +41,25 @@ } } }, + "lastModifiedByUser": { + "id": 112658, + "username": "gonuke", + "name": "", + "links": { + "alternate": { + "href": "https://www.zotero.org/gonuke", + "type": "text/html" + } + } + }, "creatorSummary": "Mummah and Wilson", "parsedDate": "2024-06-19", "numChildren": 1 }, - "bibtex": "\n@inproceedings{mummah_cyclus_2024,\n\taddress = {Las Vegas, NV},\n\ttitle = {Cyclus {Toolkit} {Enhancements} to {Simulate} {Nuclear} {Material} {Buying} {Patterns}},\n\tvolume = {130},\n\tbooktitle = {Transactions of the {American} {Nuclear} {Society}, {Annual} {Meeting} 2024},\n\tauthor = {Mummah, Kathryn A. and Wilson, Paul P.H.},\n\tmonth = jun,\n\tyear = {2024},\n\tnote = {(accepted)},\n}\n", + "bibtex": "\n@inproceedings{mummah_cyclus_2024,\n\taddress = {Las Vegas, NV},\n\ttitle = {Cyclus {Toolkit} {Enhancements} to {Simulate} {Nuclear} {Material} {Buying} {Patterns}},\n\tvolume = {130},\n\turl = {https://www.ans.org/pubs/transactions/article-55965/},\n\tbooktitle = {Transactions of the {American} {Nuclear} {Society}, {Annual} {Meeting} 2024},\n\tauthor = {Mummah, Kathryn A. 
and Wilson, Paul P.H.},\n\tmonth = jun,\n\tyear = {2024},\n\tnote = {(accepted)},\n}\n", "data": { "key": "4R7ATE42", - "version": 30399, + "version": 30807, "itemType": "conferencePaper", "title": "Cyclus Toolkit Enhancements to Simulate Nuclear Material Buying Patterns", "creators": [ @@ -76,7 +87,7 @@ "DOI": "", "ISBN": "", "shortTitle": "", - "url": "", + "url": "https://www.ans.org/pubs/transactions/article-55965/", "accessDate": "", "archive": "", "archiveLocation": "", @@ -92,7 +103,7 @@ "owl:sameAs": "http://zotero.org/groups/10058/items/9PR5P2RR" }, "dateAdded": "2024-03-20T19:47:42Z", - "dateModified": "2024-07-23T14:49:03Z" + "dateModified": "2024-09-09T14:48:20Z" } }, { @@ -349,7 +360,7 @@ }, { "key": "WTKDTRL7", - "version": 29705, + "version": 30812, "library": { "type": "group", "id": 10058, @@ -369,6 +380,12 @@ "alternate": { "href": "https://www.zotero.org/groups/10058/items/WTKDTRL7", "type": "text/html" + }, + "attachment": { + "href": "https://api.zotero.org/groups/10058/items/DLUFAQQI", + "type": "application/json", + "attachmentType": "application/pdf", + "attachmentSize": 441719 } }, "meta": { @@ -383,14 +400,25 @@ } } }, + "lastModifiedByUser": { + "id": 112658, + "username": "gonuke", + "name": "", + "links": { + "alternate": { + "href": "https://www.zotero.org/gonuke", + "type": "text/html" + } + } + }, "creatorSummary": "Mummah", "parsedDate": "2023-11", - "numChildren": 0 + "numChildren": 1 }, - "bibtex": "\n@inproceedings{mummah_bridging_2023,\n\taddress = {Washington D.C.},\n\ttitle = {Bridging the {Fidelity} {Gap} in {System}-{Scale} {Nuclear} {Fuel} {Cycle} {Simulations} for {Realistic} {State}-{Level} {Nuclear} {Material} {Accounting}},\n\tauthor = {Mummah, Kathryn A.},\n\tmonth = nov,\n\tyear = {2023},\n}\n", + "bibtex": "\n@inproceedings{mummah_bridging_2023,\n\taddress = {Washington D.C.},\n\ttitle = {Bridging the {Fidelity} {Gap} in {System}-{Scale} {Nuclear} {Fuel} {Cycle} {Simulations} for {Realistic} {State}-{Level} {Nuclear} 
{Material} {Accounting}},\n\turl = {https://www.ans.org/pubs/proceedings/article-55005/},\n\tbooktitle = {Proceedings of {Advances} in {Nonproliferation} {Technology} and {Policy} {Conference} 2023},\n\tauthor = {Mummah, Kathryn A.},\n\tmonth = nov,\n\tyear = {2023},\n}\n", "data": { "key": "WTKDTRL7", - "version": 29705, + "version": 30812, "itemType": "conferencePaper", "title": "Bridging the Fidelity Gap in System-Scale Nuclear Fuel Cycle Simulations for Realistic State-Level Nuclear Material Accounting", "creators": [ @@ -402,7 +430,7 @@ ], "abstractNote": "", "date": "November 2023", - "proceedingsTitle": "", + "proceedingsTitle": "Proceedings of Advances in Nonproliferation Technology and Policy Conference 2023", "conferenceName": "Advances in Nonproliferation Technology and Policy Conference 2023", "place": "Washington D.C.", "publisher": "", @@ -413,7 +441,7 @@ "DOI": "", "ISBN": "", "shortTitle": "", - "url": "", + "url": "https://www.ans.org/pubs/proceedings/article-55005/", "accessDate": "", "archive": "", "archiveLocation": "", @@ -427,7 +455,7 @@ ], "relations": {}, "dateAdded": "2024-03-20T19:47:42Z", - "dateModified": "2024-03-20T19:47:42Z" + "dateModified": "2024-09-09T14:49:15Z" } }, { @@ -933,7 +961,7 @@ }, { "key": "UBSWG585", - "version": 27218, + "version": 30815, "library": { "type": "group", "id": 10058, @@ -976,10 +1004,10 @@ "creatorSummary": "D'Angelo and WILSON", "numChildren": 1 }, - "bibtex": "\n@inproceedings{dangelo_sdr_2022,\n\taddress = {Seattle, WA, USA},\n\ttitle = {{SDR} {Calculations} {Involving} {Geometry} {Movement} {After} {Shutdown}},\n\tauthor = {D'Angelo, Chelsea and WILSON, Paul P. 
H.},\n\tmonth = sep,\n\tyear = {2022},\n}\n", + "bibtex": "\n@inproceedings{dangelo_sdr_2022,\n\taddress = {Seattle, WA, USA},\n\ttitle = {{SDR} {Calculations} {Involving} {Geometry} {Movement} {After} {Shutdown}},\n\turl = {https://www.ans.org/pubs/proceedings/article-52048/},\n\tbooktitle = {Proceedings of the 14th {International} {Conference} on {Radiation} {Shielding} and 21st {Topical} {Meeting} of the {Radiation} {Protection} and {Shielding} {Division}},\n\tauthor = {D'Angelo, Chelsea and WILSON, Paul P. H.},\n\tmonth = sep,\n\tyear = {2022},\n}\n", "data": { "key": "UBSWG585", - "version": 27218, + "version": 30815, "itemType": "conferencePaper", "title": "SDR Calculations Involving Geometry Movement After Shutdown", "creators": [ @@ -996,7 +1024,7 @@ ], "abstractNote": "", "date": "2022-09-25 9/25/22-9/29/22", - "proceedingsTitle": "", + "proceedingsTitle": "Proceedings of the 14th International Conference on Radiation Shielding and 21st Topical Meeting of the Radiation Protection and Shielding Division", "conferenceName": "14th International Conference on Radiation Shielding and 21st Topical Meeting of the Radiation Protection and Shielding Division", "place": "Seattle, WA, USA", "publisher": "", @@ -1007,7 +1035,7 @@ "DOI": "", "ISBN": "", "shortTitle": "", - "url": "", + "url": "https://www.ans.org/pubs/proceedings/article-52048/", "accessDate": "", "archive": "", "archiveLocation": "", @@ -1021,7 +1049,7 @@ ], "relations": {}, "dateAdded": "2022-11-02T11:00:29Z", - "dateModified": "2022-11-02T11:02:28Z" + "dateModified": "2024-09-09T14:50:11Z" } }, { @@ -1697,7 +1725,7 @@ }, { "key": "QW38HRSJ", - "version": 25069, + "version": 30816, "library": { "type": "group", "id": 10058, @@ -1737,14 +1765,25 @@ } } }, + "lastModifiedByUser": { + "id": 112658, + "username": "gonuke", + "name": "", + "links": { + "alternate": { + "href": "https://www.zotero.org/gonuke", + "type": "text/html" + } + } + }, "creatorSummary": "Mummah and WIlson", "parsedDate": 
"2020-07", - "numChildren": 1 + "numChildren": 2 }, - "bibtex": "\n@inproceedings{mummah_integrating_2020,\n\ttitle = {Integrating {Acquisition} {Pathway} {Analysis} {Into} {The} {Cyclus} {Fuel} {Cycle} {Simulator}},\n\tabstract = {The IAEA considers a State’s entire fuel cycle capability when evaluating and implementing safeguards, a process known as the State-Level Approach. Conducting Acquisition Path Analysis (APA) is one aspect of ensuring efficient use of safeguards resources and an objective evaluation of member States. APA is designed to identify, characterize, and rank technically-feasible pathways through a fuel cycle to produce weapons-usable material. This paper covers the integration of APA techniques into the Cyclus fuel cycle simulator. Material flowing through a nuclear fuel cycle can be represented by a directed graph (digraph) with vertices V(D) repre-\nsenting facilities and edges E(D) representing trade or material transport. In a Cyclus input file, a user defines a set of facility prototypes and the commodities that can be traded between them.\nFrom this user-specified fuel cycle, a digraph is generated representing all possible commodity trades between facilities. Graph traversal techniques are used to enumerate all pathways for\nmaterial to flow through the given fuel cycle. Pathways that produce weapons-usable material are filtered and further analyzed. Due to the flexibility of the Cyclus fuel cycle simulator, this\nmethod works for any fuel cycle, including ones that use closed facility models that are not part of the open source Cyclus and Cycamore facility libraries.},\n\tbooktitle = {Proceedings of the 61st {INMM} {Meeting}},\n\tauthor = {Mummah, Kathryn and WIlson, P. 
P.H},\n\tmonth = jul,\n\tyear = {2020},\n}\n", + "bibtex": "\n@inproceedings{mummah_integrating_2020,\n\ttitle = {Integrating {Acquisition} {Pathway} {Analysis} {Into} {The} {Cyclus} {Fuel} {Cycle} {Simulator}},\n\turl = {https://resources.inmm.org/annual-meeting-proceedings/integrating-acquisition-pathway-analysis-cyclus-fuel-cycle-simulator},\n\tabstract = {The IAEA considers a State’s entire fuel cycle capability when evaluating and implementing safeguards, a process known as the State-Level Approach. Conducting Acquisition Path Analysis (APA) is one aspect of ensuring efficient use of safeguards resources and an objective evaluation of member States. APA is designed to identify, characterize, and rank technically-feasible pathways through a fuel cycle to produce weapons-usable material. This paper covers the integration of APA techniques into the Cyclus fuel cycle simulator. Material flowing through a nuclear fuel cycle can be represented by a directed graph (digraph) with vertices V(D) repre-\nsenting facilities and edges E(D) representing trade or material transport. In a Cyclus input file, a user defines a set of facility prototypes and the commodities that can be traded between them.\nFrom this user-specified fuel cycle, a digraph is generated representing all possible commodity trades between facilities. Graph traversal techniques are used to enumerate all pathways for\nmaterial to flow through the given fuel cycle. Pathways that produce weapons-usable material are filtered and further analyzed. Due to the flexibility of the Cyclus fuel cycle simulator, this\nmethod works for any fuel cycle, including ones that use closed facility models that are not part of the open source Cyclus and Cycamore facility libraries.},\n\tbooktitle = {Proceedings of the 61st {INMM} {Meeting}},\n\tauthor = {Mummah, Kathryn and WIlson, P. 
P.H},\n\tmonth = jul,\n\tyear = {2020},\n}\n", "data": { "key": "QW38HRSJ", - "version": 25069, + "version": 30816, "itemType": "conferencePaper", "title": "Integrating Acquisition Pathway Analysis Into The Cyclus Fuel Cycle Simulator", "creators": [ @@ -1772,7 +1811,7 @@ "DOI": "", "ISBN": "", "shortTitle": "", - "url": "", + "url": "https://resources.inmm.org/annual-meeting-proceedings/integrating-acquisition-pathway-analysis-cyclus-fuel-cycle-simulator", "accessDate": "", "archive": "", "archiveLocation": "", @@ -1786,12 +1825,12 @@ ], "relations": {}, "dateAdded": "2020-11-12T15:04:00Z", - "dateModified": "2021-01-21T05:47:23Z" + "dateModified": "2024-09-09T14:51:38Z" } }, { "key": "H4VPVVT5", - "version": 25203, + "version": 30817, "library": { "type": "group", "id": 10058, @@ -1813,10 +1852,10 @@ "type": "text/html" }, "attachment": { - "href": "https://api.zotero.org/groups/10058/items/HCUMB62F", + "href": "https://api.zotero.org/groups/10058/items/WFMNLVJP", "type": "application/json", "attachmentType": "application/pdf", - "attachmentSize": 290296 + "attachmentSize": 409270 } }, "meta": { @@ -1831,14 +1870,25 @@ } } }, + "lastModifiedByUser": { + "id": 112658, + "username": "gonuke", + "name": "", + "links": { + "alternate": { + "href": "https://www.zotero.org/gonuke", + "type": "text/html" + } + } + }, "creatorSummary": "Park et al.", "parsedDate": "2020-04-29", - "numChildren": 1 + "numChildren": 2 }, - "bibtex": "\n@inproceedings{park_evaluation_2020,\n\taddress = {Cambridge, United Kingdom},\n\ttitle = {Evaluation of {Critical} {Experiments} in the {University} of {Wisconsin} {Nuclear} {Reactor} ({UWNR}) with {Uncertainty} {Quantification}},\n\tisbn = {978-1-5272-6447-2},\n\tabstract = {An improved computational model for the University of Wisconsin Nuclear Reactor (UWNR) has been developed to facilitate automated input generation, data provenance, and modularity for alternate representations. 
This development was initiated as part of efforts to evaluate recent data acquired during an experimental campaign conducted at UWNR to generate benchmark data for validation. Specifically, this evaluation effort aims to contribute a number of fresh and depleted critical (CRIT) configurations of UWNR as well as steady-state and transient reaction-rate (RRATE) measurements. Previous efforts led to a scripted UWNR model that supports automated generation of inputs for MCNP and Serpent. Recently, this capability was extended to SCALE/KENO, which required significant changes to the underlying geometry and material representations. All three Monte Carlo tools (MCNP, Serpent, and KENO) are being used to evaluate a variety of zero-power, fresh-critical configuration and will be used to model burnup for evaluation of depleted-critical configurations. The inclusion of SCALE/KENO input generation makes possible a variety of sensitivity and uncertainty analyses using the TSUNAMI and SAMPLER modules of SCALE. In addition, an automated mesh-generation option was added based on the UW-developed, MCNP-to-CAD plugin. As a result, a meshed geometry for use with deterministic tools (e.g., MAMMOTH/Rattlesnake) can be produced that is fully consistent with the Monte Carlo models. Work is ongoing to develop a full core model in MAMMOTH/Rattlesnake, which is a deterministic code based on the MOOSE framework. This model will be used for the evaluation of several transient experiments conducted at UWNR. Preliminary results of fresh-critical configurations show a good agreement among the four codes and experimental data. Also, preliminary results of depleted-critical configurations indicate that the depleted core model successfully tracks core reactivity over time as long as an initial (but relatively small) reactivity bias is eliminated. 
Formal uncertainty quantification will be carried out using SCALE to study the impact of model uncertainties on the effective multiplication factor and other observables. In conclusion, the evaluation of UWNR benchmark data provides increased confidence in various states/configurations of the UWNR computational model and will provide a unique model for use by other analysts.},\n\tbooktitle = {Proceedings of the {PHYSOR} 2020},\n\tauthor = {Park, YoungHui and Cheng, Ye and Elzohery, Rabab and Wilson, Paul P.H. and Roberts, Jeremy A. and DeHart, Mark D.},\n\tmonth = apr,\n\tyear = {2020},\n\tpages = {10},\n}\n", + "bibtex": "\n@inproceedings{park_evaluation_2020,\n\taddress = {Cambridge, United Kingdom},\n\ttitle = {Evaluation of {Critical} {Experiments} in the {University} of {Wisconsin} {Nuclear} {Reactor} ({UWNR}) with {Uncertainty} {Quantification}},\n\tisbn = {978-1-5272-6447-2},\n\turl = {https://doi.org/10.1051/epjconf/202124710032},\n\tabstract = {An improved computational model for the University of Wisconsin Nuclear Reactor (UWNR) has been developed to facilitate automated input generation, data provenance, and modularity for alternate representations. This development was initiated as part of efforts to evaluate recent data acquired during an experimental campaign conducted at UWNR to generate benchmark data for validation. Specifically, this evaluation effort aims to contribute a number of fresh and depleted critical (CRIT) configurations of UWNR as well as steady-state and transient reaction-rate (RRATE) measurements. Previous efforts led to a scripted UWNR model that supports automated generation of inputs for MCNP and Serpent. Recently, this capability was extended to SCALE/KENO, which required significant changes to the underlying geometry and material representations. 
All three Monte Carlo tools (MCNP, Serpent, and KENO) are being used to evaluate a variety of zero-power, fresh-critical configuration and will be used to model burnup for evaluation of depleted-critical configurations. The inclusion of SCALE/KENO input generation makes possible a variety of sensitivity and uncertainty analyses using the TSUNAMI and SAMPLER modules of SCALE. In addition, an automated mesh-generation option was added based on the UW-developed, MCNP-to-CAD plugin. As a result, a meshed geometry for use with deterministic tools (e.g., MAMMOTH/Rattlesnake) can be produced that is fully consistent with the Monte Carlo models. Work is ongoing to develop a full core model in MAMMOTH/Rattlesnake, which is a deterministic code based on the MOOSE framework. This model will be used for the evaluation of several transient experiments conducted at UWNR. Preliminary results of fresh-critical configurations show a good agreement among the four codes and experimental data. Also, preliminary results of depleted-critical configurations indicate that the depleted core model successfully tracks core reactivity over time as long as an initial (but relatively small) reactivity bias is eliminated. Formal uncertainty quantification will be carried out using SCALE to study the impact of model uncertainties on the effective multiplication factor and other observables. In conclusion, the evaluation of UWNR benchmark data provides increased confidence in various states/configurations of the UWNR computational model and will provide a unique model for use by other analysts.},\n\tbooktitle = {Proceedings of the {PHYSOR} 2020},\n\tauthor = {Park, YoungHui and Cheng, Ye and Elzohery, Rabab and Wilson, Paul P.H. and Roberts, Jeremy A. 
and DeHart, Mark D.},\n\tmonth = apr,\n\tyear = {2020},\n\tpages = {10},\n}\n", "data": { "key": "H4VPVVT5", - "version": 25203, + "version": 30817, "itemType": "conferencePaper", "title": "Evaluation of Critical Experiments in the University of Wisconsin Nuclear Reactor (UWNR) with Uncertainty Quantification", "creators": [ @@ -1886,7 +1936,7 @@ "DOI": "", "ISBN": "978-1-5272-6447-2", "shortTitle": "", - "url": "", + "url": "https://doi.org/10.1051/epjconf/202124710032", "accessDate": "", "archive": "", "archiveLocation": "", @@ -1900,7 +1950,7 @@ ], "relations": {}, "dateAdded": "2021-02-07T00:18:47Z", - "dateModified": "2021-02-07T00:43:37Z" + "dateModified": "2024-09-09T14:52:31Z" } }, { @@ -10423,7 +10473,7 @@ }, { "key": "HU8FELQI", - "version": 21903, + "version": 30749, "library": { "type": "group", "id": 10058, @@ -10467,10 +10517,10 @@ "parsedDate": "2017-09-01", "numChildren": 2 }, - "bibtex": "\n@article{shriwise_particle_2017,\n\tseries = {Special {Issue} on {International} {Conference} on {Mathematics} and {Computational} {Methods} {Applied} to {Nuclear} {Science} and {Engineering} 2017 ({M}\\&{C} 2017)},\n\ttitle = {Particle tracking acceleration via signed distance fields in direct-accelerated geometry {Monte} {Carlo}},\n\tvolume = {49},\n\tissn = {1738-5733},\n\turl = {http://www.sciencedirect.com/science/article/pii/S1738573317303145},\n\tdoi = {10.1016/j.net.2017.08.008},\n\tabstract = {Computer-aided design (CAD)-based Monte Carlo radiation transport is of value to the nuclear engineering community for its ability to conduct transport on high-fidelity models of nuclear systems, but it is more computationally expensive than native geometry representations. This work describes the adaptation of a rendering data structure, the signed distance field, as a geometric query tool for accelerating CAD-based transport in the direct-accelerated geometry Monte Carlo toolkit. Demonstrations of its effectiveness are shown for several problems. 
The beginnings of a predictive model for the data structure's utilization based on various problem parameters is also introduced.},\n\tnumber = {6},\n\turldate = {2017-11-24},\n\tjournal = {Nuclear Engineering and Technology},\n\tauthor = {Shriwise, Patrick C. and Davis, Andrew and Jacobson, Lucas J. and Wilson, Paul P. H.},\n\tmonth = sep,\n\tyear = {2017},\n\tkeywords = {CAD, DAGMC, Monte Carlo, Radiation Transport},\n\tpages = {1189--1198},\n}\n", + "bibtex": "\n@article{shriwise_particle_2017,\n\tseries = {Special {Issue} on {International} {Conference} on {Mathematics} and {Computational} {Methods} {Applied} to {Nuclear} {Science} and {Engineering} 2017 ({M}\\&{C} 2017)},\n\ttitle = {Particle tracking acceleration via signed distance fields in direct-accelerated geometry {Monte} {Carlo}},\n\tvolume = {49},\n\tissn = {1738-5733},\n\turl = {http://www.sciencedirect.com/science/article/pii/S1738573317303145},\n\tdoi = {10.1016/j.net.2017.08.008},\n\tabstract = {Computer-aided design (CAD)-based Monte Carlo radiation transport is of value to the nuclear engineering community for its ability to conduct transport on high-fidelity models of nuclear systems, but it is more computationally expensive than native geometry representations. This work describes the adaptation of a rendering data structure, the signed distance field, as a geometric query tool for accelerating CAD-based transport in the direct-accelerated geometry Monte Carlo toolkit. Demonstrations of its effectiveness are shown for several problems. The beginnings of a predictive model for the data structure's utilization based on various problem parameters is also introduced.},\n\tnumber = {6},\n\turldate = {2017-11-24},\n\tjournal = {Nuclear Engineering and Technology},\n\tauthor = {Shriwise, Patrick C. and Davis, Andrew and Jacobson, Lucas J. and Wilson, Paul P. 
H.},\n\tmonth = sep,\n\tyear = {2017},\n\tkeywords = {CAD, DAGMC, Monte Carlo, Radiation Transport, product},\n\tpages = {1189--1198},\n}\n", "data": { "key": "HU8FELQI", - "version": 21903, + "version": 30749, "itemType": "journalArticle", "title": "Particle tracking acceleration via signed distance fields in direct-accelerated geometry Monte Carlo", "creators": [ @@ -10533,21 +10583,23 @@ { "tag": "Radiation Transport", "type": 1 + }, + { + "tag": "product" } ], "collections": [ - "UKXV4KID", - "H442QZRN", - "CMA2SK5V" + "APMMJXES", + "UKXV4KID" ], "relations": {}, "dateAdded": "2017-11-24T19:24:01Z", - "dateModified": "2017-11-24T19:24:01Z" + "dateModified": "2024-09-08T17:29:03Z" } }, { "key": "PG5YTTAG", - "version": 21903, + "version": 30748, "library": { "type": "group", "id": 10058, @@ -10591,10 +10643,10 @@ "parsedDate": "2017-07-04", "numChildren": 2 }, - "bibtex": "\n@article{el-guebaly_design_2017,\n\ttitle = {Design and {Evaluation} of {Nuclear} {System} for {ARIES}-{ACT2} {Power} {Plant} with {DCLL} {Blanket}},\n\tvolume = {72},\n\tissn = {1536-1055},\n\turl = {https://doi.org/10.1080/15361055.2016.1273669},\n\tdoi = {10.1080/15361055.2016.1273669},\n\tabstract = {The ARIES team has examined a multitude of fusion concepts over a period of 25 years. In recent years, the team wrapped up the Advanced Research, Innovation, and Evaluation Study (ARIES) series by completing the detailed design of the ARIES–Advanced and Conservative Tokamak (ARIES-ACT2) power plant—a plant with conservative physics and technology, representing a tokamak with reduced-activation ferritic/martensitic (RAFM) structure and dual-coolant lead-lithium blanket. The integration of nuclear assessments (neutronics, shielding, and activation) is an essential element to ARIES-ACT2 success. 
This paper highlights the design philosophy of in-vessel components and characterizes several nuclear-related issues that have been addressed during the course of the study to improve the ARIES-ACT2 design: sufficient breeding of tritium to fuel the plasma, well-optimized in-vessel components that satisfy all design requirements and guarantee the shielding functionality of its radial/vertical builds, survivability of low-activation/radiation-resistant structural materials in 14-MeV neutron environment, activation concerns for RAFM and corrosion-resistant oxide-dispersion-strengthened alloys, and an integral approach to handle the mildly radioactive materials during operation and after decommissioning.},\n\tnumber = {1},\n\turldate = {2018-04-05},\n\tjournal = {Fusion Science and Technology},\n\tauthor = {El-Guebaly, L. and Mynsberge, L. and Davis, A. and D’Angelo, C. and Rowcliffe, A. and Pint, B. and Team, ARIES-ACT},\n\tmonth = jul,\n\tyear = {2017},\n\tkeywords = {Activation analysis, DCLL blanket, neutronics},\n\tpages = {17--40},\n}\n", + "bibtex": "\n@article{el-guebaly_design_2017,\n\ttitle = {Design and {Evaluation} of {Nuclear} {System} for {ARIES}-{ACT2} {Power} {Plant} with {DCLL} {Blanket}},\n\tvolume = {72},\n\tissn = {1536-1055},\n\turl = {https://doi.org/10.1080/15361055.2016.1273669},\n\tdoi = {10.1080/15361055.2016.1273669},\n\tabstract = {The ARIES team has examined a multitude of fusion concepts over a period of 25 years. In recent years, the team wrapped up the Advanced Research, Innovation, and Evaluation Study (ARIES) series by completing the detailed design of the ARIES–Advanced and Conservative Tokamak (ARIES-ACT2) power plant—a plant with conservative physics and technology, representing a tokamak with reduced-activation ferritic/martensitic (RAFM) structure and dual-coolant lead-lithium blanket. The integration of nuclear assessments (neutronics, shielding, and activation) is an essential element to ARIES-ACT2 success. 
This paper highlights the design philosophy of in-vessel components and characterizes several nuclear-related issues that have been addressed during the course of the study to improve the ARIES-ACT2 design: sufficient breeding of tritium to fuel the plasma, well-optimized in-vessel components that satisfy all design requirements and guarantee the shielding functionality of its radial/vertical builds, survivability of low-activation/radiation-resistant structural materials in 14-MeV neutron environment, activation concerns for RAFM and corrosion-resistant oxide-dispersion-strengthened alloys, and an integral approach to handle the mildly radioactive materials during operation and after decommissioning.},\n\tnumber = {1},\n\turldate = {2018-04-05},\n\tjournal = {Fusion Science and Technology},\n\tauthor = {El-Guebaly, L. and Mynsberge, L. and Davis, A. and D’Angelo, C. and Rowcliffe, A. and Pint, B. and Team, ARIES-ACT},\n\tmonth = jul,\n\tyear = {2017},\n\tkeywords = {Activation analysis, DCLL blanket, neutronics, product},\n\tpages = {17--40},\n}\n", "data": { "key": "PG5YTTAG", - "version": 21903, + "version": 30748, "itemType": "journalArticle", "title": "Design and Evaluation of Nuclear System for ARIES-ACT2 Power Plant with DCLL Blanket", "creators": [ @@ -10668,21 +10720,23 @@ { "tag": "neutronics", "type": 1 + }, + { + "tag": "product" } ], "collections": [ - "UKXV4KID", - "H442QZRN", - "CMA2SK5V" + "APMMJXES", + "UKXV4KID" ], "relations": {}, "dateAdded": "2018-04-05T12:34:36Z", - "dateModified": "2018-04-05T12:34:36Z" + "dateModified": "2024-09-08T17:29:03Z" } }, { "key": "PP39E2UP", - "version": 21903, + "version": 30781, "library": { "type": "group", "id": 10058, @@ -10737,10 +10791,10 @@ "parsedDate": "2017-07", "numChildren": 1 }, - "bibtex": "\n@article{biondo_transmutation_2017,\n\ttitle = {Transmutation {Approximations} for the {Application} of {Hybrid} {Monte} {Carlo}/{Deterministic} {Neutron} {Transport} to {Shutdown} {Dose} {Rate} 
{Analysis}},\n\tvolume = {187},\n\turl = {http://www.tandfonline.com/doi/abs/10.1080/00295639.2016.1275848},\n\tabstract = {In fusion energy systems (FES) neutrons born from burning plasma activate system components. The photon dose rate after shutdown from resulting radionuclides must be quantified. This shutdown dose rate\n(SDR) is calculated by coupling neutron transport, activation analysis, and photon transport. The size, complexity, and attenuating configuration of FES motivate the use of hybrid Monte Carlo (MC)/deterministic\nneutron transport. The Multi-Step Consistent Adjoint Driven Importance Sampling (MS-CADIS) method can be used to optimize MC neutron transport for coupled multiphysics problems, including SDR analysis, using\ndeterministic estimates of adjoint flux distributions. When used for SDR analysis, MS-CADIS requires the formulation of an adjoint neutron source that approximates the transmutation process. In this work, transmutation approximations are used to derive a solution for this adjoint neutron source. It is shown that these approximations are reasonably met for typical FES neutron spectra and materials over a range of irradiation scenarios. When these approximations are met, the Groupwise Transmutation (GT)-CADIS method, proposed\nhere, can be used effectively. GT-CADIS is an implementation of the MS-CADIS method for SDR analysis that uses a series of single-energy-group irradiations to calculate the adjoint neutron source. For a simple SDR\nproblem, GT-CADIS provides speedups of 200 ± 100 relative to global variance reduction with the Forward Weighted (FW)-CADIS method and 9 ± 5 · 10{\\textasciicircum}4 relative to analog. This work shows that GT-CADIS is broadly applicable to FES problems and will significantly reduce the computational resources necessary for SDR\nanalysis.},\n\tnumber = {1},\n\tjournal = {Nuclear Science and Engineering},\n\tauthor = {Biondo, Elliott D. 
and Wilson, Paul P.H.},\n\tmonth = jul,\n\tyear = {2017},\n\tpages = {27--48},\n}\n", + "bibtex": "\n@article{biondo_transmutation_2017,\n\ttitle = {Transmutation {Approximations} for the {Application} of {Hybrid} {Monte} {Carlo}/{Deterministic} {Neutron} {Transport} to {Shutdown} {Dose} {Rate} {Analysis}},\n\tvolume = {187},\n\turl = {http://www.tandfonline.com/doi/abs/10.1080/00295639.2016.1275848},\n\tabstract = {In fusion energy systems (FES) neutrons born from burning plasma activate system components. The photon dose rate after shutdown from resulting radionuclides must be quantified. This shutdown dose rate\n(SDR) is calculated by coupling neutron transport, activation analysis, and photon transport. The size, complexity, and attenuating configuration of FES motivate the use of hybrid Monte Carlo (MC)/deterministic\nneutron transport. The Multi-Step Consistent Adjoint Driven Importance Sampling (MS-CADIS) method can be used to optimize MC neutron transport for coupled multiphysics problems, including SDR analysis, using\ndeterministic estimates of adjoint flux distributions. When used for SDR analysis, MS-CADIS requires the formulation of an adjoint neutron source that approximates the transmutation process. In this work, transmutation approximations are used to derive a solution for this adjoint neutron source. It is shown that these approximations are reasonably met for typical FES neutron spectra and materials over a range of irradiation scenarios. When these approximations are met, the Groupwise Transmutation (GT)-CADIS method, proposed\nhere, can be used effectively. GT-CADIS is an implementation of the MS-CADIS method for SDR analysis that uses a series of single-energy-group irradiations to calculate the adjoint neutron source. For a simple SDR\nproblem, GT-CADIS provides speedups of 200 ± 100 relative to global variance reduction with the Forward Weighted (FW)-CADIS method and 9 ± 5 · 10{\\textasciicircum}4 relative to analog. 
This work shows that GT-CADIS is broadly applicable to FES problems and will significantly reduce the computational resources necessary for SDR\nanalysis.},\n\tnumber = {1},\n\tjournal = {Nuclear Science and Engineering},\n\tauthor = {Biondo, Elliott D. and Wilson, Paul P.H.},\n\tmonth = jul,\n\tyear = {2017},\n\tkeywords = {CNERG:HK20 Final Report, product},\n\tpages = {27--48},\n}\n", "data": { "key": "PP39E2UP", - "version": 21903, + "version": 30781, "itemType": "journalArticle", "title": "Transmutation Approximations for the Application of Hybrid Monte Carlo/Deterministic Neutron Transport to Shutdown Dose Rate Analysis", "creators": [ @@ -10777,16 +10831,21 @@ "callNumber": "", "rights": "", "extra": "", - "tags": [], + "tags": [ + { + "tag": "CNERG:HK20 Final Report" + }, + { + "tag": "product" + } + ], "collections": [ - "UKXV4KID", - "4MDZ29N8", - "H442QZRN", - "CMA2SK5V" + "APMMJXES", + "UKXV4KID" ], "relations": {}, "dateAdded": "2017-06-23T02:45:37Z", - "dateModified": "2017-06-23T02:45:37Z" + "dateModified": "2024-09-08T17:38:42Z" } }, { @@ -10906,7 +10965,7 @@ }, { "key": "H4P52FWV", - "version": 21903, + "version": 30749, "library": { "type": "group", "id": 10058, @@ -10950,10 +11009,10 @@ "parsedDate": "2017", "numChildren": 1 }, - "bibtex": "\n@article{harb_effect_2017,\n\ttitle = {The {Effect} of {Constructed} {Mesh}-{Based} {Fluxes} on {Shutdown} {Dose} {Rate} {Calculations} in {Fusion} {Energy} {Systems}},\n\tvolume = {117},\n\tnumber = {1},\n\tjournal = {Transactions of the American Nuclear Society},\n\tauthor = {Harb, Moataz and Wilson, Paul P.H. 
and Davis, Andrew},\n\tmonth = nov,\n\tyear = {2017},\n\tpages = {1216--1219},\n}\n", + "bibtex": "\n@article{harb_effect_2017,\n\ttitle = {The {Effect} of {Constructed} {Mesh}-{Based} {Fluxes} on {Shutdown} {Dose} {Rate} {Calculations} in {Fusion} {Energy} {Systems}},\n\tvolume = {117},\n\tnumber = {1},\n\tjournal = {Transactions of the American Nuclear Society},\n\tauthor = {Harb, Moataz and Wilson, Paul P.H. and Davis, Andrew},\n\tmonth = nov,\n\tyear = {2017},\n\tkeywords = {product},\n\tpages = {1216--1219},\n}\n", "data": { "key": "H4P52FWV", - "version": 21903, + "version": 30749, "itemType": "journalArticle", "title": "The Effect of Constructed Mesh-Based Fluxes on Shutdown Dose Rate Calculations in Fusion Energy Systems", "creators": [ @@ -10995,15 +11054,18 @@ "callNumber": "", "rights": "", "extra": "", - "tags": [], + "tags": [ + { + "tag": "product" + } + ], "collections": [ - "UKXV4KID", - "H442QZRN", - "CMA2SK5V" + "APMMJXES", + "UKXV4KID" ], "relations": {}, "dateAdded": "2017-11-24T19:31:43Z", - "dateModified": "2017-11-24T19:32:39Z" + "dateModified": "2024-09-08T17:29:03Z" } }, { @@ -11940,7 +12002,7 @@ }, { "key": "USAJZ3FD", - "version": 21903, + "version": 30781, "library": { "type": "group", "id": 10058, @@ -11995,10 +12057,10 @@ "parsedDate": "2016", "numChildren": 1 }, - "bibtex": "\n@article{biondo_shutdown_2016,\n\ttitle = {Shutdown {Dose} {Rate} {Analysis} with {CAD} {Geometry}, {Cartesian}/{Tetrahedral} {Mesh}, and {Advanced} {Variance} {Reduction}},\n\tvolume = {106},\n\tissn = {0920-3796},\n\turl = {http://www.sciencedirect.com/science/article/pii/S0920379616302009},\n\tdoi = {http://dx.doi.org/10.1016/j.fusengdes.2016.03.004},\n\tabstract = {In fusion energy systems (FES) high-energy neutrons born from burning plasma activate system components to form radionuclides. The biological dose rate that results from photons emitted by these radionuclides after shutdown—the shutdown dose rate (SDR)—must be quantified for maintenance planning. 
This can be done using the Rigorous Two-Step (R2S) method, which involves separate neutron and photon transport calculations, coupled by a nuclear inventory analysis code. The geometric complexity and highly attenuating configuration of FES motivates the use of CAD geometry and advanced variance\nreduction for this analysis.\n\nAn R2S workflow has been created with the new capability of performing SDR analysis directly from CAD geometry with Cartesian or tetrahedral meshes and with biased photon source sampling, enabling\nthe use of the Consistent Adjoint Driven Importance Sampling (CADIS) variance reduction technique. This workflow has been validated with the Frascati Neutron Generator (FNG)-ITER SDR benchmark using both Cartesian and tetrahedral meshes and both unbiased and biased photon source sampling. All results are\nwithin 20.4\\% of experimental values, which constitutes satisfactory agreement. Photon transport using\nCADIS is demonstrated to yield speedups as high as 8.5·10{\\textasciicircum}5 for problems using the FNG geometry.},\n\tjournal = {Fusion Engineering and Design},\n\tauthor = {Biondo, Elliott D. and Davis, Andrew and Wilson, Paul P.H.},\n\tmonth = may,\n\tyear = {2016},\n\tpages = {77--84},\n}\n", + "bibtex": "\n@article{biondo_shutdown_2016,\n\ttitle = {Shutdown {Dose} {Rate} {Analysis} with {CAD} {Geometry}, {Cartesian}/{Tetrahedral} {Mesh}, and {Advanced} {Variance} {Reduction}},\n\tvolume = {106},\n\tissn = {0920-3796},\n\turl = {http://www.sciencedirect.com/science/article/pii/S0920379616302009},\n\tdoi = {http://dx.doi.org/10.1016/j.fusengdes.2016.03.004},\n\tabstract = {In fusion energy systems (FES) high-energy neutrons born from burning plasma activate system components to form radionuclides. The biological dose rate that results from photons emitted by these radionuclides after shutdown—the shutdown dose rate (SDR)—must be quantified for maintenance planning. 
This can be done using the Rigorous Two-Step (R2S) method, which involves separate neutron and photon transport calculations, coupled by a nuclear inventory analysis code. The geometric complexity and highly attenuating configuration of FES motivates the use of CAD geometry and advanced variance\nreduction for this analysis.\n\nAn R2S workflow has been created with the new capability of performing SDR analysis directly from CAD geometry with Cartesian or tetrahedral meshes and with biased photon source sampling, enabling\nthe use of the Consistent Adjoint Driven Importance Sampling (CADIS) variance reduction technique. This workflow has been validated with the Frascati Neutron Generator (FNG)-ITER SDR benchmark using both Cartesian and tetrahedral meshes and both unbiased and biased photon source sampling. All results are\nwithin 20.4\\% of experimental values, which constitutes satisfactory agreement. Photon transport using\nCADIS is demonstrated to yield speedups as high as 8.5·10{\\textasciicircum}5 for problems using the FNG geometry.},\n\tjournal = {Fusion Engineering and Design},\n\tauthor = {Biondo, Elliott D. 
and Davis, Andrew and Wilson, Paul P.H.},\n\tmonth = may,\n\tyear = {2016},\n\tkeywords = {CNERG:HK20 Final Report, product},\n\tpages = {77--84},\n}\n", "data": { "key": "USAJZ3FD", - "version": 21903, + "version": 30781, "itemType": "journalArticle", "title": "Shutdown Dose Rate Analysis with CAD Geometry, Cartesian/Tetrahedral Mesh, and Advanced Variance Reduction", "creators": [ @@ -12040,16 +12102,21 @@ "callNumber": "", "rights": "", "extra": "", - "tags": [], + "tags": [ + { + "tag": "CNERG:HK20 Final Report" + }, + { + "tag": "product" + } + ], "collections": [ - "UKXV4KID", - "4MDZ29N8", - "H442QZRN", - "CMA2SK5V" + "APMMJXES", + "UKXV4KID" ], "relations": {}, "dateAdded": "2016-04-04T19:55:27Z", - "dateModified": "2017-01-11T03:36:09Z" + "dateModified": "2024-09-08T17:38:42Z" } }, { @@ -12754,8 +12821,7 @@ "extra": "", "tags": [], "collections": [ - "UKXV4KID", - "RI2DQ3B2" + "UKXV4KID" ], "relations": {}, "dateAdded": "2016-10-26T19:23:01Z", @@ -12875,7 +12941,7 @@ }, { "key": "E5STAZZD", - "version": 21761, + "version": 30777, "library": { "type": "group", "id": 10058, @@ -12930,10 +12996,10 @@ "parsedDate": "2015-04-19", "numChildren": 2 }, - "bibtex": "\n@inproceedings{biondo_accelerating_2015,\n\taddress = {Nashville, Tennessee},\n\ttitle = {Accelerating {Fusion} {Reactor} {Neutronics} {Modeling} by {Automatic} {Coupling} of {Hybrid} {Monte} {Carlo}/{Deterministic} {Transport} on {CAD} {Geometry}},\n\tisbn = {978-0-89448-720-0},\n\tabstract = {Detailed radiation transport calculations are necessary for many aspects of the design of fusion\nenergy systems (FES) such as ensuring occupational safety, assessing the activation of system components\nfor waste disposal, and maintaining cryogenic temperatures within superconducting magnets. Hybrid\nMonte Carlo (MC)/deterministic techniques are necessary for this analysis because FES are large, heavily\nshielded, and contain streaming paths that can only be resolved with MC. 
The tremendous complexity of\nFES necessitates the use of CAD geometry for design and analysis. Previous ITER analysis has required\nthe translation of CAD geometry to MCNP5 form in order to use the AutomateD VAriaNce reducTion\nGenerator (ADVANTG) for hybrid MC/deterministic transport. In this work, ADVANTG was modified\nto support CAD geometry, allowing hybrid (MC)/deterministic transport to be done automatically and\neliminating the need for this translation step. This was done by adding a new ray tracing routine\nto ADVANTG for CAD geometries using the Direct Accelerated Geometry Monte Carlo (DAGMC)\nsoftware library. This new capability is demonstrated with a prompt dose rate calculation for an ITER\ncomputational benchmark problem using both the Consistent Adjoint Driven Importance Sampling\n(CADIS) method an the Forward Weighted (FW)-CADIS method. The variance reduction parameters\nproduced by ADVANTG are shown to be the same using CAD geometry and standard MCNP5 geometry.\nSignificant speedups were observed for both neutrons (as high as a factor of 7.1) and photons (as high as\na factor of 59.6).},\n\tbooktitle = {Mathematics \\& {Computations} ({M}\\&{C}+{SNA}+{MC} 2015)},\n\tauthor = {Biondo, Elliott and Ibrahim, Ahmad M. and Mosher, Scott W. and Grove, Robert E.},\n\tmonth = apr,\n\tyear = {2015},\n}\n", + "bibtex": "\n@inproceedings{biondo_accelerating_2015,\n\taddress = {Nashville, Tennessee},\n\ttitle = {Accelerating {Fusion} {Reactor} {Neutronics} {Modeling} by {Automatic} {Coupling} of {Hybrid} {Monte} {Carlo}/{Deterministic} {Transport} on {CAD} {Geometry}},\n\tisbn = {978-0-89448-720-0},\n\tabstract = {Detailed radiation transport calculations are necessary for many aspects of the design of fusion\nenergy systems (FES) such as ensuring occupational safety, assessing the activation of system components\nfor waste disposal, and maintaining cryogenic temperatures within superconducting magnets. 
Hybrid\nMonte Carlo (MC)/deterministic techniques are necessary for this analysis because FES are large, heavily\nshielded, and contain streaming paths that can only be resolved with MC. The tremendous complexity of\nFES necessitates the use of CAD geometry for design and analysis. Previous ITER analysis has required\nthe translation of CAD geometry to MCNP5 form in order to use the AutomateD VAriaNce reducTion\nGenerator (ADVANTG) for hybrid MC/deterministic transport. In this work, ADVANTG was modified\nto support CAD geometry, allowing hybrid (MC)/deterministic transport to be done automatically and\neliminating the need for this translation step. This was done by adding a new ray tracing routine\nto ADVANTG for CAD geometries using the Direct Accelerated Geometry Monte Carlo (DAGMC)\nsoftware library. This new capability is demonstrated with a prompt dose rate calculation for an ITER\ncomputational benchmark problem using both the Consistent Adjoint Driven Importance Sampling\n(CADIS) method an the Forward Weighted (FW)-CADIS method. The variance reduction parameters\nproduced by ADVANTG are shown to be the same using CAD geometry and standard MCNP5 geometry.\nSignificant speedups were observed for both neutrons (as high as a factor of 7.1) and photons (as high as\na factor of 59.6).},\n\tbooktitle = {Mathematics \\& {Computations} ({M}\\&{C}+{SNA}+{MC} 2015)},\n\tauthor = {Biondo, Elliott and Ibrahim, Ahmad M. and Mosher, Scott W. 
and Grove, Robert E.},\n\tmonth = apr,\n\tyear = {2015},\n\tkeywords = {CNERG:HK20 Final Report},\n}\n", "data": { "key": "E5STAZZD", - "version": 21761, + "version": 30777, "itemType": "conferencePaper", "title": "Accelerating Fusion Reactor Neutronics Modeling by Automatic Coupling of Hybrid Monte Carlo/Deterministic Transport on CAD Geometry", "creators": [ @@ -12979,14 +13045,18 @@ "callNumber": "", "rights": "", "extra": "", - "tags": [], + "tags": [ + { + "tag": "CNERG:HK20 Final Report" + } + ], "collections": [ "UKXV4KID", - "4MDZ29N8" + "QNJGHVT4" ], "relations": {}, "dateAdded": "2015-07-09T20:06:20Z", - "dateModified": "2017-09-04T20:57:46Z" + "dateModified": "2024-09-08T17:38:36Z" } }, { @@ -13332,7 +13402,7 @@ }, { "key": "MSA966E9", - "version": 21903, + "version": 30781, "library": { "type": "group", "id": 10058, @@ -13387,10 +13457,10 @@ "parsedDate": "2015", "numChildren": 2 }, - "bibtex": "\n@article{biondo_rigorous_2015,\n\tseries = {Best of {Radiation} {Protection} and {Shielding} {Division} 2014---{II}},\n\ttitle = {Rigorous {Two}-{Step} {Activation} for {Fusion} {Systems} with {PyNE}},\n\tvolume = {112},\n\tjournal = {Transactions of the American Nuclear Society},\n\tauthor = {Biondo, Elliott and Davis, Andrew and Scopatz, Anthony and Wilson, Paul P.H.},\n\tyear = {2015},\n\tpages = {617--620},\n}\n", + "bibtex": "\n@article{biondo_rigorous_2015,\n\tseries = {Best of {Radiation} {Protection} and {Shielding} {Division} 2014---{II}},\n\ttitle = {Rigorous {Two}-{Step} {Activation} for {Fusion} {Systems} with {PyNE}},\n\tvolume = {112},\n\tjournal = {Transactions of the American Nuclear Society},\n\tauthor = {Biondo, Elliott and Davis, Andrew and Scopatz, Anthony and Wilson, Paul P.H.},\n\tyear = {2015},\n\tkeywords = {CNERG:HK20 Final Report, product},\n\tpages = {617--620},\n}\n", "data": { "key": "MSA966E9", - "version": 21903, + "version": 30781, "itemType": "journalArticle", "title": "Rigorous Two-Step Activation for Fusion Systems with 
PyNE", "creators": [ @@ -13437,16 +13507,21 @@ "callNumber": "", "rights": "", "extra": "", - "tags": [], + "tags": [ + { + "tag": "CNERG:HK20 Final Report" + }, + { + "tag": "product" + } + ], "collections": [ - "UKXV4KID", - "4MDZ29N8", - "H442QZRN", - "CMA2SK5V" + "APMMJXES", + "UKXV4KID" ], "relations": {}, "dateAdded": "2015-07-09T20:27:36Z", - "dateModified": "2017-01-11T04:03:41Z" + "dateModified": "2024-09-08T17:38:42Z" } }, { @@ -15259,8 +15334,8 @@ } }, { - "key": "UM2DP2WJ", - "version": 21760, + "key": "WKPH3AP6", + "version": 30781, "library": { "type": "group", "id": 10058, @@ -15274,28 +15349,28 @@ }, "links": { "self": { - "href": "https://api.zotero.org/groups/10058/items/UM2DP2WJ", + "href": "https://api.zotero.org/groups/10058/items/WKPH3AP6", "type": "application/json" }, "alternate": { - "href": "https://www.zotero.org/groups/10058/items/UM2DP2WJ", + "href": "https://www.zotero.org/groups/10058/items/WKPH3AP6", "type": "text/html" }, "attachment": { - "href": "https://api.zotero.org/groups/10058/items/RWQ34QDW", + "href": "https://api.zotero.org/groups/10058/items/62VDJJT6", "type": "application/json", "attachmentType": "application/pdf", - "attachmentSize": 733265 + "attachmentSize": 663460 } }, "meta": { "createdByUser": { - "id": 162605, - "username": "kldunn", - "name": "", + "id": 708524, + "username": "erelson", + "name": "Eric Relson", "links": { "alternate": { - "href": "https://www.zotero.org/kldunn", + "href": "https://www.zotero.org/erelson", "type": "text/html" } } @@ -15311,29 +15386,34 @@ } } }, - "creatorSummary": "Dunn and Wilson", + "creatorSummary": "Relson et al.", "parsedDate": "2013-05-05", - "numChildren": 1 + "numChildren": 2 }, - "bibtex": "\n@inproceedings{dunn_monte_2013,\n\taddress = {Sun Valley, ID},\n\ttitle = {Monte {Carlo} {Mesh} {Tallies} based on a {Kernel} {Density} {Estimator} {Approach} using {Integrated} {Particle} {Tracks}},\n\tbooktitle = {M\\&{C} 2013},\n\tpublisher = {American Nuclear Society},\n\tauthor 
= {Dunn, K. L. and Wilson, P. H.},\n\tmonth = may,\n\tyear = {2013},\n}\n", + "bibtex": "\n@inproceedings{relson_improved_2013,\n\taddress = {Sun Valley, ID},\n\ttitle = {Improved {Mesh} {Based} {Photon} {Sampling} {Techniques} {For} {Neutron} {Activation} {Analysis}},\n\tabstract = {The design of fusion power systems requires analysis of neutron activation of large, complex volumes, and the resulting particles emitted from these volumes. Structured mesh-based discretization of these problems allows for improved modeling in these activation analysis problems. Finer discretization of these problems results in large computational costs, which drives the investigation of more efficient methods. Within an ad hoc subroutine of the Monte Carlo transport code MCNP, we implement sampling of voxels and photon energies for volumetric sources using the alias method. The alias method enables efficient sampling of a discrete probability distribution, and operates in O(1) time, whereas the simpler direct discrete method requires O(log(n)) time. By using the alias method, voxel sampling becomes a viable alternative to sampling space with the O(1) approach of uniformly sampling the problem volume. Additionally, with voxel sampling it is straightforward to introduce biasing of volumetric sources,\nand we implement this biasing of voxels as an additional variance reduction technique that can be applied. We verify our implementation and compare the alias method, with and without biasing, to direct discrete sampling of voxels, and to uniform sampling. We study the behavior of source biasing in a second set of tests and find trends between improvements and source shape, material, and material density. Overall, however, the magnitude of improvements from source biasing appears to be limited. 
Future work will benefit from the implementation of efficient voxel sampling – particularly with conformal unstructured meshes where the uniform sampling approach cannot be applied.},\n\tbooktitle = {M\\&{C} 2013},\n\tpublisher = {American Nuclear Society},\n\tauthor = {Relson, E. and Wilson, P.P.H. and Biondo, Elliott},\n\tmonth = may,\n\tyear = {2013},\n\tkeywords = {ALARA, CNERG:HK20 Final Report, MCNP, Photons, R2S-ACT, Sampling},\n}\n", "data": { - "key": "UM2DP2WJ", - "version": 21760, + "key": "WKPH3AP6", + "version": 30781, "itemType": "conferencePaper", - "title": "Monte Carlo Mesh Tallies based on a Kernel Density Estimator Approach using Integrated Particle Tracks", + "title": "Improved Mesh Based Photon Sampling Techniques For Neutron Activation Analysis", "creators": [ { "creatorType": "author", - "firstName": "K. L.", - "lastName": "Dunn" + "firstName": "E.", + "lastName": "Relson" }, { "creatorType": "author", - "firstName": "P. H.", + "firstName": "P.P.H.", "lastName": "Wilson" + }, + { + "creatorType": "author", + "firstName": "Elliott", + "lastName": "Biondo" } ], - "abstractNote": "", + "abstractNote": "The design of fusion power systems requires analysis of neutron activation of large, complex volumes, and the resulting particles emitted from these volumes. Structured mesh-based discretization of these problems allows for improved modeling in these activation analysis problems. Finer discretization of these problems results in large computational costs, which drives the investigation of more efficient methods. Within an ad hoc subroutine of the Monte Carlo transport code MCNP, we implement sampling of voxels and photon energies for volumetric sources using the alias method. The alias method enables efficient sampling of a discrete probability distribution, and operates in O(1) time, whereas the simpler direct discrete method requires O(log(n)) time. 
By using the alias method, voxel sampling becomes a viable alternative to sampling space with the O(1) approach of uniformly sampling the problem volume. Additionally, with voxel sampling it is straightforward to introduce biasing of volumetric sources,\nand we implement this biasing of voxels as an additional variance reduction technique that can be applied. We verify our implementation and compare the alias method, with and without biasing, to direct discrete sampling of voxels, and to uniform sampling. We study the behavior of source biasing in a second set of tests and find trends between improvements and source shape, material, and material density. Overall, however, the magnitude of improvements from source biasing appears to be limited. Future work will benefit from the implementation of efficient voxel sampling – particularly with conformal unstructured meshes where the uniform sampling approach cannot be applied.", "date": "May 5-9, 2013", "proceedingsTitle": "M&C 2013", "conferenceName": "International Conference on Mathematics and Computational Methods Applied to Nuclear Science & Engineering (M&C 2013)", @@ -15354,17 +15434,37 @@ "callNumber": "", "rights": "", "extra": "", - "tags": [], + "tags": [ + { + "tag": "ALARA" + }, + { + "tag": "CNERG:HK20 Final Report" + }, + { + "tag": "MCNP" + }, + { + "tag": "Photons" + }, + { + "tag": "R2S-ACT" + }, + { + "tag": "Sampling" + } + ], "collections": [ - "UKXV4KID" + "UKXV4KID", + "QNJGHVT4" ], "relations": {}, - "dateAdded": "2013-07-01T15:56:13Z", - "dateModified": "2014-01-15T21:21:58Z" + "dateAdded": "2013-04-04T22:35:53Z", + "dateModified": "2024-09-08T17:38:42Z" } }, { - "key": "WKPH3AP6", + "key": "UM2DP2WJ", "version": 21760, "library": { "type": "group", @@ -15379,28 +15479,28 @@ }, "links": { "self": { - "href": "https://api.zotero.org/groups/10058/items/WKPH3AP6", + "href": "https://api.zotero.org/groups/10058/items/UM2DP2WJ", "type": "application/json" }, "alternate": { - "href": 
"https://www.zotero.org/groups/10058/items/WKPH3AP6", + "href": "https://www.zotero.org/groups/10058/items/UM2DP2WJ", "type": "text/html" }, "attachment": { - "href": "https://api.zotero.org/groups/10058/items/62VDJJT6", + "href": "https://api.zotero.org/groups/10058/items/RWQ34QDW", "type": "application/json", "attachmentType": "application/pdf", - "attachmentSize": 663460 + "attachmentSize": 733265 } }, "meta": { "createdByUser": { - "id": 708524, - "username": "erelson", - "name": "Eric Relson", + "id": 162605, + "username": "kldunn", + "name": "", "links": { "alternate": { - "href": "https://www.zotero.org/erelson", + "href": "https://www.zotero.org/kldunn", "type": "text/html" } } @@ -15416,34 +15516,29 @@ } } }, - "creatorSummary": "Relson et al.", + "creatorSummary": "Dunn and Wilson", "parsedDate": "2013-05-05", - "numChildren": 2 + "numChildren": 1 }, - "bibtex": "\n@inproceedings{relson_improved_2013,\n\taddress = {Sun Valley, ID},\n\ttitle = {Improved {Mesh} {Based} {Photon} {Sampling} {Techniques} {For} {Neutron} {Activation} {Analysis}},\n\tabstract = {The design of fusion power systems requires analysis of neutron activation of large, complex volumes, and the resulting particles emitted from these volumes. Structured mesh-based discretization of these problems allows for improved modeling in these activation analysis problems. Finer discretization of these problems results in large computational costs, which drives the investigation of more efficient methods. Within an ad hoc subroutine of the Monte Carlo transport code MCNP, we implement sampling of voxels and photon energies for volumetric sources using the alias method. The alias method enables efficient sampling of a discrete probability distribution, and operates in O(1) time, whereas the simpler direct discrete method requires O(log(n)) time. By using the alias method, voxel sampling becomes a viable alternative to sampling space with the O(1) approach of uniformly sampling the problem volume. 
Additionally, with voxel sampling it is straightforward to introduce biasing of volumetric sources,\nand we implement this biasing of voxels as an additional variance reduction technique that can be applied. We verify our implementation and compare the alias method, with and without biasing, to direct discrete sampling of voxels, and to uniform sampling. We study the behavior of source biasing in a second set of tests and find trends between improvements and source shape, material, and material density. Overall, however, the magnitude of improvements from source biasing appears to be limited. Future work will benefit from the implementation of efficient voxel sampling – particularly with conformal unstructured meshes where the uniform sampling approach cannot be applied.},\n\tbooktitle = {M\\&{C} 2013},\n\tpublisher = {American Nuclear Society},\n\tauthor = {Relson, E. and Wilson, P.P.H. and Biondo, Elliott},\n\tmonth = may,\n\tyear = {2013},\n\tkeywords = {ALARA, MCNP, Photons, R2S-ACT, Sampling},\n}\n", + "bibtex": "\n@inproceedings{dunn_monte_2013,\n\taddress = {Sun Valley, ID},\n\ttitle = {Monte {Carlo} {Mesh} {Tallies} based on a {Kernel} {Density} {Estimator} {Approach} using {Integrated} {Particle} {Tracks}},\n\tbooktitle = {M\\&{C} 2013},\n\tpublisher = {American Nuclear Society},\n\tauthor = {Dunn, K. L. and Wilson, P. H.},\n\tmonth = may,\n\tyear = {2013},\n}\n", "data": { - "key": "WKPH3AP6", + "key": "UM2DP2WJ", "version": 21760, "itemType": "conferencePaper", - "title": "Improved Mesh Based Photon Sampling Techniques For Neutron Activation Analysis", + "title": "Monte Carlo Mesh Tallies based on a Kernel Density Estimator Approach using Integrated Particle Tracks", "creators": [ { "creatorType": "author", - "firstName": "E.", - "lastName": "Relson" + "firstName": "K. L.", + "lastName": "Dunn" }, { "creatorType": "author", - "firstName": "P.P.H.", + "firstName": "P. 
H.", "lastName": "Wilson" - }, - { - "creatorType": "author", - "firstName": "Elliott", - "lastName": "Biondo" } ], - "abstractNote": "The design of fusion power systems requires analysis of neutron activation of large, complex volumes, and the resulting particles emitted from these volumes. Structured mesh-based discretization of these problems allows for improved modeling in these activation analysis problems. Finer discretization of these problems results in large computational costs, which drives the investigation of more efficient methods. Within an ad hoc subroutine of the Monte Carlo transport code MCNP, we implement sampling of voxels and photon energies for volumetric sources using the alias method. The alias method enables efficient sampling of a discrete probability distribution, and operates in O(1) time, whereas the simpler direct discrete method requires O(log(n)) time. By using the alias method, voxel sampling becomes a viable alternative to sampling space with the O(1) approach of uniformly sampling the problem volume. Additionally, with voxel sampling it is straightforward to introduce biasing of volumetric sources,\nand we implement this biasing of voxels as an additional variance reduction technique that can be applied. We verify our implementation and compare the alias method, with and without biasing, to direct discrete sampling of voxels, and to uniform sampling. We study the behavior of source biasing in a second set of tests and find trends between improvements and source shape, material, and material density. Overall, however, the magnitude of improvements from source biasing appears to be limited. 
Future work will benefit from the implementation of efficient voxel sampling – particularly with conformal unstructured meshes where the uniform sampling approach cannot be applied.", + "abstractNote": "", "date": "May 5-9, 2013", "proceedingsTitle": "M&C 2013", "conferenceName": "International Conference on Mathematics and Computational Methods Applied to Nuclear Science & Engineering (M&C 2013)", @@ -15464,30 +15559,13 @@ "callNumber": "", "rights": "", "extra": "", - "tags": [ - { - "tag": "ALARA" - }, - { - "tag": "MCNP" - }, - { - "tag": "Photons" - }, - { - "tag": "R2S-ACT" - }, - { - "tag": "Sampling" - } - ], + "tags": [], "collections": [ - "UKXV4KID", - "4MDZ29N8" + "UKXV4KID" ], "relations": {}, - "dateAdded": "2013-04-04T22:35:53Z", - "dateModified": "2014-01-15T21:21:53Z" + "dateAdded": "2013-07-01T15:56:13Z", + "dateModified": "2014-01-15T21:21:58Z" } }, { diff --git a/_data/theses.json b/_data/theses.json index b9cb9e8f..d4524115 100644 --- a/_data/theses.json +++ b/_data/theses.json @@ -1,7 +1,177 @@ [ + { + "key": "FAWYVCU4", + "version": 30653, + "library": { + "type": "group", + "id": 10058, + "name": "CNERG", + "links": { + "alternate": { + "href": "https://www.zotero.org/groups/10058", + "type": "text/html" + } + } + }, + "links": { + "self": { + "href": "https://api.zotero.org/groups/10058/items/FAWYVCU4", + "type": "application/json" + }, + "alternate": { + "href": "https://www.zotero.org/groups/10058/items/FAWYVCU4", + "type": "text/html" + }, + "attachment": { + "href": "https://api.zotero.org/groups/10058/items/X59US696", + "type": "application/json", + "attachmentType": "application/pdf", + "attachmentSize": 21640371 + } + }, + "meta": { + "createdByUser": { + "id": 112658, + "username": "gonuke", + "name": "", + "links": { + "alternate": { + "href": "https://www.zotero.org/gonuke", + "type": "text/html" + } + } + }, + "creatorSummary": "Granda Duarte", + "parsedDate": "2024-08-26", + "numChildren": 1 + }, + "bibtex": 
"\n@phdthesis{granda_duarte_advancements_2024,\n\taddress = {Madison, WI},\n\ttype = {{PhD} {Nuclear} {Engineering} and {Engineering} {Physics}},\n\ttitle = {Advancements of {Shutdown} {Dose} {Rate} {Analysis} {Tools} for {Accelerator}-{Driven} {Systems}},\n\turl = {https://digital.library.wisc.edu/1711.dl/2Y7KCCVBI7STO8F},\n\tabstract = {This thesis focuses on advancing the state of the art for calculating the shutdown dose rate (SDR) in accelerator-driven systems, where high-energy neutrons produced by particle interactions activate materials and produce unstable radionuclides. These radionuclides emit photons as they decay, which can pose a hazard to maintenance personnel. Therefore, it's important to compute the SDR as a function of time and space after system shutdown. The SDR can be calculated using the Rigorous Two-Step (R2S) method, which requires a separate neutron and photon transport calculations coupled with an activation calculation. The current R2S workflow for accelerator-driven system relies on physics models in the Monte Carlo N-Particle (MCNP) code to simulate interactions beyond the nuclear data libraries' energy domain. This method faces limitations, as the resolution of calculations is restricted to volumetric cells. Analysts have to manually divide these volumes to improve detail, which can be complex and time-consuming. Additionally, Monte Carlo (MC) transport introduces statistical uncertainty, particularly in high-attenuation areas. Variance reduction (VR) methods such as Consistent Adjoint Driven Importance Sampling (CADIS) and its variants are often used to help reduce the inherent variance in MC results. A CADIS based VR technique exists that reduces the variance of the SDR by optimizing the primary step of the R2S calculation but has only been implemented for energy regimes typical of fusion devices. This work aims to improve the R2S workflow in two main ways. 
First, it introduces meshing capability into the special tally used in the R2S workflow, which enhances the resolution of high-energy production and destruction rate data. A complete R2S workflow is then developed, verified, and demonstrated. Second, it implements a multi-step CADIS (MS-CADIS) method for VR optimization in accelerator-driven system named high-energy GT-CADIS. The thesis further demonstrates the use of the high-energy GT-CADIS method to generate VR parameters for accelerator-driven systems considering energy regimes where cross-section libraries are not available. The high-energy GT-CADIS workflow is verified and demonstrated using test and full production models, and results indicate a significant computational speedup of up to 107 times faster convergence when compared to results obtained without VR.},\n\tlanguage = {English},\n\tschool = {University of Wisconsin-Madison},\n\tauthor = {Granda Duarte, Nancy},\n\tmonth = aug,\n\tyear = {2024},\n}\n", + "data": { + "key": "FAWYVCU4", + "version": 30653, + "itemType": "thesis", + "title": "Advancements of Shutdown Dose Rate Analysis Tools for Accelerator-Driven Systems", + "creators": [ + { + "creatorType": "author", + "firstName": "Nancy", + "lastName": "Granda Duarte" + } + ], + "abstractNote": "This thesis focuses on advancing the state of the art for calculating the shutdown dose rate (SDR) in accelerator-driven systems, where high-energy neutrons produced by particle interactions activate materials and produce unstable radionuclides. These radionuclides emit photons as they decay, which can pose a hazard to maintenance personnel. Therefore, it's important to compute the SDR as a function of time and space after system shutdown. The SDR can be calculated using the Rigorous Two-Step (R2S) method, which requires a separate neutron and photon transport calculations coupled with an activation calculation. 
The current R2S workflow for accelerator-driven system relies on physics models in the Monte Carlo N-Particle (MCNP) code to simulate interactions beyond the nuclear data libraries' energy domain. This method faces limitations, as the resolution of calculations is restricted to volumetric cells. Analysts have to manually divide these volumes to improve detail, which can be complex and time-consuming. Additionally, Monte Carlo (MC) transport introduces statistical uncertainty, particularly in high-attenuation areas. Variance reduction (VR) methods such as Consistent Adjoint Driven Importance Sampling (CADIS) and its variants are often used to help reduce the inherent variance in MC results. A CADIS based VR technique exists that reduces the variance of the SDR by optimizing the primary step of the R2S calculation but has only been implemented for energy regimes typical of fusion devices. This work aims to improve the R2S workflow in two main ways. First, it introduces meshing capability into the special tally used in the R2S workflow, which enhances the resolution of high-energy production and destruction rate data. A complete R2S workflow is then developed, verified, and demonstrated. Second, it implements a multi-step CADIS (MS-CADIS) method for VR optimization in accelerator-driven system named high-energy GT-CADIS. The thesis further demonstrates the use of the high-energy GT-CADIS method to generate VR parameters for accelerator-driven systems considering energy regimes where cross-section libraries are not available. 
The high-energy GT-CADIS workflow is verified and demonstrated using test and full production models, and results indicate a significant computational speedup of up to 107 times faster convergence when compared to results obtained without VR.", + "thesisType": "PhD Nuclear Engineering and Engineering Physics", + "university": "University of Wisconsin-Madison", + "place": "Madison, WI", + "date": "08/26/2024", + "numPages": "161", + "language": "English", + "shortTitle": "", + "url": "https://digital.library.wisc.edu/1711.dl/2Y7KCCVBI7STO8F", + "accessDate": "", + "archive": "", + "archiveLocation": "", + "libraryCatalog": "", + "callNumber": "", + "rights": "", + "extra": "", + "tags": [], + "collections": [ + "6259B6TV", + "34I86HPD" + ], + "relations": {}, + "dateAdded": "2024-09-07T19:53:33Z", + "dateModified": "2024-09-08T16:46:48Z" + } + }, + { + "key": "T3G87978", + "version": 30654, + "library": { + "type": "group", + "id": 10058, + "name": "CNERG", + "links": { + "alternate": { + "href": "https://www.zotero.org/groups/10058", + "type": "text/html" + } + } + }, + "links": { + "self": { + "href": "https://api.zotero.org/groups/10058/items/T3G87978", + "type": "application/json" + }, + "alternate": { + "href": "https://www.zotero.org/groups/10058/items/T3G87978", + "type": "text/html" + }, + "attachment": { + "href": "https://api.zotero.org/groups/10058/items/2YRIK7LP", + "type": "application/json", + "attachmentType": "application/pdf", + "attachmentSize": 9444771 + } + }, + "meta": { + "createdByUser": { + "id": 112658, + "username": "gonuke", + "name": "", + "links": { + "alternate": { + "href": "https://www.zotero.org/gonuke", + "type": "text/html" + } + } + }, + "creatorSummary": "Stomps", + "parsedDate": "2023", + "numChildren": 2 + }, + "bibtex": "\n@phdthesis{stomps_gamma_2023,\n\taddress = {Madison, WI},\n\ttype = {{PhD} {Nuclear} {Engineering} and {Engineering} {Physics}},\n\ttitle = {Gamma spectroscopy data augmentation for self-supervised machine 
learning applications to nuclear nonproliferation on measured data with limited ground-truth},\n\turl = {https://digital.library.wisc.edu/1711.dl/6JJIHNH5JIY4Y8U},\n\tabstract = {The timely detection of special nuclear material (SNM) transfers is an important monitoring objective in nuclear nonproliferation. Labeling sufficient volumes of radiation data for\nsuccessful supervised machine learning can be too costly when manual analysis is employed.\nTherefore, this work is developing a machine learning model built on semi-supervised learning\nto utilize both labeled and unlabeled data and therefore alleviate the cost of labeling.\nAs a preliminary experiment, radiation measurements collected with sodium iodide (NaI)\ndetectors from the Multi-Informatics for Nuclear Operating Scenarios (MINOS) testbed at\nOak Ridge National Laboratory (ORNL) are used. Anomalous measurements are identified\nusing a method of statistical hypothesis testing. After background estimation, an energy\ndependent spectroscopic analysis is used to characterize an anomaly based on its radiation\nsignatures in a noisy labeling heuristic. These noisily labeled spectra are used in training and\ntesting classification models that estimate a binary label: SNM transfer or other anomalous\nmeasurement. Supervised logistic regression—trained only on limited labeled data—serves\nas a baseline to compare three semi-supervised machine learning models all trained on the\nsame limited labeled data and a larger volume of unlabeled data: co-training, Label Propagation, and a Convolutional Neural Network (CNN). In each case, the semi-supervised models\noutperform logistic regression, suggesting unlabeled data can be valuable when training and\ndemonstrating performative value in semi-supervised nonproliferation implementations.\nThis work uses a self-supervised contrastive learning framework to efficiently extract\ninformation from unlabeled data. 
A contrastive model learns patterns by perturbing data\ninstances using a set of label-invariant data augmentations, meaning augmented samples\npreserve labeling information present in an original measurement. A set of transformations\nare designed for gamma spectra, tailored for specific principles of radiation detection. MINOS\nmeasurements are augmented, and an encoder is contrastively trained to produce meaningful\nhigh-dimensional representations of spectra. A supervised classifier then uses these encoded\nrepresentations to assign a label estimating whether a given transfer spectrum was of tracked\nnuclear material or not. Even a simple linear model built on these representations and trained\non limited labeled data can achieve a balanced accuracy score of 80.30\\%.\nSeveral tools are employed for evaluating the efficacy of augmentations, representations,\nand classification models. Principal Component Analysis (PCA) is used to demonstrate\nthat representations provide a richer feature space for detecting nuclear material transfers\nby embedding distributional information from unlabeled data. Integrated Gradients connect a classifier’s decision boundary to spectral features, suggesting the framework learns\nrelevant patterns in spectra that can be used for detecting transfers. When labeled data\nare scarce, this work suggests that training a supervised classifier should be prioritized over\nsemi-supervised (compared to self-supervised) contrastive learning an encoder to maximize\ndetection accuracy. Hyperparameter optimization was conducted, finding a locally optimum\nmaximum cross-validated balanced accuracy score. 
Overall, a methodology has been established for using semi-supervision to accurately classify SNM transfers without the prohibitive\ncost of labeling.},\n\turldate = {2024-03-18},\n\tschool = {University of Wisconsin-Madison},\n\tauthor = {Stomps, Jordan},\n\tyear = {2023},\n}\n", + "data": { + "key": "T3G87978", + "version": 30654, + "itemType": "thesis", + "title": "Gamma spectroscopy data augmentation for self-supervised machine learning applications to nuclear nonproliferation on measured data with limited ground-truth", + "creators": [ + { + "creatorType": "author", + "firstName": "Jordan", + "lastName": "Stomps" + } + ], + "abstractNote": "The timely detection of special nuclear material (SNM) transfers is an important monitoring objective in nuclear nonproliferation. Labeling sufficient volumes of radiation data for\nsuccessful supervised machine learning can be too costly when manual analysis is employed.\nTherefore, this work is developing a machine learning model built on semi-supervised learning\nto utilize both labeled and unlabeled data and therefore alleviate the cost of labeling.\nAs a preliminary experiment, radiation measurements collected with sodium iodide (NaI)\ndetectors from the Multi-Informatics for Nuclear Operating Scenarios (MINOS) testbed at\nOak Ridge National Laboratory (ORNL) are used. Anomalous measurements are identified\nusing a method of statistical hypothesis testing. After background estimation, an energy\ndependent spectroscopic analysis is used to characterize an anomaly based on its radiation\nsignatures in a noisy labeling heuristic. These noisily labeled spectra are used in training and\ntesting classification models that estimate a binary label: SNM transfer or other anomalous\nmeasurement. 
Supervised logistic regression—trained only on limited labeled data—serves\nas a baseline to compare three semi-supervised machine learning models all trained on the\nsame limited labeled data and a larger volume of unlabeled data: co-training, Label Propagation, and a Convolutional Neural Network (CNN). In each case, the semi-supervised models\noutperform logistic regression, suggesting unlabeled data can be valuable when training and\ndemonstrating performative value in semi-supervised nonproliferation implementations.\nThis work uses a self-supervised contrastive learning framework to efficiently extract\ninformation from unlabeled data. A contrastive model learns patterns by perturbing data\ninstances using a set of label-invariant data augmentations, meaning augmented samples\npreserve labeling information present in an original measurement. A set of transformations\nare designed for gamma spectra, tailored for specific principles of radiation detection. MINOS\nmeasurements are augmented, and an encoder is contrastively trained to produce meaningful\nhigh-dimensional representations of spectra. A supervised classifier then uses these encoded\nrepresentations to assign a label estimating whether a given transfer spectrum was of tracked\nnuclear material or not. Even a simple linear model built on these representations and trained\non limited labeled data can achieve a balanced accuracy score of 80.30%.\nSeveral tools are employed for evaluating the efficacy of augmentations, representations,\nand classification models. Principal Component Analysis (PCA) is used to demonstrate\nthat representations provide a richer feature space for detecting nuclear material transfers\nby embedding distributional information from unlabeled data. Integrated Gradients connect a classifier’s decision boundary to spectral features, suggesting the framework learns\nrelevant patterns in spectra that can be used for detecting transfers. 
When labeled data\nare scarce, this work suggests that training a supervised classifier should be prioritized over\nsemi-supervised (compared to self-supervised) contrastive learning an encoder to maximize\ndetection accuracy. Hyperparameter optimization was conducted, finding a locally optimum\nmaximum cross-validated balanced accuracy score. Overall, a methodology has been established for using semi-supervision to accurately classify SNM transfers without the prohibitive\ncost of labeling.", + "thesisType": "PhD Nuclear Engineering and Engineering Physics", + "university": "University of Wisconsin-Madison", + "place": "Madison, WI", + "date": "2023", + "numPages": "155", + "language": "", + "shortTitle": "", + "url": "https://digital.library.wisc.edu/1711.dl/6JJIHNH5JIY4Y8U", + "accessDate": "2024-03-18T22:26:41Z", + "archive": "", + "archiveLocation": "", + "libraryCatalog": "University of Wisconsin-Madison Libraries Catalog", + "callNumber": "", + "rights": "", + "extra": "", + "tags": [], + "collections": [ + "6259B6TV", + "34I86HPD" + ], + "relations": {}, + "dateAdded": "2024-03-18T22:26:41Z", + "dateModified": "2024-09-08T16:46:57Z" + } + }, { "key": "AEA47423", - "version": 27069, + "version": 30640, "library": { "type": "group", "id": 10058, @@ -56,10 +226,10 @@ "parsedDate": "2022-01", "numChildren": 1 }, - "bibtex": "\n@phdthesis{kiesling_weight_2022,\n\taddress = {Madison, WI},\n\ttype = {{PhD} {Nuclear} {Engineering} and {Engineering} {Physics}},\n\ttitle = {Weight {Window} {Isosurface} {Geometries} for {Monte} {Carlo} {Radiation} {Transport} {Variance} {Reduction}},\n\turl = {https://depot.library.wisc.edu/repository/fedora/1711.dl:HHJQGAKFHSOFW9B/datastreams/REF/content},\n\tabstract = {In order to perform accurate Monte Carlo (MC) simulations, which is a stochastic method resulting in uncertainty, variance reduction (VR) techniques are often necessary to reduce the relative error for quantities of interest. 
The use of weight windows (WWs) is a common VR method in which the statistical weight of particles are changed based on various parameters in the simulation. WWs are most commonly represented as a Cartesian WW mesh (CWWM) where WWs are defined across all energies on each mesh voxel. For large, geometrically complex problems, these meshes often need to be developed with fine resolution over the entire spatial domain in order to capture necessary fine detail in some regions of the geometry. This can cause the memory footprint of these meshes to be extremely large and computationally prohibitive. Furthermore, CWWMs are not necessarily efficient in their implementation with respect to when particle weight is checked and updated. \n\nThis dissertation work presents a novel method for representing WWs aimed at addressing the computational limitations of CWWMs while also improving VR efficiency. In this method, the WWs are transformed into a faceted mesh geometry, known as a WW isosurface geometry (WWIG), where the surfaces are the isosurfaces derived from the WW values in a CWWM. The WWIGs can then be used during particle tracking with the Direct Accelerated Geometry Monte Carlo (DAGMC) toolkit, which allows for particle tracking on arbitrarily complex geometries.\n\nIn this work, an algorithm for using WWIGs for MC VR has been implemented in DAGMC coupled with Monte Carlo N-Particle transport code (MCNP) (DAG-MCNP) 6.2. Initial verification and demonstration experiments show that the WWIG method performs accurate and comparable VR to using CWWMs. Further analysis has been done to demonstrate how changing mesh geometric features of the WWIGs affects computational performance during MC radiation transport. Depending on parameters set for generating the WWIGs and the starting CWWM, the isosurfaces of the WWIGs can vary in mesh coarseness, surface roughness, and spacing. 
In this work, we explore how these different geometric features of the WWIGs affect the memory footprint and computational performance during variance reduction for Monte Carlo radiation transport. In the end, we see that using WWIGs for MC VR improves WW efficiency and is comparable in performance to using CWWMs},\n\tschool = {University of Wisconsin-Madison},\n\tauthor = {Kiesling, Kalin R.},\n\tmonth = jan,\n\tyear = {2022},\n}\n", + "bibtex": "\n@phdthesis{kiesling_weight_2022,\n\taddress = {Madison, WI},\n\ttype = {{PhD} {Nuclear} {Engineering} and {Engineering} {Physics}},\n\ttitle = {Weight {Window} {Isosurface} {Geometries} for {Monte} {Carlo} {Radiation} {Transport} {Variance} {Reduction}},\n\turl = {https://digital.library.wisc.edu/1711.dl/VXRRFM35MBR2J9A},\n\tabstract = {In order to perform accurate Monte Carlo (MC) simulations, which is a stochastic method resulting in uncertainty, variance reduction (VR) techniques are often necessary to reduce the relative error for quantities of interest. The use of weight windows (WWs) is a common VR method in which the statistical weight of particles are changed based on various parameters in the simulation. WWs are most commonly represented as a Cartesian WW mesh (CWWM) where WWs are defined across all energies on each mesh voxel. For large, geometrically complex problems, these meshes often need to be developed with fine resolution over the entire spatial domain in order to capture necessary fine detail in some regions of the geometry. This can cause the memory footprint of these meshes to be extremely large and computationally prohibitive. Furthermore, CWWMs are not necessarily efficient in their implementation with respect to when particle weight is checked and updated. \n\nThis dissertation work presents a novel method for representing WWs aimed at addressing the computational limitations of CWWMs while also improving VR efficiency. 
In this method, the WWs are transformed into a faceted mesh geometry, known as a WW isosurface geometry (WWIG), where the surfaces are the isosurfaces derived from the WW values in a CWWM. The WWIGs can then be used during particle tracking with the Direct Accelerated Geometry Monte Carlo (DAGMC) toolkit, which allows for particle tracking on arbitrarily complex geometries.\n\nIn this work, an algorithm for using WWIGs for MC VR has been implemented in DAGMC coupled with Monte Carlo N-Particle transport code (MCNP) (DAG-MCNP) 6.2. Initial verification and demonstration experiments show that the WWIG method performs accurate and comparable VR to using CWWMs. Further analysis has been done to demonstrate how changing mesh geometric features of the WWIGs affects computational performance during MC radiation transport. Depending on parameters set for generating the WWIGs and the starting CWWM, the isosurfaces of the WWIGs can vary in mesh coarseness, surface roughness, and spacing. In this work, we explore how these different geometric features of the WWIGs affect the memory footprint and computational performance during variance reduction for Monte Carlo radiation transport. 
In the end, we see that using WWIGs for MC VR improves WW efficiency and is comparable in performance to using CWWMs},\n\tschool = {University of Wisconsin-Madison},\n\tauthor = {Kiesling, Kalin R.},\n\tmonth = jan,\n\tyear = {2022},\n}\n", "data": { "key": "AEA47423", - "version": 27069, + "version": 30640, "itemType": "thesis", "title": "Weight Window Isosurface Geometries for Monte Carlo Radiation Transport Variance Reduction", "creators": [ @@ -77,7 +247,7 @@ "numPages": "", "language": "", "shortTitle": "", - "url": "https://depot.library.wisc.edu/repository/fedora/1711.dl:HHJQGAKFHSOFW9B/datastreams/REF/content", + "url": "https://digital.library.wisc.edu/1711.dl/VXRRFM35MBR2J9A", "accessDate": "", "archive": "", "archiveLocation": "", @@ -92,12 +262,12 @@ ], "relations": {}, "dateAdded": "2022-02-16T18:12:26Z", - "dateModified": "2022-10-04T01:40:00Z" + "dateModified": "2024-09-08T16:42:19Z" } }, { "key": "QR5KAH2I", - "version": 27072, + "version": 30649, "library": { "type": "group", "id": 10058, @@ -141,10 +311,10 @@ "parsedDate": "2021-12-18", "numChildren": 1 }, - "bibtex": "\n@phdthesis{britt_angular_2021,\n\taddress = {Madison, WI},\n\ttype = {{PhD}},\n\ttitle = {Angular {Importance} {Sampling} for {Forward} and {Adjoint} {Monte} {Carlo} {Radiation} {Transport}},\n\turl = {https://depot.library.wisc.edu/repository/fedora/1711.dl:TAHOHYAYQKRUA84/datastreams/REF/content},\n\tabstract = {Variance reduction is an important tool to increase the rate of convergence in certain configurations of Monte Carlo problems. Methods such as CADIS are particularly useful to achieve this increased rate of convergence. However, CADIS does not include information for direction phase space, and an equivalent method has not been used for the adjoint Monte Carlo method. 
In this work, the benefits of including direction information in a weight window and weight target (a new type of importance sampling technique presented here) are analyzed and explored, along with a way to use importance sampling theory on the adjoint Monte Carlo method},\n\tlanguage = {English},\n\tschool = {University of Wisconsin-Madison},\n\tauthor = {Britt, Philip},\n\tmonth = dec,\n\tyear = {2021},\n}\n", + "bibtex": "\n@phdthesis{britt_angular_2021,\n\taddress = {Madison, WI},\n\ttype = {{PhD} {Nuclear} {Engineering} and {Engineering} {Physics}},\n\ttitle = {Angular {Importance} {Sampling} for {Forward} and {Adjoint} {Monte} {Carlo} {Radiation} {Transport}},\n\turl = {https://digital.library.wisc.edu/1711.dl/Z7OZ7R4H6656S8B},\n\tabstract = {Variance reduction is an important tool to increase the rate of convergence in certain configurations of Monte Carlo problems. Methods such as CADIS are particularly useful to achieve this increased rate of convergence. However, CADIS does not include information for direction phase space, and an equivalent method has not been used for the adjoint Monte Carlo method. In this work, the benefits of including direction information in a weight window and weight target (a new type of importance sampling technique presented here) are analyzed and explored, along with a way to use importance sampling theory on the adjoint Monte Carlo method},\n\tlanguage = {English},\n\tschool = {University of Wisconsin-Madison},\n\tauthor = {Britt, Philip},\n\tmonth = dec,\n\tyear = {2021},\n}\n", "data": { "key": "QR5KAH2I", - "version": 27072, + "version": 30649, "itemType": "thesis", "title": "Angular Importance Sampling for Forward and Adjoint Monte Carlo Radiation Transport", "creators": [ @@ -155,14 +325,14 @@ } ], "abstractNote": "Variance reduction is an important tool to increase the rate of convergence in certain configurations of Monte Carlo problems. 
Methods such as CADIS are particularly useful to achieve this increased rate of convergence. However, CADIS does not include information for direction phase space, and an equivalent method has not been used for the adjoint Monte Carlo method. In this work, the benefits of including direction information in a weight window and weight target (a new type of importance sampling technique presented here) are analyzed and explored, along with a way to use importance sampling theory on the adjoint Monte Carlo method", - "thesisType": "PhD", + "thesisType": "PhD Nuclear Engineering and Engineering Physics", "university": "University of Wisconsin-Madison", "place": "Madison, WI", "date": "12/18/2021", "numPages": "112", "language": "English", "shortTitle": "", - "url": "https://depot.library.wisc.edu/repository/fedora/1711.dl:TAHOHYAYQKRUA84/datastreams/REF/content", + "url": "https://digital.library.wisc.edu/1711.dl/Z7OZ7R4H6656S8B", "accessDate": "", "archive": "", "archiveLocation": "", @@ -177,12 +347,12 @@ ], "relations": {}, "dateAdded": "2022-01-12T12:08:32Z", - "dateModified": "2022-10-04T01:41:20Z" + "dateModified": "2024-09-08T16:46:10Z" } }, { "key": "TP99Q4XA", - "version": 26434, + "version": 30652, "library": { "type": "group", "id": 10058, @@ -226,10 +396,10 @@ "parsedDate": "2021", "numChildren": 5 }, - "bibtex": "\n@phdthesis{opotowsky_spent_2021,\n\taddress = {United States -- Wisconsin},\n\ttype = {{PhD}},\n\ttitle = {Spent {Nuclear} {Fuel} {Attribution} {Using} {Statistical} {Methods}: {Impacts} of {Information} {Reduction} on {Prediction} {Performance}},\n\tcopyright = {Database copyright ProQuest LLC; ProQuest does not claim copyright in the individual underlying works.},\n\tshorttitle = {Spent {Nuclear} {Fuel} {Attribution} {Using} {Statistical} {Methods}},\n\turl = {http://www.proquest.com/pqdtglobal/docview/2572584487/abstract/63EF45F6C2D47A2PQ/1},\n\tabstract = {Nuclear forensics is a nuclear security capability that is broadly defined as material 
attribution in the event of a nuclear incident. Improvement and research is needed for technical components of this process. One such area is the provenance of non-detonated special nuclear material; studied here is spent nuclear fuel (SNF), which is applicable in a scenario involving the unlawful use of commercial byproducts from nuclear power reactors. The experimental process involves measuring known forensics signatures to ascertain the reactor parameters that produced the material, assisting in locating its source. This work proposes the use of statistical methods to determine these quantities instead of empirical relationships.\nThe purpose of this work is to probe the feasibility of this method with a focus on field-deployable detection. Thus, two experiments are conducted, the first informing the second by providing a baseline of performance. Both experiments use simulated nuclide measurements as observations and reactor operation parameters as the prediction goals. First, machine learning algorithms are employed with full-knowledge training data, i.e., nuclide vectors from simulations that mimic lab-based mass spectrometry. The error in the mass measurements is artificially increased to probe the prediction performance with respect to information reduction. Second, this machine learning workflow is performed on training data analogous to a field-deployed gamma detector that can only measure radionuclides. The detector configuration is varied so that the information reduction now represents decreasing detector energy resolution. The results are evaluated using the error of the reactor parameter predictions.\nThe reactor parameters of interest are the reactor type and three quantities that can attribute SNF: burnup, initial 235U enrichment, and time since irradiation. The algorithms used to predict these quantities are k-nearest neighbors, decision trees, and maximum log-likelihood calculations. 
The first experiment predicts all of these quantities well using the three algorithms, except for k-nearest neighbors predicting time since irradiation. For the second experiment, most of the detector configurations predict burnup well, none of them predict enrichment well, and the time since irradiation results perform on or near the baseline. This approach is an exploratory study; the results are promising and warrant further study.},\n\tlanguage = {English},\n\turldate = {2021-10-15},\n\tschool = {The University of Wisconsin - Madison},\n\tauthor = {Opotowsky, Arrielle C.},\n\tyear = {2021},\n\tnote = {ISBN: 9798538113514},\n\tkeywords = {Machine learning, Nuclear forensics, Nuclear security, Reactor parameter prediction, Spent nuclear fuel attribution, Statistical methods},\n}\n", + "bibtex": "\n@phdthesis{opotowsky_spent_2021,\n\taddress = {Madison, WI},\n\ttype = {{PhD} {Nuclear} {Engineering} and {Engineering} {Physics}},\n\ttitle = {Spent {Nuclear} {Fuel} {Attribution} {Using} {Statistical} {Methods}: {Impacts} of {Information} {Reduction} on {Prediction} {Performance}},\n\tcopyright = {Database copyright ProQuest LLC; ProQuest does not claim copyright in the individual underlying works.},\n\tshorttitle = {Spent {Nuclear} {Fuel} {Attribution} {Using} {Statistical} {Methods}},\n\turl = {https://digital.library.wisc.edu/1711.dl/J27OQ7UQFR4WT8C},\n\tabstract = {Nuclear forensics is a nuclear security capability that is broadly defined as material attribution in the event of a nuclear incident. Improvement and research is needed for technical components of this process. One such area is the provenance of non-detonated special nuclear material; studied here is spent nuclear fuel (SNF), which is applicable in a scenario involving the unlawful use of commercial byproducts from nuclear power reactors. 
The experimental process involves measuring known forensics signatures to ascertain the reactor parameters that produced the material, assisting in locating its source. This work proposes the use of statistical methods to determine these quantities instead of empirical relationships.\nThe purpose of this work is to probe the feasibility of this method with a focus on field-deployable detection. Thus, two experiments are conducted, the first informing the second by providing a baseline of performance. Both experiments use simulated nuclide measurements as observations and reactor operation parameters as the prediction goals. First, machine learning algorithms are employed with full-knowledge training data, i.e., nuclide vectors from simulations that mimic lab-based mass spectrometry. The error in the mass measurements is artificially increased to probe the prediction performance with respect to information reduction. Second, this machine learning workflow is performed on training data analogous to a field-deployed gamma detector that can only measure radionuclides. The detector configuration is varied so that the information reduction now represents decreasing detector energy resolution. The results are evaluated using the error of the reactor parameter predictions.\nThe reactor parameters of interest are the reactor type and three quantities that can attribute SNF: burnup, initial 235U enrichment, and time since irradiation. The algorithms used to predict these quantities are k-nearest neighbors, decision trees, and maximum log-likelihood calculations. The first experiment predicts all of these quantities well using the three algorithms, except for k-nearest neighbors predicting time since irradiation. For the second experiment, most of the detector configurations predict burnup well, none of them predict enrichment well, and the time since irradiation results perform on or near the baseline. 
This approach is an exploratory study; the results are promising and warrant further study.},\n\tlanguage = {English},\n\turldate = {2021-10-15},\n\tschool = {University of Wisconsin-Madison},\n\tauthor = {Opotowsky, Arrielle C.},\n\tyear = {2021},\n\tnote = {ISBN: 9798538113514},\n\tkeywords = {Machine learning, Nuclear forensics, Nuclear security, Reactor parameter prediction, Spent nuclear fuel attribution, Statistical methods},\n}\n", "data": { "key": "TP99Q4XA", - "version": 26434, + "version": 30652, "itemType": "thesis", "title": "Spent Nuclear Fuel Attribution Using Statistical Methods: Impacts of Information Reduction on Prediction Performance", "creators": [ @@ -240,14 +410,14 @@ } ], "abstractNote": "Nuclear forensics is a nuclear security capability that is broadly defined as material attribution in the event of a nuclear incident. Improvement and research is needed for technical components of this process. One such area is the provenance of non-detonated special nuclear material; studied here is spent nuclear fuel (SNF), which is applicable in a scenario involving the unlawful use of commercial byproducts from nuclear power reactors. The experimental process involves measuring known forensics signatures to ascertain the reactor parameters that produced the material, assisting in locating its source. This work proposes the use of statistical methods to determine these quantities instead of empirical relationships.\nThe purpose of this work is to probe the feasibility of this method with a focus on field-deployable detection. Thus, two experiments are conducted, the first informing the second by providing a baseline of performance. Both experiments use simulated nuclide measurements as observations and reactor operation parameters as the prediction goals. First, machine learning algorithms are employed with full-knowledge training data, i.e., nuclide vectors from simulations that mimic lab-based mass spectrometry. 
The error in the mass measurements is artificially increased to probe the prediction performance with respect to information reduction. Second, this machine learning workflow is performed on training data analogous to a field-deployed gamma detector that can only measure radionuclides. The detector configuration is varied so that the information reduction now represents decreasing detector energy resolution. The results are evaluated using the error of the reactor parameter predictions.\nThe reactor parameters of interest are the reactor type and three quantities that can attribute SNF: burnup, initial 235U enrichment, and time since irradiation. The algorithms used to predict these quantities are k-nearest neighbors, decision trees, and maximum log-likelihood calculations. The first experiment predicts all of these quantities well using the three algorithms, except for k-nearest neighbors predicting time since irradiation. For the second experiment, most of the detector configurations predict burnup well, none of them predict enrichment well, and the time since irradiation results perform on or near the baseline. 
This approach is an exploratory study; the results are promising and warrant further study.", - "thesisType": "PhD", - "university": "The University of Wisconsin - Madison", - "place": "United States -- Wisconsin", + "thesisType": "PhD Nuclear Engineering and Engineering Physics", + "university": "University of Wisconsin-Madison", + "place": "Madison, WI", "date": "2021", "numPages": "197", "language": "English", "shortTitle": "Spent Nuclear Fuel Attribution Using Statistical Methods", - "url": "http://www.proquest.com/pqdtglobal/docview/2572584487/abstract/63EF45F6C2D47A2PQ/1", + "url": "https://digital.library.wisc.edu/1711.dl/J27OQ7UQFR4WT8C", "accessDate": "2021-10-15T01:13:14Z", "archive": "", "archiveLocation": "", @@ -287,7 +457,7 @@ ], "relations": {}, "dateAdded": "2021-10-15T01:13:14Z", - "dateModified": "2022-02-16T18:01:59Z" + "dateModified": "2024-09-08T16:46:35Z" } }, { @@ -462,7 +632,7 @@ }, { "key": "QECSDSE9", - "version": 27074, + "version": 30658, "library": { "type": "group", "id": 10058, @@ -517,10 +687,10 @@ "parsedDate": "2019-05-16", "numChildren": 3 }, - "bibtex": "\n@phdthesis{dangelo_variance_2019,\n\ttype = {{PhD} {Nuclear} {Engineering} and {Engineering} {Physics}},\n\ttitle = {Variance {Reduction} for {Multi}-physics {Analysis} of {Moving} {Systems}},\n\turl = {https://depot.library.wisc.edu/repository/fedora/1711.dl:KT62IHINMM6JZ9A/datastreams/REF/content},\n\tabstract = {The quantification of the shutdown dose rate (SDR) caused by photons emitted by activated structural materials is an important and necessary step of the design process of fusion energy systems (FES). FES are purposefully designed with modular components that can be moved out of a facility after shutdown for maintenance. It is particularly important to accurately quantify the SDR during maintenance procedures that may cause facility personnel to be in closer proximity to activated equipment. 
This type of analysis requires neutron and photon transport calculations coupled by activation analysis to determine the SDR. Due to its ability to obtain highly accurate results, the Monte Carlo (MC) method is often used for both transport operations, but the computational expense of obtaining results with low error in systems with heavy shielding can be prohibitive. However, variance reduction (VR) methods can be used to optimize the computational efficiency by artificially increasing the simulation of events that will contribute to the quantity of interest.\n\nOne hybrid VR technique used to optimize the initial transport step of a multi-step process is known as the Multi-Step Consistent Adjoint Driven Importance Sampling (MS-CADIS) method. The basis of MS-CADIS is that the importance function used in each step of the problem must represent the importance of the particles to the final objective function. As the spatial configuration of the materials changes, the probability that they will contribute to the objective function also changes. In the specific case of SDR analysis, the importance function for the neutron transport step must capture the probability of materials to become activated and subsequently emit photons that will make a significant contribution to the SDR. The Groupwise Transmutation (GT)-CADIS method is an implementation of MS-CADIS that optimizes the neutron transport step of SDR calculations. GT-CADIS generates an adjoint neutron source based on certain assumptions and approximations about the transmutation network. This source is used for adjoint transport and the resulting flux is used to generate the biasing parameters to optimize the forward neutron transport.\n\nFor systems that undergo movement, a new hybrid deterministic/MC VR technique, the Time-integrated (T)GT-CADIS method, that adapts GT-CADIS for dynamic systems by calculating a time-integrated adjoint neutron source was developed. 
This work demonstrates the tools and workflows necessary to efficiently calculate quantities of interest resulting from coupled, multi-physics processes in dynamic systems.},\n\tschool = {University of Wisconsin-Madison},\n\tauthor = {D'Angelo, Chelsea},\n\tmonth = may,\n\tyear = {2019},\n}\n", + "bibtex": "\n@phdthesis{dangelo_variance_2019,\n\taddress = {Madison, WI},\n\ttype = {{PhD} {Nuclear} {Engineering} and {Engineering} {Physics}},\n\ttitle = {Variance {Reduction} for {Multi}-physics {Analysis} of {Moving} {Systems}},\n\turl = {https://digital.library.wisc.edu/1711.dl/AE5FFYCFQ7WCO8T},\n\tabstract = {The quantification of the shutdown dose rate (SDR) caused by photons emitted by activated structural materials is an important and necessary step of the design process of fusion energy systems (FES). FES are purposefully designed with modular components that can be moved out of a facility after shutdown for maintenance. It is particularly important to accurately quantify the SDR during maintenance procedures that may cause facility personnel to be in closer proximity to activated equipment. This type of analysis requires neutron and photon transport calculations coupled by activation analysis to determine the SDR. Due to its ability to obtain highly accurate results, the Monte Carlo (MC) method is often used for both transport operations, but the computational expense of obtaining results with low error in systems with heavy shielding can be prohibitive. However, variance reduction (VR) methods can be used to optimize the computational efficiency by artificially increasing the simulation of events that will contribute to the quantity of interest.\n\nOne hybrid VR technique used to optimize the initial transport step of a multi-step process is known as the Multi-Step Consistent Adjoint Driven Importance Sampling (MS-CADIS) method. 
The basis of MS-CADIS is that the importance function used in each step of the problem must represent the importance of the particles to the final objective function. As the spatial configuration of the materials changes, the probability that they will contribute to the objective function also changes. In the specific case of SDR analysis, the importance function for the neutron transport step must capture the probability of materials to become activated and subsequently emit photons that will make a significant contribution to the SDR. The Groupwise Transmutation (GT)-CADIS method is an implementation of MS-CADIS that optimizes the neutron transport step of SDR calculations. GT-CADIS generates an adjoint neutron source based on certain assumptions and approximations about the transmutation network. This source is used for adjoint transport and the resulting flux is used to generate the biasing parameters to optimize the forward neutron transport.\n\nFor systems that undergo movement, a new hybrid deterministic/MC VR technique, the Time-integrated (T)GT-CADIS method, that adapts GT-CADIS for dynamic systems by calculating a time-integrated adjoint neutron source was developed. This work demonstrates the tools and workflows necessary to efficiently calculate quantities of interest resulting from coupled, multi-physics processes in dynamic systems.},\n\tlanguage = {English},\n\tschool = {University of Wisconsin-Madison},\n\tauthor = {D'Angelo, Chelsea},\n\tmonth = may,\n\tyear = {2019},\n}\n", "data": { "key": "QECSDSE9", - "version": 27074, + "version": 30658, "itemType": "thesis", "title": "Variance Reduction for Multi-physics Analysis of Moving Systems", "creators": [ @@ -533,12 +703,12 @@ "abstractNote": "The quantification of the shutdown dose rate (SDR) caused by photons emitted by activated structural materials is an important and necessary step of the design process of fusion energy systems (FES). 
FES are purposefully designed with modular components that can be moved out of a facility after shutdown for maintenance. It is particularly important to accurately quantify the SDR during maintenance procedures that may cause facility personnel to be in closer proximity to activated equipment. This type of analysis requires neutron and photon transport calculations coupled by activation analysis to determine the SDR. Due to its ability to obtain highly accurate results, the Monte Carlo (MC) method is often used for both transport operations, but the computational expense of obtaining results with low error in systems with heavy shielding can be prohibitive. However, variance reduction (VR) methods can be used to optimize the computational efficiency by artificially increasing the simulation of events that will contribute to the quantity of interest.\n\nOne hybrid VR technique used to optimize the initial transport step of a multi-step process is known as the Multi-Step Consistent Adjoint Driven Importance Sampling (MS-CADIS) method. The basis of MS-CADIS is that the importance function used in each step of the problem must represent the importance of the particles to the final objective function. As the spatial configuration of the materials changes, the probability that they will contribute to the objective function also changes. In the specific case of SDR analysis, the importance function for the neutron transport step must capture the probability of materials to become activated and subsequently emit photons that will make a significant contribution to the SDR. The Groupwise Transmutation (GT)-CADIS method is an implementation of MS-CADIS that optimizes the neutron transport step of SDR calculations. GT-CADIS generates an adjoint neutron source based on certain assumptions and approximations about the transmutation network. 
This source is used for adjoint transport and the resulting flux is used to generate the biasing parameters to optimize the forward neutron transport.\n\nFor systems that undergo movement, a new hybrid deterministic/MC VR technique, the Time-integrated (T)GT-CADIS method, that adapts GT-CADIS for dynamic systems by calculating a time-integrated adjoint neutron source was developed. This work demonstrates the tools and workflows necessary to efficiently calculate quantities of interest resulting from coupled, multi-physics processes in dynamic systems.", "thesisType": "PhD Nuclear Engineering and Engineering Physics", "university": "University of Wisconsin-Madison", - "place": "", + "place": "Madison, WI", "date": "05/16/2019", - "numPages": "", - "language": "", + "numPages": "109", + "language": "English", "shortTitle": "", - "url": "https://depot.library.wisc.edu/repository/fedora/1711.dl:KT62IHINMM6JZ9A/datastreams/REF/content", + "url": "https://digital.library.wisc.edu/1711.dl/AE5FFYCFQ7WCO8T", "accessDate": "", "archive": "", "archiveLocation": "", @@ -553,12 +723,12 @@ ], "relations": {}, "dateAdded": "2019-08-12T12:15:40Z", - "dateModified": "2022-10-04T01:48:47Z" + "dateModified": "2024-09-08T16:48:00Z" } }, { "key": "DX9W8A98", - "version": 27075, + "version": 30661, "library": { "type": "group", "id": 10058, @@ -613,10 +783,10 @@ "parsedDate": "2019-03-15", "numChildren": 1 }, - "bibtex": "\n@phdthesis{harb_propagation_2019,\n\ttype = {{PhD} {Nuclear} {Engineering} and {Engineering} {Physics}},\n\ttitle = {Propagation of {Statistical} {Uncertainty} in {Mesh}-{Based} {Shutdown} {Dose} {Rate} {Calculations}},\n\turl = {https://depot.library.wisc.edu/repository/fedora/1711.dl:6MDCBYJEASBZ78Z/datastreams/REF/content},\n\tabstract = {In fusion energy systems (FES), high energy neutrons are emitted from the\nplasma source - due to the D-T fusion reaction - enabling them to penetrate deep in the materials surrounding the core. 
Energy is then deposited along the path of the neutrons due to interactions with nuclides, resulting in - besides nuclear heating - two main effects; radiation damage and transmutation. Radiation damage causes changes in the macroscopic properties of the materials due to microscopic changes that result from interactions of high energy neutrons with nuclides. Transmutation is caused by the absorption of neutrons by nuclides in the medium and almost always results in the production of radioactive nuclides. Such radioactive nuclides are of importance to FES design and operation as they persist after the shutdown of the facility due to their long half lives. Efforts are directed to quantify the shutdown dose rate (SDR) that results from gamma emitting nuclides produced by transmutation. Monte Carlo (MC) methods are favored over deterministic methods for the simulation of particles transport in FES due to complexity of the models and to reduce the uncertainties/errors of the predicted particle flux distributions due to approximations. The rigorous 2-step method (R2S) relies on dedicated activation calculations to predict the photon emission density distribution, and is widely used for SDR quantification. It involves a neutron transport step, activation analysis to obtain the photon emission density, and a photon transport step to calculate the SDR.\nIt is often the case that neutrons suffer attenuation in traversing the medium from the plasma source - due to interactions with nuclides - and that results in a steep gradient in the neutron flux. Variance reduction (VR) tools have been developed with the primary goal of pushing neutrons - simulated particles - to regions of the phase-space that are of importance for the quantities under consideration in order to reduce the uncertainty in the MC results. 
The recently developed Group- wise Transmutation - Consistent Adjoint Driven Importance Sampling (GT CADIS) method provides a capability to obtain the photon emission density distribution as a function of the energy dependent group-wise neutron flux distribution via linearization of the transmutation operator. Using the photon emission density it is possible to overcome previous difficulties of the error propagation in the R2S workflow. One primary concern with the R2S workflow is that only the contribution of the photon transport step is considered as a measure of the uncertainty of the calculated SDR, while the contribution from the neutron transport step remains undefined. Previous methods have tried to tackle this issue but there was always difficulty in obtaining the correlation of the neutron fluxes and that resulted in implementing either impractical approximations or just calculating the upper and lower bounds of the uncertainty of the SDR.\nIn this document, the R2S workflow has been investigated. First, issues related to the neutron transport step and the uncertainty of the photon emission density have been addressed. Second, a scheme was developed to propagate the statistical uncertainty of the neutron transport step to the SDR. Starting with the neutron transport step, a variation of the main R2S that aimed at increasing the resolution while reducing the computational expenses was found to introduce systematic errors that might undermine the gain in the computational cost. One of the difficulties in propagating the neutron flux uncertainty to the photon emission density is obtaining the correlation values between the neutron fluxes in different energy groups and mesh voxels. By utilizing the GT method, an approximation to the calculation of the correlation coefficients has been investigated building on the fact that using group-wise transmutation the correlation terms needed were greatly reduced. 
It was discovered that the correlation between the neutron fluxes in different energy groups is a function of the material composition. That facilitated obtaining the needed correlation matrix and quantifying the uncertainty of the photon emission density. A method to propagate the photon source uncertainty to the SDR by random sampling was developed and was demonstrated to be efficient on various types of numerical experiments as well as a production level problem.},\n\tlanguage = {English},\n\tschool = {University of Wisconsin-Madison},\n\tauthor = {Harb, Moataz S.},\n\tmonth = mar,\n\tyear = {2019},\n\tkeywords = {Correlation, Error Propagation, FESS-FNSF, R2S, SDR},\n}\n", + "bibtex": "\n@phdthesis{harb_propagation_2019,\n\taddress = {Madison, WI},\n\ttype = {{PhD} {Nuclear} {Engineering} and {Engineering} {Physics}},\n\ttitle = {Propagation of {Statistical} {Uncertainty} in {Mesh}-{Based} {Shutdown} {Dose} {Rate} {Calculations}},\n\turl = {https://digital.library.wisc.edu/1711.dl/4VEHRUF6HSYPO8Q},\n\tabstract = {In fusion energy systems (FES), high energy neutrons are emitted from the\nplasma source - due to the D-T fusion reaction - enabling them to penetrate deep in the materials surrounding the core. Energy is then deposited along the path of the neutrons due to interactions with nuclides, resulting in - besides nuclear heating - two main effects; radiation damage and transmutation. Radiation damage causes changes in the macroscopic properties of the materials due to microscopic changes that result from interactions of high energy neutrons with nuclides. Transmutation is caused by the absorption of neutrons by nuclides in the medium and almost always results in the production of radioactive nuclides. Such radioactive nuclides are of importance to FES design and operation as they persist after the shutdown of the facility due to their long half lives. 
Efforts are directed to quantify the shutdown dose rate (SDR) that results from gamma emitting nuclides produced by transmutation. Monte Carlo (MC) methods are favored over deterministic methods for the simulation of particles transport in FES due to complexity of the models and to reduce the uncertainties/errors of the predicted particle flux distributions due to approximations. The rigorous 2-step method (R2S) relies on dedicated activation calculations to predict the photon emission density distribution, and is widely used for SDR quantification. It involves a neutron transport step, activation analysis to obtain the photon emission density, and a photon transport step to calculate the SDR.\nIt is often the case that neutrons suffer attenuation in traversing the medium from the plasma source - due to interactions with nuclides - and that results in a steep gradient in the neutron flux. Variance reduction (VR) tools have been developed with the primary goal of pushing neutrons - simulated particles - to regions of the phase-space that are of importance for the quantities under consideration in order to reduce the uncertainty in the MC results. The recently developed Group- wise Transmutation - Consistent Adjoint Driven Importance Sampling (GT CADIS) method provides a capability to obtain the photon emission density distribution as a function of the energy dependent group-wise neutron flux distribution via linearization of the transmutation operator. Using the photon emission density it is possible to overcome previous difficulties of the error propagation in the R2S workflow. One primary concern with the R2S workflow is that only the contribution of the photon transport step is considered as a measure of the uncertainty of the calculated SDR, while the contribution from the neutron transport step remains undefined. 
Previous methods have tried to tackle this issue but there was always difficulty in obtaining the correlation of the neutron fluxes and that resulted in implementing either impractical approximations or just calculating the upper and lower bounds of the uncertainty of the SDR.\nIn this document, the R2S workflow has been investigated. First, issues related to the neutron transport step and the uncertainty of the photon emission density have been addressed. Second, a scheme was developed to propagate the statistical uncertainty of the neutron transport step to the SDR. Starting with the neutron transport step, a variation of the main R2S that aimed at increasing the resolution while reducing the computational expenses was found to introduce systematic errors that might undermine the gain in the computational cost. One of the difficulties in propagating the neutron flux uncertainty to the photon emission density is obtaining the correlation values between the neutron fluxes in different energy groups and mesh voxels. By utilizing the GT method, an approximation to the calculation of the correlation coefficients has been investigated building on the fact that using group-wise transmutation the correlation terms needed were greatly reduced. It was discovered that the correlation between the neutron fluxes in different energy groups is a function of the material composition. That facilitated obtaining the needed correlation matrix and quantifying the uncertainty of the photon emission density. 
A method to propagate the photon source uncertainty to the SDR by random sampling was developed and was demonstrated to be efficient on various types of numerical experiments as well as a production level problem.},\n\tlanguage = {English},\n\tschool = {University of Wisconsin-Madison},\n\tauthor = {Harb, Moataz S.},\n\tmonth = mar,\n\tyear = {2019},\n\tkeywords = {Correlation, Error Propagation, FESS-FNSF, R2S, SDR},\n}\n", "data": { "key": "DX9W8A98", - "version": 27075, + "version": 30661, "itemType": "thesis", "title": "Propagation of Statistical Uncertainty in Mesh-Based Shutdown Dose Rate Calculations", "creators": [ @@ -629,12 +799,12 @@ "abstractNote": "In fusion energy systems (FES), high energy neutrons are emitted from the\nplasma source - due to the D-T fusion reaction - enabling them to penetrate deep in the materials surrounding the core. Energy is then deposited along the path of the neutrons due to interactions with nuclides, resulting in - besides nuclear heating - two main effects; radiation damage and transmutation. Radiation damage causes changes in the macroscopic properties of the materials due to microscopic changes that result from interactions of high energy neutrons with nuclides. Transmutation is caused by the absorption of neutrons by nuclides in the medium and almost always results in the production of radioactive nuclides. Such radioactive nuclides are of importance to FES design and operation as they persist after the shutdown of the facility due to their long half lives. Efforts are directed to quantify the shutdown dose rate (SDR) that results from gamma emitting nuclides produced by transmutation. Monte Carlo (MC) methods are favored over deterministic methods for the simulation of particles transport in FES due to complexity of the models and to reduce the uncertainties/errors of the predicted particle flux distributions due to approximations. 
The rigorous 2-step method (R2S) relies on dedicated activation calculations to predict the photon emission density distribution, and is widely used for SDR quantification. It involves a neutron transport step, activation analysis to obtain the photon emission density, and a photon transport step to calculate the SDR.\nIt is often the case that neutrons suffer attenuation in traversing the medium from the plasma source - due to interactions with nuclides - and that results in a steep gradient in the neutron flux. Variance reduction (VR) tools have been developed with the primary goal of pushing neutrons - simulated particles - to regions of the phase-space that are of importance for the quantities under consideration in order to reduce the uncertainty in the MC results. The recently developed Group- wise Transmutation - Consistent Adjoint Driven Importance Sampling (GT CADIS) method provides a capability to obtain the photon emission density distribution as a function of the energy dependent group-wise neutron flux distribution via linearization of the transmutation operator. Using the photon emission density it is possible to overcome previous difficulties of the error propagation in the R2S workflow. One primary concern with the R2S workflow is that only the contribution of the photon transport step is considered as a measure of the uncertainty of the calculated SDR, while the contribution from the neutron transport step remains undefined. Previous methods have tried to tackle this issue but there was always difficulty in obtaining the correlation of the neutron fluxes and that resulted in implementing either impractical approximations or just calculating the upper and lower bounds of the uncertainty of the SDR.\nIn this document, the R2S workflow has been investigated. First, issues related to the neutron transport step and the uncertainty of the photon emission density have been addressed. 
Second, a scheme was developed to propagate the statistical uncertainty of the neutron transport step to the SDR. Starting with the neutron transport step, a variation of the main R2S that aimed at increasing the resolution while reducing the computational expenses was found to introduce systematic errors that might undermine the gain in the computational cost. One of the difficulties in propagating the neutron flux uncertainty to the photon emission density is obtaining the correlation values between the neutron fluxes in different energy groups and mesh voxels. By utilizing the GT method, an approximation to the calculation of the correlation coefficients has been investigated building on the fact that using group-wise transmutation the correlation terms needed were greatly reduced. It was discovered that the correlation between the neutron fluxes in different energy groups is a function of the material composition. That facilitated obtaining the needed correlation matrix and quantifying the uncertainty of the photon emission density. 
A method to propagate the photon source uncertainty to the SDR by random sampling was developed and was demonstrated to be efficient on various types of numerical experiments as well as a production level problem.", "thesisType": "PhD Nuclear Engineering and Engineering Physics", "university": "University of Wisconsin-Madison", - "place": "", + "place": "Madison, WI", "date": "03/15/2019", - "numPages": "", + "numPages": "155", "language": "English", "shortTitle": "", - "url": "https://depot.library.wisc.edu/repository/fedora/1711.dl:6MDCBYJEASBZ78Z/datastreams/REF/content", + "url": "https://digital.library.wisc.edu/1711.dl/4VEHRUF6HSYPO8Q", "accessDate": "", "archive": "", "archiveLocation": "", @@ -665,7 +835,7 @@ ], "relations": {}, "dateAdded": "2019-04-25T12:14:36Z", - "dateModified": "2022-10-04T01:56:14Z" + "dateModified": "2024-09-08T16:48:32Z" } }, { @@ -765,7 +935,7 @@ }, { "key": "KV8BMAXT", - "version": 27077, + "version": 30663, "library": { "type": "group", "id": 10058, @@ -820,10 +990,10 @@ "parsedDate": "2018-06-08", "numChildren": 2 }, - "bibtex": "\n@phdthesis{shriwise_geometry_2018,\n\taddress = {Madison, WI},\n\ttype = {{PhD} {Nuclear} {Engineering} and {Engineering} {Physics}},\n\ttitle = {Geometry {Query} {Optimizations} in {CAD}-{Based} {Tessellations} for {Monte} {Carlo} {Radiation} {Transport}},\n\turl = {https://depot.library.wisc.edu/repository/fedora/1711.dl:NNODVI4XHMSOV8F/datastreams/REF/content},\n\tabstract = {The performance of direct CAD-based Monte Carlo Radiation Transport (MCRT) relies heavily on its ability to return geometric queries robustly via ray tracing methods. Current applications of ray tracing for MCRT are robust given that certain requirements are met [48], but cause simulations to run much longer than native code geometry representations. 
This work explores alternate geometry query methods aimed at reducing the complexity of these operations as well as algorithmic optimization by adapting recent developments in CPU ray tracing for use in engineering analysis. A preconditioning scheme is presented aimed at avoiding unnecessary ray queries for volumes with high collision densities. A model is also developed to inform the application of the preconditioning data structure based on a post facto analysis. Next, a specialized ray tracing kernel for MCRT is presented. As new ray tracing kernels are developed for real-time, photo-realistic rendering, algorithmic approaches have appeared which are demonstrated to be advantageous when applied in radiation transport. In particular, the application of data parallelism in ray tracing for Monte Carlo is demonstrated - resulting in significant performance improvements. Finally, model features resulting in systematic performance degradation commonly found in CAD models for MCRT are studied. Methods are proposed and demonstrated to improve performance of ray tracing kernels in models with these features. The combination of this work is shown to provide improvement factors ranging from 1.1 to 9.54 in simulation run time without loss of robustness for several production analysis models. 
The final impact of this work is the alleviation of concern for additional computational time in using CAD geometries for MCRT while maintaining the benefit of reduced human time and effort in model preparation and design.},\n\tlanguage = {English},\n\tschool = {University of Wisconsin - Madison},\n\tauthor = {Shriwise, Patrick},\n\tmonth = jun,\n\tyear = {2018},\n\tkeywords = {CAD, dagmc, mesh, monte carlo, performance, ray tracing},\n}\n", + "bibtex": "\n@phdthesis{shriwise_geometry_2018,\n\taddress = {Madison, WI},\n\ttype = {{PhD} {Nuclear} {Engineering} and {Engineering} {Physics}},\n\ttitle = {Geometry {Query} {Optimizations} in {CAD}-{Based} {Tessellations} for {Monte} {Carlo} {Radiation} {Transport}},\n\turl = {https://digital.library.wisc.edu/1711.dl/ADF7ZBX54SP4A8L},\n\tabstract = {The performance of direct CAD-based Monte Carlo Radiation Transport (MCRT) relies heavily on its ability to return geometric queries robustly via ray tracing methods. Current applications of ray tracing for MCRT are robust given that certain requirements are met [48], but cause simulations to run much longer than native code geometry representations. This work explores alternate geometry query methods aimed at reducing the complexity of these operations as well as algorithmic optimization by adapting recent developments in CPU ray tracing for use in engineering analysis. A preconditioning scheme is presented aimed at avoiding unnecessary ray queries for volumes with high collision densities. A model is also developed to inform the application of the preconditioning data structure based on a post facto analysis. Next, a specialized ray tracing kernel for MCRT is presented. As new ray tracing kernels are developed for real-time, photo-realistic rendering, algorithmic approaches have appeared which are demonstrated to be advantageous when applied in radiation transport. 
In particular, the application of data parallelism in ray tracing for Monte Carlo is demonstrated - resulting in significant performance improvements. Finally, model features resulting in systematic performance degradation commonly found in CAD models for MCRT are studied. Methods are proposed and demonstrated to improve performance of ray tracing kernels in models with these features. The combination of this work is shown to provide improvement factors ranging from 1.1 to 9.54 in simulation run time without loss of robustness for several production analysis models. The final impact of this work is the alleviation of concern for additional computational time in using CAD geometries for MCRT while maintaining the benefit of reduced human time and effort in model preparation and design.},\n\tlanguage = {English},\n\tschool = {University of Wisconsin-Madison},\n\tauthor = {Shriwise, Patrick},\n\tmonth = jun,\n\tyear = {2018},\n\tkeywords = {CAD, dagmc, mesh, monte carlo, performance, ray tracing},\n}\n", "data": { "key": "KV8BMAXT", - "version": 27077, + "version": 30663, "itemType": "thesis", "title": "Geometry Query Optimizations in CAD-Based Tessellations for Monte Carlo Radiation Transport", "creators": [ @@ -835,13 +1005,13 @@ ], "abstractNote": "The performance of direct CAD-based Monte Carlo Radiation Transport (MCRT) relies heavily on its ability to return geometric queries robustly via ray tracing methods. Current applications of ray tracing for MCRT are robust given that certain requirements are met [48], but cause simulations to run much longer than native code geometry representations. This work explores alternate geometry query methods aimed at reducing the complexity of these operations as well as algorithmic optimization by adapting recent developments in CPU ray tracing for use in engineering analysis. A preconditioning scheme is presented aimed at avoiding unnecessary ray queries for volumes with high collision densities. 
A model is also developed to inform the application of the preconditioning data structure based on a post facto analysis. Next, a specialized ray tracing kernel for MCRT is presented. As new ray tracing kernels are developed for real-time, photo-realistic rendering, algorithmic approaches have appeared which are demonstrated to be advantageous when applied in radiation transport. In particular, the application of data parallelism in ray tracing for Monte Carlo is demonstrated - resulting in significant performance improvements. Finally, model features resulting in systematic performance degradation commonly found in CAD models for MCRT are studied. Methods are proposed and demonstrated to improve performance of ray tracing kernels in models with these features. The combination of this work is shown to provide improvement factors ranging from 1.1 to 9.54 in simulation run time without loss of robustness for several production analysis models. The final impact of this work is the alleviation of concern for additional computational time in using CAD geometries for MCRT while maintaining the benefit of reduced human time and effort in model preparation and design.", "thesisType": "PhD Nuclear Engineering and Engineering Physics", - "university": "University of Wisconsin - Madison", + "university": "University of Wisconsin-Madison", "place": "Madison, WI", "date": "June 8th, 2018", "numPages": "223", "language": "English", "shortTitle": "", - "url": "https://depot.library.wisc.edu/repository/fedora/1711.dl:NNODVI4XHMSOV8F/datastreams/REF/content", + "url": "https://digital.library.wisc.edu/1711.dl/ADF7ZBX54SP4A8L", "accessDate": "", "archive": "", "archiveLocation": "", @@ -875,12 +1045,12 @@ ], "relations": {}, "dateAdded": "2018-12-21T19:15:11Z", - "dateModified": "2022-10-04T01:58:18Z" + "dateModified": "2024-09-08T16:49:04Z" } }, { "key": "6VTPU3C5", - "version": 27078, + "version": 30784, "library": { "type": "group", "id": 10058, @@ -935,10 +1105,10 @@ 
"parsedDate": "2016-07-21", "numChildren": 2 }, - "bibtex": "\n@phdthesis{biondo_hybrid_2016,\n\taddress = {Madison, WI, United States},\n\ttype = {{PhD} {Nuclear} {Engineering} and {Engineering} {Physics}},\n\ttitle = {Hybrid {Monte} {Carlo}/{Deterministic} {Neutron} {Transport} for {Shutdown} {Dose} {Rate} {Analysis}},\n\turl = {https://depot.library.wisc.edu/repository/fedora/1711.dl:ITANHEGGRPM338Z/datastreams/REF/content},\n\tabstract = {In fusion energy systems (FES) neutrons are born from a burning plasma and subsequently activate surrounding system components. The photon dose rate after shutdown from the resultant radionuclides must be quantified for maintenance planning. This shutdown dose rate (SDR) is calculated by coupling neutron transport, activation analysis, and photon transport. The size, complexity, and attenuating configuration of FES motivate the use of hybrid Monte Carlo (MC)/deterministic neutron transport. The Multi-Step Consistent Adjoint Driven Importance Sampling (MS-CADIS) method can be used to optimize MC neutron transport for this purpose. This requires the formulation of an adjoint neutron source that approximates the transmutation process. In this work one such formulation is introduced which is valid when a specific set of transmutation criteria are met, referred to as the Single Neutron Interaction and Low Burnup (SNILB) criteria. These criteria are quantitatively evaluated for typical FES scenarios and are shown to be met within a reasonable margin. Groupwise Transmutation (GT)-CADIS, proposed here, is an implementation of MS-CADIS that calculates this adjoint neutron source using a series of irradiation calculations. For a simple SDR problem, GT-CADIS provides speedups of 200 ± 100 relative to global variance reduction with the Forward Weighted (FW)-CADIS method and 90,000 ± 50,000 relative to analog. 
When the SNILB criteria are egregiously violated, GT-CADIS modifications are proposed and are shown to provide significant performance improvements. Finally, GT-CADIS is applied to a production-level problem involving a Spherical Tokamak Fusion Nuclear Science Facility (ST-FNSF) device. This work shows that GT-CADIS is broadly applicable to FES scenarios and will significantly reduce the computational resources necessary for SDR analysis.},\n\tlanguage = {English},\n\tschool = {University of Wisconsin-Madison},\n\tauthor = {Biondo, Elliott Dean},\n\tmonth = jul,\n\tyear = {2016},\n}\n", + "bibtex": "\n@phdthesis{biondo_hybrid_2016,\n\taddress = {Madison, WI, United States},\n\ttype = {{PhD} {Nuclear} {Engineering} and {Engineering} {Physics}},\n\ttitle = {Hybrid {Monte} {Carlo}/{Deterministic} {Neutron} {Transport} for {Shutdown} {Dose} {Rate} {Analysis}},\n\turl = {https://digital.library.wisc.edu/1711.dl/5DMMVSMDW5LUU9A},\n\tabstract = {In fusion energy systems (FES) neutrons are born from a burning plasma and subsequently activate surrounding system components. The photon dose rate after shutdown from the resultant radionuclides must be quantified for maintenance planning. This shutdown dose rate (SDR) is calculated by coupling neutron transport, activation analysis, and photon transport. The size, complexity, and attenuating configuration of FES motivate the use of hybrid Monte Carlo (MC)/deterministic neutron transport. The Multi-Step Consistent Adjoint Driven Importance Sampling (MS-CADIS) method can be used to optimize MC neutron transport for this purpose. This requires the formulation of an adjoint neutron source that approximates the transmutation process. In this work one such formulation is introduced which is valid when a specific set of transmutation criteria are met, referred to as the Single Neutron Interaction and Low Burnup (SNILB) criteria. 
These criteria are quantitatively evaluated for typical FES scenarios and are shown to be met within a reasonable margin. Groupwise Transmutation (GT)-CADIS, proposed here, is an implementation of MS-CADIS that calculates this adjoint neutron source using a series of irradiation calculations. For a simple SDR problem, GT-CADIS provides speedups of 200 ± 100 relative to global variance reduction with the Forward Weighted (FW)-CADIS method and 90,000 ± 50,000 relative to analog. When the SNILB criteria are egregiously violated, GT-CADIS modifications are proposed and are shown to provide significant performance improvements. Finally, GT-CADIS is applied to a production-level problem involving a Spherical Tokamak Fusion Nuclear Science Facility (ST-FNSF) device. This work shows that GT-CADIS is broadly applicable to FES scenarios and will significantly reduce the computational resources necessary for SDR analysis.},\n\tlanguage = {English},\n\tschool = {University of Wisconsin-Madison},\n\tauthor = {Biondo, Elliott Dean},\n\tmonth = jul,\n\tyear = {2016},\n\tkeywords = {CNERG:HK20 Final Report, product},\n}\n", "data": { "key": "6VTPU3C5", - "version": 27078, + "version": 30784, "itemType": "thesis", "title": "Hybrid Monte Carlo/Deterministic Neutron Transport for Shutdown Dose Rate Analysis", "creators": [ @@ -956,30 +1126,34 @@ "numPages": "179", "language": "English", "shortTitle": "", - "url": "https://depot.library.wisc.edu/repository/fedora/1711.dl:ITANHEGGRPM338Z/datastreams/REF/content", + "url": "https://digital.library.wisc.edu/1711.dl/5DMMVSMDW5LUU9A", "accessDate": "", - "archive": "ProQuest Dissertations and Theses", - "archiveLocation": "10145410", + "archive": "", + "archiveLocation": "", "libraryCatalog": "", "callNumber": "", "rights": "", "extra": "", - "tags": [], + "tags": [ + { + "tag": "CNERG:HK20 Final Report" + }, + { + "tag": "product" + } + ], "collections": [ "6259B6TV", - "4MDZ29N8", - "H442QZRN", - "CMA2SK5V", "34I86HPD" ], "relations": 
{}, "dateAdded": "2016-08-20T14:20:24Z", - "dateModified": "2022-10-04T01:58:50Z" + "dateModified": "2024-09-08T17:38:42Z" } }, { "key": "38F2U7DS", - "version": 28781, + "version": 30665, "library": { "type": "group", "id": 10058, @@ -1019,24 +1193,13 @@ } } }, - "lastModifiedByUser": { - "id": 2259868, - "username": "micah.gale", - "name": "", - "links": { - "alternate": { - "href": "https://www.zotero.org/micah.gale", - "type": "text/html" - } - } - }, "creatorSummary": "Carlsen", "numChildren": 1 }, - "bibtex": "\n@phdthesis{carlsen_advanced_2016,\n\taddress = {Madison, WI, United States},\n\ttype = {{PhD} {Nuclear} {Engineering} and {Engineering} {Physics}},\n\ttitle = {Advanced {Nuclear} {Fuel} {Cycle} {Transitions}: {Optimization}, {Modeling} {Choices}, and {Disruptions}},\n\turl = {https://search.library.wisc.edu/digital/ARXV7VRVTZ2BCW8I},\n\tabstract = {Nuclear fuel cycle analysis is a field focused on understanding and modeling the nuclear industry and ecosystem at a macroscopic level. To date, fuel cycle analysis has mostly involved hand-crafting details of fuel cycle scenarios for investigation. Many different tools have evolved over time to help address the need to investigate both the equilibrium properties of nuclear fuel cycles and the dynamics of transitions between them. There is great potential for computational resources to improve both the quality of answers and the size of questions that can be asked. Cyclus is one of the first nuclear fuel cycle simulators to strongly accommodate larger-scale analysis with its free availability, liberal open-source licensing, and first-class Linux support. Cyclus also provides features that uniquely enable investigating the effects of modeling choices and modeling fidelity within fuel cycle scenarios. This is made possible by the complementary nature of Cyclus’ dynamic resource exchange and plugin based architecture. 
This work is divided into three major pieces focusing on optimization, investigating effects of modeling choices, and dealing with uncertainty.\n\nEffective optimization techniques are developed for automatically determining desirable facility deployment schedules for fuel cycle scenarios with Cyclus. A novel method for mapping optimization variables to deployment schedules is developed. This method allows relationships between reactor types and power capacity constraints to be represented implicitly in the definition of the optimization variables. This not only enables optimizers without constraint support to be used, but it also prevents wasting computational resources searching through many infeasible deployment schedules. With the simplified constraint handling, optimization can be used to analyze larger problems in addition to providing better solutions generally. The developed methodology also enables the deployed power generation capacity over time and the deployment of non-reactor support facilities to be included as optimization variables.\n\nThere exist many fuel cycle simulators built with many different combinations of mod\n\nix eling choices and assumptions. This makes comparing results from them difficult. The flexibility of Cyclus makes it a rich playground for comparing the effects of such modeling choices in a consistent way. Effects such as reactor refueling cycle synchronization, inter-facility competition, on-hand inventory requirements, and others are compared in four fuel cycle scenarios each using combinations of fleet or individually modeled reactors with 1-month or 3-month long time steps. There are noticeable differences in results from the different cases. The largest differences are seen during periods of constrained fuel availability for reactors. 
Research into the effects of modeling choices such as these can help improve the quality and consistency of fuel cycle analysis codes in addition to increasing confidence in the utility of fuel cycle analysis generally.},\n\tlanguage = {English},\n\tschool = {University of Wisconsin-Madison},\n\tauthor = {Carlsen, Robert W.},\n\tmonth = mar,\n\tyear = {2016},\n}\n", + "bibtex": "\n@phdthesis{carlsen_advanced_2016,\n\taddress = {Madison, WI, United States},\n\ttype = {{PhD} {Nuclear} {Engineering} and {Engineering} {Physics}},\n\ttitle = {Advanced {Nuclear} {Fuel} {Cycle} {Transitions}: {Optimization}, {Modeling} {Choices}, and {Disruptions}},\n\turl = {https://digital.library.wisc.edu/1711.dl/RXV7VRVTZ2BCW8I},\n\tabstract = {Nuclear fuel cycle analysis is a field focused on understanding and modeling the nuclear industry and ecosystem at a macroscopic level. To date, fuel cycle analysis has mostly involved hand-crafting details of fuel cycle scenarios for investigation. Many different tools have evolved over time to help address the need to investigate both the equilibrium properties of nuclear fuel cycles and the dynamics of transitions between them. There is great potential for computational resources to improve both the quality of answers and the size of questions that can be asked. Cyclus is one of the first nuclear fuel cycle simulators to strongly accommodate larger-scale analysis with its free availability, liberal open-source licensing, and first-class Linux support. Cyclus also provides features that uniquely enable investigating the effects of modeling choices and modeling fidelity within fuel cycle scenarios. This is made possible by the complementary nature of Cyclus’ dynamic resource exchange and plugin based architecture. 
This work is divided into three major pieces focusing on optimization, investigating effects of modeling choices, and dealing with uncertainty.\n\nEffective optimization techniques are developed for automatically determining desirable facility deployment schedules for fuel cycle scenarios with Cyclus. A novel method for mapping optimization variables to deployment schedules is developed. This method allows relationships between reactor types and power capacity constraints to be represented implicitly in the definition of the optimization variables. This not only enables optimizers without constraint support to be used, but it also prevents wasting computational resources searching through many infeasible deployment schedules. With the simplified constraint handling, optimization can be used to analyze larger problems in addition to providing better solutions generally. The developed methodology also enables the deployed power generation capacity over time and the deployment of non-reactor support facilities to be included as optimization variables.\n\nThere exist many fuel cycle simulators built with many different combinations of mod\n\nix eling choices and assumptions. This makes comparing results from them difficult. The flexibility of Cyclus makes it a rich playground for comparing the effects of such modeling choices in a consistent way. Effects such as reactor refueling cycle synchronization, inter-facility competition, on-hand inventory requirements, and others are compared in four fuel cycle scenarios each using combinations of fleet or individually modeled reactors with 1-month or 3-month long time steps. There are noticeable differences in results from the different cases. The largest differences are seen during periods of constrained fuel availability for reactors. 
Research into the effects of modeling choices such as these can help improve the quality and consistency of fuel cycle analysis codes in addition to increasing confidence in the utility of fuel cycle analysis generally.},\n\tlanguage = {English},\n\tschool = {University of Wisconsin-Madison},\n\tauthor = {Carlsen, Robert W.},\n\tmonth = mar,\n\tyear = {2016},\n}\n", "data": { "key": "38F2U7DS", - "version": 28781, + "version": 30665, "itemType": "thesis", "title": "Advanced Nuclear Fuel Cycle Transitions: Optimization, Modeling Choices, and Disruptions", "creators": [ @@ -1054,7 +1217,7 @@ "numPages": "135", "language": "English", "shortTitle": "", - "url": "https://search.library.wisc.edu/digital/ARXV7VRVTZ2BCW8I", + "url": "https://digital.library.wisc.edu/1711.dl/RXV7VRVTZ2BCW8I", "accessDate": "", "archive": "", "archiveLocation": "", @@ -1071,12 +1234,12 @@ "dc:replaces": "http://zotero.org/groups/10058/items/KKSPE7A5" }, "dateAdded": "2016-06-08T19:08:08Z", - "dateModified": "2023-09-10T19:22:06Z" + "dateModified": "2024-09-08T16:50:06Z" } }, { "key": "SCJ8W3Z9", - "version": 27404, + "version": 30668, "library": { "type": "group", "id": 10058, @@ -1131,10 +1294,10 @@ "parsedDate": "2015-03", "numChildren": 1 }, - "bibtex": "\n@phdthesis{gidden_agent-based_2015,\n\taddress = {Madison, WI, United States},\n\ttype = {{PhD} {Nuclear} {Engineering} and {Engineering} {Physics}},\n\ttitle = {An {Agent}-{Based} {Modeling} {Framework} and {Application} for the {Generic} {Nuclear} {Fuel} {Cycle}},\n\turl = {https://depot.library.wisc.edu/repository/fedora/1711.dl:ZAPAY7G76EAKB9E/datastreams/REF/content},\n\tabstract = {Key components of a novel methodology and implementation of an agent-based, dynamic nuclear fuel\ncycle simulator,\nCyclus\n, are presented. The nuclear fuel cycle is a complex, physics-dependent supply\nchain. 
To date, existing dynamic simulators have not treated constrained fuel supply, time-dependent,\nisotopic-quality based demand, or fuel fungibility particularly well. Utilizing an agent-based methodology\nthat incorporates sophisticated graph theory and operations research techniques can overcome these\ndeficiencies. This work describes a simulation kernel and agents that interact with it, highlighting the\nDynamic Resource Exchange (DRE), the supply-demand framework at the heart of the kernel. The key\nagent-DRE interaction mechanisms are described, which enable complex entity interaction through the\nuse of physics and socio-economic models. The translation of an exchange instance to a variant of the\nMulticommodity Transportation Problem, which can be solved feasibly or optimally, follows. An extensive\ninvestigation of solution performance and fidelity is then presented. Finally, recommendations for future\nusers of\nCyclus\nand the DRE are provided.},\n\tschool = {University of Wisconsin-Madison},\n\tauthor = {Gidden, Matthew J.},\n\tmonth = mar,\n\tyear = {2015},\n}\n", + "bibtex": "\n@phdthesis{gidden_agent-based_2015,\n\taddress = {Madison, WI, United States},\n\ttype = {{PhD} {Nuclear} {Engineering} and {Engineering} {Physics}},\n\ttitle = {An {Agent}-{Based} {Modeling} {Framework} and {Application} for the {Generic} {Nuclear} {Fuel} {Cycle}},\n\turl = {https://digital.library.wisc.edu/1711.dl/NU3IIKST3DRBX8U},\n\tabstract = {Key components of a novel methodology and implementation of an agent-based, dynamic nuclear fuel\ncycle simulator,\nCyclus\n, are presented. The nuclear fuel cycle is a complex, physics-dependent supply\nchain. To date, existing dynamic simulators have not treated constrained fuel supply, time-dependent,\nisotopic-quality based demand, or fuel fungibility particularly well. Utilizing an agent-based methodology\nthat incorporates sophisticated graph theory and operations research techniques can overcome these\ndeficiencies. 
This work describes a simulation kernel and agents that interact with it, highlighting the\nDynamic Resource Exchange (DRE), the supply-demand framework at the heart of the kernel. The key\nagent-DRE interaction mechanisms are described, which enable complex entity interaction through the\nuse of physics and socio-economic models. The translation of an exchange instance to a variant of the\nMulticommodity Transportation Problem, which can be solved feasibly or optimally, follows. An extensive\ninvestigation of solution performance and fidelity is then presented. Finally, recommendations for future\nusers of\nCyclus\nand the DRE are provided.},\n\tlanguage = {English},\n\tschool = {University of Wisconsin-Madison},\n\tauthor = {Gidden, Matthew J.},\n\tmonth = mar,\n\tyear = {2015},\n}\n", "data": { "key": "SCJ8W3Z9", - "version": 27404, + "version": 30668, "itemType": "thesis", "title": "An Agent-Based Modeling Framework and Application for the Generic Nuclear Fuel Cycle", "creators": [ @@ -1150,9 +1313,9 @@ "place": "Madison, WI, United States", "date": "March 2015", "numPages": "186", - "language": "", + "language": "English", "shortTitle": "", - "url": "https://depot.library.wisc.edu/repository/fedora/1711.dl:ZAPAY7G76EAKB9E/datastreams/REF/content", + "url": "https://digital.library.wisc.edu/1711.dl/NU3IIKST3DRBX8U", "accessDate": "", "archive": "", "archiveLocation": "", @@ -1167,12 +1330,12 @@ ], "relations": {}, "dateAdded": "2015-04-19T21:14:21Z", - "dateModified": "2022-12-07T23:30:54Z" + "dateModified": "2024-09-08T16:50:28Z" } }, { "key": "M8VGJZXN", - "version": 27405, + "version": 30669, "library": { "type": "group", "id": 10058, @@ -1226,10 +1389,10 @@ "creatorSummary": "Dunn", "numChildren": 1 }, - "bibtex": "\n@phdthesis{dunn_monte_2014,\n\taddress = {Madison, WI, United States},\n\ttype = {{PhD} {Nuclear} {Engineering} and {Engineering} {Physics}},\n\ttitle = {Monte {Carlo} {Mesh} {Tallies} based on a {Kernel} {Density} {Estimator} 
{Approach}},\n\turl = {https://depot.library.wisc.edu/repository/fedora/1711.dl:OXDMBPODZJERF8A/datastreams/REF/content},\n\tabstract = {Kernel density estimators (KDE) are considered for use with the Monte Carlo transport method as an alternative to conventional methods for solving fixed-source problems on arbitrary 3D input meshes. Since conventional methods produce a piecewise constant approximation, their accuracy can suffer when using coarse meshes to approximate neutron flux distributions with strong gradients. Comparatively, KDE mesh tallies produce point estimates independently of the mesh structure, which means that their values will not change even if the mesh is refined.\n\nA new KDE integral-track estimator is introduced in this dissertation for use with mesh tallies. Two input parameters are needed, namely a bandwidth and kernel. The bandwidth is equivalent to choosing mesh cell size, whereas the kernel determines the weight of each contribution with respect to its distance from the calculation point being evaluated. The KDE integral-track estimator is shown to produce more accurate results than the original KDE track length estimator, with no performance penalty, and identical or comparable results to conventional methods. However, unlike conventional methods, KDE mesh tallies can use different bandwidths and kernels to improve accuracy without changing the input mesh.\n\nThis dissertation also explores the accuracy and efficiency of the KDE integral-track mesh tally in detail. Like other KDE applications, accuracy is highly dependent on the choice of bandwidth. This choice becomes even more important when approximating regions of the neutron flux distribution with high curvature, where changing the bandwidth is much more sensitive. Other factors that affect accuracy include properties of the kernel, and the boundary bias effect for calculation points near external geometrical boundaries. 
Numerous factors also affect efficiency, with the most significant being the concept of the neighborhood region. The neighborhood region determines how many calculation points are expected to add non-trivial contributions, which depends on node density, bandwidth, kernel, and properties of the track being tallied.\n\nThe KDE integral-track mesh tally is a promising alternative for solving fixed-source problems on arbitrary 3D input meshes. Producing results at specific points rather than cell-averaged values allows a more accurate representation of the neutron flux distribution to be obtained, especially when coarser meshes are used.},\n\tlanguage = {English},\n\tschool = {University of Wisconsin-Madison},\n\tauthor = {Dunn, K. L.},\n\tmonth = aug,\n\tyear = {2014},\n}\n", + "bibtex": "\n@phdthesis{dunn_monte_2014,\n\taddress = {Madison, WI, United States},\n\ttype = {{PhD} {Nuclear} {Engineering} and {Engineering} {Physics}},\n\ttitle = {Monte {Carlo} {Mesh} {Tallies} based on a {Kernel} {Density} {Estimator} {Approach}},\n\turl = {https://digital.library.wisc.edu/1711.dl/RNSOJ4VA35HT78P},\n\tabstract = {Kernel density estimators (KDE) are considered for use with the Monte Carlo transport method as an alternative to conventional methods for solving fixed-source problems on arbitrary 3D input meshes. Since conventional methods produce a piecewise constant approximation, their accuracy can suffer when using coarse meshes to approximate neutron flux distributions with strong gradients. Comparatively, KDE mesh tallies produce point estimates independently of the mesh structure, which means that their values will not change even if the mesh is refined.\n\nA new KDE integral-track estimator is introduced in this dissertation for use with mesh tallies. Two input parameters are needed, namely a bandwidth and kernel. 
The bandwidth is equivalent to choosing mesh cell size, whereas the kernel determines the weight of each contribution with respect to its distance from the calculation point being evaluated. The KDE integral-track estimator is shown to produce more accurate results than the original KDE track length estimator, with no performance penalty, and identical or comparable results to conventional methods. However, unlike conventional methods, KDE mesh tallies can use different bandwidths and kernels to improve accuracy without changing the input mesh.\n\nThis dissertation also explores the accuracy and efficiency of the KDE integral-track mesh tally in detail. Like other KDE applications, accuracy is highly dependent on the choice of bandwidth. This choice becomes even more important when approximating regions of the neutron flux distribution with high curvature, where changing the bandwidth is much more sensitive. Other factors that affect accuracy include properties of the kernel, and the boundary bias effect for calculation points near external geometrical boundaries. Numerous factors also affect efficiency, with the most significant being the concept of the neighborhood region. The neighborhood region determines how many calculation points are expected to add non-trivial contributions, which depends on node density, bandwidth, kernel, and properties of the track being tallied.\n\nThe KDE integral-track mesh tally is a promising alternative for solving fixed-source problems on arbitrary 3D input meshes. Producing results at specific points rather than cell-averaged values allows a more accurate representation of the neutron flux distribution to be obtained, especially when coarser meshes are used.},\n\tlanguage = {English},\n\tschool = {University of Wisconsin-Madison},\n\tauthor = {Dunn, K. 
L.},\n\tmonth = aug,\n\tyear = {2014},\n}\n", "data": { "key": "M8VGJZXN", - "version": 27405, + "version": 30669, "itemType": "thesis", "title": "Monte Carlo Mesh Tallies based on a Kernel Density Estimator Approach", "creators": [ @@ -1247,7 +1410,7 @@ "numPages": "201", "language": "English", "shortTitle": "", - "url": "https://depot.library.wisc.edu/repository/fedora/1711.dl:OXDMBPODZJERF8A/datastreams/REF/content", + "url": "https://digital.library.wisc.edu/1711.dl/RNSOJ4VA35HT78P", "accessDate": "", "archive": "", "archiveLocation": "", @@ -1262,12 +1425,12 @@ ], "relations": {}, "dateAdded": "2014-10-28T15:51:39Z", - "dateModified": "2022-12-08T20:07:39Z" + "dateModified": "2024-09-08T16:50:49Z" } }, { "key": "2ATC4MDN", - "version": 27083, + "version": 30670, "library": { "type": "group", "id": 10058, @@ -1311,10 +1474,10 @@ "parsedDate": "2013-08-16", "numChildren": 2 }, - "bibtex": "\n@phdthesis{huff_integrated_2013,\n\taddress = {Madison, WI, United States},\n\ttype = {{PhD} {Nuclear} {Engineering} and {Engineering} {Physics}},\n\ttitle = {An {Integrated} {Used} {Fuel} {Disposition} and {Generic} {Repository} {Model} for {Fuel} {Cycle} {Analysis}},\n\turl = {https://depot.library.wisc.edu/repository/fedora/1711.dl:Y2ZY2ZRN6GI5K8S/datastreams/REF/content},\n\tabstract = {As the United States and other nuclear nations consider alternative fuel cycles and waste disposal options simultaneously, an integrated fuel cycle and generic disposal system analysis tool grows increasingly necessary for informing spent nuclear fuel management policy. The long term performance characteristics of deep geologic disposal concepts are affected by heat and radionuclide release characteristics sensitive to disposal system choices as well as variable spent fuel compositions associated with alternative fuel cycles. 
Computational tools capable of simulating the dynamic, heterogeneous spent fuel isotopics resulting from alternative nuclear fuel cycles and fuel cycle transition scenarios are, however, lacking in disposal system modeling options. This work has resulted in Cyder , a generic repository software library appropriate for system analysis of potential future fuel cycle deployment scenarios. By emphasizing modularity and speed, Cyder is capable of representing the dominant physics of candidate geologic host media, repository designs, and engineering components. Robust and flexible integration with the Cyclus fuel cycle simulator enables this analysis in the context of fuel cycle options.},\n\tlanguage = {English},\n\tschool = {University of Wisconsin-Madison},\n\tauthor = {Huff, Kathryn D.},\n\tmonth = aug,\n\tyear = {2013},\n}\n", + "bibtex": "\n@phdthesis{huff_integrated_2013,\n\taddress = {Madison, WI, United States},\n\ttype = {{PhD} {Nuclear} {Engineering} and {Engineering} {Physics}},\n\ttitle = {An {Integrated} {Used} {Fuel} {Disposition} and {Generic} {Repository} {Model} for {Fuel} {Cycle} {Analysis}},\n\turl = {https://digital.library.wisc.edu/1711.dl/PPEWADM4OG5ST9B},\n\tabstract = {As the United States and other nuclear nations consider alternative fuel cycles and waste disposal options simultaneously, an integrated fuel cycle and generic disposal system analysis tool grows increasingly necessary for informing spent nuclear fuel management policy. The long term performance characteristics of deep geologic disposal concepts are affected by heat and radionuclide release characteristics sensitive to disposal system choices as well as variable spent fuel compositions associated with alternative fuel cycles. Computational tools capable of simulating the dynamic, heterogeneous spent fuel isotopics resulting from alternative nuclear fuel cycles and fuel cycle transition scenarios are, however, lacking in disposal system modeling options. 
This work has resulted in Cyder , a generic repository software library appropriate for system analysis of potential future fuel cycle deployment scenarios. By emphasizing modularity and speed, Cyder is capable of representing the dominant physics of candidate geologic host media, repository designs, and engineering components. Robust and flexible integration with the Cyclus fuel cycle simulator enables this analysis in the context of fuel cycle options.},\n\tlanguage = {English},\n\tschool = {University of Wisconsin-Madison},\n\tauthor = {Huff, Kathryn D.},\n\tmonth = aug,\n\tyear = {2013},\n}\n", "data": { "key": "2ATC4MDN", - "version": 27083, + "version": 30670, "itemType": "thesis", "title": "An Integrated Used Fuel Disposition and Generic Repository Model for Fuel Cycle Analysis", "creators": [ @@ -1332,7 +1495,7 @@ "numPages": "222", "language": "English", "shortTitle": "", - "url": "https://depot.library.wisc.edu/repository/fedora/1711.dl:Y2ZY2ZRN6GI5K8S/datastreams/REF/content", + "url": "https://digital.library.wisc.edu/1711.dl/PPEWADM4OG5ST9B", "accessDate": "", "archive": "", "archiveLocation": "", @@ -1347,7 +1510,7 @@ ], "relations": {}, "dateAdded": "2016-02-04T14:51:49Z", - "dateModified": "2022-10-04T02:02:55Z" + "dateModified": "2024-09-08T16:51:05Z" } }, { @@ -1437,7 +1600,7 @@ }, { "key": "GN6PK84Z", - "version": 26427, + "version": 30781, "library": { "type": "group", "id": 10058, @@ -1481,10 +1644,10 @@ "parsedDate": "2013-07", "numChildren": 2 }, - "bibtex": "\n@phdthesis{relson_improved_2013,\n\taddress = {Madison, WI, United States},\n\ttype = {{MS} {Nuclear} {Engineering} and {Engineering} {Physics}},\n\ttitle = {Improved {Methods} {For} {Sampling} {Mesh}-{Based} {Volumetric} {Sources} {In} {Monte} {Carlo} {Transport}},\n\tabstract = {This research focuses on developing mesh-based techniques for sampling distributed,\n volumetric sources in Monte Carlo particle transport codes, such as MCNP. 
This work\n culminated in several source sampling techniques being implemented within a 3-D neutron\n activation workflow.\n\nThe most significant development is the implementation of an efficient voxel sampling\n technique. Voxel sampling can be applied to source meshes with any number of mesh\n elements thanks to efficient sampling via the alias method, and meshing of non-source volumes\n can be avoided. Voxel sampling in turn enables straight-forward implementation of source\n biasing for variance reduction, and also the use of unstructured source meshes using\n tetrahedral mesh elements. The uniform sampling technique used in past work is effectively a\n biasing scheme, and thus can be implemented more efficiently with biased voxel sampling.\n\nFor this work, the source meshes are inherited from neutron mesh tallies. Cartesian\n structured meshes, which provide straight-forward compatibility with legacy tools can be\n sampled with either the voxel or uniform sampling methods. Alternately, using an unstructured\n mesh (via the unstructured mesh tally capabilities in DAG-MCNP) allows for better conforming\n meshes – particularly with geometries that do not align well with a structured mesh, or where\n the source region is spread out through a region of non-source materials, such as systems of\n pipes.\n\nThe set of source sampling techniques is useful as a toolkit for obtaining quality answers\n from a variety of scenarios. This thesis supplements methods development and\n implementation with experiments to identify and understand which sampling techniques\n should be used in different scenarios. The new sampling methods and workflows are shown to\n be in good agreement with results from older methods. 
While there remain several aspects of\n the new methods’ behavior to characterize, voxel sampling and its derivatives have fully\n replaced older sampling methods in neutron activation analysis work at UW-Madison.},\n\tlanguage = {English},\n\tschool = {University of Wisconsin-Madison},\n\tauthor = {Relson, Eric},\n\tmonth = jul,\n\tyear = {2013},\n}\n", + "bibtex": "\n@phdthesis{relson_improved_2013,\n\taddress = {Madison, WI, United States},\n\ttype = {{MS} {Nuclear} {Engineering} and {Engineering} {Physics}},\n\ttitle = {Improved {Methods} {For} {Sampling} {Mesh}-{Based} {Volumetric} {Sources} {In} {Monte} {Carlo} {Transport}},\n\tabstract = {This research focuses on developing mesh-based techniques for sampling distributed,\n volumetric sources in Monte Carlo particle transport codes, such as MCNP. This work\n culminated in several source sampling techniques being implemented within a 3-D neutron\n activation workflow.\n\nThe most significant development is the implementation of an efficient voxel sampling\n technique. Voxel sampling can be applied to source meshes with any number of mesh\n elements thanks to efficient sampling via the alias method, and meshing of non-source volumes\n can be avoided. Voxel sampling in turn enables straight-forward implementation of source\n biasing for variance reduction, and also the use of unstructured source meshes using\n tetrahedral mesh elements. The uniform sampling technique used in past work is effectively a\n biasing scheme, and thus can be implemented more efficiently with biased voxel sampling.\n\nFor this work, the source meshes are inherited from neutron mesh tallies. Cartesian\n structured meshes, which provide straight-forward compatibility with legacy tools can be\n sampled with either the voxel or uniform sampling methods. 
Alternately, using an unstructured\n mesh (via the unstructured mesh tally capabilities in DAG-MCNP) allows for better conforming\n meshes – particularly with geometries that do not align well with a structured mesh, or where\n the source region is spread out through a region of non-source materials, such as systems of\n pipes.\n\nThe set of source sampling techniques is useful as a toolkit for obtaining quality answers\n from a variety of scenarios. This thesis supplements methods development and\n implementation with experiments to identify and understand which sampling techniques\n should be used in different scenarios. The new sampling methods and workflows are shown to\n be in good agreement with results from older methods. While there remain several aspects of\n the new methods’ behavior to characterize, voxel sampling and its derivatives have fully\n replaced older sampling methods in neutron activation analysis work at UW-Madison.},\n\tlanguage = {English},\n\tschool = {University of Wisconsin-Madison},\n\tauthor = {Relson, Eric},\n\tmonth = jul,\n\tyear = {2013},\n\tkeywords = {CNERG:HK20 Final Report},\n}\n", "data": { "key": "GN6PK84Z", - "version": 26427, + "version": 30781, "itemType": "thesis", "title": "Improved Methods For Sampling Mesh-Based Volumetric Sources In Monte Carlo Transport", "creators": [ @@ -1510,20 +1673,23 @@ "callNumber": "", "rights": "", "extra": "", - "tags": [], + "tags": [ + { + "tag": "CNERG:HK20 Final Report" + } + ], "collections": [ "6259B6TV", - "4MDZ29N8", "Y4UI9B4X" ], "relations": {}, "dateAdded": "2013-11-10T16:12:41Z", - "dateModified": "2013-11-10T16:15:41Z" + "dateModified": "2024-09-08T17:38:42Z" } }, { "key": "4BD4NW6X", - "version": 26427, + "version": 30782, "library": { "type": "group", "id": 10058, @@ -1567,10 +1733,10 @@ "parsedDate": "2012-06", "numChildren": 2 }, - "bibtex": "\n@phdthesis{ibrahim_automatic_2012,\n\taddress = {Madison, WI, United States},\n\ttype = {{PhD} {Nuclear} {Engineering} and 
{Engineering} {Physics}},\n\ttitle = {Automatic {Mesh} {Adaptivity} for {Hybrid} {Monte} {Carlo}/{Deterministic} {Neutronics} {Modeling} of {Difficult} {Shielding} {Problems}},\n\turl = {http://depot.library.wisc.edu/repository/fedora/1711.dl:GFPDJ3G2URTCL9D/datastreams/REF/content},\n\tabstract = {Over the last decade, the role of neutronics modeling has been shifting from analysis of\neach component separately to high fidelity, full-scale analysis of the nuclear systems entire\ndomains. The high accuracy, associated with minimizing modeling approximations and including\nmore physical and geometric details, is now feasible because of advancements in computing\nhardware and development of efficient modeling methods. The hybrid Monte Carlo/deterministic\ntechniques, CADIS and FW-CADIS dramatically increase the efficiency of neutronics modeling,\nbut their use in the design of large and geometrically complex nuclear systems is restricted by the\navailability of computing resources for their preliminarily deterministic calculations and the\nlarge computer memory requirements of their final Monte Carlo calculations.\nTo reduce the computational time and memory requirements of the hybrid Monte\nCarlo/deterministic techniques while maintaining their efficiency improvements, three automatic\nmesh adaptivity algorithms were developed and added to the Oak Ridge National Laboratory\nAutomateD VAriaNce reducTion Generator (ADVANTG) code. First, a mixed-material\napproach, which we refer to as the macromaterial approach, enhances the fidelity of the\ndeterministic models without having to refine the mesh of the deterministic calculations. Second,\na deterministic mesh refinement algorithm improves the accuracy of structured mesh\ndeterministic calculations by capturing as much geometric detail as possible without exceeding\nthe total number of mesh elements that is usually determined by the availability of computing\nresources. 
Finally, a weight window coarsening algorithm decouples the weight window mesh\nfrom the mesh of the deterministic calculations to remove the memory constraint of the weight\nwindow map from the deterministic mesh resolution.\nii\nTo analyze the combined effect of the three algorithms developed in this thesis, they were\nused to calculate the prompt dose rate throughout the entire ITER experimental facility. This\ncalculation represents a very challenging shielding problem because of the immense size and\ncomplexity of the ITER structure and the presence of a two meter thick biological shield.\nCompared to a FW-CADIS calculation with the same storage size of the variance reduction\nparameters, the use of the three algorithms resulted in a 23.3\\% increase in the regions where the\ndose rate results are achieved in a 10 day Monte Carlo calculation and increased the efficiency of\nthe Monte Carlo simulation by a factor of 3.4. Because of this significant increase in the Monte\nCarlo efficiency which was not accompanied by an increase in the memory requirements, the use\nof the three algorithms in FW-CADIS simulations enabled the simulation of this difficult\nshielding problem on a regular computer cluster using parallel processing of Monte Carlo\ncalculations. 
The results of the parallel Monte Carlo calculation agreed at four points with a very\nfine mesh deterministic calculation that was performed on the super-computer, Jaguar.},\n\tlanguage = {English},\n\tschool = {University of Wisconsin-Madison},\n\tauthor = {Ibrahim, Ahmad},\n\tmonth = jun,\n\tyear = {2012},\n}\n", + "bibtex": "\n@phdthesis{ibrahim_automatic_2012,\n\taddress = {Madison, WI, United States},\n\ttype = {{PhD} {Nuclear} {Engineering} and {Engineering} {Physics}},\n\ttitle = {Automatic {Mesh} {Adaptivity} for {Hybrid} {Monte} {Carlo}/{Deterministic} {Neutronics} {Modeling} of {Difficult} {Shielding} {Problems}},\n\turl = {https://digital.library.wisc.edu/1711.dl/7KMFF4JZJEU4S87},\n\tabstract = {Over the last decade, the role of neutronics modeling has been shifting from analysis of\neach component separately to high fidelity, full-scale analysis of the nuclear systems entire\ndomains. The high accuracy, associated with minimizing modeling approximations and including\nmore physical and geometric details, is now feasible because of advancements in computing\nhardware and development of efficient modeling methods. The hybrid Monte Carlo/deterministic\ntechniques, CADIS and FW-CADIS dramatically increase the efficiency of neutronics modeling,\nbut their use in the design of large and geometrically complex nuclear systems is restricted by the\navailability of computing resources for their preliminarily deterministic calculations and the\nlarge computer memory requirements of their final Monte Carlo calculations.\nTo reduce the computational time and memory requirements of the hybrid Monte\nCarlo/deterministic techniques while maintaining their efficiency improvements, three automatic\nmesh adaptivity algorithms were developed and added to the Oak Ridge National Laboratory\nAutomateD VAriaNce reducTion Generator (ADVANTG) code. 
First, a mixed-material\napproach, which we refer to as the macromaterial approach, enhances the fidelity of the\ndeterministic models without having to refine the mesh of the deterministic calculations. Second,\na deterministic mesh refinement algorithm improves the accuracy of structured mesh\ndeterministic calculations by capturing as much geometric detail as possible without exceeding\nthe total number of mesh elements that is usually determined by the availability of computing\nresources. Finally, a weight window coarsening algorithm decouples the weight window mesh\nfrom the mesh of the deterministic calculations to remove the memory constraint of the weight\nwindow map from the deterministic mesh resolution.\nii\nTo analyze the combined effect of the three algorithms developed in this thesis, they were\nused to calculate the prompt dose rate throughout the entire ITER experimental facility. This\ncalculation represents a very challenging shielding problem because of the immense size and\ncomplexity of the ITER structure and the presence of a two meter thick biological shield.\nCompared to a FW-CADIS calculation with the same storage size of the variance reduction\nparameters, the use of the three algorithms resulted in a 23.3\\% increase in the regions where the\ndose rate results are achieved in a 10 day Monte Carlo calculation and increased the efficiency of\nthe Monte Carlo simulation by a factor of 3.4. Because of this significant increase in the Monte\nCarlo efficiency which was not accompanied by an increase in the memory requirements, the use\nof the three algorithms in FW-CADIS simulations enabled the simulation of this difficult\nshielding problem on a regular computer cluster using parallel processing of Monte Carlo\ncalculations. 
The results of the parallel Monte Carlo calculation agreed at four points with a very\nfine mesh deterministic calculation that was performed on the super-computer, Jaguar.},\n\tlanguage = {English},\n\tschool = {University of Wisconsin-Madison},\n\tauthor = {Ibrahim, Ahmad},\n\tmonth = jun,\n\tyear = {2012},\n\tkeywords = {CNERG:HK20 Final Report},\n}\n", "data": { "key": "4BD4NW6X", - "version": 26427, + "version": 30782, "itemType": "thesis", "title": "Automatic Mesh Adaptivity for Hybrid Monte Carlo/Deterministic Neutronics Modeling of Difficult Shielding Problems", "creators": [ @@ -1588,7 +1754,7 @@ "numPages": "178", "language": "English", "shortTitle": "", - "url": "http://depot.library.wisc.edu/repository/fedora/1711.dl:GFPDJ3G2URTCL9D/datastreams/REF/content", + "url": "https://digital.library.wisc.edu/1711.dl/7KMFF4JZJEU4S87", "accessDate": "", "archive": "", "archiveLocation": "", @@ -1596,21 +1762,23 @@ "callNumber": "", "rights": "", "extra": "", - "tags": [], + "tags": [ + { + "tag": "CNERG:HK20 Final Report" + } + ], "collections": [ "6259B6TV", - "4MDZ29N8", - "RI2DQ3B2", "34I86HPD" ], "relations": {}, "dateAdded": "2013-01-21T21:43:04Z", - "dateModified": "2014-08-28T13:35:27Z" + "dateModified": "2024-09-08T17:38:42Z" } }, { "key": "NED88QUT", - "version": 28775, + "version": 30680, "library": { "type": "group", "id": 10058, @@ -1650,25 +1818,14 @@ } } }, - "lastModifiedByUser": { - "id": 2259868, - "username": "micah.gale", - "name": "", - "links": { - "alternate": { - "href": "https://www.zotero.org/micah.gale", - "type": "text/html" - } - } - }, "creatorSummary": "Slaybaugh", "parsedDate": "2011-11", "numChildren": 2 }, - "bibtex": "\n@phdthesis{slaybaugh_acceleration_2011,\n\taddress = {Madison, WI},\n\ttype = {{PhD} {Nuclear} {Engineering} and {Engineering} {Physics}},\n\ttitle = {Acceleration {Methods} for {Massively} {Parallel} {Deterministic} {Transport}},\n\tabstract = {To enhance and improve the design of nuclear systems, high-fidelity 
neutron fluxes are required. Leadership-class machines provide platforms on which very large problems can be solved in a reasonable amount of time. Computing such fluxes accurately and efficiently requires numerical methods with good convergence properties and algorithms that can scale to hundreds of thousands of cores. Many 3-D deterministic transport codes are decomposable in space and angle only, limiting them to tens of thousands of cores. Most codes rely on methods such as Gauss Seidel for fixed source problems and power iteration wrapped around Gauss Seidel for eigenvalue problems, both of which can be slow to converge for challenging problems like those with highly scattering materials or high dominance ratios.\n\nThree methods have been added to the 3-D SN transport code Denovo that are designed to improve convergence and enable the full use of leadership-class computers. The first method is a multigroup Krylov solver that improves convergence when compared to Gauss Seidel and parallelizes the code in energy. Tests show that the multigroup Krylov solver can substantially outperform Gauss Seidel in challenging problems. The energy decomposition added by the solver allows Denovo to solve problems on hundreds of thousands of cores.\n\nThe second method is Rayleigh quotient iteration (RQI), an old method being applied in a new context. This eigenvalue solver finds the dominant eigenvalue in a mathematically optimal way, and theory indicates that RQI should converge in fewer iterations than the traditional power iteration. RQI creates an energy-block-dense system that would be difficult for Gauss Seidel to solve. The new Krylov solver treats this kind of system very efficiently and RQI would not be a good choice without it. However, RQI creates poorly conditioned systems such that the method is only useful in very simple problems. Preconditioning can alleviate this concern.\n\nThe final method is a multigrid in energy, physics-based preconditioner. 
Because the grids are in energy rather than space or angle, the preconditioner can easily and efficiently take advantage of the new energy decomposition. The new preconditioner was very effective at reducing multigroup iteration count for many types of problems. In some cases it also reduced eigenvalue iteration count. The application of the preconditioner allowed RQI to be successful for problems it could not solve otherwise. The preconditioner also scaled very well in energy, and was tested on up to 200,000 cores using a full-facility pressurized water reactor.\n\nThe three methods added to Denovo accomplish the goals of this work. They converge in fewer iterations than traditional methods and enable the use of hundreds of thousands of cores. Each method can be used individually, with the multigroup Krylov solver and multigrid-in-energy pre-conditioner being particularly successful on their own. For “grand challenge” eigenvalue problems, though, the largest benefit comes from using these methods in concert.},\n\tlanguage = {English},\n\tschool = {University of Wisconsin-Madison},\n\tauthor = {Slaybaugh, Rachel N.},\n\tmonth = nov,\n\tyear = {2011},\n\tkeywords = {Prelim},\n}\n", + "bibtex": "\n@phdthesis{slaybaugh_acceleration_2011,\n\taddress = {Madison, WI},\n\ttype = {{PhD} {Nuclear} {Engineering} and {Engineering} {Physics}},\n\ttitle = {Acceleration {Methods} for {Massively} {Parallel} {Deterministic} {Transport}},\n\turl = {https://www.proquest.com/pqdtglobal/docview/929133823/abstract/535884C168054098PQ/3?sourcetype=Dissertations%20&%20Theses},\n\tabstract = {To enhance and improve the design of nuclear systems, high-fidelity neutron fluxes are required. Leadership-class machines provide platforms on which very large problems can be solved in a reasonable amount of time. Computing such fluxes accurately and efficiently requires numerical methods with good convergence properties and algorithms that can scale to hundreds of thousands of cores. 
Many 3-D deterministic transport codes are decomposable in space and angle only, limiting them to tens of thousands of cores. Most codes rely on methods such as Gauss Seidel for fixed source problems and power iteration wrapped around Gauss Seidel for eigenvalue problems, both of which can be slow to converge for challenging problems like those with highly scattering materials or high dominance ratios.\n\nThree methods have been added to the 3-D SN transport code Denovo that are designed to improve convergence and enable the full use of leadership-class computers. The first method is a multigroup Krylov solver that improves convergence when compared to Gauss Seidel and parallelizes the code in energy. Tests show that the multigroup Krylov solver can substantially outperform Gauss Seidel in challenging problems. The energy decomposition added by the solver allows Denovo to solve problems on hundreds of thousands of cores.\n\nThe second method is Rayleigh quotient iteration (RQI), an old method being applied in a new context. This eigenvalue solver finds the dominant eigenvalue in a mathematically optimal way, and theory indicates that RQI should converge in fewer iterations than the traditional power iteration. RQI creates an energy-block-dense system that would be difficult for Gauss Seidel to solve. The new Krylov solver treats this kind of system very efficiently and RQI would not be a good choice without it. However, RQI creates poorly conditioned systems such that the method is only useful in very simple problems. Preconditioning can alleviate this concern.\n\nThe final method is a multigrid in energy, physics-based preconditioner. Because the grids are in energy rather than space or angle, the preconditioner can easily and efficiently take advantage of the new energy decomposition. The new preconditioner was very effective at reducing multigroup iteration count for many types of problems. In some cases it also reduced eigenvalue iteration count. 
The application of the preconditioner allowed RQI to be successful for problems it could not solve otherwise. The preconditioner also scaled very well in energy, and was tested on up to 200,000 cores using a full-facility pressurized water reactor.\n\nThe three methods added to Denovo accomplish the goals of this work. They converge in fewer iterations than traditional methods and enable the use of hundreds of thousands of cores. Each method can be used individually, with the multigroup Krylov solver and multigrid-in-energy pre-conditioner being particularly successful on their own. For “grand challenge” eigenvalue problems, though, the largest benefit comes from using these methods in concert.},\n\tlanguage = {English},\n\tschool = {University of Wisconsin-Madison},\n\tauthor = {Slaybaugh, Rachel N.},\n\tmonth = nov,\n\tyear = {2011},\n\tkeywords = {Prelim},\n}\n", "data": { "key": "NED88QUT", - "version": 28775, + "version": 30680, "itemType": "thesis", "title": "Acceleration Methods for Massively Parallel Deterministic Transport", "creators": [ @@ -1686,7 +1843,7 @@ "numPages": "166", "language": "English", "shortTitle": "", - "url": "", + "url": "https://www.proquest.com/pqdtglobal/docview/929133823/abstract/535884C168054098PQ/3?sourcetype=Dissertations%20&%20Theses", "accessDate": "", "archive": "", "archiveLocation": "", @@ -1708,12 +1865,12 @@ "dc:replaces": "http://zotero.org/groups/10058/items/CUH5RRBU" }, "dateAdded": "2013-03-21T02:38:03Z", - "dateModified": "2023-09-10T19:09:39Z" + "dateModified": "2024-09-08T16:56:51Z" } }, { "key": "7CCN265G", - "version": 26427, + "version": 30781, "library": { "type": "group", "id": 10058, @@ -1757,10 +1914,10 @@ "parsedDate": "2011-08", "numChildren": 3 }, - "bibtex": "\n@phdthesis{snouffer_validation_2011,\n\taddress = {Madison, WI, United States},\n\ttype = {{MS} {Nuclear} {Engineering} and {Engineering} {Physics}},\n\ttitle = {Validation and {Verification} of {Direct} {Accelerated} {Geometry} {Monte} 
{Carlo}},\n\tabstract = {As both Monte Carlo radiation transport codes and 3D CAD modeling become more widely used, there has been an increasing number of efforts to combine these tools. One such effort is the Direct Accelerated Geometry Monte Carlo (DAGMC) software package being developed at the University of Wisconsin-Madison . DAGMC performs the particle tracking needed for\nMonte Carlo radiation transport code directly on CAD geometries. DAGMC has been in development for a number of years and is in need of validation and verification in order to build user confidence in DAGMC's reliability and accuracy.\n\nThis work performs extensive testing of DAGMC implemented with the radiation transport code Monte Carlo N-Particle 5 (DAG-MCNP5). Four tests suites have been compiled for DAG-MCNP5 to ensure the accuracy of the code now and for future developers. These test suites are based largely on the test suites for MCNP5 and include: a suite of 80 regression tests, a suite of 75 verification tests, a suite of 30 validation criticality tests, and a suite of 19 validation shielding tests. These tests encompass a wide range of geometries, materials, and physics to test almost all of the features of DAG-MCNP5. The results of these test were compared to both analytical and experimental results, where appropriate, and MCNP5 results. A faceting tolerance study\nwas also performed for many of these test. It was found that a faceting tolerance of not large than 10-4 cm produces statistically similar results to MCNP5 on a consistent basis for all problem types. 
It is concluded that DAG-MCNP5 performs as accurately as MCNP5 for these test problems, and that DAG-MCNP5 can be considered a reliable neutronics code.},\n\tlanguage = {English},\n\tschool = {University of Wisconsin-Madison},\n\tauthor = {Snouffer, Patrick},\n\tmonth = aug,\n\tyear = {2011},\n}\n", + "bibtex": "\n@phdthesis{snouffer_validation_2011,\n\taddress = {Madison, WI, United States},\n\ttype = {{MS} {Nuclear} {Engineering} and {Engineering} {Physics}},\n\ttitle = {Validation and {Verification} of {Direct} {Accelerated} {Geometry} {Monte} {Carlo}},\n\tabstract = {As both Monte Carlo radiation transport codes and 3D CAD modeling become more widely used, there has been an increasing number of efforts to combine these tools. One such effort is the Direct Accelerated Geometry Monte Carlo (DAGMC) software package being developed at the University of Wisconsin-Madison . DAGMC performs the particle tracking needed for\nMonte Carlo radiation transport code directly on CAD geometries. DAGMC has been in development for a number of years and is in need of validation and verification in order to build user confidence in DAGMC's reliability and accuracy.\n\nThis work performs extensive testing of DAGMC implemented with the radiation transport code Monte Carlo N-Particle 5 (DAG-MCNP5). Four tests suites have been compiled for DAG-MCNP5 to ensure the accuracy of the code now and for future developers. These test suites are based largely on the test suites for MCNP5 and include: a suite of 80 regression tests, a suite of 75 verification tests, a suite of 30 validation criticality tests, and a suite of 19 validation shielding tests. These tests encompass a wide range of geometries, materials, and physics to test almost all of the features of DAG-MCNP5. The results of these test were compared to both analytical and experimental results, where appropriate, and MCNP5 results. A faceting tolerance study\nwas also performed for many of these test. 
It was found that a faceting tolerance of not large than 10-4 cm produces statistically similar results to MCNP5 on a consistent basis for all problem types. It is concluded that DAG-MCNP5 performs as accurately as MCNP5 for these test problems, and that DAG-MCNP5 can be considered a reliable neutronics code.},\n\tlanguage = {English},\n\tschool = {University of Wisconsin-Madison},\n\tauthor = {Snouffer, Patrick},\n\tmonth = aug,\n\tyear = {2011},\n\tkeywords = {CNERG:HK20 Final Report},\n}\n", "data": { "key": "7CCN265G", - "version": 26427, + "version": 30781, "itemType": "thesis", "title": "Validation and Verification of Direct Accelerated Geometry Monte Carlo", "creators": [ @@ -1786,15 +1943,18 @@ "callNumber": "", "rights": "", "extra": "", - "tags": [], + "tags": [ + { + "tag": "CNERG:HK20 Final Report" + } + ], "collections": [ "6259B6TV", - "4MDZ29N8", "Y4UI9B4X" ], "relations": {}, "dateAdded": "2012-12-07T14:29:06Z", - "dateModified": "2012-12-07T14:31:43Z" + "dateModified": "2024-09-08T17:38:42Z" } }, { @@ -1883,8 +2043,8 @@ } }, { - "key": "5WKCU3AF", - "version": 26428, + "key": "TW3VBHD8", + "version": 30781, "library": { "type": "group", "id": 10058, @@ -1898,16 +2058,33 @@ }, "links": { "self": { - "href": "https://api.zotero.org/groups/10058/items/5WKCU3AF", + "href": "https://api.zotero.org/groups/10058/items/TW3VBHD8", "type": "application/json" }, "alternate": { - "href": "https://www.zotero.org/groups/10058/items/5WKCU3AF", + "href": "https://www.zotero.org/groups/10058/items/TW3VBHD8", "type": "text/html" + }, + "attachment": { + "href": "https://api.zotero.org/groups/10058/items/MUUA6GAS", + "type": "application/json", + "attachmentType": "application/pdf", + "attachmentSize": 1351946 } }, "meta": { "createdByUser": { + "id": 708524, + "username": "erelson", + "name": "Eric Relson", + "links": { + "alternate": { + "href": "https://www.zotero.org/erelson", + "type": "text/html" + } + } + }, + "lastModifiedByUser": { "id": 112658, "username": 
"gonuke", "name": "", @@ -1918,21 +2095,21 @@ } } }, - "creatorSummary": "Nygaard", + "creatorSummary": "Moule", "parsedDate": "2011", - "numChildren": 0 + "numChildren": 2 }, - "bibtex": "\n@phdthesis{nygaard_notitle_2011,\n\taddress = {Madison, WI, United States},\n\ttype = {{MS} {Nuclear} {Engineering} and {Engineering} {Physics}},\n\tschool = {University of Wisconsin-Madison},\n\tauthor = {Nygaard, Erik},\n\tyear = {2011},\n}\n", + "bibtex": "\n@phdthesis{moule_sampling_2011,\n\taddress = {Madison, WI, United States},\n\ttype = {{MS} {Nuclear} {Engineering} and {Engineering} {Physics}},\n\ttitle = {Sampling {Material} {Composition} of {CAD} {Geometries}},\n\tlanguage = {English},\n\tschool = {University of Wisconsin-Madison},\n\tauthor = {Moule, Damien},\n\tyear = {2011},\n\tkeywords = {CNERG:HK20 Final Report},\n}\n", "data": { - "key": "5WKCU3AF", - "version": 26428, + "key": "TW3VBHD8", + "version": 30781, "itemType": "thesis", - "title": "", + "title": "Sampling Material Composition of CAD Geometries", "creators": [ { "creatorType": "author", - "firstName": "Erik", - "lastName": "Nygaard" + "firstName": "Damien", + "lastName": "Moule" } ], "abstractNote": "", @@ -1940,8 +2117,8 @@ "university": "University of Wisconsin-Madison", "place": "Madison, WI, United States", "date": "2011", - "numPages": "", - "language": "", + "numPages": "71", + "language": "English", "shortTitle": "", "url": "", "accessDate": "", @@ -1951,19 +2128,23 @@ "callNumber": "", "rights": "", "extra": "", - "tags": [], + "tags": [ + { + "tag": "CNERG:HK20 Final Report" + } + ], "collections": [ "6259B6TV", "Y4UI9B4X" ], "relations": {}, - "dateAdded": "2016-11-27T21:57:05Z", - "dateModified": "2020-12-30T15:03:12Z" + "dateAdded": "2013-02-07T21:51:45Z", + "dateModified": "2024-09-08T17:38:42Z" } }, { - "key": "TW3VBHD8", - "version": 26427, + "key": "VDD6RCJQ", + "version": 30781, "library": { "type": "group", "id": 10058, @@ -1977,33 +2158,22 @@ }, "links": { "self": { - "href": 
"https://api.zotero.org/groups/10058/items/TW3VBHD8", + "href": "https://api.zotero.org/groups/10058/items/VDD6RCJQ", "type": "application/json" }, "alternate": { - "href": "https://www.zotero.org/groups/10058/items/TW3VBHD8", + "href": "https://www.zotero.org/groups/10058/items/VDD6RCJQ", "type": "text/html" }, "attachment": { - "href": "https://api.zotero.org/groups/10058/items/MUUA6GAS", + "href": "https://api.zotero.org/groups/10058/items/GUXS9QRA", "type": "application/json", "attachmentType": "application/pdf", - "attachmentSize": 1351946 + "attachmentSize": 8600060 } }, "meta": { "createdByUser": { - "id": 708524, - "username": "erelson", - "name": "Eric Relson", - "links": { - "alternate": { - "href": "https://www.zotero.org/erelson", - "type": "text/html" - } - } - }, - "lastModifiedByUser": { "id": 112658, "username": "gonuke", "name": "", @@ -2014,32 +2184,32 @@ } } }, - "creatorSummary": "Moule", + "creatorSummary": "Smith", "parsedDate": "2011", "numChildren": 2 }, - "bibtex": "\n@phdthesis{moule_sampling_2011,\n\taddress = {Madison, WI, United States},\n\ttype = {{MS} {Nuclear} {Engineering} and {Engineering} {Physics}},\n\ttitle = {Sampling {Material} {Composition} of {CAD} {Geometries}},\n\tlanguage = {English},\n\tschool = {University of Wisconsin-Madison},\n\tauthor = {Moule, Damien},\n\tyear = {2011},\n}\n", + "bibtex": "\n@phdthesis{smith_robust_2011,\n\taddress = {Madison, WI, United States},\n\ttype = {{PhD} {Nuclear} {Engineering} and {Engineering} {Physics}},\n\ttitle = {Robust {Tracking} and {Advanced} {Geometry} for {Monte} {Carlo} {Radiation} {Transport}},\n\turl = {https://www.proquest.com/pqdtglobal/docview/885040837/abstract/49781E1E9AC841E4PQ/1?sourcetype=Dissertations%20&%20Theses},\n\tabstract = {A set of improved geometric capabilities are developed for the Direct Accelerated Geometry for Monte Carlo (DAGMC) library to increase its ease of use and accuracy. 
The improvements are watertight faceting, robust particle tracking, automatic creation of nonsolid space, and overlap tolerance. Before being sealed, adjacent faceted surfaces do not have the same discretization along shared curves. Sealing together surfaces to create a watertight faceting prevents leakage of particles between surfaces. The tracking algorithm is made robust by ensuring numerical consistency and avoiding geometric tolerances. Monte Carlo simulation requires all space to be defined, whether it be vacuum, air, coolant, or a solid material. The implicit creation of nonsolid space reduces human effort otherwise required to explicitly create nonsolid space in a CAD program. CAD models often contain small gaps and overlaps between adjacent volumes due to imprecise modeling, file translation, or intentional deformation. Although gaps are filled by the implicit creation of nonsolid space, overlaps cause geometric queries to become unreliable. The particle tracking algorithm and point inclusion test are modified to tolerate small overlaps of adjacent volumes. Overlap-tolerant particle tracking eliminates manual repair of CAD models and enables analysis of meshed finite element models undergoing structural deformation. These improvements are implemented in a coupling of DAGMC with the Monte Carlo N-Particle (MCNP) code, known as DAG-MCNP. 
The elimination of both manual CAD repair and lost particles are demonstrated with CAD models used in production calculations.},\n\tlanguage = {English},\n\tschool = {University of Wisconsin-Madison},\n\tauthor = {Smith, Brandon M.},\n\tyear = {2011},\n\tkeywords = {CNERG:HK20 Final Report},\n}\n", "data": { - "key": "TW3VBHD8", - "version": 26427, + "key": "VDD6RCJQ", + "version": 30781, "itemType": "thesis", - "title": "Sampling Material Composition of CAD Geometries", + "title": "Robust Tracking and Advanced Geometry for Monte Carlo Radiation Transport", "creators": [ { "creatorType": "author", - "firstName": "Damien", - "lastName": "Moule" + "firstName": "Brandon M.", + "lastName": "Smith" } ], - "abstractNote": "", - "thesisType": "MS Nuclear Engineering and Engineering Physics", + "abstractNote": "A set of improved geometric capabilities are developed for the Direct Accelerated Geometry for Monte Carlo (DAGMC) library to increase its ease of use and accuracy. The improvements are watertight faceting, robust particle tracking, automatic creation of nonsolid space, and overlap tolerance. Before being sealed, adjacent faceted surfaces do not have the same discretization along shared curves. Sealing together surfaces to create a watertight faceting prevents leakage of particles between surfaces. The tracking algorithm is made robust by ensuring numerical consistency and avoiding geometric tolerances. Monte Carlo simulation requires all space to be defined, whether it be vacuum, air, coolant, or a solid material. The implicit creation of nonsolid space reduces human effort otherwise required to explicitly create nonsolid space in a CAD program. CAD models often contain small gaps and overlaps between adjacent volumes due to imprecise modeling, file translation, or intentional deformation. Although gaps are filled by the implicit creation of nonsolid space, overlaps cause geometric queries to become unreliable. 
The particle tracking algorithm and point inclusion test are modified to tolerate small overlaps of adjacent volumes. Overlap-tolerant particle tracking eliminates manual repair of CAD models and enables analysis of meshed finite element models undergoing structural deformation. These improvements are implemented in a coupling of DAGMC with the Monte Carlo N-Particle (MCNP) code, known as DAG-MCNP. The elimination of both manual CAD repair and lost particles are demonstrated with CAD models used in production calculations.", + "thesisType": "PhD Nuclear Engineering and Engineering Physics", "university": "University of Wisconsin-Madison", "place": "Madison, WI, United States", "date": "2011", - "numPages": "71", + "numPages": "145", "language": "English", "shortTitle": "", - "url": "", + "url": "https://www.proquest.com/pqdtglobal/docview/885040837/abstract/49781E1E9AC841E4PQ/1?sourcetype=Dissertations%20&%20Theses", "accessDate": "", "archive": "", "archiveLocation": "", @@ -2047,21 +2217,23 @@ "callNumber": "", "rights": "", "extra": "", - "tags": [], + "tags": [ + { + "tag": "CNERG:HK20 Final Report" + } + ], "collections": [ "6259B6TV", - "4MDZ29N8", - "RI2DQ3B2", - "Y4UI9B4X" + "34I86HPD" ], "relations": {}, - "dateAdded": "2013-02-07T21:51:45Z", - "dateModified": "2013-11-10T16:19:44Z" + "dateAdded": "2012-12-07T14:36:11Z", + "dateModified": "2024-09-08T17:38:42Z" } }, { - "key": "VDD6RCJQ", - "version": 26427, + "key": "5WKCU3AF", + "version": 26428, "library": { "type": "group", "id": 10058, @@ -2075,18 +2247,12 @@ }, "links": { "self": { - "href": "https://api.zotero.org/groups/10058/items/VDD6RCJQ", + "href": "https://api.zotero.org/groups/10058/items/5WKCU3AF", "type": "application/json" }, "alternate": { - "href": "https://www.zotero.org/groups/10058/items/VDD6RCJQ", + "href": "https://www.zotero.org/groups/10058/items/5WKCU3AF", "type": "text/html" - }, - "attachment": { - "href": "https://api.zotero.org/groups/10058/items/GUXS9QRA", - "type": 
"application/json", - "attachmentType": "application/pdf", - "attachmentSize": 8600060 } }, "meta": { @@ -2101,30 +2267,30 @@ } } }, - "creatorSummary": "Smith", + "creatorSummary": "Nygaard", "parsedDate": "2011", - "numChildren": 2 + "numChildren": 0 }, - "bibtex": "\n@phdthesis{smith_robust_2011,\n\taddress = {Madison, WI, United States},\n\ttype = {{PhD} {Nuclear} {Engineering} and {Engineering} {Physics}},\n\ttitle = {Robust {Tracking} and {Advanced} {Geometry} for {Monte} {Carlo} {Radiation} {Transport}},\n\tabstract = {A set of improved geometric capabilities are developed for the Direct Accelerated Geometry for Monte Carlo (DAGMC) library to increase its ease of use and accuracy. The improvements are watertight faceting, robust particle tracking, automatic creation of nonsolid space, and overlap tolerance. Before being sealed, adjacent faceted surfaces do not have the same discretization along shared curves. Sealing together surfaces to create a watertight faceting prevents leakage of particles between surfaces. The tracking algorithm is made robust by ensuring numerical consistency and avoiding geometric tolerances. Monte Carlo simulation requires all space to be defined, whether it be vacuum, air, coolant, or a solid material. The implicit creation of nonsolid space reduces human effort otherwise required to explicitly create nonsolid space in a CAD program. CAD models often contain small gaps and overlaps between adjacent volumes due to imprecise modeling, file translation, or intentional deformation. Although gaps are filled by the implicit creation of nonsolid space, overlaps cause geometric queries to become unreliable. The particle tracking algorithm and point inclusion test are modified to tolerate small overlaps of adjacent volumes. Overlap-tolerant particle tracking eliminates manual repair of CAD models and enables analysis of meshed finite element models undergoing structural deformation. 
These improvements are implemented in a coupling of DAGMC with the Monte Carlo N-Particle (MCNP) code, known as DAG-MCNP. The elimination of both manual CAD repair and lost particles are demonstrated with CAD models used in production calculations.},\n\tlanguage = {English},\n\tschool = {University of Wisconsin-Madison},\n\tauthor = {Smith, Brandon M.},\n\tyear = {2011},\n}\n", + "bibtex": "\n@phdthesis{nygaard_notitle_2011,\n\taddress = {Madison, WI, United States},\n\ttype = {{MS} {Nuclear} {Engineering} and {Engineering} {Physics}},\n\tschool = {University of Wisconsin-Madison},\n\tauthor = {Nygaard, Erik},\n\tyear = {2011},\n}\n", "data": { - "key": "VDD6RCJQ", - "version": 26427, + "key": "5WKCU3AF", + "version": 26428, "itemType": "thesis", - "title": "Robust Tracking and Advanced Geometry for Monte Carlo Radiation Transport", + "title": "", "creators": [ { "creatorType": "author", - "firstName": "Brandon M.", - "lastName": "Smith" + "firstName": "Erik", + "lastName": "Nygaard" } ], - "abstractNote": "A set of improved geometric capabilities are developed for the Direct Accelerated Geometry for Monte Carlo (DAGMC) library to increase its ease of use and accuracy. The improvements are watertight faceting, robust particle tracking, automatic creation of nonsolid space, and overlap tolerance. Before being sealed, adjacent faceted surfaces do not have the same discretization along shared curves. Sealing together surfaces to create a watertight faceting prevents leakage of particles between surfaces. The tracking algorithm is made robust by ensuring numerical consistency and avoiding geometric tolerances. Monte Carlo simulation requires all space to be defined, whether it be vacuum, air, coolant, or a solid material. The implicit creation of nonsolid space reduces human effort otherwise required to explicitly create nonsolid space in a CAD program. 
CAD models often contain small gaps and overlaps between adjacent volumes due to imprecise modeling, file translation, or intentional deformation. Although gaps are filled by the implicit creation of nonsolid space, overlaps cause geometric queries to become unreliable. The particle tracking algorithm and point inclusion test are modified to tolerate small overlaps of adjacent volumes. Overlap-tolerant particle tracking eliminates manual repair of CAD models and enables analysis of meshed finite element models undergoing structural deformation. These improvements are implemented in a coupling of DAGMC with the Monte Carlo N-Particle (MCNP) code, known as DAG-MCNP. The elimination of both manual CAD repair and lost particles are demonstrated with CAD models used in production calculations.", - "thesisType": "PhD Nuclear Engineering and Engineering Physics", + "abstractNote": "", + "thesisType": "MS Nuclear Engineering and Engineering Physics", "university": "University of Wisconsin-Madison", "place": "Madison, WI, United States", "date": "2011", - "numPages": "145", - "language": "English", + "numPages": "", + "language": "", "shortTitle": "", "url": "", "accessDate": "", @@ -2137,12 +2303,11 @@ "tags": [], "collections": [ "6259B6TV", - "4MDZ29N8", - "34I86HPD" + "Y4UI9B4X" ], "relations": {}, - "dateAdded": "2012-12-07T14:36:11Z", - "dateModified": "2012-12-07T14:40:07Z" + "dateAdded": "2016-11-27T21:57:05Z", + "dateModified": "2020-12-30T15:03:12Z" } }, { @@ -2231,8 +2396,8 @@ } }, { - "key": "23P9KT43", - "version": 28773, + "key": "NGJ8UC47", + "version": 30758, "library": { "type": "group", "id": 10058, @@ -2246,70 +2411,70 @@ }, "links": { "self": { - "href": "https://api.zotero.org/groups/10058/items/23P9KT43", + "href": "https://api.zotero.org/groups/10058/items/NGJ8UC47", "type": "application/json" }, "alternate": { - "href": "https://www.zotero.org/groups/10058/items/23P9KT43", + "href": "https://www.zotero.org/groups/10058/items/NGJ8UC47", "type": 
"text/html" }, "attachment": { - "href": "https://api.zotero.org/groups/10058/items/IEPH8GF8", + "href": "https://api.zotero.org/groups/10058/items/GED9HPK8", "type": "application/json", "attachmentType": "application/pdf", - "attachmentSize": 303098 + "attachmentSize": 2141571 } }, "meta": { "createdByUser": { - "id": 112658, - "username": "gonuke", + "id": 144819, + "username": "gidden", "name": "", "links": { "alternate": { - "href": "https://www.zotero.org/gonuke", + "href": "https://www.zotero.org/gidden", "type": "text/html" } } }, "lastModifiedByUser": { - "id": 2259868, - "username": "micah.gale", + "id": 112658, + "username": "gonuke", "name": "", "links": { "alternate": { - "href": "https://www.zotero.org/micah.gale", + "href": "https://www.zotero.org/gonuke", "type": "text/html" } } }, - "creatorSummary": "Kiedrowski", + "creatorSummary": "Oliver", "parsedDate": "2009", - "numChildren": 4 + "numChildren": 6 }, - "bibtex": "\n@phdthesis{kiedrowski_adjoint_2009,\n\taddress = {Madison, WI, United States},\n\ttype = {{PhD} {Nuclear} {Engineering} and {Engineering} {Physics}},\n\ttitle = {Adjoint {Weighting} for {Continuous}-{Energy} {Monte} {Carlo} {Radiation} {Transport}},\n\tabstract = {Methods are developed for importance or adjoint weighting of individual tally scores within a continuous-energy k-eigenvalue Monte Carlo calculation. These adjoint-weighted tallies allow for the calculation of certain quantities important to understanding the physics of a nuclear reactor.\n\nThe methods, unlike traditional approaches to computing adjoint-weighted quantities, do not attempt to invert the random walk. Rather, they are based upon the iterated fission probability interpretation of the adjoint flux, which is the steady state population in a critical nuclear reactor caused by a neutron introduced at that point in phase space. 
This can be calculated in a strictly forward calculation, and this factor can be applied to previously computed tally scores.\n\nThese methods are implemented in a production Monte Carlo code and are used to calculate parameters requiring adjoint weighting, the point reactor kinetics parameters and reactivity changes based upon first-order perturbation theory. The results of these calculations are compared against experimental measurements, equivalent discrete ordinates calculations, or other Monte Carlo based techniques.},\n\tlanguage = {English},\n\tschool = {University of Wisconsin-Madison},\n\tauthor = {Kiedrowski, Brian C.},\n\tyear = {2009},\n}\n", + "bibtex": "\n@phdthesis{oliver_geniusv2_2009,\n\taddress = {Madison, WI, United States},\n\ttype = {{MS} {Nuclear} {Engineering} and {Engineering} {Physics}},\n\ttitle = {{GENIUSv2}: {Software} {Design} and {Mathematical} {Formulations} for {Multi}-{Region} {Discrete} {Nuclear} {Fuel} {Cycle} {Simulation} and {Analysis}},\n\turl = {http://kyleoliver.net/work/thesis_KMO.pdf},\n\turldate = {2012-11-25},\n\tschool = {University of Wisconsin-Madison},\n\tauthor = {Oliver, Kyle M.},\n\tyear = {2009},\n}\n", "data": { - "key": "23P9KT43", - "version": 28773, + "key": "NGJ8UC47", + "version": 30758, "itemType": "thesis", - "title": "Adjoint Weighting for Continuous-Energy Monte Carlo Radiation Transport", + "title": "GENIUSv2: Software Design and Mathematical Formulations for Multi-Region Discrete Nuclear Fuel Cycle Simulation and Analysis", "creators": [ { "creatorType": "author", - "firstName": "Brian C.", - "lastName": "Kiedrowski" + "firstName": "Kyle M.", + "lastName": "Oliver" } ], - "abstractNote": "Methods are developed for importance or adjoint weighting of individual tally scores within a continuous-energy k-eigenvalue Monte Carlo calculation. 
These adjoint-weighted tallies allow for the calculation of certain quantities important to understanding the physics of a nuclear reactor.\n\nThe methods, unlike traditional approaches to computing adjoint-weighted quantities, do not attempt to invert the random walk. Rather, they are based upon the iterated fission probability interpretation of the adjoint flux, which is the steady state population in a critical nuclear reactor caused by a neutron introduced at that point in phase space. This can be calculated in a strictly forward calculation, and this factor can be applied to previously computed tally scores.\n\nThese methods are implemented in a production Monte Carlo code and are used to calculate parameters requiring adjoint weighting, the point reactor kinetics parameters and reactivity changes based upon first-order perturbation theory. The results of these calculations are compared against experimental measurements, equivalent discrete ordinates calculations, or other Monte Carlo based techniques.", - "thesisType": "PhD Nuclear Engineering and Engineering Physics", + "abstractNote": "", + "thesisType": "MS Nuclear Engineering and Engineering Physics", "university": "University of Wisconsin-Madison", "place": "Madison, WI, United States", "date": "2009", - "numPages": "190", - "language": "English", + "numPages": "131", + "language": "", "shortTitle": "", - "url": "", - "accessDate": "", + "url": "http://kyleoliver.net/work/thesis_KMO.pdf", + "accessDate": "2012-11-25T19:20:07Z", "archive": "", "archiveLocation": "", "libraryCatalog": "", @@ -2319,19 +2484,19 @@ "tags": [], "collections": [ "6259B6TV", - "34I86HPD", - "4H6EUSDS" + "7FVXFFRN", + "Y4UI9B4X" ], "relations": { - "dc:replaces": "http://zotero.org/groups/10058/items/WXP4LPXI" + "dc:replaces": "http://zotero.org/groups/10058/items/HBH5EWGG" }, - "dateAdded": "2013-11-13T21:36:00Z", - "dateModified": "2023-09-10T19:12:12Z" + "dateAdded": "2012-11-25T19:24:55Z", + "dateModified": 
"2024-09-08T17:34:27Z" } }, { - "key": "HBH5EWGG", - "version": 26428, + "key": "23P9KT43", + "version": 30686, "library": { "type": "group", "id": 10058, @@ -2345,18 +2510,18 @@ }, "links": { "self": { - "href": "https://api.zotero.org/groups/10058/items/HBH5EWGG", + "href": "https://api.zotero.org/groups/10058/items/23P9KT43", "type": "application/json" }, "alternate": { - "href": "https://www.zotero.org/groups/10058/items/HBH5EWGG", + "href": "https://www.zotero.org/groups/10058/items/23P9KT43", "type": "text/html" }, "attachment": { - "href": "https://api.zotero.org/groups/10058/items/UB4ATDK5", + "href": "https://api.zotero.org/groups/10058/items/IEPH8GF8", "type": "application/json", "attachmentType": "application/pdf", - "attachmentSize": 26454 + "attachmentSize": 303098 } }, "meta": { @@ -2371,32 +2536,32 @@ } } }, - "creatorSummary": "Oliver", + "creatorSummary": "Kiedrowski", "parsedDate": "2009", - "numChildren": 6 + "numChildren": 4 }, - "bibtex": "\n@phdthesis{oliver_geniusv2_2009,\n\taddress = {Madison, WI, United States},\n\ttype = {{MS} {Nuclear} {Engineering} and {Engineering} {Physics}},\n\ttitle = {{GENIUSv2}: {Software} {Design} and {Mathematical} {Formulations} for {Multi}-{Region} {Discrete} {Nuclear} {Fuel} {Cycle} {Simulation} and {Analysis}},\n\tschool = {University of Wisconsin-Madison},\n\tauthor = {Oliver, Kyle M.},\n\tyear = {2009},\n}\n", + "bibtex": "\n@phdthesis{kiedrowski_adjoint_2009,\n\taddress = {Madison, WI, United States},\n\ttype = {{PhD} {Nuclear} {Engineering} and {Engineering} {Physics}},\n\ttitle = {Adjoint {Weighting} for {Continuous}-{Energy} {Monte} {Carlo} {Radiation} {Transport}},\n\turl = {https://www.proquest.com/pqdtglobal/docview/305033185/abstract/A62FE598B9FC46AEPQ/1?sourcetype=Dissertations%20&%20Theses},\n\tabstract = {Methods are developed for importance or adjoint weighting of individual tally scores within a continuous-energy k-eigenvalue Monte Carlo calculation. 
These adjoint-weighted tallies allow for the calculation of certain quantities important to understanding the physics of a nuclear reactor.\n\nThe methods, unlike traditional approaches to computing adjoint-weighted quantities, do not attempt to invert the random walk. Rather, they are based upon the iterated fission probability interpretation of the adjoint flux, which is the steady state population in a critical nuclear reactor caused by a neutron introduced at that point in phase space. This can be calculated in a strictly forward calculation, and this factor can be applied to previously computed tally scores.\n\nThese methods are implemented in a production Monte Carlo code and are used to calculate parameters requiring adjoint weighting, the point reactor kinetics parameters and reactivity changes based upon first-order perturbation theory. The results of these calculations are compared against experimental measurements, equivalent discrete ordinates calculations, or other Monte Carlo based techniques.},\n\tlanguage = {English},\n\tschool = {University of Wisconsin-Madison},\n\tauthor = {Kiedrowski, Brian C.},\n\tyear = {2009},\n}\n", "data": { - "key": "HBH5EWGG", - "version": 26428, + "key": "23P9KT43", + "version": 30686, "itemType": "thesis", - "title": "GENIUSv2: Software Design and Mathematical Formulations for Multi-Region Discrete Nuclear Fuel Cycle Simulation and Analysis", + "title": "Adjoint Weighting for Continuous-Energy Monte Carlo Radiation Transport", "creators": [ { "creatorType": "author", - "firstName": "Kyle M.", - "lastName": "Oliver" + "firstName": "Brian C.", + "lastName": "Kiedrowski" } ], - "abstractNote": "", - "thesisType": "MS Nuclear Engineering and Engineering Physics", + "abstractNote": "Methods are developed for importance or adjoint weighting of individual tally scores within a continuous-energy k-eigenvalue Monte Carlo calculation. 
These adjoint-weighted tallies allow for the calculation of certain quantities important to understanding the physics of a nuclear reactor.\n\nThe methods, unlike traditional approaches to computing adjoint-weighted quantities, do not attempt to invert the random walk. Rather, they are based upon the iterated fission probability interpretation of the adjoint flux, which is the steady state population in a critical nuclear reactor caused by a neutron introduced at that point in phase space. This can be calculated in a strictly forward calculation, and this factor can be applied to previously computed tally scores.\n\nThese methods are implemented in a production Monte Carlo code and are used to calculate parameters requiring adjoint weighting, the point reactor kinetics parameters and reactivity changes based upon first-order perturbation theory. The results of these calculations are compared against experimental measurements, equivalent discrete ordinates calculations, or other Monte Carlo based techniques.", + "thesisType": "PhD Nuclear Engineering and Engineering Physics", "university": "University of Wisconsin-Madison", "place": "Madison, WI, United States", "date": "2009", - "numPages": "", - "language": "", + "numPages": "190", + "language": "English", "shortTitle": "", - "url": "", + "url": "https://www.proquest.com/pqdtglobal/docview/305033185/abstract/A62FE598B9FC46AEPQ/1?sourcetype=Dissertations%20&%20Theses", "accessDate": "", "archive": "", "archiveLocation": "", @@ -2407,11 +2572,14 @@ "tags": [], "collections": [ "6259B6TV", - "Y4UI9B4X" + "34I86HPD", + "4H6EUSDS" ], - "relations": {}, - "dateAdded": "2013-09-04T20:14:28Z", - "dateModified": "2020-12-30T15:02:53Z" + "relations": { + "dc:replaces": "http://zotero.org/groups/10058/items/WXP4LPXI" + }, + "dateAdded": "2013-11-13T21:36:00Z", + "dateModified": "2024-09-08T17:00:22Z" } }, { @@ -2500,8 +2668,8 @@ } }, { - "key": "DQVPWQ5T", - "version": 26429, + "key": "NN84DEPH", + "version": 30691, 
"library": { "type": "group", "id": 10058, @@ -2515,18 +2683,18 @@ }, "links": { "self": { - "href": "https://api.zotero.org/groups/10058/items/DQVPWQ5T", + "href": "https://api.zotero.org/groups/10058/items/NN84DEPH", "type": "application/json" }, "alternate": { - "href": "https://www.zotero.org/groups/10058/items/DQVPWQ5T", + "href": "https://www.zotero.org/groups/10058/items/NN84DEPH", "type": "text/html" }, "attachment": { - "href": "https://api.zotero.org/groups/10058/items/X6G6TC9H", + "href": "https://api.zotero.org/groups/10058/items/VQQSC9M8", "type": "application/json", "attachmentType": "application/pdf", - "attachmentSize": 269170 + "attachmentSize": 3932950 } }, "meta": { @@ -2541,32 +2709,32 @@ } } }, - "creatorSummary": "Scholbrock", + "creatorSummary": "Hu", "parsedDate": "2008", "numChildren": 3 }, - "bibtex": "\n@phdthesis{scholbrock_attribute_2008,\n\taddress = {Madison, WI, United States},\n\ttype = {{BS} {Engineering} {Physics}},\n\ttitle = {Attribute {Management} in {ACIS} {Based} {Geometry} {Files}},\n\tabstract = {Computer aided design provides for a means to represent physical quantities in a computer as well as the concepts related to it in order to provide an efficient design process. Using computer simulation over physical tests allow for quicker and cheaper results. However much of the potential that computer aided design has to offer is not being utilized due to the cumbersome interfaces that currently stand between engineers and computers. Specifically when dealing with attributes (labels that help define physical properties of the geometric representation) current geometry tools do not provide means to adapt attributes as needed in the simulation based design process. 
This research looks into creating a tool to apply and manipulate attributes on geometric entities while providing an efficient means for a user to interact with a geometric model.},\n\tlanguage = {English},\n\tschool = {University of Wisconsin-Madison},\n\tauthor = {Scholbrock, Andrew},\n\tyear = {2008},\n}\n", + "bibtex": "\n@phdthesis{hu_coupled_2008,\n\taddress = {Madison, WI, United States},\n\ttype = {{PhD} {Nuclear} {Engineering} and {Engineering} {Physics}},\n\ttitle = {Coupled {Neutronics}/{Thermal}-hydraulics {Analyses} of {Supercritical} {Water} {Reactor}},\n\turl = {https://www.proquest.com/pqdtglobal/docview/304451711/abstract/7B56EE2026464FF0PQ/1?sourcetype=Dissertations%20&%20Theses},\n\tabstract = {The Supercritical Water Reactor (SCWR) is a next generation nuclear reactor concept well known for its system simplification and high thermal efficiency. The current study develops analysis capability for the U.S. reference design by extending existing LWR analysis codes and study SCWR behaviors under steady state, burnup and transient conditions.\n\nAn extended version of PARCS that can analyze SCWR in steady state is developed first. The modified code is used to demonstrate the importance of moderator heating on the neutronics behavior of U.S. SCWR design by simulating an infinite lattice of assemblies. The results show that the moderator heating leads to a more symmetric effective moderator density, and has a significant impact on the axial power shape. From this, sensitivity calculations are performed to show how the assembly performs with\nperturbations in assembly power, mass flow rate, bypass ratio and heat transfer coefficient.\n\nIn order to study transient and flow distribution between assemblies in SCWR, a coupled PARCS/RELAP5 code package specialized for current SCWR design is developed in this study. 
The variable mapping input file and related subroutines in PARCS are modified to transfer the physical properties of coolant and moderator separately between the coupled codes, and necessary code modification is also done in\nPARCS to automatically perform neutronics feedback based on not only fuel and coolant but also moderator physical properties. A finer data grid in the RELAP5 water table is adopted above the supercritical point to enable the thermal-hydrodynamics simulation in this range. A whole SCWR core model for the coupled PARCS/RELAP5 is established for this study and used in the rest of the study.\n\nFlow reversal in downward flowing moderator channels is discovered in steady state. It is due to the positive flow rate feedback to flow density change necessary for pressure balance. Choosing different orifice sizes based on corresponding assembly powers can prevent the reversal. The comparison of results from the coupled simulations with/without flow reversal shows that the reversed moderator flow introduces a large axial power peak at the bottom of the core and reduces the core reactivity. \n\nA burnup calculation shows that under the current design parameters the reactor cannot sustain criticality for one year, therefore further investigation on burnup is needed. A possible moderator reversal is found during the burnup calculation suggesting that the change in core axial power distribution during burnup should be considered while designing the various orifice sizes to prevent the reversal.\n\nA SCWR system model is developed which adds balance of the plant to the core model. Three transients are studied: loss of feedwater, loss of off-site power and loss of turbine load without scram. The results show that the maximum cladding surface temperatures satisfy the material limit. The location of maximum cladding surface temperature is not in the maximum power assembly. 
This suggests the normal hot channel analysis method may not applicable to SCWR.\n\nFuture work on sub-channel analysis, achieving full cycle burnup and more safety analyses is proposed.},\n\tlanguage = {English},\n\tschool = {University of Wisconsin-Madison},\n\tauthor = {Hu, Po},\n\tyear = {2008},\n}\n", "data": { - "key": "DQVPWQ5T", - "version": 26429, + "key": "NN84DEPH", + "version": 30691, "itemType": "thesis", - "title": "Attribute Management in ACIS Based Geometry Files", + "title": "Coupled Neutronics/Thermal-hydraulics Analyses of Supercritical Water Reactor", "creators": [ { "creatorType": "author", - "firstName": "Andrew", - "lastName": "Scholbrock" + "firstName": "Po", + "lastName": "Hu" } ], - "abstractNote": "Computer aided design provides for a means to represent physical quantities in a computer as well as the concepts related to it in order to provide an efficient design process. Using computer simulation over physical tests allow for quicker and cheaper results. However much of the potential that computer aided design has to offer is not being utilized due to the cumbersome interfaces that currently stand between engineers and computers. Specifically when dealing with attributes (labels that help define physical properties of the geometric representation) current geometry tools do not provide means to adapt attributes as needed in the simulation based design process. This research looks into creating a tool to apply and manipulate attributes on geometric entities while providing an efficient means for a user to interact with a geometric model.", - "thesisType": "BS Engineering Physics", + "abstractNote": "The Supercritical Water Reactor (SCWR) is a next generation nuclear reactor concept well known for its system simplification and high thermal efficiency. The current study develops analysis capability for the U.S. 
reference design by extending existing LWR analysis codes and study SCWR behaviors under steady state, burnup and transient conditions.\n\nAn extended version of PARCS that can analyze SCWR in steady state is developed first. The modified code is used to demonstrate the importance of moderator heating on the neutronics behavior of U.S. SCWR design by simulating an infinite lattice of assemblies. The results show that the moderator heating leads to a more symmetric effective moderator density, and has a significant impact on the axial power shape. From this, sensitivity calculations are performed to show how the assembly performs with\nperturbations in assembly power, mass flow rate, bypass ratio and heat transfer coefficient.\n\nIn order to study transient and flow distribution between assemblies in SCWR, a coupled PARCS/RELAP5 code package specialized for current SCWR design is developed in this study. The variable mapping input file and related subroutines in PARCS are modified to transfer the physical properties of coolant and moderator separately between the coupled codes, and necessary code modification is also done in\nPARCS to automatically perform neutronics feedback based on not only fuel and coolant but also moderator physical properties. A finer data grid in the RELAP5 water table is adopted above the supercritical point to enable the thermal-hydrodynamics simulation in this range. A whole SCWR core model for the coupled PARCS/RELAP5 is established for this study and used in the rest of the study.\n\nFlow reversal in downward flowing moderator channels is discovered in steady state. It is due to the positive flow rate feedback to flow density change necessary for pressure balance. Choosing different orifice sizes based on corresponding assembly powers can prevent the reversal. 
The comparison of results from the coupled simulations with/without flow reversal shows that the reversed moderator flow introduces a large axial power peak at the bottom of the core and reduces the core reactivity. \n\nA burnup calculation shows that under the current design parameters the reactor cannot sustain criticality for one year, therefore further investigation on burnup is needed. A possible moderator reversal is found during the burnup calculation suggesting that the change in core axial power distribution during burnup should be considered while designing the various orifice sizes to prevent the reversal.\n\nA SCWR system model is developed which adds balance of the plant to the core model. Three transients are studied: loss of feedwater, loss of off-site power and loss of turbine load without scram. The results show that the maximum cladding surface temperatures satisfy the material limit. The location of maximum cladding surface temperature is not in the maximum power assembly. 
This suggests the normal hot channel analysis method may not applicable to SCWR.\n\nFuture work on sub-channel analysis, achieving full cycle burnup and more safety analyses is proposed.", + "thesisType": "PhD Nuclear Engineering and Engineering Physics", "university": "University of Wisconsin-Madison", "place": "Madison, WI, United States", "date": "2008", - "numPages": "35", + "numPages": "125", "language": "English", "shortTitle": "", - "url": "", + "url": "https://www.proquest.com/pqdtglobal/docview/304451711/abstract/7B56EE2026464FF0PQ/1?sourcetype=Dissertations%20&%20Theses", "accessDate": "", "archive": "", "archiveLocation": "", @@ -2577,16 +2745,16 @@ "tags": [], "collections": [ "6259B6TV", - "KXB4B9JM" + "34I86HPD" ], "relations": {}, - "dateAdded": "2012-11-10T17:42:24Z", - "dateModified": "2020-12-30T15:00:12Z" + "dateAdded": "2013-11-13T21:31:58Z", + "dateModified": "2024-09-08T17:01:01Z" } }, { - "key": "JCVBJ8KI", - "version": 26428, + "key": "DQVPWQ5T", + "version": 26429, "library": { "type": "group", "id": 10058, @@ -2600,18 +2768,18 @@ }, "links": { "self": { - "href": "https://api.zotero.org/groups/10058/items/JCVBJ8KI", + "href": "https://api.zotero.org/groups/10058/items/DQVPWQ5T", "type": "application/json" }, "alternate": { - "href": "https://www.zotero.org/groups/10058/items/JCVBJ8KI", + "href": "https://www.zotero.org/groups/10058/items/DQVPWQ5T", "type": "text/html" }, "attachment": { - "href": "https://api.zotero.org/groups/10058/items/5JCSKA95", + "href": "https://api.zotero.org/groups/10058/items/X6G6TC9H", "type": "application/json", "attachmentType": "application/pdf", - "attachmentSize": 946932 + "attachmentSize": 269170 } }, "meta": { @@ -2626,30 +2794,30 @@ } } }, - "creatorSummary": "Priaulx", + "creatorSummary": "Scholbrock", "parsedDate": "2008", - "numChildren": 2 + "numChildren": 3 }, - "bibtex": "\n@phdthesis{priaulx_development_2008,\n\taddress = {Madison, WI, United States},\n\ttype = {{MS} {Nuclear} {Engineering} and 
{Engineering} {Physics}},\n\ttitle = {Development of a {PARCS}/{HELIOS} {Model} for the {University} of {Wisconsin} {Nuclear} {Reactor}},\n\tschool = {University of Wisconsin-Madison},\n\tauthor = {Priaulx, Michael},\n\tyear = {2008},\n}\n", + "bibtex": "\n@phdthesis{scholbrock_attribute_2008,\n\taddress = {Madison, WI, United States},\n\ttype = {{BS} {Engineering} {Physics}},\n\ttitle = {Attribute {Management} in {ACIS} {Based} {Geometry} {Files}},\n\tabstract = {Computer aided design provides for a means to represent physical quantities in a computer as well as the concepts related to it in order to provide an efficient design process. Using computer simulation over physical tests allow for quicker and cheaper results. However much of the potential that computer aided design has to offer is not being utilized due to the cumbersome interfaces that currently stand between engineers and computers. Specifically when dealing with attributes (labels that help define physical properties of the geometric representation) current geometry tools do not provide means to adapt attributes as needed in the simulation based design process. 
This research looks into creating a tool to apply and manipulate attributes on geometric entities while providing an efficient means for a user to interact with a geometric model.},\n\tlanguage = {English},\n\tschool = {University of Wisconsin-Madison},\n\tauthor = {Scholbrock, Andrew},\n\tyear = {2008},\n}\n", "data": { - "key": "JCVBJ8KI", - "version": 26428, + "key": "DQVPWQ5T", + "version": 26429, "itemType": "thesis", - "title": "Development of a PARCS/HELIOS Model for the University of Wisconsin Nuclear Reactor", + "title": "Attribute Management in ACIS Based Geometry Files", "creators": [ { "creatorType": "author", - "firstName": "Michael", - "lastName": "Priaulx" + "firstName": "Andrew", + "lastName": "Scholbrock" } ], - "abstractNote": "", - "thesisType": "MS Nuclear Engineering and Engineering Physics", + "abstractNote": "Computer aided design provides for a means to represent physical quantities in a computer as well as the concepts related to it in order to provide an efficient design process. Using computer simulation over physical tests allow for quicker and cheaper results. However much of the potential that computer aided design has to offer is not being utilized due to the cumbersome interfaces that currently stand between engineers and computers. Specifically when dealing with attributes (labels that help define physical properties of the geometric representation) current geometry tools do not provide means to adapt attributes as needed in the simulation based design process. 
This research looks into creating a tool to apply and manipulate attributes on geometric entities while providing an efficient means for a user to interact with a geometric model.", + "thesisType": "BS Engineering Physics", "university": "University of Wisconsin-Madison", "place": "Madison, WI, United States", "date": "2008", - "numPages": "", - "language": "", + "numPages": "35", + "language": "English", "shortTitle": "", "url": "", "accessDate": "", @@ -2662,15 +2830,15 @@ "tags": [], "collections": [ "6259B6TV", - "Y4UI9B4X" + "KXB4B9JM" ], "relations": {}, - "dateAdded": "2016-11-27T22:05:38Z", - "dateModified": "2020-12-30T15:02:01Z" + "dateAdded": "2012-11-10T17:42:24Z", + "dateModified": "2020-12-30T15:00:12Z" } }, { - "key": "WR6HI27D", + "key": "JCVBJ8KI", "version": 26428, "library": { "type": "group", @@ -2685,18 +2853,18 @@ }, "links": { "self": { - "href": "https://api.zotero.org/groups/10058/items/WR6HI27D", + "href": "https://api.zotero.org/groups/10058/items/JCVBJ8KI", "type": "application/json" }, "alternate": { - "href": "https://www.zotero.org/groups/10058/items/WR6HI27D", + "href": "https://www.zotero.org/groups/10058/items/JCVBJ8KI", "type": "text/html" }, "attachment": { - "href": "https://api.zotero.org/groups/10058/items/4NHVDMZP", + "href": "https://api.zotero.org/groups/10058/items/5JCSKA95", "type": "application/json", "attachmentType": "application/pdf", - "attachmentSize": 742105 + "attachmentSize": 946932 } }, "meta": { @@ -2711,30 +2879,30 @@ } } }, - "creatorSummary": "Grady", + "creatorSummary": "Priaulx", "parsedDate": "2008", - "numChildren": 3 + "numChildren": 2 }, - "bibtex": "\n@phdthesis{grady_development_2008,\n\taddress = {Madison, WI, United States},\n\ttype = {{MS} {Nuclear} {Engineering} and {Engineering} {Physics}},\n\ttitle = {Development of {Economic} {Accounting} for {Nuclear} {Waste} in {Fuel} {Cycle} {Analysis}},\n\tabstract = {This research focuses on the development of an economic model for waste\n entering a 
repository. This work couples a repository loading model with an economic\n accounting system to determine a cost based on repository usage.\n \n\nThe repository loading model determines the amount of repository space used by\n an arbitrary waste stream. Using the economic model in VISION.econ, the arbitrary\n waste stream can be assigned a cost. The cost for the space used is calibrated by\n computing the cost per meter of repository space if spent fuel is directly emplaced. This\n allows for accurate comparison between direct disposal and different recycling schemes.\n The length-based disposal cost accounts for fuel from different fuel types, burnups, and\n High-Level Waste (HLW) with an arbitrary isotope mix.\n\nKey derivatives of this work are an accounting system that can account for the\n repository savings of reprocessing and the ability to compare direct disposal to\n reprocessing with varying separation schemes. From this work, it was determined that the\n current mass-based accounting system for HLW disposal costs can be significantly\n different than the length-based accounting system proposed in this work when advanced\n reprocessing schemes are implemented. 
Furthermore, this work shows the length-based\n accounting system may be needed to find the disposal cost at which reprocessing is\neconomically equivalent to direct disposal.},\n\tlanguage = {English},\n\tschool = {University of Wisconsin-Madison},\n\tauthor = {Grady, Ryan},\n\tyear = {2008},\n}\n", + "bibtex": "\n@phdthesis{priaulx_development_2008,\n\taddress = {Madison, WI, United States},\n\ttype = {{MS} {Nuclear} {Engineering} and {Engineering} {Physics}},\n\ttitle = {Development of a {PARCS}/{HELIOS} {Model} for the {University} of {Wisconsin} {Nuclear} {Reactor}},\n\tschool = {University of Wisconsin-Madison},\n\tauthor = {Priaulx, Michael},\n\tyear = {2008},\n}\n", "data": { - "key": "WR6HI27D", + "key": "JCVBJ8KI", "version": 26428, "itemType": "thesis", - "title": "Development of Economic Accounting for Nuclear Waste in Fuel Cycle Analysis", + "title": "Development of a PARCS/HELIOS Model for the University of Wisconsin Nuclear Reactor", "creators": [ { "creatorType": "author", - "firstName": "Ryan", - "lastName": "Grady" + "firstName": "Michael", + "lastName": "Priaulx" } ], - "abstractNote": "This research focuses on the development of an economic model for waste\n entering a repository. This work couples a repository loading model with an economic\n accounting system to determine a cost based on repository usage.\n \n\nThe repository loading model determines the amount of repository space used by\n an arbitrary waste stream. Using the economic model in VISION.econ, the arbitrary\n waste stream can be assigned a cost. The cost for the space used is calibrated by\n computing the cost per meter of repository space if spent fuel is directly emplaced. 
This\n allows for accurate comparison between direct disposal and different recycling schemes.\n The length-based disposal cost accounts for fuel from different fuel types, burnups, and\n High-Level Waste (HLW) with an arbitrary isotope mix.\n\nKey derivatives of this work are an accounting system that can account for the\n repository savings of reprocessing and the ability to compare direct disposal to\n reprocessing with varying separation schemes. From this work, it was determined that the\n current mass-based accounting system for HLW disposal costs can be significantly\n different than the length-based accounting system proposed in this work when advanced\n reprocessing schemes are implemented. Furthermore, this work shows the length-based\n accounting system may be needed to find the disposal cost at which reprocessing is\neconomically equivalent to direct disposal.", + "abstractNote": "", "thesisType": "MS Nuclear Engineering and Engineering Physics", "university": "University of Wisconsin-Madison", "place": "Madison, WI, United States", "date": "2008", - "numPages": "90", - "language": "English", + "numPages": "", + "language": "", "shortTitle": "", "url": "", "accessDate": "", @@ -2750,13 +2918,13 @@ "Y4UI9B4X" ], "relations": {}, - "dateAdded": "2013-11-13T20:43:37Z", - "dateModified": "2020-12-30T15:01:21Z" + "dateAdded": "2016-11-27T22:05:38Z", + "dateModified": "2020-12-30T15:02:01Z" } }, { - "key": "NN84DEPH", - "version": 26426, + "key": "WR6HI27D", + "version": 26428, "library": { "type": "group", "id": 10058, @@ -2770,18 +2938,18 @@ }, "links": { "self": { - "href": "https://api.zotero.org/groups/10058/items/NN84DEPH", + "href": "https://api.zotero.org/groups/10058/items/WR6HI27D", "type": "application/json" }, "alternate": { - "href": "https://www.zotero.org/groups/10058/items/NN84DEPH", + "href": "https://www.zotero.org/groups/10058/items/WR6HI27D", "type": "text/html" }, "attachment": { - "href": 
"https://api.zotero.org/groups/10058/items/VQQSC9M8", + "href": "https://api.zotero.org/groups/10058/items/4NHVDMZP", "type": "application/json", "attachmentType": "application/pdf", - "attachmentSize": 3932950 + "attachmentSize": 742105 } }, "meta": { @@ -2796,29 +2964,29 @@ } } }, - "creatorSummary": "Hu", + "creatorSummary": "Grady", "parsedDate": "2008", "numChildren": 3 }, - "bibtex": "\n@phdthesis{hu_coupled_2008,\n\taddress = {Madison, WI, United States},\n\ttype = {{PhD} {Nuclear} {Engineering} and {Engineering} {Physics}},\n\ttitle = {Coupled {Neutronics}/{Thermal}-hydraulics {Analyses} of {Supercritical} {Water} {Reactor}},\n\tabstract = {The Supercritical Water Reactor (SCWR) is a next generation nuclear reactor concept well known for its system simplification and high thermal efficiency. The current study develops analysis capability for the U.S. reference design by extending existing LWR analysis codes and study SCWR behaviors under steady state, burnup and transient conditions.\n\nAn extended version of PARCS that can analyze SCWR in steady state is developed first. The modified code is used to demonstrate the importance of moderator heating on the neutronics behavior of U.S. SCWR design by simulating an infinite lattice of assemblies. The results show that the moderator heating leads to a more symmetric effective moderator density, and has a significant impact on the axial power shape. From this, sensitivity calculations are performed to show how the assembly performs with\nperturbations in assembly power, mass flow rate, bypass ratio and heat transfer coefficient.\n\nIn order to study transient and flow distribution between assemblies in SCWR, a coupled PARCS/RELAP5 code package specialized for current SCWR design is developed in this study. 
The variable mapping input file and related subroutines in PARCS are modified to transfer the physical properties of coolant and moderator separately between the coupled codes, and necessary code modification is also done in\nPARCS to automatically perform neutronics feedback based on not only fuel and coolant but also moderator physical properties. A finer data grid in the RELAP5 water table is adopted above the supercritical point to enable the thermal-hydrodynamics simulation in this range. A whole SCWR core model for the coupled PARCS/RELAP5 is established for this study and used in the rest of the study.\n\nFlow reversal in downward flowing moderator channels is discovered in steady state. It is due to the positive flow rate feedback to flow density change necessary for pressure balance. Choosing different orifice sizes based on corresponding assembly powers can prevent the reversal. The comparison of results from the coupled simulations with/without flow reversal shows that the reversed moderator flow introduces a large axial power peak at the bottom of the core and reduces the core reactivity. \n\nA burnup calculation shows that under the current design parameters the reactor cannot sustain criticality for one year, therefore further investigation on burnup is needed. A possible moderator reversal is found during the burnup calculation suggesting that the change in core axial power distribution during burnup should be considered while designing the various orifice sizes to prevent the reversal.\n\nA SCWR system model is developed which adds balance of the plant to the core model. Three transients are studied: loss of feedwater, loss of off-site power and loss of turbine load without scram. The results show that the maximum cladding surface temperatures satisfy the material limit. The location of maximum cladding surface temperature is not in the maximum power assembly. 
This suggests the normal hot channel analysis method may not applicable to SCWR.\n\nFuture work on sub-channel analysis, achieving full cycle burnup and more safety analyses is proposed.},\n\tlanguage = {English},\n\tschool = {University of Wisconsin-Madison},\n\tauthor = {Hu, Po},\n\tyear = {2008},\n}\n", + "bibtex": "\n@phdthesis{grady_development_2008,\n\taddress = {Madison, WI, United States},\n\ttype = {{MS} {Nuclear} {Engineering} and {Engineering} {Physics}},\n\ttitle = {Development of {Economic} {Accounting} for {Nuclear} {Waste} in {Fuel} {Cycle} {Analysis}},\n\tabstract = {This research focuses on the development of an economic model for waste\n entering a repository. This work couples a repository loading model with an economic\n accounting system to determine a cost based on repository usage.\n \n\nThe repository loading model determines the amount of repository space used by\n an arbitrary waste stream. Using the economic model in VISION.econ, the arbitrary\n waste stream can be assigned a cost. The cost for the space used is calibrated by\n computing the cost per meter of repository space if spent fuel is directly emplaced. This\n allows for accurate comparison between direct disposal and different recycling schemes.\n The length-based disposal cost accounts for fuel from different fuel types, burnups, and\n High-Level Waste (HLW) with an arbitrary isotope mix.\n\nKey derivatives of this work are an accounting system that can account for the\n repository savings of reprocessing and the ability to compare direct disposal to\n reprocessing with varying separation schemes. From this work, it was determined that the\n current mass-based accounting system for HLW disposal costs can be significantly\n different than the length-based accounting system proposed in this work when advanced\n reprocessing schemes are implemented. 
Furthermore, this work shows the length-based\n accounting system may be needed to find the disposal cost at which reprocessing is\neconomically equivalent to direct disposal.},\n\tlanguage = {English},\n\tschool = {University of Wisconsin-Madison},\n\tauthor = {Grady, Ryan},\n\tyear = {2008},\n}\n", "data": { - "key": "NN84DEPH", - "version": 26426, + "key": "WR6HI27D", + "version": 26428, "itemType": "thesis", - "title": "Coupled Neutronics/Thermal-hydraulics Analyses of Supercritical Water Reactor", + "title": "Development of Economic Accounting for Nuclear Waste in Fuel Cycle Analysis", "creators": [ { "creatorType": "author", - "firstName": "Po", - "lastName": "Hu" + "firstName": "Ryan", + "lastName": "Grady" } ], - "abstractNote": "The Supercritical Water Reactor (SCWR) is a next generation nuclear reactor concept well known for its system simplification and high thermal efficiency. The current study develops analysis capability for the U.S. reference design by extending existing LWR analysis codes and study SCWR behaviors under steady state, burnup and transient conditions.\n\nAn extended version of PARCS that can analyze SCWR in steady state is developed first. The modified code is used to demonstrate the importance of moderator heating on the neutronics behavior of U.S. SCWR design by simulating an infinite lattice of assemblies. The results show that the moderator heating leads to a more symmetric effective moderator density, and has a significant impact on the axial power shape. From this, sensitivity calculations are performed to show how the assembly performs with\nperturbations in assembly power, mass flow rate, bypass ratio and heat transfer coefficient.\n\nIn order to study transient and flow distribution between assemblies in SCWR, a coupled PARCS/RELAP5 code package specialized for current SCWR design is developed in this study. 
The variable mapping input file and related subroutines in PARCS are modified to transfer the physical properties of coolant and moderator separately between the coupled codes, and necessary code modification is also done in\nPARCS to automatically perform neutronics feedback based on not only fuel and coolant but also moderator physical properties. A finer data grid in the RELAP5 water table is adopted above the supercritical point to enable the thermal-hydrodynamics simulation in this range. A whole SCWR core model for the coupled PARCS/RELAP5 is established for this study and used in the rest of the study.\n\nFlow reversal in downward flowing moderator channels is discovered in steady state. It is due to the positive flow rate feedback to flow density change necessary for pressure balance. Choosing different orifice sizes based on corresponding assembly powers can prevent the reversal. The comparison of results from the coupled simulations with/without flow reversal shows that the reversed moderator flow introduces a large axial power peak at the bottom of the core and reduces the core reactivity. \n\nA burnup calculation shows that under the current design parameters the reactor cannot sustain criticality for one year, therefore further investigation on burnup is needed. A possible moderator reversal is found during the burnup calculation suggesting that the change in core axial power distribution during burnup should be considered while designing the various orifice sizes to prevent the reversal.\n\nA SCWR system model is developed which adds balance of the plant to the core model. Three transients are studied: loss of feedwater, loss of off-site power and loss of turbine load without scram. The results show that the maximum cladding surface temperatures satisfy the material limit. The location of maximum cladding surface temperature is not in the maximum power assembly. 
This suggests the normal hot channel analysis method may not applicable to SCWR.\n\nFuture work on sub-channel analysis, achieving full cycle burnup and more safety analyses is proposed.", - "thesisType": "PhD Nuclear Engineering and Engineering Physics", + "abstractNote": "This research focuses on the development of an economic model for waste\n entering a repository. This work couples a repository loading model with an economic\n accounting system to determine a cost based on repository usage.\n \n\nThe repository loading model determines the amount of repository space used by\n an arbitrary waste stream. Using the economic model in VISION.econ, the arbitrary\n waste stream can be assigned a cost. The cost for the space used is calibrated by\n computing the cost per meter of repository space if spent fuel is directly emplaced. This\n allows for accurate comparison between direct disposal and different recycling schemes.\n The length-based disposal cost accounts for fuel from different fuel types, burnups, and\n High-Level Waste (HLW) with an arbitrary isotope mix.\n\nKey derivatives of this work are an accounting system that can account for the\n repository savings of reprocessing and the ability to compare direct disposal to\n reprocessing with varying separation schemes. From this work, it was determined that the\n current mass-based accounting system for HLW disposal costs can be significantly\n different than the length-based accounting system proposed in this work when advanced\n reprocessing schemes are implemented. 
Furthermore, this work shows the length-based\n accounting system may be needed to find the disposal cost at which reprocessing is\neconomically equivalent to direct disposal.", + "thesisType": "MS Nuclear Engineering and Engineering Physics", "university": "University of Wisconsin-Madison", "place": "Madison, WI, United States", "date": "2008", - "numPages": "125", + "numPages": "90", "language": "English", "shortTitle": "", "url": "", @@ -2832,16 +3000,16 @@ "tags": [], "collections": [ "6259B6TV", - "34I86HPD" + "Y4UI9B4X" ], "relations": {}, - "dateAdded": "2013-11-13T21:31:58Z", - "dateModified": "2013-11-13T21:35:23Z" + "dateAdded": "2013-11-13T20:43:37Z", + "dateModified": "2020-12-30T15:01:21Z" } }, { - "key": "EGFFCEWU", - "version": 26429, + "key": "FWHZB9AQ", + "version": 30698, "library": { "type": "group", "id": 10058, @@ -2855,18 +3023,18 @@ }, "links": { "self": { - "href": "https://api.zotero.org/groups/10058/items/EGFFCEWU", + "href": "https://api.zotero.org/groups/10058/items/FWHZB9AQ", "type": "application/json" }, "alternate": { - "href": "https://www.zotero.org/groups/10058/items/EGFFCEWU", + "href": "https://www.zotero.org/groups/10058/items/FWHZB9AQ", "type": "text/html" }, "attachment": { - "href": "https://api.zotero.org/groups/10058/items/7CUDCCZX", + "href": "https://api.zotero.org/groups/10058/items/W5ZG6RTT", "type": "application/json", "attachmentType": "application/pdf", - "attachmentSize": 3388126 + "attachmentSize": 5162297 } }, "meta": { @@ -2881,32 +3049,32 @@ } } }, - "creatorSummary": "Radel", + "creatorSummary": "Edwards", "parsedDate": "2007", - "numChildren": 2 + "numChildren": 3 }, - "bibtex": "\n@phdthesis{radel_repository_2007,\n\taddress = {Madison, WI, United States},\n\ttype = {{MS} {Nuclear} {Engineering} and {Engineering} {Physics}},\n\ttitle = {Repository {Modeling} for {Fuel} {Cycle} {Scenario} {Analysis}},\n\tabstract = {This research is focused on developing a model to determine repository loading for an arbitrary 
isotopic vector based on thermal heat loads for Yucca Mountain. The model will be implemented into a fuel cycle scenario analysis code to investigate repository benefit of various fuel cycles from an integrated systems perspective.\n\nThree limiting temperature cases were previously identified from dose limits on the repository: the drift wall at emplacement and closure must remain below 200 °C and the mid- drift point must remain below 96 °C at all times. Based on a pre-existing detailed thermal model of Yucca Mountain, streamlined models were developed for these limiting cases, each with a functional form that captures the appropriate transient effects. The emplacement limit was dependent on the initial heat load as well as the rate at which the heat load was changing. The closure limit was approximated by a constant heat load limit, as the decay heat does not change rapidly near the time of closure. The model for the mid-drift limit uses superposition of individual isotope contributions to the mid-drift temperature rather than decay heat values.\n\nImplementation in the VISION systems analysis code, offers a powerful tool for studying the effects of an intergraded fuel cycle on repository loading values. A complete repository loading model has never been coupled with a fuel cycle systems code. Effects of delays in the fuel cycle, changes in separation processes, variations in reactor combinations, and other dynamic fuel cycle parameters can now be investigated using this model. \n\nResults discussed in this paper show that an increase in separation efficiency above 0.2\\% would have less than a 1\\% impact on repository loading. However, separation of Cs and Sr into an alternate waste steam results in increased loading of 285 times over a traditional once through cycle for some fuel cycle scenarios. 
The ability to have a varying time until closure in the systems model also shows a significant impact, reducing the benefit over a once through cycle from 5 times to 2.4 times because of temperature limits at closure.},\n\tlanguage = {English},\n\tschool = {University of Wisconsin-Madison},\n\tauthor = {Radel, Tracy E.},\n\tyear = {2007},\n}\n", + "bibtex": "\n@phdthesis{edwards_determination_2007,\n\taddress = {Madison, WI, United States},\n\ttype = {{PhD} {Nuclear} {Engineering} and {Engineering} {Physics}},\n\ttitle = {Determination of {Pure} {Neutron} {Radiolysis} {Yields} for use in {Chemical} {Modeling} of {Supercritical} {Water}},\n\turl = {https://www.proquest.com/pqdtglobal/docview/304770212/abstract/CAA0CBD252A34BB8PQ/7?sourcetype=Dissertations%20&%20Theses},\n\tabstract = {This work has determined pure neutron radical yields at elevated temperature and pressure up to supercritical conditions using a reactor core radiation. The data will be necessary to provides realistic conditions for material corrosion experiments for the supercritical water reactor (SCWR) through water chemistry modeling. The work has been performed at the University of Wisconsin Nuclear Reactor using an apparatus designed to transport supercritical water near the reactor core. Low LET yield data used\nin the experiment was provided by a similar project at the Notre Dame Radiation Lab.\n\nRadicals formed by radiolysis were measured through chemical scavenging reactions. The aqueous electron was measured by two methods, a reaction with N2O to produce molecular nitrogen and a reaction with SF6 to produce fluoride ions. The hydrogen radical was measured through a reaction with ethanol-D6 (CD3CD2OD) to form HD. Molecular hydrogen was measured directly. Gaseous products were measured with a mass spectrometer and ions were measured with an ion selective electrode. 
Radiation\nenergy deposition was calibrated for neutron and gamma radiation separately with a neutron activation analysis and a radiolysis experiment. Pure neutron yields were calculated by subtracting gamma contribution using the calibrated gamma energy deposition and yield results from work at the Notre Dame Radiation Laboratory.\n\nPure neutron yields have been experimentally determined for aqueous electrons from 25o to 400o C at 248 bar and for the hydrogen radical from 25o C to 350o C at 248 bar. Isothermal data has been acquired for the aqueous electron at 380o C and 400o C as a function of density. Molecular hydrogen yields were measured as a function of temperature and pressure, although there was evidence that chemical reactions with the walls of the water tubing were creating molecular hydrogen in addition to that formed\nthrough radiolysis. Critical hydrogen concentration behavior was investigated but a final result was not determined because a measurable oxygen yield was not seen at the outlet of the radiolysis loop.},\n\tlanguage = {English},\n\tschool = {University of Wisconsin-Madison},\n\tauthor = {Edwards, Eric J.},\n\tyear = {2007},\n}\n", "data": { - "key": "EGFFCEWU", - "version": 26429, + "key": "FWHZB9AQ", + "version": 30698, "itemType": "thesis", - "title": "Repository Modeling for Fuel Cycle Scenario Analysis", + "title": "Determination of Pure Neutron Radiolysis Yields for use in Chemical Modeling of Supercritical Water", "creators": [ { "creatorType": "author", - "firstName": "Tracy E.", - "lastName": "Radel" + "firstName": "Eric J.", + "lastName": "Edwards" } ], - "abstractNote": "This research is focused on developing a model to determine repository loading for an arbitrary isotopic vector based on thermal heat loads for Yucca Mountain. 
The model will be implemented into a fuel cycle scenario analysis code to investigate repository benefit of various fuel cycles from an integrated systems perspective.\n\nThree limiting temperature cases were previously identified from dose limits on the repository: the drift wall at emplacement and closure must remain below 200 °C and the mid- drift point must remain below 96 °C at all times. Based on a pre-existing detailed thermal model of Yucca Mountain, streamlined models were developed for these limiting cases, each with a functional form that captures the appropriate transient effects. The emplacement limit was dependent on the initial heat load as well as the rate at which the heat load was changing. The closure limit was approximated by a constant heat load limit, as the decay heat does not change rapidly near the time of closure. The model for the mid-drift limit uses superposition of individual isotope contributions to the mid-drift temperature rather than decay heat values.\n\nImplementation in the VISION systems analysis code, offers a powerful tool for studying the effects of an intergraded fuel cycle on repository loading values. A complete repository loading model has never been coupled with a fuel cycle systems code. Effects of delays in the fuel cycle, changes in separation processes, variations in reactor combinations, and other dynamic fuel cycle parameters can now be investigated using this model. \n\nResults discussed in this paper show that an increase in separation efficiency above 0.2% would have less than a 1% impact on repository loading. However, separation of Cs and Sr into an alternate waste steam results in increased loading of 285 times over a traditional once through cycle for some fuel cycle scenarios. 
The ability to have a varying time until closure in the systems model also shows a significant impact, reducing the benefit over a once through cycle from 5 times to 2.4 times because of temperature limits at closure.", - "thesisType": "MS Nuclear Engineering and Engineering Physics", + "abstractNote": "This work has determined pure neutron radical yields at elevated temperature and pressure up to supercritical conditions using a reactor core radiation. The data will be necessary to provides realistic conditions for material corrosion experiments for the supercritical water reactor (SCWR) through water chemistry modeling. The work has been performed at the University of Wisconsin Nuclear Reactor using an apparatus designed to transport supercritical water near the reactor core. Low LET yield data used\nin the experiment was provided by a similar project at the Notre Dame Radiation Lab.\n\nRadicals formed by radiolysis were measured through chemical scavenging reactions. The aqueous electron was measured by two methods, a reaction with N2O to produce molecular nitrogen and a reaction with SF6 to produce fluoride ions. The hydrogen radical was measured through a reaction with ethanol-D6 (CD3CD2OD) to form HD. Molecular hydrogen was measured directly. Gaseous products were measured with a mass spectrometer and ions were measured with an ion selective electrode. Radiation\nenergy deposition was calibrated for neutron and gamma radiation separately with a neutron activation analysis and a radiolysis experiment. Pure neutron yields were calculated by subtracting gamma contribution using the calibrated gamma energy deposition and yield results from work at the Notre Dame Radiation Laboratory.\n\nPure neutron yields have been experimentally determined for aqueous electrons from 25o to 400o C at 248 bar and for the hydrogen radical from 25o C to 350o C at 248 bar. Isothermal data has been acquired for the aqueous electron at 380o C and 400o C as a function of density. 
Molecular hydrogen yields were measured as a function of temperature and pressure, although there was evidence that chemical reactions with the walls of the water tubing were creating molecular hydrogen in addition to that formed\nthrough radiolysis. Critical hydrogen concentration behavior was investigated but a final result was not determined because a measurable oxygen yield was not seen at the outlet of the radiolysis loop.", + "thesisType": "PhD Nuclear Engineering and Engineering Physics", "university": "University of Wisconsin-Madison", "place": "Madison, WI, United States", "date": "2007", - "numPages": "92", + "numPages": "195", "language": "English", "shortTitle": "", - "url": "", + "url": "https://www.proquest.com/pqdtglobal/docview/304770212/abstract/CAA0CBD252A34BB8PQ/7?sourcetype=Dissertations%20&%20Theses", "accessDate": "", "archive": "", "archiveLocation": "", @@ -2917,16 +3085,16 @@ "tags": [], "collections": [ "6259B6TV", - "Y4UI9B4X" + "34I86HPD" ], "relations": {}, - "dateAdded": "2013-11-13T21:25:19Z", - "dateModified": "2020-12-30T15:01:13Z" + "dateAdded": "2013-11-13T21:28:42Z", + "dateModified": "2024-09-08T17:02:52Z" } }, { - "key": "G79X7K4R", - "version": 26428, + "key": "XAF9XZUK", + "version": 30693, "library": { "type": "group", "id": 10058, @@ -2940,18 +3108,18 @@ }, "links": { "self": { - "href": "https://api.zotero.org/groups/10058/items/G79X7K4R", + "href": "https://api.zotero.org/groups/10058/items/XAF9XZUK", "type": "application/json" }, "alternate": { - "href": "https://www.zotero.org/groups/10058/items/G79X7K4R", + "href": "https://www.zotero.org/groups/10058/items/XAF9XZUK", "type": "text/html" }, "attachment": { - "href": "https://api.zotero.org/groups/10058/items/8ICA7SW4", + "href": "https://api.zotero.org/groups/10058/items/QPWAF6EP", "type": "application/json", "attachmentType": "application/pdf", - "attachmentSize": 906940 + "attachmentSize": 1132998 } }, "meta": { @@ -2966,32 +3134,32 @@ } } }, - "creatorSummary": 
"Setter", + "creatorSummary": "Phruksarojanakun", "parsedDate": "2007", - "numChildren": 3 + "numChildren": 1 }, - "bibtex": "\n@phdthesis{setter_neutrongamma_2007,\n\taddress = {Madison, WI, United States},\n\ttype = {{MS} {Nuclear} {Engineering} and {Engineering} {Physics}},\n\ttitle = {Neutron/{Gamma} {Mixed} {Spectrum} {Radiolysis}-{Based} {Aqueous} {Dosimetry}},\n\tabstract = {This work develops a method to use an aqueous dosimeter in a mixed radiation field to determine separate measurements of neutron and gamma dose. Based on radiolysis of both Fricke and Methyl Viologen (MV) solutions, activation analysis and reactor simulation are combined to determine neutron dose and neutron radiolysis. This is subtracted from the total measured radiolysis to infer a gamma dose. The Fricke dosimeter was able to give repeatable results for the neutron and gamma doses over a number of days for a variety of shielding configurations. Impurities in the MV dosimeter prevented it from providing repeatable results, but qualitative comparison to the Fricke dosimeter indicated that it could be a viable approach. The method found that the reactor simulation, using MCNP5, can be used for accurate neutron simulations but does not account for all the source terms for gamma dose simulation. 
A neutron G-value for the Fricke dosimeter was developed by combining proton radiolysis simulations with results from MCNP5 and NJOY.},\n\tlanguage = {English},\n\tschool = {University of Wisconsin-Madison},\n\tauthor = {Setter, Timothy},\n\tyear = {2007},\n}\n", + "bibtex": "\n@phdthesis{phruksarojanakun_monte_2007,\n\taddress = {Madison, WI, United States},\n\ttype = {{PhD} {Nuclear} {Engineering} and {Engineering} {Physics}},\n\ttitle = {Monte {Carlo} {Isotopic} {Inventory} {Analysis} for {Complex} {Nuclear} {Systems}},\n\turl = {https://www.proquest.com/pqdtglobal/docview/304779499/abstract/EFCC586CE33A4BFFPQ/1?sourcetype=Dissertations%20&%20Theses},\n\tabstract = {Monte Carlo Inventory Simulation Engine or MCise is a newly developed method for calculating isotopic inventory of materials. The method offers the promise of modeling materials with complex processes and irradiation histories, which pose challenges for current deterministic tools. Monte Carlo techniques based on following the history of individual atoms allows those atoms to follow randomly determined flow paths, enter or leave the system at arbitrary locations, and be subjected to radiation or chemical processes at different points in the flow path.\n\nThe method has strong analogies to Monte Carlo neutral particle transport. The fundamental of analog method is fully developed, including considerations for simple, complex and loop flows. The validity of the analog method is demonstrated with test problems under various flow conditions. The method reproduces the results of a deterministic inventory code for\ncomparable problems. While a successful and efficient parallel implementation has permitted an inexpensive way to improve statistical precision by increasing the number of sampled atoms, this approach does not always provide the most efficient avenue for improvement. Therefore, six variance reduction tools are implemented as alternatives to improve precision\nof Monte Carlo simulations. 
Forced Reaction is designed to force an atom to undergo a predefined number of reactions in a given irradiation environment. Biased Reaction Branching is primarily focused on improving statistical results of the isotopes that are produced from rare reaction pathways. Biased Source Sampling is aimed at increasing frequencies of sampling rare initial isotopes as the starting particles. Reaction Path Splitting increases the population by splitting the atom at each reaction point, creating one new atom for each decay or transmutation product. Delta Tracking is recommended for a high-frequency pulsing to greatly reduce the computing time. Lastly, Weight Window is introduced as a strategy to decrease large deviations of weight due to the uses of variance reduction techniques.\n\nA figure of merit is necessary to evaluate the efficiency of a variance reduction technique. A number of possibilities for the figure of merit are explored, two of which offer robust figures of merit. One figure of merit is based on the relative error of a known target isotope (1/R2 T ) and another on the overall detection limit corrected by the relative error (1/Dk R2 T ). An\nautomated Adaptive Variance-reduction Adjustment (AVA) tool is developed to iteratively define necessary parameters for some variance reduction techniques in a problem with a target isotope. Initial sample problems demonstrate that AVA improves both precision and accuracy of a target result in an efficient manner.\n\nPotential applications of MCise include molten salt fueled reactors and liquid breeders in fusion blankets. As an example, the inventory analysis of an actinide fluoride eutectic liquid fuel in the In-Zinerator, a sub-critical power reactor driven by a fusion source, is examined using MCise. 
The result reassures MCise as a reliable tool for inventory analysis of complex nuclear systems.},\n\tlanguage = {English},\n\tschool = {University of Wisconsin-Madison},\n\tauthor = {Phruksarojanakun, Phiphat},\n\tyear = {2007},\n}\n", "data": { - "key": "G79X7K4R", - "version": 26428, + "key": "XAF9XZUK", + "version": 30693, "itemType": "thesis", - "title": "Neutron/Gamma Mixed Spectrum Radiolysis-Based Aqueous Dosimetry", + "title": "Monte Carlo Isotopic Inventory Analysis for Complex Nuclear Systems", "creators": [ { "creatorType": "author", - "firstName": "Timothy", - "lastName": "Setter" + "firstName": "Phiphat", + "lastName": "Phruksarojanakun" } ], - "abstractNote": "This work develops a method to use an aqueous dosimeter in a mixed radiation field to determine separate measurements of neutron and gamma dose. Based on radiolysis of both Fricke and Methyl Viologen (MV) solutions, activation analysis and reactor simulation are combined to determine neutron dose and neutron radiolysis. This is subtracted from the total measured radiolysis to infer a gamma dose. The Fricke dosimeter was able to give repeatable results for the neutron and gamma doses over a number of days for a variety of shielding configurations. Impurities in the MV dosimeter prevented it from providing repeatable results, but qualitative comparison to the Fricke dosimeter indicated that it could be a viable approach. The method found that the reactor simulation, using MCNP5, can be used for accurate neutron simulations but does not account for all the source terms for gamma dose simulation. A neutron G-value for the Fricke dosimeter was developed by combining proton radiolysis simulations with results from MCNP5 and NJOY.", - "thesisType": "MS Nuclear Engineering and Engineering Physics", + "abstractNote": "Monte Carlo Inventory Simulation Engine or MCise is a newly developed method for calculating isotopic inventory of materials. 
The method offers the promise of modeling materials with complex processes and irradiation histories, which pose challenges for current deterministic tools. Monte Carlo techniques based on following the history of individual atoms allows those atoms to follow randomly determined flow paths, enter or leave the system at arbitrary locations, and be subjected to radiation or chemical processes at different points in the flow path.\n\nThe method has strong analogies to Monte Carlo neutral particle transport. The fundamental of analog method is fully developed, including considerations for simple, complex and loop flows. The validity of the analog method is demonstrated with test problems under various flow conditions. The method reproduces the results of a deterministic inventory code for\ncomparable problems. While a successful and efficient parallel implementation has permitted an inexpensive way to improve statistical precision by increasing the number of sampled atoms, this approach does not always provide the most efficient avenue for improvement. Therefore, six variance reduction tools are implemented as alternatives to improve precision\nof Monte Carlo simulations. Forced Reaction is designed to force an atom to undergo a predefined number of reactions in a given irradiation environment. Biased Reaction Branching is primarily focused on improving statistical results of the isotopes that are produced from rare reaction pathways. Biased Source Sampling is aimed at increasing frequencies of sampling rare initial isotopes as the starting particles. Reaction Path Splitting increases the population by splitting the atom at each reaction point, creating one new atom for each decay or transmutation product. Delta Tracking is recommended for a high-frequency pulsing to greatly reduce the computing time. 
Lastly, Weight Window is introduced as a strategy to decrease large deviations of weight due to the uses of variance reduction techniques.\n\nA figure of merit is necessary to evaluate the efficiency of a variance reduction technique. A number of possibilities for the figure of merit are explored, two of which offer robust figures of merit. One figure of merit is based on the relative error of a known target isotope (1/R2 T ) and another on the overall detection limit corrected by the relative error (1/Dk R2 T ). An\nautomated Adaptive Variance-reduction Adjustment (AVA) tool is developed to iteratively define necessary parameters for some variance reduction techniques in a problem with a target isotope. Initial sample problems demonstrate that AVA improves both precision and accuracy of a target result in an efficient manner.\n\nPotential applications of MCise include molten salt fueled reactors and liquid breeders in fusion blankets. As an example, the inventory analysis of an actinide fluoride eutectic liquid fuel in the In-Zinerator, a sub-critical power reactor driven by a fusion source, is examined using MCise. 
The result reassures MCise as a reliable tool for inventory analysis of complex nuclear systems.", + "thesisType": "PhD Nuclear Engineering and Engineering Physics", "university": "University of Wisconsin-Madison", "place": "Madison, WI, United States", "date": "2007", - "numPages": "114", + "numPages": "154", "language": "English", "shortTitle": "", - "url": "", + "url": "https://www.proquest.com/pqdtglobal/docview/304779499/abstract/EFCC586CE33A4BFFPQ/1?sourcetype=Dissertations%20&%20Theses", "accessDate": "", "archive": "", "archiveLocation": "", @@ -3002,16 +3170,16 @@ "tags": [], "collections": [ "6259B6TV", - "Y4UI9B4X" + "34I86HPD" ], "relations": {}, - "dateAdded": "2013-11-13T20:49:25Z", - "dateModified": "2020-12-30T15:01:18Z" + "dateAdded": "2013-11-16T20:04:30Z", + "dateModified": "2024-09-08T17:01:40Z" } }, { - "key": "FWHZB9AQ", - "version": 26426, + "key": "EGFFCEWU", + "version": 26429, "library": { "type": "group", "id": 10058, @@ -3025,18 +3193,18 @@ }, "links": { "self": { - "href": "https://api.zotero.org/groups/10058/items/FWHZB9AQ", + "href": "https://api.zotero.org/groups/10058/items/EGFFCEWU", "type": "application/json" }, "alternate": { - "href": "https://www.zotero.org/groups/10058/items/FWHZB9AQ", + "href": "https://www.zotero.org/groups/10058/items/EGFFCEWU", "type": "text/html" }, "attachment": { - "href": "https://api.zotero.org/groups/10058/items/W5ZG6RTT", + "href": "https://api.zotero.org/groups/10058/items/7CUDCCZX", "type": "application/json", "attachmentType": "application/pdf", - "attachmentSize": 5162297 + "attachmentSize": 3388126 } }, "meta": { @@ -3051,29 +3219,29 @@ } } }, - "creatorSummary": "Edwards", + "creatorSummary": "Radel", "parsedDate": "2007", - "numChildren": 3 + "numChildren": 2 }, - "bibtex": "\n@phdthesis{edwards_determination_2007,\n\taddress = {Madison, WI, United States},\n\ttype = {{PhD} {Nuclear} {Engineering} and {Engineering} {Physics}},\n\ttitle = {Determination of {Pure} {Neutron} {Radiolysis} 
{Yields} for use in {Chemical} {Modeling} of {Supercritical} {Water}},\n\tabstract = {This work has determined pure neutron radical yields at elevated temperature and pressure up to supercritical conditions using a reactor core radiation. The data will be necessary to provides realistic conditions for material corrosion experiments for the supercritical water reactor (SCWR) through water chemistry modeling. The work has been performed at the University of Wisconsin Nuclear Reactor using an apparatus designed to transport supercritical water near the reactor core. Low LET yield data used\nin the experiment was provided by a similar project at the Notre Dame Radiation Lab.\n\nRadicals formed by radiolysis were measured through chemical scavenging reactions. The aqueous electron was measured by two methods, a reaction with N2O to produce molecular nitrogen and a reaction with SF6 to produce fluoride ions. The hydrogen radical was measured through a reaction with ethanol-D6 (CD3CD2OD) to form HD. Molecular hydrogen was measured directly. Gaseous products were measured with a mass spectrometer and ions were measured with an ion selective electrode. Radiation\nenergy deposition was calibrated for neutron and gamma radiation separately with a neutron activation analysis and a radiolysis experiment. Pure neutron yields were calculated by subtracting gamma contribution using the calibrated gamma energy deposition and yield results from work at the Notre Dame Radiation Laboratory.\n\nPure neutron yields have been experimentally determined for aqueous electrons from 25o to 400o C at 248 bar and for the hydrogen radical from 25o C to 350o C at 248 bar. Isothermal data has been acquired for the aqueous electron at 380o C and 400o C as a function of density. 
Molecular hydrogen yields were measured as a function of temperature and pressure, although there was evidence that chemical reactions with the walls of the water tubing were creating molecular hydrogen in addition to that formed\nthrough radiolysis. Critical hydrogen concentration behavior was investigated but a final result was not determined because a measurable oxygen yield was not seen at the outlet of the radiolysis loop.},\n\tlanguage = {English},\n\tschool = {University of Wisconsin-Madison},\n\tauthor = {Edwards, Eric J.},\n\tyear = {2007},\n}\n", + "bibtex": "\n@phdthesis{radel_repository_2007,\n\taddress = {Madison, WI, United States},\n\ttype = {{MS} {Nuclear} {Engineering} and {Engineering} {Physics}},\n\ttitle = {Repository {Modeling} for {Fuel} {Cycle} {Scenario} {Analysis}},\n\tabstract = {This research is focused on developing a model to determine repository loading for an arbitrary isotopic vector based on thermal heat loads for Yucca Mountain. The model will be implemented into a fuel cycle scenario analysis code to investigate repository benefit of various fuel cycles from an integrated systems perspective.\n\nThree limiting temperature cases were previously identified from dose limits on the repository: the drift wall at emplacement and closure must remain below 200 °C and the mid- drift point must remain below 96 °C at all times. Based on a pre-existing detailed thermal model of Yucca Mountain, streamlined models were developed for these limiting cases, each with a functional form that captures the appropriate transient effects. The emplacement limit was dependent on the initial heat load as well as the rate at which the heat load was changing. The closure limit was approximated by a constant heat load limit, as the decay heat does not change rapidly near the time of closure. 
The model for the mid-drift limit uses superposition of individual isotope contributions to the mid-drift temperature rather than decay heat values.\n\nImplementation in the VISION systems analysis code, offers a powerful tool for studying the effects of an intergraded fuel cycle on repository loading values. A complete repository loading model has never been coupled with a fuel cycle systems code. Effects of delays in the fuel cycle, changes in separation processes, variations in reactor combinations, and other dynamic fuel cycle parameters can now be investigated using this model. \n\nResults discussed in this paper show that an increase in separation efficiency above 0.2\\% would have less than a 1\\% impact on repository loading. However, separation of Cs and Sr into an alternate waste steam results in increased loading of 285 times over a traditional once through cycle for some fuel cycle scenarios. The ability to have a varying time until closure in the systems model also shows a significant impact, reducing the benefit over a once through cycle from 5 times to 2.4 times because of temperature limits at closure.},\n\tlanguage = {English},\n\tschool = {University of Wisconsin-Madison},\n\tauthor = {Radel, Tracy E.},\n\tyear = {2007},\n}\n", "data": { - "key": "FWHZB9AQ", - "version": 26426, + "key": "EGFFCEWU", + "version": 26429, "itemType": "thesis", - "title": "Determination of Pure Neutron Radiolysis Yields for use in Chemical Modeling of Supercritical Water", + "title": "Repository Modeling for Fuel Cycle Scenario Analysis", "creators": [ { "creatorType": "author", - "firstName": "Eric J.", - "lastName": "Edwards" + "firstName": "Tracy E.", + "lastName": "Radel" } ], - "abstractNote": "This work has determined pure neutron radical yields at elevated temperature and pressure up to supercritical conditions using a reactor core radiation. 
The data will be necessary to provides realistic conditions for material corrosion experiments for the supercritical water reactor (SCWR) through water chemistry modeling. The work has been performed at the University of Wisconsin Nuclear Reactor using an apparatus designed to transport supercritical water near the reactor core. Low LET yield data used\nin the experiment was provided by a similar project at the Notre Dame Radiation Lab.\n\nRadicals formed by radiolysis were measured through chemical scavenging reactions. The aqueous electron was measured by two methods, a reaction with N2O to produce molecular nitrogen and a reaction with SF6 to produce fluoride ions. The hydrogen radical was measured through a reaction with ethanol-D6 (CD3CD2OD) to form HD. Molecular hydrogen was measured directly. Gaseous products were measured with a mass spectrometer and ions were measured with an ion selective electrode. Radiation\nenergy deposition was calibrated for neutron and gamma radiation separately with a neutron activation analysis and a radiolysis experiment. Pure neutron yields were calculated by subtracting gamma contribution using the calibrated gamma energy deposition and yield results from work at the Notre Dame Radiation Laboratory.\n\nPure neutron yields have been experimentally determined for aqueous electrons from 25o to 400o C at 248 bar and for the hydrogen radical from 25o C to 350o C at 248 bar. Isothermal data has been acquired for the aqueous electron at 380o C and 400o C as a function of density. Molecular hydrogen yields were measured as a function of temperature and pressure, although there was evidence that chemical reactions with the walls of the water tubing were creating molecular hydrogen in addition to that formed\nthrough radiolysis. 
Critical hydrogen concentration behavior was investigated but a final result was not determined because a measurable oxygen yield was not seen at the outlet of the radiolysis loop.", - "thesisType": "PhD Nuclear Engineering and Engineering Physics", + "abstractNote": "This research is focused on developing a model to determine repository loading for an arbitrary isotopic vector based on thermal heat loads for Yucca Mountain. The model will be implemented into a fuel cycle scenario analysis code to investigate repository benefit of various fuel cycles from an integrated systems perspective.\n\nThree limiting temperature cases were previously identified from dose limits on the repository: the drift wall at emplacement and closure must remain below 200 °C and the mid- drift point must remain below 96 °C at all times. Based on a pre-existing detailed thermal model of Yucca Mountain, streamlined models were developed for these limiting cases, each with a functional form that captures the appropriate transient effects. The emplacement limit was dependent on the initial heat load as well as the rate at which the heat load was changing. The closure limit was approximated by a constant heat load limit, as the decay heat does not change rapidly near the time of closure. The model for the mid-drift limit uses superposition of individual isotope contributions to the mid-drift temperature rather than decay heat values.\n\nImplementation in the VISION systems analysis code, offers a powerful tool for studying the effects of an intergraded fuel cycle on repository loading values. A complete repository loading model has never been coupled with a fuel cycle systems code. Effects of delays in the fuel cycle, changes in separation processes, variations in reactor combinations, and other dynamic fuel cycle parameters can now be investigated using this model. 
\n\nResults discussed in this paper show that an increase in separation efficiency above 0.2% would have less than a 1% impact on repository loading. However, separation of Cs and Sr into an alternate waste steam results in increased loading of 285 times over a traditional once through cycle for some fuel cycle scenarios. The ability to have a varying time until closure in the systems model also shows a significant impact, reducing the benefit over a once through cycle from 5 times to 2.4 times because of temperature limits at closure.", + "thesisType": "MS Nuclear Engineering and Engineering Physics", "university": "University of Wisconsin-Madison", "place": "Madison, WI, United States", "date": "2007", - "numPages": "195", + "numPages": "92", "language": "English", "shortTitle": "", "url": "", @@ -3087,16 +3255,16 @@ "tags": [], "collections": [ "6259B6TV", - "34I86HPD" + "Y4UI9B4X" ], "relations": {}, - "dateAdded": "2013-11-13T21:28:42Z", - "dateModified": "2013-11-13T21:31:49Z" + "dateAdded": "2013-11-13T21:25:19Z", + "dateModified": "2020-12-30T15:01:13Z" } }, { - "key": "XAF9XZUK", - "version": 26426, + "key": "G79X7K4R", + "version": 26428, "library": { "type": "group", "id": 10058, @@ -3110,18 +3278,18 @@ }, "links": { "self": { - "href": "https://api.zotero.org/groups/10058/items/XAF9XZUK", + "href": "https://api.zotero.org/groups/10058/items/G79X7K4R", "type": "application/json" }, "alternate": { - "href": "https://www.zotero.org/groups/10058/items/XAF9XZUK", + "href": "https://www.zotero.org/groups/10058/items/G79X7K4R", "type": "text/html" }, "attachment": { - "href": "https://api.zotero.org/groups/10058/items/QPWAF6EP", + "href": "https://api.zotero.org/groups/10058/items/8ICA7SW4", "type": "application/json", "attachmentType": "application/pdf", - "attachmentSize": 1132998 + "attachmentSize": 906940 } }, "meta": { @@ -3136,29 +3304,29 @@ } } }, - "creatorSummary": "Phruksarojanakun", + "creatorSummary": "Setter", "parsedDate": "2007", - 
"numChildren": 1 + "numChildren": 3 }, - "bibtex": "\n@phdthesis{phruksarojanakun_monte_2007,\n\taddress = {Madison, WI, United States},\n\ttype = {{PhD} {Nuclear} {Engineering} and {Engineering} {Physics}},\n\ttitle = {Monte {Carlo} {Isotopic} {Inventory} {Analysis} for {Complex} {Nuclear} {Systems}},\n\tabstract = {Monte Carlo Inventory Simulation Engine or MCise is a newly developed method for calculating isotopic inventory of materials. The method offers the promise of modeling materials with complex processes and irradiation histories, which pose challenges for current deterministic tools. Monte Carlo techniques based on following the history of individual atoms allows those atoms to follow randomly determined flow paths, enter or leave the system at arbitrary locations, and be subjected to radiation or chemical processes at different points in the flow path.\n\nThe method has strong analogies to Monte Carlo neutral particle transport. The fundamental of analog method is fully developed, including considerations for simple, complex and loop flows. The validity of the analog method is demonstrated with test problems under various flow conditions. The method reproduces the results of a deterministic inventory code for\ncomparable problems. While a successful and efficient parallel implementation has permitted an inexpensive way to improve statistical precision by increasing the number of sampled atoms, this approach does not always provide the most efficient avenue for improvement. Therefore, six variance reduction tools are implemented as alternatives to improve precision\nof Monte Carlo simulations. Forced Reaction is designed to force an atom to undergo a predefined number of reactions in a given irradiation environment. Biased Reaction Branching is primarily focused on improving statistical results of the isotopes that are produced from rare reaction pathways. 
Biased Source Sampling is aimed at increasing frequencies of sampling rare initial isotopes as the starting particles. Reaction Path Splitting increases the population by splitting the atom at each reaction point, creating one new atom for each decay or transmutation product. Delta Tracking is recommended for a high-frequency pulsing to greatly reduce the computing time. Lastly, Weight Window is introduced as a strategy to decrease large deviations of weight due to the uses of variance reduction techniques.\n\nA figure of merit is necessary to evaluate the efficiency of a variance reduction technique. A number of possibilities for the figure of merit are explored, two of which offer robust figures of merit. One figure of merit is based on the relative error of a known target isotope (1/R2 T ) and another on the overall detection limit corrected by the relative error (1/Dk R2 T ). An\nautomated Adaptive Variance-reduction Adjustment (AVA) tool is developed to iteratively define necessary parameters for some variance reduction techniques in a problem with a target isotope. Initial sample problems demonstrate that AVA improves both precision and accuracy of a target result in an efficient manner.\n\nPotential applications of MCise include molten salt fueled reactors and liquid breeders in fusion blankets. As an example, the inventory analysis of an actinide fluoride eutectic liquid fuel in the In-Zinerator, a sub-critical power reactor driven by a fusion source, is examined using MCise. 
The result reassures MCise as a reliable tool for inventory analysis of complex nuclear systems.},\n\tlanguage = {English},\n\tschool = {University of Wisconsin-Madison},\n\tauthor = {Phruksarojanakun, Phiphat},\n\tyear = {2007},\n}\n", + "bibtex": "\n@phdthesis{setter_neutrongamma_2007,\n\taddress = {Madison, WI, United States},\n\ttype = {{MS} {Nuclear} {Engineering} and {Engineering} {Physics}},\n\ttitle = {Neutron/{Gamma} {Mixed} {Spectrum} {Radiolysis}-{Based} {Aqueous} {Dosimetry}},\n\tabstract = {This work develops a method to use an aqueous dosimeter in a mixed radiation field to determine separate measurements of neutron and gamma dose. Based on radiolysis of both Fricke and Methyl Viologen (MV) solutions, activation analysis and reactor simulation are combined to determine neutron dose and neutron radiolysis. This is subtracted from the total measured radiolysis to infer a gamma dose. The Fricke dosimeter was able to give repeatable results for the neutron and gamma doses over a number of days for a variety of shielding configurations. Impurities in the MV dosimeter prevented it from providing repeatable results, but qualitative comparison to the Fricke dosimeter indicated that it could be a viable approach. The method found that the reactor simulation, using MCNP5, can be used for accurate neutron simulations but does not account for all the source terms for gamma dose simulation. 
A neutron G-value for the Fricke dosimeter was developed by combining proton radiolysis simulations with results from MCNP5 and NJOY.},\n\tlanguage = {English},\n\tschool = {University of Wisconsin-Madison},\n\tauthor = {Setter, Timothy},\n\tyear = {2007},\n}\n", "data": { - "key": "XAF9XZUK", - "version": 26426, + "key": "G79X7K4R", + "version": 26428, "itemType": "thesis", - "title": "Monte Carlo Isotopic Inventory Analysis for Complex Nuclear Systems", + "title": "Neutron/Gamma Mixed Spectrum Radiolysis-Based Aqueous Dosimetry", "creators": [ { "creatorType": "author", - "firstName": "Phiphat", - "lastName": "Phruksarojanakun" + "firstName": "Timothy", + "lastName": "Setter" } ], - "abstractNote": "Monte Carlo Inventory Simulation Engine or MCise is a newly developed method for calculating isotopic inventory of materials. The method offers the promise of modeling materials with complex processes and irradiation histories, which pose challenges for current deterministic tools. Monte Carlo techniques based on following the history of individual atoms allows those atoms to follow randomly determined flow paths, enter or leave the system at arbitrary locations, and be subjected to radiation or chemical processes at different points in the flow path.\n\nThe method has strong analogies to Monte Carlo neutral particle transport. The fundamental of analog method is fully developed, including considerations for simple, complex and loop flows. The validity of the analog method is demonstrated with test problems under various flow conditions. The method reproduces the results of a deterministic inventory code for\ncomparable problems. While a successful and efficient parallel implementation has permitted an inexpensive way to improve statistical precision by increasing the number of sampled atoms, this approach does not always provide the most efficient avenue for improvement. 
Therefore, six variance reduction tools are implemented as alternatives to improve precision\nof Monte Carlo simulations. Forced Reaction is designed to force an atom to undergo a predefined number of reactions in a given irradiation environment. Biased Reaction Branching is primarily focused on improving statistical results of the isotopes that are produced from rare reaction pathways. Biased Source Sampling is aimed at increasing frequencies of sampling rare initial isotopes as the starting particles. Reaction Path Splitting increases the population by splitting the atom at each reaction point, creating one new atom for each decay or transmutation product. Delta Tracking is recommended for a high-frequency pulsing to greatly reduce the computing time. Lastly, Weight Window is introduced as a strategy to decrease large deviations of weight due to the uses of variance reduction techniques.\n\nA figure of merit is necessary to evaluate the efficiency of a variance reduction technique. A number of possibilities for the figure of merit are explored, two of which offer robust figures of merit. One figure of merit is based on the relative error of a known target isotope (1/R2 T ) and another on the overall detection limit corrected by the relative error (1/Dk R2 T ). An\nautomated Adaptive Variance-reduction Adjustment (AVA) tool is developed to iteratively define necessary parameters for some variance reduction techniques in a problem with a target isotope. Initial sample problems demonstrate that AVA improves both precision and accuracy of a target result in an efficient manner.\n\nPotential applications of MCise include molten salt fueled reactors and liquid breeders in fusion blankets. As an example, the inventory analysis of an actinide fluoride eutectic liquid fuel in the In-Zinerator, a sub-critical power reactor driven by a fusion source, is examined using MCise. 
The result reassures MCise as a reliable tool for inventory analysis of complex nuclear systems.", - "thesisType": "PhD Nuclear Engineering and Engineering Physics", + "abstractNote": "This work develops a method to use an aqueous dosimeter in a mixed radiation field to determine separate measurements of neutron and gamma dose. Based on radiolysis of both Fricke and Methyl Viologen (MV) solutions, activation analysis and reactor simulation are combined to determine neutron dose and neutron radiolysis. This is subtracted from the total measured radiolysis to infer a gamma dose. The Fricke dosimeter was able to give repeatable results for the neutron and gamma doses over a number of days for a variety of shielding configurations. Impurities in the MV dosimeter prevented it from providing repeatable results, but qualitative comparison to the Fricke dosimeter indicated that it could be a viable approach. The method found that the reactor simulation, using MCNP5, can be used for accurate neutron simulations but does not account for all the source terms for gamma dose simulation. 
A neutron G-value for the Fricke dosimeter was developed by combining proton radiolysis simulations with results from MCNP5 and NJOY.", + "thesisType": "MS Nuclear Engineering and Engineering Physics", "university": "University of Wisconsin-Madison", "place": "Madison, WI, United States", "date": "2007", - "numPages": "154", + "numPages": "114", "language": "English", "shortTitle": "", "url": "", @@ -3172,11 +3340,11 @@ "tags": [], "collections": [ "6259B6TV", - "34I86HPD" + "Y4UI9B4X" ], "relations": {}, - "dateAdded": "2013-11-16T20:04:30Z", - "dateModified": "2013-11-16T20:07:48Z" + "dateAdded": "2013-11-13T20:49:25Z", + "dateModified": "2020-12-30T15:01:18Z" } }, { @@ -3701,7 +3869,7 @@ }, { "key": "TIVQ7GSS", - "version": 26426, + "version": 30710, "library": { "type": "group", "id": 10058, @@ -3756,10 +3924,10 @@ "parsedDate": "1999-04", "numChildren": 1 }, - "bibtex": "\n@phdthesis{wilson_alara:_1999,\n\taddress = {Madison, WI, United States},\n\ttype = {{PhD} {Nuclear} {Engineering} and {Engineering} {Physics}},\n\ttitle = {{ALARA}: {Analytic} and {Laplacian} {Adaptive} {Radioactivity} {Analysis}},\n\tschool = {University of Wisconsin-Madison},\n\tauthor = {Wilson, P.P.H.},\n\tmonth = apr,\n\tyear = {1999},\n\tkeywords = {ALARA, Accuracy, Linear Chains, Neutron Irradiation, Speed},\n}\n", + "bibtex": "\n@phdthesis{wilson_alara_1999,\n\taddress = {Madison, WI, United States},\n\ttype = {{PhD} {Nuclear} {Engineering} and {Engineering} {Physics}},\n\ttitle = {{ALARA}: {Analytic} and {Laplacian} {Adaptive} {Radioactivity} {Analysis}},\n\turl = {https://www.proquest.com/pqdtglobal/docview/304538704/abstract/340332EB12864F9CPQ/7?sourcetype=Dissertations%20&%20Theses},\n\tabstract = {While many codes have been written to compute the induced activation and changes in composition caused by neutron irradiation, most of those which are still being updated are only slowly adding functionality and not improving the accuracy, speed and usability of their existing 
methods. ALARA moves forward in all four of these areas, with primary importance being placed on the accuracy and speed of solution.\n\nBy carefully analyzing the various ways to model the physical system, the methods to solve the mathematical problem and the interaction between these two issues, ALARA chooses an optimum combination to achieve high accuracy, fast computation, and enhanced versatility and ease of use. In addition to a set of base features, standard to any activation code, ALARA offers a number of extensions, including arbitrary hierarchical irradiation schedules and a form of reverse problem for calculating the detailed activation of specific isotopes.\nThe physical system is modeled using advanced linear chains, which include the contributions from straightened loops in the reaction scheme, while the truncation philosophy minimizes the discrepancies between the model and the real problem. The mathematical method is then adaptively chosen based on the characteristics of each linear chain to use analytically exact methods when possible and an accurate expansion technique otherwise.\n\nALARA has been successfully validated against established fusion activation codes using a standard activation benchmark problem. In addition to demonstrating ALARA's accuracy, this validation exercise has demonstrated its speed. 
Furthermore, by extending the benchmark problem to validate its advanced features, ALARA's flexibility has been proven.\n\nWith its modern computational techniques and continuing development, it is hoped that ALARA will become a widely used code for the activation analysis of nuclear systems.},\n\tlanguage = {English},\n\tschool = {University of Wisconsin-Madison},\n\tauthor = {Wilson, P.P.H.},\n\tmonth = apr,\n\tyear = {1999},\n\tkeywords = {ALARA, Accuracy, Linear Chains, Neutron Irradiation, Speed},\n}\n", "data": { "key": "TIVQ7GSS", - "version": 26426, + "version": 30710, "itemType": "thesis", "title": "ALARA: Analytic and Laplacian Adaptive Radioactivity Analysis", "creators": [ @@ -3769,15 +3937,15 @@ "lastName": "Wilson" } ], - "abstractNote": "", + "abstractNote": "While many codes have been written to compute the induced activation and changes in composition caused by neutron irradiation, most of those which are still being updated are only slowly adding functionality and not improving the accuracy, speed and usability of their existing methods. ALARA moves forward in all four of these areas, with primary importance being placed on the accuracy and speed of solution.\n\nBy carefully analyzing the various ways to model the physical system, the methods to solve the mathematical problem and the interaction between these two issues, ALARA chooses an optimum combination to achieve high accuracy, fast computation, and enhanced versatility and ease of use. In addition to a set of base features, standard to any activation code, ALARA offers a number of extensions, including arbitrary hierarchical irradiation schedules and a form of reverse problem for calculating the detailed activation of specific isotopes.\nThe physical system is modeled using advanced linear chains, which include the contributions from straightened loops in the reaction scheme, while the truncation philosophy minimizes the discrepancies between the model and the real problem. 
The mathematical method is then adaptively chosen based on the characteristics of each linear chain to use analytically exact methods when possible and an accurate expansion technique otherwise.\n\nALARA has been successfully validated against established fusion activation codes using a standard activation benchmark problem. In addition to demonstrating ALARA's accuracy, this validation exercise has demonstrated its speed. Furthermore, by extending the benchmark problem to validate its advanced features, ALARA's flexibility has been proven.\n\nWith its modern computational techniques and continuing development, it is hoped that ALARA will become a widely used code for the activation analysis of nuclear systems.", "thesisType": "PhD Nuclear Engineering and Engineering Physics", "university": "University of Wisconsin-Madison", "place": "Madison, WI, United States", "date": "Apr 1999", - "numPages": "", - "language": "", + "numPages": "134", + "language": "English", "shortTitle": "", - "url": "", + "url": "https://www.proquest.com/pqdtglobal/docview/304538704/abstract/340332EB12864F9CPQ/7?sourcetype=Dissertations%20&%20Theses", "accessDate": "", "archive": "", "archiveLocation": "", @@ -3812,12 +3980,12 @@ "dc:replaces": "http://zotero.org/groups/10058/items/V97ZAT44" }, "dateAdded": "2012-06-20T15:13:39Z", - "dateModified": "2018-02-17T23:44:03Z" + "dateModified": "2024-09-08T17:18:31Z" } }, { "key": "H3CZQSKG", - "version": 26426, + "version": 30708, "library": { "type": "group", "id": 10058, @@ -3861,10 +4029,10 @@ "parsedDate": "1999", "numChildren": 1 }, - "bibtex": "\n@phdthesis{wilson_neutronics_1999,\n\taddress = {Karlsruhe, Germany},\n\ttype = {Dr.-{Ing}. {Maschinenbau}},\n\ttitle = {Neutronics of the {IFMIF} {Neutron} {Source}: {Development} and {Analysis}},\n\tschool = {Technical University of Karlsruhe},\n\tauthor = {Wilson, Paul P. 
H.},\n\tyear = {1999},\n\tnote = {Maschinenbau; Nuclear Engineering Responses PPHW UW Thesis Ref \\#25 FZKA-6218},\n\tkeywords = {ALARA, Accelerator-Driven, Deuterium, High Flux Test Module (HFTM), High Flux Test Region (HFTR), International Fusion Materials Irradiation Facility (IFMIF), Irradiation, Liquid Lithium, McDeLicious Code, Monte Carlo Neutron Transport Code, Neutron, damChar},\n}\n", + "bibtex": "\n@phdthesis{wilson_neutronics_1999,\n\taddress = {Karlsruhe, Germany},\n\ttype = {Dr.-{Ing}. {Maschinenbau}},\n\ttitle = {Neutronics of the {IFMIF} {Neutron} {Source}: {Development} and {Analysis}},\n\turl = {https://publikationen.bibliothek.kit.edu/3199},\n\tschool = {Technical University of Karlsruhe},\n\tauthor = {Wilson, Paul P. H.},\n\tyear = {1999},\n\tnote = {Maschinenbau; Nuclear Engineering Responses PPHW UW Thesis Ref \\#25 FZKA-6218},\n\tkeywords = {ALARA, Accelerator-Driven, Deuterium, High Flux Test Module (HFTM), High Flux Test Region (HFTR), International Fusion Materials Irradiation Facility (IFMIF), Irradiation, Liquid Lithium, McDeLicious Code, Monte Carlo Neutron Transport Code, Neutron, damChar},\n}\n", "data": { "key": "H3CZQSKG", - "version": 26426, + "version": 30708, "itemType": "thesis", "title": "Neutronics of the IFMIF Neutron Source: Development and Analysis", "creators": [ @@ -3882,7 +4050,7 @@ "numPages": "", "language": "", "shortTitle": "", - "url": "", + "url": "https://publikationen.bibliothek.kit.edu/3199", "accessDate": "", "archive": "", "archiveLocation": "", @@ -3935,7 +4103,7 @@ ], "relations": {}, "dateAdded": "2012-06-11T19:05:08Z", - "dateModified": "2018-02-17T23:44:23Z" + "dateModified": "2024-09-08T17:07:22Z" } }, { @@ -4028,5 +4196,60 @@ "dateAdded": "2012-06-20T15:43:37Z", "dateModified": "2020-12-30T15:00:05Z" } + }, + { + "key": "RMRXW7ZL", + "version": 29692, + "library": { + "type": "group", + "id": 10058, + "name": "CNERG", + "links": { + "alternate": { + "href": "https://www.zotero.org/groups/10058", + 
"type": "text/html" + } + } + }, + "links": { + "self": { + "href": "https://api.zotero.org/groups/10058/items/RMRXW7ZL", + "type": "application/json" + }, + "alternate": { + "href": "https://www.zotero.org/groups/10058/items/RMRXW7ZL", + "type": "text/html" + }, + "up": { + "href": "https://api.zotero.org/groups/10058/items/T3G87978", + "type": "application/json" + } + }, + "meta": { + "createdByUser": { + "id": 112658, + "username": "gonuke", + "name": "", + "links": { + "alternate": { + "href": "https://www.zotero.org/gonuke", + "type": "text/html" + } + } + }, + "numChildren": 0 + }, + "bibtex": "\n\n", + "data": { + "key": "RMRXW7ZL", + "version": 29692, + "parentItem": "T3G87978", + "itemType": "note", + "note": "

Advisor: Paul P.H. Wilson.;Ph.D. University of Wisconsin--Madison 2023.;Includes bibliographical references (pages 140-145).

", + "tags": [], + "relations": {}, + "dateAdded": "2024-03-18T22:26:41Z", + "dateModified": "2024-03-18T22:26:41Z" + } } ] \ No newline at end of file diff --git a/_data/zotero.datestamp b/_data/zotero.datestamp index 70b40e83..10e63bb9 100644 --- a/_data/zotero.datestamp +++ b/_data/zotero.datestamp @@ -1 +1 @@ -Sun Sep 8 06:28:38 UTC 2024 +Sun Sep 15 06:28:48 UTC 2024